/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/thread_info.h>
#include <asm/page.h>
#include <asm/errno.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

#define KERNEL_XDS MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS  MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS    MAKE_MM_SEG(TASK_SIZE)

#define get_ds()  (KERNEL_DS)
#define get_fs()  (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
#define __kernel_ds_p() (current_thread_info()->addr_limit.seg == 0x9FFFFFFF)

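/*
 * Illustrative sketch, not part of the original header: the classic
 * pattern for temporarily widening the address limit so that routines
 * expecting __user pointers can operate on kernel buffers.  The example
 * function and its use of vfs_read() are assumptions for illustration.
 */
#if 0
static void example_read_into_kernel_buffer(struct file *file, void *kbuf,
					    size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);		/* kbuf is a kernel pointer */
	vfs_read(file, (char __user *)kbuf, count, pos);
	set_fs(old_fs);			/* always restore the old limit */
}
#endif
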
#define segment_eq(a, b) ((a).seg == (b).seg)

#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
	int flag = 1, tmp;

	asm("	add	%3,%1	\n"	/* set C-flag if addr + size > 4Gb */
	    "	bcs	0f	\n"
	    "	cmp	%4,%1	\n"	/* jump if addr+size>limit (error) */
	    "	bhi	0f	\n"
	    "	clr	%0	\n"	/* mark okay */
	    "0:			\n"
	    : "=r"(flag), "=&r"(tmp)
	    : "1"(addr), "ir"(size),
	      "r"(current_thread_info()->addr_limit.seg), "0"(flag)
	    : "cc"
	    );

	return flag;
}
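
/*
 * Equivalent C for the check above (a sketch for clarity; the asm form
 * is what is actually compiled): fail if addr + size wraps past 4GB or
 * runs beyond the current address limit.
 */
#if 0
static inline int ___range_ok_c(unsigned long addr, unsigned int size)
{
	unsigned long limit = current_thread_info()->addr_limit.seg;

	if (addr + size < addr)		/* carry: wrapped past 4GB */
		return 1;
	if (addr + size > limit)	/* beyond the address limit */
		return 1;
	return 0;			/* 0 means the range is okay */
}
#endif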

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)

static inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}


/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if no exception fixup was found, non-zero otherwise. */
extern int fixup_exception(struct pt_regs *regs);
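
/*
 * Conceptual sketch of the fixup path (the real implementation lives
 * elsewhere in the arch tree; this is an illustration under that
 * assumption): on a fault, the faulting PC is looked up in __ex_table
 * and, if found, execution resumes at the recorded fixup address.
 */
#if 0
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;	/* resume in the fixup code */
		return 1;
	}
	return 0;
}
#endif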

#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking; they are useful
 * when doing multiple accesses to the same area (the caller has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
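
/*
 * Usage sketch (illustrative only; sys_example is hypothetical): the
 * checked forms validate the pointer themselves, while the __ forms
 * rely on a prior access_ok() check.
 */
#if 0
asmlinkage long sys_example(int __user *uptr)
{
	int value;

	if (get_user(value, uptr))	/* checked; fails with -EFAULT */
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
		return -EFAULT;
	return __put_user(value + 1, uptr); /* unchecked after access_ok() */
}
#endif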

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized for
 * the common case of simply returning from the function in which
 * xxx_ret is used.
 */

#define put_user_ret(x, ptr, ret) \
	({ if (put_user((x), (ptr))) return (ret); })
#define get_user_ret(x, ptr, ret) \
	({ if (get_user((x), (ptr))) return (ret); })
#define __put_user_ret(x, ptr, ret) \
	({ if (__put_user((x), (ptr))) return (ret); })
#define __get_user_ret(x, ptr, ret) \
	({ if (__get_user((x), (ptr))) return (ret); })
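
/*
 * Sketch of the _ret forms (sys_example_ret is hypothetical): note that
 * the macro returns from the *enclosing* function on a fault.
 */
#if 0
asmlinkage long sys_example_ret(int __user *uptr)
{
	put_user_ret(42, uptr, -EFAULT); /* returns -EFAULT on a fault */
	return 0;
}
#endif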

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	unsigned long __gu_addr;				\
	int __gu_err;						\
	__gu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1: {						\
		unsigned char __gu_val;				\
		__get_user_asm("bu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __gu_val;			\
		__get_user_asm("hu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __gu_val;				\
		__get_user_asm("");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	int _e;							\
	if (likely(__access_ok((unsigned long) (ptr), (size))))\
		_e = __get_user_nocheck((x), (ptr), (size));	\
	else {							\
		_e = -EFAULT;					\
		(x) = (__typeof__(x))0;				\
	}							\
	_e;							\
})

#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%2,%1\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

extern int __get_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	union {						\
		__typeof__(*(ptr)) val;			\
		u32 bits[2];				\
	} __pu_val;					\
	unsigned long __pu_addr;			\
	int __pu_err;					\
	__pu_val.val = (x);				\
	__pu_addr = (unsigned long) (ptr);		\
	switch (size) {					\
	case 1:  __put_user_asm("bu"); break;		\
	case 2:  __put_user_asm("hu"); break;		\
	case 4:  __put_user_asm("");   break;		\
	case 8:  __put_user_asm8();    break;		\
	default: __pu_err = __put_user_unknown(); break;\
	}						\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)			\
({							\
	union {						\
		__typeof__(*(ptr)) val;			\
		u32 bits[2];				\
	} __pu_val;					\
	unsigned long __pu_addr;			\
	int __pu_err;					\
	__pu_val.val = (x);				\
	__pu_addr = (unsigned long) (ptr);		\
	if (likely(__access_ok(__pu_addr, size))) {	\
		switch (size) {				\
		case 1:  __put_user_asm("bu"); break;	\
		case 2:  __put_user_asm("hu"); break;	\
		case 4:  __put_user_asm("");   break;	\
		case 8:  __put_user_asm8();    break;	\
		default: __pu_err = __put_user_unknown(); break; \
		}					\
	} else {					\
		__pu_err = -EFAULT;			\
	}						\
	__pu_err;					\
})
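
/*
 * Sketch (example_put_u64 is hypothetical): put_user() resolves to the
 * size-specific cases above, including the 8-byte case; note that the
 * get_user() side has no 8-byte case, so a u64 fetch has to go through
 * copy_from_user() instead.
 */
#if 0
static int example_put_u64(u64 __user *uptr, u64 v)
{
	return put_user(v, uptr);	/* ends up in __put_user_asm8() */
}
#endif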

#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%1,%2\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT)					\
		);						\
})

#define __put_user_asm8()					\
({								\
	asm volatile(						\
		"1:	mov	%1,%3\n"			\
		"2:	mov	%2,%4\n"			\
		"	mov	0,%0\n"				\
		"3:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"4:\n"						\
		"	mov		%5,%0\n"		\
		"	jmp		3b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 4b\n"		\
		"	.long		2b, 4b\n"		\
		"	.previous\n"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]),\
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),	\
		  "i" (-EFAULT)					\
		);						\
})

extern int __put_user_unknown(void);


/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy.  On a fault, "size" is left holding the
 * number of bytes not copied. */
#define __copy_user(to, from, size)			\
do {							\
	if (size) {					\
		void *__to = to;			\
		const void *__from = from;		\
		int w;					\
		asm volatile(				\
			"0:	movbu	(%0),%3;\n"	\
			"1:	movbu	%3,(%1);\n"	\
			"	inc	%0;\n"		\
			"	inc	%1;\n"		\
			"	add	-1,%2;\n"	\
			"	bne	0b;\n"		\
			"2:\n"				\
			"	.section .fixup,\"ax\"\n"	\
			"3:	jmp	2b\n"		\
			"	.previous\n"		\
			"	.section __ex_table,\"a\"\n"	\
			"	.balign	4\n"		\
			"	.long	0b,3b\n"	\
			"	.long	1b,3b\n"	\
			"	.previous\n"		\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");		\
	}						\
} while (0)

/* As above, but on a fault the remaining destination bytes are
 * zero-filled and "size" is left holding the number of bytes not
 * copied. */
#define __copy_user_zeroing(to, from, size)		\
do {							\
	if (size) {					\
		void *__to = to;			\
		const void *__from = from;		\
		int w;					\
		asm volatile(				\
			"0:	movbu	(%0),%3;\n"	\
			"1:	movbu	%3,(%1);\n"	\
			"	inc	%0;\n"		\
			"	inc	%1;\n"		\
			"	add	-1,%2;\n"	\
			"	bne	0b;\n"		\
			"2:\n"				\
			"	.section .fixup,\"ax\"\n"	\
			"3:\n"				\
			"	mov	%2,%0\n"	\
			"	clr	%3\n"		\
			"4:	movbu	%3,(%1);\n"	\
			"	inc	%1;\n"		\
			"	add	-1,%2;\n"	\
			"	bne	4b;\n"		\
			"	mov	%0,%2\n"	\
			"	jmp	2b\n"		\
			"	.previous\n"		\
			"	.section __ex_table,\"a\"\n"	\
			"	.balign	4\n"		\
			"	.long	0b,3b\n"	\
			"	.long	1b,3b\n"	\
			"	.previous\n"		\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");		\
	}						\
} while (0)

/* We let the __ versions of copy_from/to_user be inlined, because they're
 * often used in fast paths and have only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
					     unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}
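
/*
 * Semantics sketch (example_fetch is hypothetical): these copiers return
 * the number of bytes NOT copied, so zero means complete success; on a
 * partial copy from userspace the remainder of the kernel buffer has
 * been zero-filled by __copy_user_zeroing().
 */
#if 0
static int example_fetch(void *kbuf, const void __user *ubuf, size_t len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;		/* some bytes were left uncopied */
	return 0;
}
#endif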


#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)	\
do {						\
	asm volatile(				\
		"	mov	%0,a0;\n"	\
		"0:	movbu	(%1),d3;\n"	\
		"1:	movbu	d3,(%2);\n"	\
		"	add	-1,a0;\n"	\
		"	bne	0b;\n"		\
		"2:;"				\
		".section .fixup,\"ax\"\n"	\
		"3:	jmp	2b\n"		\
		".previous\n"			\
		".section __ex_table,\"a\"\n"	\
		"	.balign	4\n"		\
		"	.long	0b,3b\n"	\
		"	.long	1b,3b\n"	\
		".previous"			\
		:				\
		: "d"(size), "d"(to), "d"(from)	\
		: "d3", "a0");			\
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)	\
do {							\
	asm volatile(					\
		"	mov	%0,a0;\n"		\
		"0:	movbu	(%1),d3;\n"		\
		"1:	movbu	d3,(%2);\n"		\
		"	add	-1,a0;\n"		\
		"	bne	0b;\n"			\
		"2:;"					\
		".section .fixup,\"ax\"\n"		\
		"3:	jmp	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.balign	4\n"			\
		"	.long	0b,3b\n"		\
		"	.long	1b,3b\n"		\
		".previous"				\
		:					\
		: "d"(size), "d"(to), "d"(from)		\
		: "d3", "a0");				\
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
					      unsigned long n)
{
	__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
						unsigned long n)
{
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
#endif

extern unsigned long __generic_copy_to_user(void __user *, const void *,
					    unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
					      unsigned long);

#define __copy_to_user_inatomic(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))

#define __copy_to_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})
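
/*
 * The _inatomic variants omit the might_sleep() debugging check and so
 * may be used where sleeping is not allowed; the plain __copy_* forms
 * assume a sleepable context.
 */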


#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
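
/*
 * Usage sketch (example_get_name and the buffer size are hypothetical):
 * strncpy_from_user() returns the string length on success, the buffer
 * size if no NUL was found within it, or -EFAULT on a bad address.
 */
#if 0
static long example_get_name(char *kname, const char __user *uname)
{
	long len = strncpy_from_user(kname, uname, 32);

	if (len < 0)
		return len;		/* -EFAULT: bad user address */
	if (len == 32)
		return -ENAMETOOLONG;	/* no NUL within the buffer */
	return 0;
}
#endif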

#endif /* _ASM_UACCESS_H */