[pandora-kernel.git] arch/mips/include/asm/uaccess.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#define __UA_LIMIT      0x80000000UL

#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"
#define __UA_t0         "$8"
#define __UA_t1         "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT      __ua_limit

#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"
#define __UA_t0         "$12"
#define __UA_t1         "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)


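/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern for temporarily lifting the address-limit check so that the
 * user-access helpers below accept kernel pointers:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      ... call the user-access helpers on kernel addresses ...
 *      set_fs(old_fs);
 */
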
/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)                                                 \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)                                   \
({                                                                      \
        unsigned long __addr = (unsigned long) (addr);                  \
        unsigned long __size = size;                                    \
        unsigned long __mask = mask;                                    \
        unsigned long __ok;                                             \
                                                                        \
        __chk_user_ptr(addr);                                           \
        __ok = (signed long)(__mask & (__addr | (__addr + __size) |     \
                __ua_size(__size)));                                    \
        __ok == 0;                                                      \
})

#define access_ok(type, addr, size)                                     \
        likely(__access_ok((addr), (size), __access_mask))

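/*
 * Illustrative sketch (not part of the original header): validating a
 * hypothetical user buffer "ubuf" of "len" bytes before using the
 * unchecked helpers further below:
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, len))
 *              return -EFAULT;
 */
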
/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))

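/*
 * Illustrative sketch (not part of the original header): storing an
 * int through a hypothetical user pointer "uptr"; the result is 0 or
 * -EFAULT:
 *
 *      if (put_user(42, uptr))
 *              return -EFAULT;
 */
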
/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))

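/*
 * Illustrative sketch (not part of the original header): fetching an
 * int through a hypothetical user pointer "uptr"; on fault "val" is
 * set to zero and -EFAULT is returned:
 *
 *      int val;
 *
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 */
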
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

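/*
 * Illustrative sketch (not part of the original header): the intended
 * pairing of one access_ok() check with several of the unchecked
 * accessors; "uptr", "a" and "b" are hypothetical:
 *
 *      if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *              return -EFAULT;
 *      err  = __get_user(a, &uptr[0]);
 *      err |= __get_user(b, &uptr[1]);
 */
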
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)                               \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_user_asm(val, "lb", ptr); break;                  \
        case 2: __get_user_asm(val, "lh", ptr); break;                  \
        case 4: __get_user_asm(val, "lw", ptr); break;                  \
        case 8: __GET_USER_DW(val, ptr); break;                         \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        __chk_user_ptr(ptr);                                            \
        __get_user_common((x), size, ptr);                              \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))             \
                __get_user_common((x), size, __gu_ptr);                 \
                                                                        \
        __gu_err;                                                       \
})

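/*
 * How the fixups below work: label 1 marks the instruction that may
 * fault, and the __ex_table entry pairs its address with the fixup
 * code at label 3, which loads -EFAULT into the error register and
 * branches back past the access.  On a fault, fixup_exception()
 * (declared at the bottom of this file) looks the faulting address up
 * in the table and redirects execution to the fixup.
 */
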
#define __get_user_asm(val, insn, addr)                                 \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32(val, addr)                                  \
{                                                                       \
        union {                                                         \
                unsigned long long      l;                              \
                __typeof__(*(addr))     t;                              \
        } __gu_tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     lw      %1, (%3)                                \n"     \
        "2:     lw      %D1, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
                                                                        \
        (val) = __gu_tmp.t;                                             \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __chk_user_ptr(ptr);                                            \
        __pu_val = (x);                                                 \
        switch (size) {                                                 \
        case 1: __put_user_asm("sb", ptr); break;                       \
        case 2: __put_user_asm("sh", ptr); break;                       \
        case 4: __put_user_asm("sw", ptr); break;                       \
        case 8: __PUT_USER_DW(ptr); break;                              \
        default: __put_user_unknown(); break;                           \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {         \
                switch (size) {                                         \
                case 1: __put_user_asm("sb", __pu_addr); break;         \
                case 2: __put_user_asm("sh", __pu_addr); break;         \
                case 4: __put_user_asm("sw", __pu_addr); break;         \
                case 8: __PUT_USER_DW(__pu_addr); break;                \
                default: __put_user_unknown(); break;                   \
                }                                                       \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_asm(insn, ptr)                                       \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_asm\n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_user_asm_ll32(ptr)                                        \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     sw      %2, (%3)        # __put_user_asm_ll32   \n"     \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unknown(void);

/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x, ptr)      \
        __put_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x, ptr) \
        __get_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

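/*
 * Illustrative sketch (not part of the original header): fetching a
 * 32-bit value that may not be naturally aligned, e.g. a member of a
 * packed structure at a hypothetical user pointer "uptr":
 *
 *      u32 val;
 *
 *      if (get_user_unaligned(val, uptr))
 *              return -EFAULT;
 */
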
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x, ptr) \
        __put_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x, ptr) \
        __get_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)                     \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_user_asm(val, "lb", ptr); break;                  \
        case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;       \
        case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;       \
        case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
        default: __get_user_unaligned_unknown(); break;                 \
        }                                                               \
} while (0)

#define __get_user_unaligned_nocheck(x, ptr, size)                      \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        __get_user_unaligned_common((x), size, ptr);                    \
        __gu_err;                                                       \
})

#define __get_user_unaligned_check(x, ptr, size)                        \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))             \
                __get_user_unaligned_common((x), size, __gu_ptr);       \
                                                                        \
        __gu_err;                                                       \
})

#define __get_user_unaligned_asm(val, insn, addr)                       \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)                        \
{                                                                       \
        unsigned long long __gu_tmp;                                    \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     ulw     %1, (%3)                                \n"     \
        "2:     ulw     %D1, 4(%3)                              \n"     \
        "       move    %0, $0                                  \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_nocheck(x, ptr, size)                      \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        switch (size) {                                                 \
        case 1: __put_user_asm("sb", ptr); break;                       \
        case 2: __put_user_unaligned_asm("ush", ptr); break;            \
        case 4: __put_user_unaligned_asm("usw", ptr); break;            \
        case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
        default: __put_user_unaligned_unknown(); break;                 \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_unaligned_check(x, ptr, size)                        \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {         \
                switch (size) {                                         \
                case 1: __put_user_asm("sb", __pu_addr); break;         \
                case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
                case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
                case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break;      \
                default: __put_user_unaligned_unknown(); break;         \
                }                                                       \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_unaligned_asm(insn, ptr)                             \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_user_unaligned_asm_ll32(ptr)                              \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unaligned_unknown(void);

/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination)                                       \
        "jal\t" #destination "\n\t"
#endif

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n)                              \
({                                                                      \
        register void __user *__cu_to_r __asm__("$4");                  \
        register const void *__cu_from_r __asm__("$5");                 \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(__copy_user)                                       \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)                                     \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len;                                                       \
})

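/*
 * Illustrative sketch (not part of the original header): with the
 * unchecked variant the caller supplies the access_ok() check;
 * "ubuf" and "kbuf" are hypothetical:
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*kbuf)))
 *              return -EFAULT;
 *      if (__copy_to_user(ubuf, kbuf, sizeof(*kbuf)))
 *              return -EFAULT;
 */
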
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)                            \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len;                                                       \
})

#define __copy_from_user_inatomic(to, from, n)                          \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
                                                    __cu_len);          \
        __cu_len;                                                       \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)                                       \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {               \
                might_fault();                                          \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        }                                                               \
        __cu_len;                                                       \
})

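/*
 * Illustrative sketch (not part of the original header): copying a
 * kernel structure out to a hypothetical user pointer "ubuf";
 * "struct foo_info" is hypothetical too:
 *
 *      struct foo_info info;
 *
 *      if (copy_to_user(ubuf, &info, sizeof(info)))
 *              return -EFAULT;
 */
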
#define __invoke_copy_from_user(to, from, n)                            \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_user_inatomic(to, from, n)                   \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user_inatomic)                              \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)                                   \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
                                           __cu_len);                   \
        __cu_len;                                                       \
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)                                     \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {              \
                might_fault();                                          \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        }                                                               \
        __cu_len;                                                       \
})

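/*
 * Illustrative sketch (not part of the original header): pulling a
 * request structure in from a hypothetical user pointer "ubuf"; a
 * nonzero return means part of the copy faulted:
 *
 *      struct foo_req req;
 *
 *      if (copy_from_user(&req, ubuf, sizeof(req)))
 *              return -EFAULT;
 */
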
#define __copy_in_user(to, from, n)                                     \
({                                                                      \
        void __user *__cu_to;                                           \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
                                           __cu_len);                   \
        __cu_len;                                                       \
})

#define copy_in_user(to, from, n)                                       \
({                                                                      \
        void __user *__cu_to;                                           \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&       \
                   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {       \
                might_fault();                                          \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        }                                                               \
        __cu_len;                                                       \
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
        __kernel_size_t res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, $0\n\t"
                "move\t$6, %2\n\t"
                __MODULE_JAL(__bzero)
                "move\t%0, $6"
                : "=r" (res)
                : "r" (addr), "r" (size)
                : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

        return res;
}

#define clear_user(addr, n)                                             \
({                                                                      \
        void __user * __cl_addr = (addr);                               \
        unsigned long __cl_size = (n);                                  \
        if (__cl_size && access_ok(VERIFY_WRITE,                        \
                                        __cl_addr, __cl_size))          \
                __cl_size = __clear_user(__cl_addr, __cl_size);         \
        __cl_size;                                                      \
})

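/*
 * Illustrative sketch (not part of the original header): zeroing a
 * hypothetical user buffer "ubuf" of "len" bytes; the return value is
 * the number of bytes left uncleared:
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */
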
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

        return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                "move\t$6, %3\n\t"
                __MODULE_JAL(__strncpy_from_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (__to), "r" (__from), "r" (__len)
                : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

        return res;
}

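/*
 * Illustrative sketch (not part of the original header): copying a
 * short string from a hypothetical user pointer "ustr":
 *
 *      char buf[64];
 *      long len = strncpy_from_user(buf, ustr, sizeof(buf));
 *
 *      if (len < 0)
 *              return len;
 */
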
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", __UA_t0, "$31");

        return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                __MODULE_JAL(__strlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s)
                : "$2", "$4", __UA_t0, "$31");

        return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_nocheck_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", __UA_t0, "$31");

        return res;
}

/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:   The maximum number of bytes to scan.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        __asm__ __volatile__(
                "move\t$4, %1\n\t"
                "move\t$5, %2\n\t"
                __MODULE_JAL(__strnlen_user_asm)
                "move\t%0, $2"
                : "=r" (res)
                : "r" (s), "r" (n)
                : "$2", "$4", "$5", __UA_t0, "$31");

        return res;
}

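/*
 * Illustrative sketch (not part of the original header): bounding a
 * user string before copying it; "ustr" and the limit are
 * hypothetical:
 *
 *      long len = strnlen_user(ustr, 256);
 *
 *      if (len == 0)
 *              return -EFAULT;
 */
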
struct exception_table_entry
{
        unsigned long insn;
        unsigned long nextinsn;
};

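/*
 * Each entry pairs the address of a potentially faulting user access
 * (insn) with the address of the fixup code to branch to instead
 * (nextinsn).  The __ex_table sections emitted by the macros above
 * populate this table; fixup_exception() searches it when a user
 * access faults.
 */
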
extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */