x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic
arch/x86/include/asm/uaccess_64.h
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

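/*
 * Dispatch to the fastest copy routine for the running CPU. The
 * alternatives mechanism patches the call target once at boot based on
 * the CPU feature flags, so there is no runtime branch here.
 */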
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

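/*
 * Checked copy from user space. When the destination object size is
 * known at compile time, a copy that would overflow it is rejected (and
 * warned about under CONFIG_DEBUG_VM). Returns the number of bytes that
 * could not be copied, so 0 means success. A typical caller looks like
 * this (sketch; 'kbuf' and 'ubuf' are hypothetical):
 *
 *	struct foo kbuf;
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */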
static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();
	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
	else
		WARN(1, "Buffer overflow detected!\n");
#endif
	return n;
}

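/*
 * Checked copy to user space; the access_ok() range check is done by the
 * out-of-line _copy_to_user(). Returns the number of bytes that could
 * not be copied.
 */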
static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();

	return _copy_to_user(dst, src, size);
}

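/*
 * Unchecked copy from user space: no access_ok() and no might_fault()
 * annotation. For constant sizes of 1/2/4/8 bytes (plus the 10- and
 * 16-byte cases) this compiles down to one or two mov instructions with
 * exception-table fixups; everything else falls back to
 * copy_user_generic().
 */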
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}

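/*
 * Store-side counterpart of __copy_from_user_nocheck(). The bare
 * asm("":::"memory") in the 10- and 16-byte cases is a compiler barrier;
 * presumably it keeps gcc from reordering the second store across the
 * error check of the first.
 */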
static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(*(u16 *)(8 + (char *)src),
			       (u16 __user *)(8 + (char __user *)dst),
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(*(u64 *)(8 + (char *)src),
			       (u64 __user *)(8 + (char __user *)dst),
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return __copy_to_user_nocheck(dst, src, size);
}

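/*
 * Copy between two user-space buffers. Each constant small size is done
 * as a __get_user_asm() into a temporary followed, only on success, by a
 * __put_user_asm() to the destination.
 */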
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

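/*
 * Atomic-context variants: the same 1/2/4/8 byte specialization as
 * __copy_{from,to}_user(), but without might_fault(), so they may be
 * called with pagefaults disabled; callers must handle a short copy.
 */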
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return __copy_to_user_nocheck(dst, src, size);
}

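/*
 * Non-temporal copy that bypasses the CPU caches; 'zerorest' selects
 * whether the unwritten tail of the destination is zeroed after a fault.
 */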
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

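/*
 * Slow-path tail handling for a copy that faulted part-way through;
 * returns the number of bytes left uncopied.
 */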
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */