s390/uaccess: test if current->mm is set before walking page tables
arch/s390/lib/uaccess_pt.c
/*
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006, 2012
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

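/*
 * The inline assembly below must use 32-bit instructions on 31-bit
 * kernels and 64-bit instructions on 64-bit kernels: ahi/slr and
 * aghi/slgr are the add halfword immediate and subtract logical
 * mnemonics for the respective register widths.
 */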
#ifndef CONFIG_64BIT
#define AHI     "ahi"
#define SLR     "slr"
#else
#define AHI     "aghi"
#define SLR     "slgr"
#endif

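/*
 * strnlen() on a kernel address with fault handling: general register 0
 * holds the byte to search for ('\0') and the SRST (search string)
 * instruction scans at most count bytes starting at src. The result
 * includes the terminating '\0' (so an unterminated buffer yields a
 * value greater than count); on an exception the EX_TABLE entry
 * branches to label 1 and 0 is returned.
 */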
static size_t strnlen_kernel(size_t count, const char __user *src)
{
        register unsigned long reg0 asm("0") = 0UL;
        unsigned long tmp1, tmp2;

        asm volatile(
                "   la    %2,0(%1)\n"
                "   la    %3,0(%0,%1)\n"
                "  "SLR"  %0,%0\n"
                "0: srst  %3,%2\n"
                "   jo    0b\n"
                "   la    %0,1(%3)\n"   /* strnlen_kernel result includes \0 */
                "  "SLR"  %0,%1\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return count;
}

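/*
 * memcpy() with fault handling: the main loop moves 256-byte blocks
 * with MVC, and the sub-256-byte remainder is copied via EX of the
 * single-byte MVC at label 1. If a block copy faults, the byte-wise
 * loop re-copies up to the faulting byte so that the exact number of
 * uncopied bytes can be returned (0 on success).
 */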
static size_t copy_in_kernel(size_t count, void __user *to,
                             const void __user *from)
{
        unsigned long tmp1;

        asm volatile(
                "  "AHI"  %0,-1\n"
                "   jo    5f\n"
                "   bras  %3,3f\n"
                "0:"AHI"  %0,257\n"
                "1: mvc   0(1,%1),0(%2)\n"
                "   la    %1,1(%1)\n"
                "   la    %2,1(%2)\n"
                "  "AHI"  %0,-1\n"
                "   jnz   1b\n"
                "   j     5f\n"
                "2: mvc   0(256,%1),0(%2)\n"
                "   la    %1,256(%1)\n"
                "   la    %2,256(%2)\n"
                "3:"AHI"  %0,-256\n"
                "   jnm   2b\n"
                "4: ex    %0,1b-0b(%3)\n"
                "5:"SLR"  %0,%0\n"
                "6:\n"
                EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
                : : "cc", "memory");
        return count;
}

/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
#ifdef CONFIG_64BIT

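/*
 * Walk the page tables top-down: the switch enters at the uppermost
 * region table level indicated by the asce bits and falls through to
 * each lower level. A large segment entry (1 MB page) is resolved
 * directly at segment level.
 */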
static unsigned long follow_table(struct mm_struct *mm,
                                  unsigned long address, int write)
{
        unsigned long *table = (unsigned long *)__pa(mm->pgd);

        if (unlikely(address > mm->context.asce_limit - 1))
                return -0x38UL;
        switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table = table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x39UL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x3aUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x3bUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
                        return -0x10UL;
                if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
                        if (write && (*table & _SEGMENT_ENTRY_PROTECT))
                                return -0x04UL;
                        return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
                                (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
                }
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
        if (write && (*table & _PAGE_PROTECT))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#else /* CONFIG_64BIT */

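/*
 * On 31 bit the entire 2 GB address space is covered by a single
 * segment table, so the walk is only two levels deep.
 */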
static unsigned long follow_table(struct mm_struct *mm,
                                  unsigned long address, int write)
{
        unsigned long *table = (unsigned long *)__pa(mm->pgd);

        table = table + ((address >> 20) & 0x7ff);
        if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
                return -0x10UL;
        table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
        if (write && (*table & _PAGE_PROTECT))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#endif /* CONFIG_64BIT */

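/*
 * Copy between kernel and user space by software page table walk.
 * mm->page_table_lock is held so the tables cannot change underneath
 * us; on a translation failure the lock is dropped, __handle_fault()
 * tries to resolve the fault, and the walk is retried. Kernel threads
 * have no mm, so nothing can be translated and n is returned unchanged.
 * Returns the number of bytes that could not be copied.
 */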
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, size, kaddr;
        void *from, *to;

        if (!mm)
                return n;
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                kaddr = follow_table(mm, uaddr, write_user);
                if (IS_ERR_VALUE(kaddr))
                        goto fault;

                offset = uaddr & ~PAGE_MASK;
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *) kaddr;
                        from = kptr + done;
                } else {
                        from = (void *) kaddr;
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -kaddr, write_user))
                return n - done;
        goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 * Returns 0 if the fault triggered by the walk could not be resolved.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
                                                     int write)
{
        struct mm_struct *mm = current->mm;
        unsigned long kaddr;
        int rc;

retry:
        kaddr = follow_table(mm, uaddr, write);
        if (IS_ERR_VALUE(kaddr))
                goto fault;

        return kaddr;
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(uaddr, -kaddr, write);
        spin_lock(&mm->page_table_lock);
        if (!rc)
                goto retry;
        return 0;
}

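/*
 * copy_from_user() by page table walk. With KERNEL_DS the "user"
 * pointer is really a kernel address, so copy_in_kernel() is used
 * directly. As the copy_from_user() contract requires, bytes that
 * could not be copied are zeroed in the destination buffer.
 */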
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, (void __user *) to, from);
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}

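/* copy_to_user() counterpart; returns the number of bytes not copied. */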
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, to, (void __user *) from);
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

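/*
 * Clear n bytes of user memory by copying from empty_zero_page, at most
 * one page per iteration; returns the number of bytes not cleared.
 */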
static size_t clear_user_pt(size_t n, void __user *to)
{
        void *zpage = (void *) empty_zero_page;
        long done, size, ret;

        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                if (segment_eq(get_fs(), KERNEL_DS))
                        ret = copy_in_kernel(size, to, (void __user *) zpage);
                else
                        ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
                done += size;
                to += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}

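/*
 * strnlen_user() by page table walk: run strnlen() on each mapped piece
 * of the string. The result includes the terminating '\0' (and exceeds
 * count if no terminator is found); 0 is returned on an unresolvable
 * fault or if the task has no mm.
 */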
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, len, kaddr;
        size_t len_str;

        if (unlikely(!count))
                return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen_kernel(count, src);
        if (!mm)
                return 0;
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                kaddr = follow_table(mm, uaddr, 0);
                if (IS_ERR_VALUE(kaddr))
                        goto fault;

                offset = uaddr & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen((char *) kaddr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -kaddr, 0))
                return 0;
        goto retry;
}

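/*
 * strncpy_from_user() by page table walk: copy page-sized chunks and
 * use strnlen() on the copied data to detect the terminator. Returns
 * the length of the copied string without the terminating '\0', or
 * -EFAULT on a fault.
 */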
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t done, len, offset, len_str;

        if (unlikely(!count))
                return 0;
        done = 0;
        do {
                offset = (size_t)src & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                if (segment_eq(get_fs(), KERNEL_DS)) {
                        if (copy_in_kernel(len, (void __user *) dst, src))
                                return -EFAULT;
                } else {
                        if (__user_copy_pt((unsigned long) src, dst, len, 0))
                                return -EFAULT;
                }
                len_str = strnlen(dst, len);
                done += len_str;
                src += len_str;
                dst += len_str;
        } while ((len_str == len) && (done < count));
        return done;
}

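/*
 * copy_in_user() by page table walk: both the source and the
 * destination are user addresses, so each iteration translates both
 * and copies at most up to the nearer page boundary of the two.
 * Returns the number of bytes not copied.
 */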
static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_max, uaddr, done, size, error_code;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        unsigned long kaddr_to, kaddr_from;
        int write_user;

        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, to, from);
        if (!mm)
                return n;
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                write_user = 0;
                uaddr = uaddr_from;
                kaddr_from = follow_table(mm, uaddr_from, 0);
                error_code = kaddr_from;
                if (IS_ERR_VALUE(error_code))
                        goto fault;

                write_user = 1;
                uaddr = uaddr_to;
                kaddr_to = follow_table(mm, uaddr_to, 1);
                error_code = kaddr_to;
                if (IS_ERR_VALUE(error_code))
                        goto fault;

                offset_max = max(uaddr_from & ~PAGE_MASK,
                                 uaddr_to & ~PAGE_MASK);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *) kaddr_to, (void *) kaddr_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -error_code, write_user))
                return n - done;
        goto retry;
}

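/*
 * Futex operation primitive: load the old value, compute the new value
 * with insn, and retry the CS (compare and swap) until it succeeds.
 * ret is preset to -EFAULT and cleared only if no exception occurred.
 */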
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)      \
        asm volatile("0: l   %1,0(%6)\n"                                \
                     "1: " insn                                         \
                     "2: cs  %1,%2,0(%6)\n"                             \
                     "3: jl  1b\n"                                      \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );

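/*
 * Dispatch on the futex opcode: each case computes the new value from
 * the old value and oparg with the matching 32-bit instruction.
 */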
static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        if (ret == 0)
                *old = oldval;
        return ret;
}

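/*
 * Futex operation on a user address: translate the address via
 * __dat_user_addr() under the page table lock and pin the page with
 * get_page(), so that the atomic operation itself can run with the
 * lock dropped. With KERNEL_DS the address needs no translation.
 */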
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_op_pt(op, uaddr, oparg, old);
        if (unlikely(!current->mm))
                return -EFAULT;
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr, 1);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
        put_page(virt_to_page(uaddr));
        return ret;
}

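/*
 * Compare and swap on an already translated address: ret is preset to
 * -EFAULT and set to 0 by the LA only if the CS did not fault.
 */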
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                                     u32 oldval, u32 newval)
{
        int ret;

        asm volatile("0: cs   %1,%4,0(%5)\n"
                     "1: la   %0,0\n"
                     "2:\n"
                     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
        *uval = oldval;
        return ret;
}

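/*
 * Same translate-and-pin pattern as futex_atomic_op_pt(), here for the
 * cmpxchg operation.
 */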
int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                            u32 oldval, u32 newval)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        if (unlikely(!current->mm))
                return -EFAULT;
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr, 1);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        put_page(virt_to_page(uaddr));
        return ret;
}

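/*
 * Operation vector for the page table walk based uaccess variant,
 * used when user copies cannot be handled by hardware (see the file
 * header comment).
 */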
struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
        .strncpy_from_user      = strncpy_from_user_pt,
        .futex_atomic_op        = futex_atomic_op_pt,
        .futex_atomic_cmpxchg   = futex_atomic_cmpxchg_pt,
};