/*
 * User address space access functions.
 * The non inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
9 #include <linux/highmem.h>
10 #include <linux/blkdev.h>
11 #include <linux/module.h>
12 #include <linux/backing-dev.h>
13 #include <linux/interrupt.h>
14 #include <asm/uaccess.h>
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif
/*
 * Decide whether a movsl-based bulk copy is worthwhile: short copies
 * always qualify; for copies of 64 bytes or more, source and destination
 * must be mutually aligned with respect to movsl_mask.mask (only set on
 * CPUs where misaligned movsl is slow, under CONFIG_X86_INTEL_USERCOPY).
 * Returns 1 if movsl is OK to use, 0 if the unrolled path should be taken.
 */
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
/* Pointer-friendly wrapper around __movsl_is_ok(). */
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
/*
 * Zero @size bytes at user address @addr: whole 32-bit words with
 * rep;stosl, then the 0-3 byte tail with rep;stosb.  On a fault the
 * fixup at 3: converts the remaining word count back to bytes, so on
 * exit @size holds the number of bytes that could NOT be cleared.
 */
#define __do_clear_user(addr,size)					\
do {									\
	int __d0;							\
	might_fault();							\
	__asm__ __volatile__(						\
		"0:	rep; stosl\n"					\
		"	movl %2,%0\n"					\
		"1:	rep; stosb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	lea 0(%2,%0,4),%0\n"				\
		"	jmp 2b\n"					\
		".previous\n"						\
		_ASM_EXTABLE(0b,3b)					\
		_ASM_EXTABLE(1b,2b)					\
		: "=&c"(size), "=&D" (__d0)				\
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));	\
} while (0)
59 * clear_user: - Zero a block of memory in user space.
60 * @to: Destination address, in user space.
61 * @n: Number of bytes to zero.
63 * Zero a block of memory in user space.
65 * Returns number of bytes that could not be cleared.
66 * On success, this will be zero.
69 clear_user(void __user *to, unsigned long n)
72 if (access_ok(VERIFY_WRITE, to, n))
73 __do_clear_user(to, n);
76 EXPORT_SYMBOL(clear_user);
79 * __clear_user: - Zero a block of memory in user space, with less checking.
80 * @to: Destination address, in user space.
81 * @n: Number of bytes to zero.
83 * Zero a block of memory in user space. Caller must check
84 * the specified block with access_ok() before calling this function.
86 * Returns number of bytes that could not be cleared.
87 * On success, this will be zero.
90 __clear_user(void __user *to, unsigned long n)
92 __do_clear_user(to, n);
95 EXPORT_SYMBOL(__clear_user);
98 * strnlen_user: - Get the size of a string in user space.
99 * @s: The string to measure.
100 * @n: The maximum valid length
102 * Get the size of a NUL-terminated string in user space.
104 * Returns the size of the string INCLUDING the terminating NUL.
105 * On exception, returns 0.
106 * If the string is too long, returns a value greater than @n.
108 long strnlen_user(const char __user *s, long n)
110 unsigned long mask = -__addr_ok(s);
111 unsigned long res, tmp;
115 __asm__ __volatile__(
124 ".section .fixup,\"ax\"\n"
125 "2: xorl %%eax,%%eax\n"
130 ".section __ex_table,\"a\"\n"
134 :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
135 :"0" (n), "1" (s), "2" (0), "3" (mask)
139 EXPORT_SYMBOL(strnlen_user);
141 #ifdef CONFIG_X86_INTEL_USERCOPY
143 __copy_user_intel(void __user *to, const void *from, unsigned long size)
146 __asm__ __volatile__(
148 "1: movl 32(%4), %%eax\n"
151 "2: movl 64(%4), %%eax\n"
153 "3: movl 0(%4), %%eax\n"
154 "4: movl 4(%4), %%edx\n"
155 "5: movl %%eax, 0(%3)\n"
156 "6: movl %%edx, 4(%3)\n"
157 "7: movl 8(%4), %%eax\n"
158 "8: movl 12(%4),%%edx\n"
159 "9: movl %%eax, 8(%3)\n"
160 "10: movl %%edx, 12(%3)\n"
161 "11: movl 16(%4), %%eax\n"
162 "12: movl 20(%4), %%edx\n"
163 "13: movl %%eax, 16(%3)\n"
164 "14: movl %%edx, 20(%3)\n"
165 "15: movl 24(%4), %%eax\n"
166 "16: movl 28(%4), %%edx\n"
167 "17: movl %%eax, 24(%3)\n"
168 "18: movl %%edx, 28(%3)\n"
169 "19: movl 32(%4), %%eax\n"
170 "20: movl 36(%4), %%edx\n"
171 "21: movl %%eax, 32(%3)\n"
172 "22: movl %%edx, 36(%3)\n"
173 "23: movl 40(%4), %%eax\n"
174 "24: movl 44(%4), %%edx\n"
175 "25: movl %%eax, 40(%3)\n"
176 "26: movl %%edx, 44(%3)\n"
177 "27: movl 48(%4), %%eax\n"
178 "28: movl 52(%4), %%edx\n"
179 "29: movl %%eax, 48(%3)\n"
180 "30: movl %%edx, 52(%3)\n"
181 "31: movl 56(%4), %%eax\n"
182 "32: movl 60(%4), %%edx\n"
183 "33: movl %%eax, 56(%3)\n"
184 "34: movl %%edx, 60(%3)\n"
190 "35: movl %0, %%eax\n"
195 "36: movl %%eax, %0\n"
198 ".section .fixup,\"ax\"\n"
199 "101: lea 0(%%eax,%0,4),%0\n"
202 ".section __ex_table,\"a\"\n"
243 : "=&c"(size), "=&D" (d0), "=&S" (d1)
244 : "1"(to), "2"(from), "0"(size)
245 : "eax", "edx", "memory");
250 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
253 __asm__ __volatile__(
255 "0: movl 32(%4), %%eax\n"
258 "1: movl 64(%4), %%eax\n"
260 "2: movl 0(%4), %%eax\n"
261 "21: movl 4(%4), %%edx\n"
262 " movl %%eax, 0(%3)\n"
263 " movl %%edx, 4(%3)\n"
264 "3: movl 8(%4), %%eax\n"
265 "31: movl 12(%4),%%edx\n"
266 " movl %%eax, 8(%3)\n"
267 " movl %%edx, 12(%3)\n"
268 "4: movl 16(%4), %%eax\n"
269 "41: movl 20(%4), %%edx\n"
270 " movl %%eax, 16(%3)\n"
271 " movl %%edx, 20(%3)\n"
272 "10: movl 24(%4), %%eax\n"
273 "51: movl 28(%4), %%edx\n"
274 " movl %%eax, 24(%3)\n"
275 " movl %%edx, 28(%3)\n"
276 "11: movl 32(%4), %%eax\n"
277 "61: movl 36(%4), %%edx\n"
278 " movl %%eax, 32(%3)\n"
279 " movl %%edx, 36(%3)\n"
280 "12: movl 40(%4), %%eax\n"
281 "71: movl 44(%4), %%edx\n"
282 " movl %%eax, 40(%3)\n"
283 " movl %%edx, 44(%3)\n"
284 "13: movl 48(%4), %%eax\n"
285 "81: movl 52(%4), %%edx\n"
286 " movl %%eax, 48(%3)\n"
287 " movl %%edx, 52(%3)\n"
288 "14: movl 56(%4), %%eax\n"
289 "91: movl 60(%4), %%edx\n"
290 " movl %%eax, 56(%3)\n"
291 " movl %%edx, 60(%3)\n"
297 "5: movl %0, %%eax\n"
305 ".section .fixup,\"ax\"\n"
306 "9: lea 0(%%eax,%0,4),%0\n"
309 " xorl %%eax,%%eax\n"
315 ".section __ex_table,\"a\"\n"
338 : "=&c"(size), "=&D" (d0), "=&S" (d1)
339 : "1"(to), "2"(from), "0"(size)
340 : "eax", "edx", "memory");
345 * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware.
346 * hyoshiok@miraclelinux.com
349 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
350 const void __user *from, unsigned long size)
354 __asm__ __volatile__(
356 "0: movl 32(%4), %%eax\n"
359 "1: movl 64(%4), %%eax\n"
361 "2: movl 0(%4), %%eax\n"
362 "21: movl 4(%4), %%edx\n"
363 " movnti %%eax, 0(%3)\n"
364 " movnti %%edx, 4(%3)\n"
365 "3: movl 8(%4), %%eax\n"
366 "31: movl 12(%4),%%edx\n"
367 " movnti %%eax, 8(%3)\n"
368 " movnti %%edx, 12(%3)\n"
369 "4: movl 16(%4), %%eax\n"
370 "41: movl 20(%4), %%edx\n"
371 " movnti %%eax, 16(%3)\n"
372 " movnti %%edx, 20(%3)\n"
373 "10: movl 24(%4), %%eax\n"
374 "51: movl 28(%4), %%edx\n"
375 " movnti %%eax, 24(%3)\n"
376 " movnti %%edx, 28(%3)\n"
377 "11: movl 32(%4), %%eax\n"
378 "61: movl 36(%4), %%edx\n"
379 " movnti %%eax, 32(%3)\n"
380 " movnti %%edx, 36(%3)\n"
381 "12: movl 40(%4), %%eax\n"
382 "71: movl 44(%4), %%edx\n"
383 " movnti %%eax, 40(%3)\n"
384 " movnti %%edx, 44(%3)\n"
385 "13: movl 48(%4), %%eax\n"
386 "81: movl 52(%4), %%edx\n"
387 " movnti %%eax, 48(%3)\n"
388 " movnti %%edx, 52(%3)\n"
389 "14: movl 56(%4), %%eax\n"
390 "91: movl 60(%4), %%edx\n"
391 " movnti %%eax, 56(%3)\n"
392 " movnti %%edx, 60(%3)\n"
399 "5: movl %0, %%eax\n"
407 ".section .fixup,\"ax\"\n"
408 "9: lea 0(%%eax,%0,4),%0\n"
411 " xorl %%eax,%%eax\n"
417 ".section __ex_table,\"a\"\n"
440 : "=&c"(size), "=&D" (d0), "=&S" (d1)
441 : "1"(to), "2"(from), "0"(size)
442 : "eax", "edx", "memory");
446 static unsigned long __copy_user_intel_nocache(void *to,
447 const void __user *from, unsigned long size)
451 __asm__ __volatile__(
453 "0: movl 32(%4), %%eax\n"
456 "1: movl 64(%4), %%eax\n"
458 "2: movl 0(%4), %%eax\n"
459 "21: movl 4(%4), %%edx\n"
460 " movnti %%eax, 0(%3)\n"
461 " movnti %%edx, 4(%3)\n"
462 "3: movl 8(%4), %%eax\n"
463 "31: movl 12(%4),%%edx\n"
464 " movnti %%eax, 8(%3)\n"
465 " movnti %%edx, 12(%3)\n"
466 "4: movl 16(%4), %%eax\n"
467 "41: movl 20(%4), %%edx\n"
468 " movnti %%eax, 16(%3)\n"
469 " movnti %%edx, 20(%3)\n"
470 "10: movl 24(%4), %%eax\n"
471 "51: movl 28(%4), %%edx\n"
472 " movnti %%eax, 24(%3)\n"
473 " movnti %%edx, 28(%3)\n"
474 "11: movl 32(%4), %%eax\n"
475 "61: movl 36(%4), %%edx\n"
476 " movnti %%eax, 32(%3)\n"
477 " movnti %%edx, 36(%3)\n"
478 "12: movl 40(%4), %%eax\n"
479 "71: movl 44(%4), %%edx\n"
480 " movnti %%eax, 40(%3)\n"
481 " movnti %%edx, 44(%3)\n"
482 "13: movl 48(%4), %%eax\n"
483 "81: movl 52(%4), %%edx\n"
484 " movnti %%eax, 48(%3)\n"
485 " movnti %%edx, 52(%3)\n"
486 "14: movl 56(%4), %%eax\n"
487 "91: movl 60(%4), %%edx\n"
488 " movnti %%eax, 56(%3)\n"
489 " movnti %%edx, 60(%3)\n"
496 "5: movl %0, %%eax\n"
504 ".section .fixup,\"ax\"\n"
505 "9: lea 0(%%eax,%0,4),%0\n"
508 ".section __ex_table,\"a\"\n"
531 : "=&c"(size), "=&D" (d0), "=&S" (d1)
532 : "1"(to), "2"(from), "0"(size)
533 : "eax", "edx", "memory");
540 * Leave these declared but undefined. They should not be any references to
543 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
545 unsigned long __copy_user_intel(void __user *to, const void *from,
547 unsigned long __copy_user_zeroing_intel_nocache(void *to,
548 const void __user *from, unsigned long size);
549 #endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy.  Aligns the destination to a 4-byte
 * boundary with movsb first, then moves whole words with movsl and the
 * tail with movsb.  On a fault, @size is left holding the number of
 * bytes that could not be copied (fixups 5:/3: rebuild the byte count).
 */
#define __copy_user(to, from, size)					\
do {									\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		"	cmp  $7,%0\n"					\
		"	jbe  1f\n"					\
		"	movl %1,%0\n"					\
		"	negl %0\n"					\
		"	andl $7,%0\n"					\
		"	subl %0,%3\n"					\
		"4:	rep; movsb\n"					\
		"	movl %3,%0\n"					\
		"	shrl $2,%0\n"					\
		"	andl $3,%3\n"					\
		"	.align 2,0x90\n"				\
		"0:	rep; movsl\n"					\
		"	movl %3,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"5:	addl %3,%0\n"					\
		"	jmp 2b\n"					\
		"3:	lea 0(%3,%0,4),%0\n"				\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n"					\
		"	.long 4b,5b\n"					\
		"	.long 0b,3b\n"					\
		"	.long 1b,2b\n"					\
		".previous"						\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
		: "memory");						\
} while (0)
/* As __copy_user, but for copies FROM user space: on a fault the fixup
 * at 6: zero-fills the rest of the destination (rep;stosb) before
 * reporting the number of uncopied bytes in @size.
 */
#define __copy_user_zeroing(to, from, size)				\
do {									\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		"	cmp  $7,%0\n"					\
		"	jbe  1f\n"					\
		"	movl %1,%0\n"					\
		"	negl %0\n"					\
		"	andl $7,%0\n"					\
		"	subl %0,%3\n"					\
		"4:	rep; movsb\n"					\
		"	movl %3,%0\n"					\
		"	shrl $2,%0\n"					\
		"	andl $3,%3\n"					\
		"	.align 2,0x90\n"				\
		"0:	rep; movsl\n"					\
		"	movl %3,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"5:	addl %3,%0\n"					\
		"	jmp 6f\n"					\
		"3:	lea 0(%3,%0,4),%0\n"				\
		"6:	pushl %0\n"					\
		"	pushl %%eax\n"					\
		"	xorl %%eax,%%eax\n"				\
		"	rep; stosb\n"					\
		"	popl %%eax\n"					\
		"	popl %0\n"					\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n"					\
		"	.long 4b,5b\n"					\
		"	.long 0b,3b\n"					\
		"	.long 1b,6b\n"					\
		".previous"						\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
		: "memory");						\
} while (0)
630 unsigned long __copy_to_user_ll(void __user *to, const void *from,
633 #ifndef CONFIG_X86_WP_WORKS_OK
634 if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
635 ((unsigned long)to) < TASK_SIZE) {
637 * When we are in an atomic section (see
638 * mm/filemap.c:file_read_actor), return the full
639 * length to take the slow path.
645 * CPU does not honor the WP bit when writing
646 * from supervisory mode, and due to preemption or SMP,
647 * the page tables can change at any time.
648 * Do it manually. Manfred <manfred@colorfullife.com>
651 unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
652 unsigned long len = PAGE_SIZE - offset;
661 down_read(¤t->mm->mmap_sem);
662 retval = get_user_pages(current, current->mm,
663 (unsigned long)to, 1, 1, 0, &pg, NULL);
665 if (retval == -ENOMEM && is_global_init(current)) {
666 up_read(¤t->mm->mmap_sem);
667 congestion_wait(BLK_RW_ASYNC, HZ/50);
672 up_read(¤t->mm->mmap_sem);
676 maddr = kmap_atomic(pg);
677 memcpy(maddr + offset, from, len);
678 kunmap_atomic(maddr);
679 set_page_dirty_lock(pg);
681 up_read(¤t->mm->mmap_sem);
690 if (movsl_is_ok(to, from, n))
691 __copy_user(to, from, n);
693 n = __copy_user_intel(to, from, n);
696 EXPORT_SYMBOL(__copy_to_user_ll);
698 unsigned long __copy_from_user_ll(void *to, const void __user *from,
701 if (movsl_is_ok(to, from, n))
702 __copy_user_zeroing(to, from, n);
704 n = __copy_user_zeroing_intel(to, from, n);
707 EXPORT_SYMBOL(__copy_from_user_ll);
709 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
712 if (movsl_is_ok(to, from, n))
713 __copy_user(to, from, n);
715 n = __copy_user_intel((void __user *)to,
716 (const void *)from, n);
719 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
721 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
724 #ifdef CONFIG_X86_INTEL_USERCOPY
725 if (n > 64 && cpu_has_xmm2)
726 n = __copy_user_zeroing_intel_nocache(to, from, n);
728 __copy_user_zeroing(to, from, n);
730 __copy_user_zeroing(to, from, n);
734 EXPORT_SYMBOL(__copy_from_user_ll_nocache);
736 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
739 #ifdef CONFIG_X86_INTEL_USERCOPY
740 if (n > 64 && cpu_has_xmm2)
741 n = __copy_user_intel_nocache(to, from, n);
743 __copy_user(to, from, n);
745 __copy_user(to, from, n);
749 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
752 * copy_to_user: - Copy a block of data into user space.
753 * @to: Destination address, in user space.
754 * @from: Source address, in kernel space.
755 * @n: Number of bytes to copy.
757 * Context: User context only. This function may sleep.
759 * Copy data from kernel space to user space.
761 * Returns number of bytes that could not be copied.
762 * On success, this will be zero.
765 copy_to_user(void __user *to, const void *from, unsigned long n)
767 if (access_ok(VERIFY_WRITE, to, n))
768 n = __copy_to_user(to, from, n);
771 EXPORT_SYMBOL(copy_to_user);
774 * copy_from_user: - Copy a block of data from user space.
775 * @to: Destination address, in kernel space.
776 * @from: Source address, in user space.
777 * @n: Number of bytes to copy.
779 * Context: User context only. This function may sleep.
781 * Copy data from user space to kernel space.
783 * Returns number of bytes that could not be copied.
784 * On success, this will be zero.
786 * If some data could not be copied, this function will pad the copied
787 * data to the requested size using zero bytes.
790 _copy_from_user(void *to, const void __user *from, unsigned long n)
792 if (access_ok(VERIFY_READ, from, n))
793 n = __copy_from_user(to, from, n);
798 EXPORT_SYMBOL(_copy_from_user);
/*
 * Reached when a compile-time-checked copy_from_user() detects that the
 * requested length exceeds the destination buffer — warn loudly.
 */
void copy_from_user_overflow(void)
{
	WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);