From 733037c3c89b259235ff5217096b1c846fe7e6e2 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Fri, 16 Aug 2013 14:17:19 -0700
Subject: [PATCH] x86: Add 1/2/4/8 byte optimization to 64bit
 __copy_{from,to}_user_inatomic

commit ff47ab4ff3cddfa7bc1b25b990e24abe2ae474ff upstream.

The 64bit __copy_{from,to}_user_inatomic always called
copy_from_user_generic, but skipped the special optimizations for
1/2/4/8 byte accesses.

This especially hurts the futex call, which accesses the 4 byte futex
user value with a complicated fast string operation in a function call,
instead of a single movl.

Use __copy_{from,to}_user for _inatomic instead to get the same
optimizations. The only problem was the might_fault() in those
functions. So move that to a new wrapper and call
__copy_{f,t}_user_nocheck() from *_inatomic directly.

32bit already did this correctly by duplicating the code.

Signed-off-by: Andi Kleen
Link: http://lkml.kernel.org/r/1376687844-19857-2-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin
Signed-off-by: Ben Hutchings
Cc: Jaccon Bastiaansen
Cc: Thomas Gleixner
Cc: mingo@redhat.com
Cc: Peter Zijlstra
Cc: h.zuidam@computer.org
---
 arch/x86/include/asm/uaccess_64.h | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1c66d30971ad..4f0194bed4cb 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -68,11 +68,10 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -112,11 +111,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	might_fault();
+	return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -155,6 +160,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 	}
 }
 
+static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	might_fault();
+	return __copy_to_user_nocheck(dst, src, size);
+}
+
 static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
@@ -221,13 +233,13 @@ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
 static __must_check __always_inline
 int __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-	return copy_user_generic(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline
 int __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-	return copy_user_generic((__force void *)dst, src, size);
+	return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
-- 
2.39.2