From 68f7d993a826bc3f8542b61ca85673eb15e43b86 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Mon, 29 Jan 2018 17:02:49 -0800
Subject: [PATCH] x86/uaccess: Use __uaccess_begin_nospec() and
 uaccess_try_nospec

commit 304ec1b050310548db33063e567123fae8fd0301 upstream.

Quoting Linus:

    I do think that it would be a good idea to very expressly document
    the fact that it's not that the user access itself is unsafe. I do
    agree that things like "get_user()" want to be protected, but not
    because of any direct bugs or problems with get_user() and friends,
    but simply because get_user() is an excellent source of a pointer
    that is obviously controlled from a potentially attacking user
    space. So it's a prime candidate for then finding _subsequent_
    accesses that can then be used to perturb the cache.

__uaccess_begin_nospec() covers __get_user() and copy_from_iter() where
the limit check is far away from the user pointer de-reference. In those
cases a barrier_nospec() prevents speculation with a potential pointer to
privileged memory.

uaccess_try_nospec covers get_user_try.

Suggested-by: Linus Torvalds
Suggested-by: Andi Kleen
Signed-off-by: Dan Williams
Signed-off-by: Thomas Gleixner
Cc: linux-arch@vger.kernel.org
Cc: Kees Cook
Cc: kernel-hardening@lists.openwall.com
Cc: gregkh@linuxfoundation.org
Cc: Al Viro
Cc: alan@linux.intel.com
Link: https://lkml.kernel.org/r/151727416953.33451.10508284228526170604.stgit@dwillia2-desk3.amr.corp.intel.com
[bwh: Backported to 3.2:
 - There's no SMAP support, so use barrier_nospec() directly instead of
   __uaccess_begin_nospec()
 - Convert several more functions to use barrier_nospec(), that are
   just wrappers in mainline
 - There's no 'case 8' in __copy_to_user_inatomic()
 - Adjust context]
Signed-off-by: Ben Hutchings
---
 arch/x86/include/asm/uaccess.h    |  3 ++-
 arch/x86/include/asm/uaccess_32.h | 12 ++++++++++++
 arch/x86/include/asm/uaccess_64.h | 22 ++++++++++++++++++----
 arch/x86/lib/usercopy_32.c       |  5 +++++
 4 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index d74bf3a41706..7dd47ba20f75 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -423,6 +423,7 @@ do {                                                            \
 ({                                                                      \
         int __gu_err;                                                   \
         unsigned long __gu_val;                                         \
+        barrier_nospec();                                               \
         __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);   \
         (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
         __gu_err;                                                       \
@@ -529,7 +530,7 @@ struct __large_struct { unsigned long buf[100]; };
  *      get_user_ex(...);
  * } get_user_catch(err)
  */
-#define get_user_try            uaccess_try
+#define get_user_try            uaccess_try_nospec
 #define get_user_catch(err)     uaccess_catch(err)
 
 #define get_user_ex(x, ptr)     do {                                    \
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 566e803cc602..b6f5617e8d27 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,14 +48,17 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
                 switch (n) {
                 case 1:
+                        barrier_nospec();
                         __put_user_size(*(u8 *)from, (u8 __user *)to,
                                         1, ret, 1);
                         return ret;
                 case 2:
+                        barrier_nospec();
                         __put_user_size(*(u16 *)from, (u16 __user *)to,
                                         2, ret, 2);
                         return ret;
                 case 4:
+                        barrier_nospec();
                         __put_user_size(*(u32 *)from, (u32 __user *)to,
                                         4, ret, 4);
                         return ret;
@@ -98,12 +101,15 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 
         switch (n) {
         case 1:
+                barrier_nospec();
                 __get_user_size(*(u8 *)to, from, 1, ret, 1);
                 return ret;
         case 2:
+                barrier_nospec();
                 __get_user_size(*(u16 *)to, from, 2, ret, 2);
                 return ret;
         case 4:
+                barrier_nospec();
                 __get_user_size(*(u32 *)to, from, 4, ret, 4);
                 return ret;
         }
@@ -142,12 +148,15 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 
         switch (n) {
         case 1:
+                barrier_nospec();
                 __get_user_size(*(u8 *)to, from, 1, ret, 1);
                 return ret;
         case 2:
+                barrier_nospec();
                 __get_user_size(*(u16 *)to, from, 2, ret, 2);
                 return ret;
         case 4:
+                barrier_nospec();
                 __get_user_size(*(u32 *)to, from, 4, ret, 4);
                 return ret;
         }
@@ -164,12 +173,15 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
         switch (n) {
         case 1:
+                barrier_nospec();
                 __get_user_size(*(u8 *)to, from, 1, ret, 1);
                 return ret;
         case 2:
+                barrier_nospec();
                 __get_user_size(*(u16 *)to, from, 2, ret, 2);
                 return ret;
         case 4:
+                barrier_nospec();
                 __get_user_size(*(u32 *)to, from, 4, ret, 4);
                 return ret;
         }
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 31fed191a41e..dda633ca71fa 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -75,19 +75,28 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
         if (!__builtin_constant_p(size))
                 return copy_user_generic(dst, (__force void *)src, size);
         switch (size) {
-        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+        case 1:
+                barrier_nospec();
+                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                 return ret;
-        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+        case 2:
+                barrier_nospec();
+                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                 return ret;
-        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+        case 4:
+                barrier_nospec();
+                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                 return ret;
-        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+        case 8:
+                barrier_nospec();
+                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                 return ret;
         case 10:
+                barrier_nospec();
                 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                                ret, "q", "", "=r", 10);
                 if (unlikely(ret))
@@ -97,6 +106,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
                                ret, "w", "w", "=r", 2);
                 return ret;
         case 16:
+                barrier_nospec();
                 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                                ret, "q", "", "=r", 16);
                 if (unlikely(ret))
@@ -179,6 +189,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
         switch (size) {
         case 1: {
                 u8 tmp;
+                barrier_nospec();
                 __get_user_asm(tmp, (u8 __user *)src,
                                ret, "b", "b", "=q", 1);
                 if (likely(!ret))
@@ -188,6 +199,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
         }
         case 2: {
                 u16 tmp;
+                barrier_nospec();
                 __get_user_asm(tmp, (u16 __user *)src,
                                ret, "w", "w", "=r", 2);
                 if (likely(!ret))
@@ -198,6 +210,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 
         case 4: {
                 u32 tmp;
+                barrier_nospec();
                 __get_user_asm(tmp, (u32 __user *)src,
                                ret, "l", "k", "=r", 4);
                 if (likely(!ret))
@@ -207,6 +220,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
         }
         case 8: {
                 u64 tmp;
+                barrier_nospec();
                 __get_user_asm(tmp, (u64 __user *)src,
                                ret, "q", "", "=r", 8);
                 if (likely(!ret))
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e218d5df85ff..0462f6531ed6 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -774,6 +774,7 @@ survive:
                 return n;
         }
 #endif
+        barrier_nospec();
         if (movsl_is_ok(to, from, n))
                 __copy_user(to, from, n);
         else
@@ -785,6 +786,7 @@ EXPORT_SYMBOL(__copy_to_user_ll);
 unsigned long __copy_from_user_ll(void *to, const void __user *from,
                                         unsigned long n)
 {
+        barrier_nospec();
         if (movsl_is_ok(to, from, n))
                 __copy_user_zeroing(to, from, n);
         else
@@ -796,6 +798,7 @@ EXPORT_SYMBOL(__copy_from_user_ll);
 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
                                          unsigned long n)
 {
+        barrier_nospec();
         if (movsl_is_ok(to, from, n))
                 __copy_user(to, from, n);
         else
@@ -808,6 +811,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
                                           unsigned long n)
 {
+        barrier_nospec();
 #ifdef CONFIG_X86_INTEL_USERCOPY
         if (n > 64 && cpu_has_xmm2)
                 n = __copy_user_zeroing_intel_nocache(to, from, n);
@@ -823,6 +827,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
                                         unsigned long n)
 {
+        barrier_nospec();
 #ifdef CONFIG_X86_INTEL_USERCOPY
         if (n > 64 && cpu_has_xmm2)
                 n = __copy_user_intel_nocache(to, from, n);
-- 
2.39.2
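
A note for readers of this backport: barrier_nospec() is used but not
defined by this patch. In mainline at the time it was introduced
(arch/x86/include/asm/barrier.h), it was roughly the following
alternative(), patching in a serializing fence according to CPU feature
flags; the 3.2 backport is presumed to define it similarly:

    /* Prevent speculative execution past this barrier. */
    #define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
                                           "lfence", X86_FEATURE_LFENCE_RDTSC)

The pattern the patch enforces is: validity check first, speculation
barrier second, user-pointer dereference last. The user-space C sketch
below illustrates the same idea, with _mm_lfence() standing in for
barrier_nospec(); the table, index, and bound are illustrative only,
not kernel code:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <emmintrin.h>  /* _mm_lfence() */

    static uint8_t table[256];

    /* Hypothetical accessor: idx is attacker-controlled, like a user pointer. */
    static uint8_t load_checked(size_t idx, size_t bound)
    {
            if (idx >= bound)
                    return 0;
            /*
             * Without a barrier the CPU may speculatively issue the load
             * below before the bounds check retires, leaving a cache
             * footprint an attacker can measure. LFENCE keeps later
             * instructions from executing until all prior instructions
             * complete, so a mispredicted out-of-bounds load never runs.
             */
            _mm_lfence();
            return table[idx];
    }

    int main(void)
    {
            printf("%u\n", load_checked(10, sizeof(table)));
            return 0;
    }

Compiled with gcc on x86-64, this is the same check/fence/dereference
ordering the diff above inserts into __get_user_nocheck(), the
__copy_*_inatomic() families, and the usercopy_32.c helpers.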