x86,vdso: Use LSL unconditionally for vgetcpu
author: Andy Lutomirski <luto@amacapital.net>
Thu, 30 Oct 2014 21:58:01 +0000 (14:58 -0700)
committer: Thomas Gleixner <tglx@linutronix.de>
Mon, 3 Nov 2014 12:41:53 +0000 (13:41 +0100)
LSL is faster than RDTSCP and works everywhere; there's no need to
switch between them depending on CPU.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Andi Kleen <andi@firstfloor.org>
Link: http://lkml.kernel.org/r/72f73d5ec4514e02bba345b9759177ef03742efb.1414706021.git.luto@amacapital.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/include/asm/vgtod.h
arch/x86/include/asm/vsyscall.h
arch/x86/include/asm/vvar.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/vsyscall_64.c
arch/x86/vdso/vgetcpu.c

index 3c3366c..e7e9682 100644 (file)
@@ -70,4 +70,23 @@ static inline void gtod_write_end(struct vsyscall_gtod_data *s)
        ++s->seq;
 }
 
+#ifdef CONFIG_X86_64
+
+/*
+ * The low 12 bits of the value returned by __getcpu() hold the CPU
+ * number; callers extract it with this mask.
+ */
+#define VGETCPU_CPU_MASK 0xfff
+
+/*
+ * Read per-CPU identification out of the segment limit of the
+ * __PER_CPU_SEG GDT descriptor via the LSL instruction.
+ *
+ * Returns the raw segment limit.  The CPU number is in the low 12 bits
+ * (see VGETCPU_CPU_MASK); the remaining limit bits presumably encode
+ * the NUMA node -- confirm against the GDT setup in
+ * arch/x86/kernel/cpu/common.c.
+ */
+static inline unsigned int __getcpu(void)
+{
+       unsigned int p;
+
+       /*
+        * Load per CPU data from GDT.  LSL is faster than RDTSCP and
+        * works on all CPUs.
+        */
+       asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+
+       return p;
+}
+
+#endif /* CONFIG_X86_64 */
+
 #endif /* _ASM_X86_VGTOD_H */
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge