diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index bd376bc8c4ab..e4f246d62c46 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,7 +3,7 @@
 
 #include <linux/kernel.h>
 #include <asm/segment.h>
-#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
 
 #ifdef __KERNEL__
 
@@ -39,7 +39,7 @@
 	      [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
 	      [ti_flags] "i" (offsetof(struct thread_info, flags)), \
 	      [tif_fork] "i" (TIF_FORK), \
-	      [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
+	      [thread_info] "i" (offsetof(struct task_struct, stack)), \
 	      [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
 	     : "memory", "cc" __EXTRA_CLOBBER)
 
@@ -89,6 +89,11 @@ static inline unsigned long read_cr3(void)
 	return cr3;
 }
 
+static inline void write_cr3(unsigned long val)
+{
+	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
+}
+
 static inline unsigned long read_cr4(void)
 {
 	unsigned long cr4;
@@ -98,7 +103,7 @@ static inline unsigned long read_cr4(void)
 
 static inline void write_cr4(unsigned long val)
 {
-	asm volatile("movq %0,%%cr4" :: "r" (val));
+	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
 }
 
 #define stts() write_cr0(8 | read_cr0())
 
@@ -106,113 +111,10 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 #endif	/* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-#define __xg(x) ((volatile long *)(x))
-
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-{
-	*ptr = val;
-}
-
-#define _set_64bit set_64bit
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	  but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-	switch (size) {
-	case 1:
-		__asm__ __volatile__("xchgb %b0,%1"
-			:"=q" (x)
-			:"m" (*__xg(ptr)), "0" (x)
-			:"memory");
-		break;
-	case 2:
-		__asm__ __volatile__("xchgw %w0,%1"
-			:"=r" (x)
-			:"m" (*__xg(ptr)), "0" (x)
-			:"memory");
-		break;
-	case 4:
-		__asm__ __volatile__("xchgl %k0,%1"
-			:"=r" (x)
-			:"m" (*__xg(ptr)), "0" (x)
-			:"memory");
-		break;
-	case 8:
-		__asm__ __volatile__("xchgq %0,%1"
-			:"=r" (x)
-			:"m" (*__xg(ptr)), "0" (x)
-			:"memory");
-		break;
-	}
-	return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long prev;
-	switch (size) {
-	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	case 8:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
-		return prev;
-	}
-	return old;
-}
-
-#define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
-
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
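
A note on the cr3/cr4 changes above: loading CR3 on x86-64 switches the
page-table base and flushes all non-global TLB entries, which is why the
new write_cr3() (and now write_cr4() as well) carries a "memory" clobber
telling the compiler not to cache memory accesses across the write. As an
illustrative sketch of what the new accessor enables (my example, not part
of this patch; the name flush_tlb_local is hypothetical, standing in for
the kernel's real __flush_tlb() helper in asm/tlbflush.h):

static inline void flush_tlb_local(void)
{
	/* Reloading CR3 with its current value invalidates every
	 * non-global TLB entry on this CPU. */
	write_cr3(read_cr3());
}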
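
The comment removed above __xchg() records a subtlety worth keeping in
mind: an XCHG instruction with a memory operand asserts the bus lock
implicitly, so the primitive is SMP-safe even though no LOCK prefix is
emitted. That property is what made the old tas() macro a valid
test-and-set. A minimal sketch of the spin loop it supports (illustrative
only; toy_spin_lock is a made-up name, and real code should use the
kernel's spinlock API, which also handles unlock ordering):

static inline void toy_spin_lock(volatile long *lock)
{
	/* tas() expands to xchg(lock, 1): atomically store 1 and
	 * return the previous value; nonzero means another CPU
	 * still holds the lock, so keep spinning. */
	while (tas(lock))
		cpu_relax();
}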
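
Finally, the removed block comment documents the cmpxchg() calling
convention: compare OLD with the value at MEM, store NEW there only if
they match, and return whatever was in MEM, so the caller detects success
by comparing the return value with OLD. A hedged sketch of the usual
retry loop built on that convention (my illustration; atomic_add_long is
a made-up name):

static inline void atomic_add_long(volatile unsigned long *p, unsigned long n)
{
	unsigned long old;

	do {
		old = *p;	/* snapshot the current value */
	} while (cmpxchg(p, old, old + n) != old);
	/* a return value != old means another CPU won the race; retry */
}

None of these primitives disappear with this patch: xchg(), tas(),
set_64bit() and cmpxchg() move to the new <asm/cmpxchg.h> pulled in at
the top of the file.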