#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>

#include <asm/system-um.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
extern unsigned long arch_align_stack(unsigned long sp);

void default_idle(void);
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#endif
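
/*
 * Usage sketch (illustrative only; "payload", "ready", producer() and
 * consumer() are hypothetical names): the classic store/load pairing
 * these barriers exist for. The producer orders its data store before
 * the flag store with wmb(); the consumer orders the flag load before
 * the data load with rmb().
 */
#if 0
static int payload;
static int ready;

static void producer(void)
{
	payload = 42;		/* write the data... */
	wmb();			/* ...then order it before the flag store */
	ready = 1;
}

static int consumer(void)
{
	if (!ready)
		return -1;	/* not published yet */
	rmb();			/* order the flag load before the data load */
	return payload;
}
#endif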
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends()	do { } while (0)
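
/*
 * Usage sketch (illustrative only; "struct foo", "gp" and reader()
 * are hypothetical names): the pointer-publication pattern described
 * in the comment above, written out in C.
 */
#if 0
struct foo { int a; };
static struct foo *gp;

static int reader(void)
{
	struct foo *q = gp;		/* load the pointer */
	read_barrier_depends();		/* order the dependent load below */
	return q ? q->a : -1;		/* data-dependent load */
}
#endif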
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
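
/*
 * Usage sketch (illustrative only; "flag" and publish_flag() are
 * hypothetical names): set_mb() stores a value and then orders it
 * against all later accesses; on SMP the xchg() is a full barrier,
 * on UP a compiler barrier suffices.
 */
#if 0
static int flag;

static void publish_flag(void)
{
	set_mb(flag, 1);	/* store flag, then full memory barrier */
	/* anything after this point is ordered past the store above */
}
#endif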
/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
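
/*
 * Usage sketch (illustrative only; tsc_read_ordered() is a
 * hypothetical helper, and native_read_tsc() is assumed to be the
 * accessor from <asm/msr.h>): bracketing the TSC read with
 * rdtsc_barrier() keeps it from being speculated into or out of the
 * timed region.
 */
#if 0
static inline unsigned long long tsc_read_ordered(void)
{
	unsigned long long t;

	rdtsc_barrier();	/* earlier instructions complete first */
	t = native_read_tsc();
	rdtsc_barrier();	/* the read retires before later work */
	return t;
}
#endif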