#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
# define AT_VECTOR_SIZE_ARCH 2
#else /* else it's non-compat x86-64 */
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss);
extern void show_regs_common(void);
#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movl %P[task_canary](%[next]), %%ebx\n\t"                      \
        "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam                                          \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */
/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        /*                                                              \
         * Context-switching clobbers all registers, so we clobber     \
         * them explicitly, via unused output variables.                \
         * (EAX and EBP are not listed because EBP is saved/restored   \
         *  explicitly for wchan access and EAX is the return value of \
         *  __switch_to())                                              \
         */                                                             \
        unsigned long ebx, ecx, edx, esi, edi;                          \
                                                                        \
        asm volatile("pushfl\n\t"               /* save    flags */     \
                     "pushl %%ebp\n\t"          /* save    EBP   */     \
                     "movl %%esp,%[prev_sp]\n\t"  /* save    ESP   */   \
                     "movl %[next_sp],%%esp\n\t"  /* restore ESP   */   \
                     "movl $1f,%[prev_ip]\n\t"  /* save    EIP   */     \
                     "pushl %[next_ip]\n\t"     /* restore EIP   */     \
                     __switch_canary                                    \
                     "jmp __switch_to\n"        /* regparm call  */     \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"           /* restore EBP   */     \
                     "popfl\n"                  /* restore flags */     \
                                                                        \
                     /* output parameters */                            \
                     : [prev_sp] "=m" (prev->thread.sp),                \
                       [prev_ip] "=m" (prev->thread.ip),                \
                       "=a" (last),                                     \
                                                                        \
                       /* clobbered output registers: */                \
                       "=b" (ebx), "=c" (ecx), "=d" (edx),              \
                       "=S" (esi), "=D" (edi)                           \
                                                                        \
                       __switch_canary_oparam                           \
                                                                        \
                       /* input parameters: */                          \
                     : [next_sp]  "m" (next->thread.sp),                \
                       [next_ip]  "m" (next->thread.ip),                \
                                                                        \
                       /* regparm parameters for __switch_to(): */      \
                       [prev]     "a" (prev),                           \
                       [next]     "d" (next)                            \
                                                                        \
                       __switch_canary_iparam                           \
                                                                        \
                     : /* reloaded segment registers */                 \
                        "memory");                                      \
} while (0)
#else

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
          "r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                   \
        "movq %P[task_canary](%%rsi),%%r8\n\t"                            \
        "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam                                            \
        , [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam                                            \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */
/* Save and restore flags to handle NT leaking across the context switch */
#define switch_to(prev, next, last) \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
             "call __switch_to\n\t"                                       \
             "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
             __switch_canary                                              \
             "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
             "movq %%rax,%%rdi\n\t"                                       \
             "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"                  \
             "jnz ret_from_fork\n\t"                                      \
             RESTORE_CONTEXT                                              \
             : "=a" (last)                                                \
               __switch_canary_oparam                                     \
             : [next] "S" (next), [prev] "D" (prev),                      \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
               [_tif_fork] "i" (_TIF_FORK),                               \
               [thread_info] "i" (offsetof(struct task_struct, stack)),   \
               [current_task] "m" (current_task)                          \
               __switch_canary_iparam                                     \
             : "memory", "cc" __EXTRA_CLOBBER)

#endif /* CONFIG_X86_32 */
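/*
 * Usage sketch (illustrative, not part of this header): the scheduler's
 * context_switch() invokes the macro as
 *
 *      switch_to(prev, next, prev);
 *
 * so that after the stack switch "prev" names the task we actually came
 * from, whichever CPU and stack the current task resumes on.
 */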
#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)                                 \
do {                                                            \
        unsigned short __val = (value);                         \
        asm volatile("                                  \n"     \
                     "1: movl %k0,%%" #seg "            \n"     \
                     ".section .fixup,\"ax\"            \n"     \
                     "2: xorl %k0,%k0                   \n"     \
                     "   jmp 1b                         \n"     \
                     ".previous                         \n"     \
                     _ASM_EXTABLE(1b, 2b)                       \
                     : "+r" (__val) : : "memory");              \
} while (0)
/*
 * Save a segment register away
 */
#define savesegment(seg, value)                         \
        asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
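/*
 * Usage sketch (illustrative, not part of this header):
 *
 *      unsigned short sel;
 *      savesegment(fs, sel);           stash the current %fs selector
 *      loadsegment(fs, sel);           reload it later; a faulting load
 *                                      falls back to the NULL selector
 */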
/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)       (u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)    loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)       ((tsk)->thread.gs)
#define lazy_save_gs(v)         savesegment(gs, (v))
#define lazy_load_gs(v)         loadsegment(gs, (v))
#else   /* X86_32_LAZY_GS */
#define get_user_gs(regs)       (u16)((regs)->gs)
#define set_user_gs(regs, v)    do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)       (task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)         do { } while (0)
#define lazy_load_gs(v)         do { } while (0)
#endif  /* X86_32_LAZY_GS */
#endif  /* X86_32 */
static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
        return __limit + 1;
}
static inline void native_clts(void)
{
        asm volatile("clts");
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;
static inline unsigned long native_read_cr0(void)
{
        unsigned long val;
        asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr0(unsigned long val)
{
        asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}
static inline unsigned long native_read_cr2(void)
{
        unsigned long val;
        asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr2(unsigned long val)
{
        asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}
static inline unsigned long native_read_cr3(void)
{
        unsigned long val;
        asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr3(unsigned long val)
{
        asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}
static inline unsigned long native_read_cr4(void)
{
        unsigned long val;
        asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}
static inline unsigned long native_read_cr4_safe(void)
{
        unsigned long val;
        /* This could fault if %cr4 does not exist. On x86_64, cr4 always
         * exists, so it will never fail. */
#ifdef CONFIG_X86_32
        asm volatile("1: mov %%cr4, %0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b)
                     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
        val = native_read_cr4();
#endif
        return val;
}
static inline void native_write_cr4(unsigned long val)
{
        asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
        unsigned long cr8;
        asm volatile("movq %%cr8,%0" : "=r" (cr8));
        return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
        asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif
static inline void native_wbinvd(void)
{
        asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
static inline unsigned long read_cr0(void)
{
        return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
        native_write_cr0(x);
}

static inline unsigned long read_cr2(void)
{
        return native_read_cr2();
}

static inline void write_cr2(unsigned long x)
{
        native_write_cr2(x);
}

static inline unsigned long read_cr3(void)
{
        return native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
        native_write_cr3(x);
}

static inline unsigned long read_cr4(void)
{
        return native_read_cr4();
}

static inline unsigned long read_cr4_safe(void)
{
        return native_read_cr4_safe();
}

static inline void write_cr4(unsigned long x)
{
        native_write_cr4(x);
}

static inline void wbinvd(void)
{
        native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline unsigned long read_cr8(void)
{
        return native_read_cr8();
}

static inline void write_cr8(unsigned long x)
{
        native_write_cr8(x);
}

static inline void load_gs_index(unsigned selector)
{
        native_load_gs_index(selector);
}

#endif

/* Clear the 'TS' bit */
static inline void clts(void)
{
        native_clts();
}

#endif /* CONFIG_PARAVIRT */
#define stts() write_cr0(read_cr0() | X86_CR0_TS)

#endif /* __KERNEL__ */
static inline void clflush(volatile void *__p)
{
        asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
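/*
 * Usage sketch (illustrative, not part of this header): flush a buffer one
 * cache line at a time and fence around it, roughly what
 * clflush_cache_range() does:
 *
 *      mb();
 *      for (p = vaddr; p < vaddr + size; p += boot_cpu_data.x86_clflush_size)
 *              clflush(p);
 *      mb();
 */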
#define nop() asm volatile ("nop")

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
bool set_pm_idle_to_default(void);

void stop_this_cpu(void *dummy);
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")
#define wmb()   asm volatile("sfence" ::: "memory")
#endif
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends()  do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()      rmb()
#else
# define smp_rmb()      barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()      wmb()
#else
# define smp_wmb()      barrier()
#endif
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
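/*
 * Usage sketch (illustrative, not part of this header): the classic
 * producer/consumer pairing of these primitives:
 *
 *      producer                        consumer
 *      data = 42;                      while (!flag)
 *      smp_wmb();                              cpu_relax();
 *      flag = 1;                       smp_rmb();
 *                                      use(data);
 *
 * On !CONFIG_SMP both sides collapse to compiler barriers, which is all
 * the ordering a uniprocessor kernel needs against itself.
 */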
/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use an alternative three way for this if there was one.)
 */
static __always_inline void rdtsc_barrier(void)
{
        alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
        alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
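/*
 * Usage sketch (illustrative, not part of this header): bracket the TSC
 * read so it cannot be speculated into or out of the timed region:
 *
 *      rdtsc_barrier();
 *      t = get_cycles();
 *      rdtsc_barrier();
 */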
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN    0

#endif /* _ASM_X86_SYSTEM_H */