/*
 * Copyright (C) 1994  Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/desc_defs.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
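
/*
 * Example (illustrative, not part of the original header): debugging
 * code can log roughly where it is executing:
 *
 *	printk(KERN_DEBUG "executing near %p\n", current_text_addr());
 */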

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;
	char	rfu;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;	/* in KB - valid for CPUs which support this call */
	int	x86_cache_alignment;	/* In bytes */
	char	fdiv_bug;
	char	f00f_bug;
	char	coma_bug;
	char	pad0;
	int	x86_power;
	unsigned long	loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t	llc_shared_map;	/* cpus sharing the last level cache */
#endif
	unsigned char	x86_max_cores;	/* cpuid returned max cores value */
	unsigned char	apicid;
	unsigned short	x86_clflush_size;
#ifdef CONFIG_SMP
	unsigned char	booted_cores;	/* number of cores as seen by OS */
	__u8		phys_proc_id;	/* Physical processor id. */
	__u8		cpu_core_id;	/* Core id */
	__u8		cpu_index;	/* index into per_cpu list */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9
#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif
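
/*
 * Example (illustrative): code reads per-CPU data through these
 * accessors rather than poking boot_cpu_data directly, e.g.:
 *
 *	if (current_cpu_data.x86_vendor == X86_VENDOR_AMD)
 *		printk(KERN_INFO "cache: %d KB\n", cpu_data(0).x86_cache_size);
 */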

/*
 * the following now lives in the per cpu area:
 * extern int cpu_llc_id[NR_CPUS];
 */
DECLARE_PER_CPU(u8, cpu_llc_id);
extern char ignore_fpu_irq;

void __init cpu_detect(struct cpuinfo_x86 *c);

extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4 (unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
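
/*
 * Illustrative sketch (X86_CR4_OSFXSR and cpu_has_fxsr are assumed from
 * <asm/processor-flags.h> and <asm/cpufeature.h>): enabling a CR4
 * feature this way also records it in mmu_cr4_features, so CPUs booting
 * later pick up the same setting:
 *
 *	if (cpu_has_fxsr)
 *		set_in_cr4(X86_CR4_OSFXSR);
 */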

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}
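
/*
 * Illustrative sketch: code that modifies instructions at runtime runs
 * a serializing instruction before executing the patched bytes. The
 * patch_site/replacement/patch_len names here are hypothetical:
 *
 *	memcpy(patch_site, replacement, patch_len);
 *	sync_core();
 */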

static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
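
/*
 * Illustrative sketch of the usual MONITOR/MWAIT idle pattern, roughly
 * what mwait_idle_with_hints() does (eax_hint/ecx_hint are placeholder
 * values; current_thread_info() comes from <asm/thread_info.h>):
 *
 *	__monitor(&current_thread_info()->flags, 0, 0);
 *	if (!need_resched())
 *		__mwait(eax_hint, ecx_hint);
 */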

/* from system description table in BIOS.  Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS	65536
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000

struct i387_fsave_struct {
	long	cwd;
	long	swd;
	long	twd;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};

struct i387_fxsave_struct {
	unsigned short	cwd, swd, twd, fop;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	mxcsr;
	long	mxcsr_mask;
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
	long	cwd;
	long	swd;
	long	twd;
	long	fip;
	long	fcs;
	long	foo;
	long	fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;
	unsigned long	entry_eip;
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};

typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct;

/* This is the TSS defined by the hardware. */
struct i386_hw_tss {
	unsigned short	back_link, __blh;
	unsigned long	sp0;
	unsigned short	ss0, __ss0h;
	unsigned long	sp1;
	unsigned short	ss1, __ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned long	sp2;
	unsigned short	ss2, __ss2h;
	unsigned long	__cr3;
	unsigned long	ip;
	unsigned long	flags;
	unsigned long	ax, cx, dx, bx;
	unsigned long	sp, bp, si, di;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
} __attribute__((packed));

struct tss_struct {
	struct i386_hw_tss x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long	io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long	__cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long	stack[64];
} __attribute__((packed));

#define ARCH_MIN_TASKALIGN	16

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long	sp0;
	unsigned long	sysenter_cs;
	unsigned long	ip;
	unsigned long	sp;
	unsigned long	fs;
	unsigned long	gs;
/* Hardware debugging registers */
	unsigned long	debugreg0;
	unsigned long	debugreg1;
	unsigned long	debugreg2;
	unsigned long	debugreg3;
	unsigned long	debugreg6;
	unsigned long	debugreg7;
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long	screen_bitmap;
	unsigned long	v86flags, v86mask, saved_sp0;
	unsigned int	saved_fs, saved_gs;
/* IO permissions */
	unsigned long	*io_bitmap_ptr;
	unsigned long	iopl;
/* max allowed port in the bitmap, in bytes: */
	unsigned long	io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
	unsigned long	debugctlmsr;
/* Debug Store - if not 0 points to a DS Save Area configuration;
 * goes into MSR_IA32_DS_AREA */
	unsigned long	ds_area_msr;
};

#define INIT_THREAD  {						\
	.sp0 = sizeof(init_stack) + (long)&init_stack,		\
	.vm86_info = NULL,					\
	.sysenter_cs = __KERNEL_CS,				\
	.io_bitmap_ptr = NULL,					\
	.fs = __KERNEL_PERCPU,					\
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {						\
	.x86_tss = {						\
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,			\
		.ss1		= __KERNEL_CS,			\
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,	\
	},							\
	.io_bitmap	= { [0 ... IO_BITMAP_LONGS] = ~0 },	\
}

#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%gs": :"r" (0));			\
	regs->fs = 0;						\
	set_fs(USER_DS);					\
	regs->ds = __USER_DS;					\
	regs->es = __USER_DS;					\
	regs->ss = __USER_DS;					\
	regs->cs = __USER_CS;					\
	regs->ip = new_eip;					\
	regs->sp = new_esp;					\
} while (0)
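
/*
 * Example (illustrative): a binfmt loader uses start_thread() to hand
 * control to a freshly set up user image, cf. load_elf_binary():
 *
 *	start_thread(regs, elf_entry, bprm->p);
 */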

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same privilege ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)					\
({								\
	struct pt_regs *__regs__;				\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;						\
})

#define KSTK_EIP(task) (task_pt_regs(task)->ip)
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
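
/*
 * Example (illustrative): reporting another task's saved user-mode
 * instruction and stack pointers, e.g. for /proc/<pid>/stat:
 *
 *	unsigned long eip = KSTK_EIP(task);
 *	unsigned long esp = KSTK_ESP(task);
 */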

struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
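
/*
 * Example (illustrative): a polling loop that stays friendly to the
 * sibling hyper-thread and the memory bus ("condition" is a
 * placeholder):
 *
 *	while (!condition)
 *		cpu_relax();
 */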

static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0: asm("movl %%db0, %0" :"=r" (val)); break;
	case 1: asm("movl %%db1, %0" :"=r" (val)); break;
	case 2: asm("movl %%db2, %0" :"=r" (val)); break;
	case 3: asm("movl %%db3, %0" :"=r" (val)); break;
	case 6: asm("movl %%db6, %0" :"=r" (val)); break;
	case 7: asm("movl %%db7, %0" :"=r" (val)); break;
	default: BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0: asm("movl %0,%%db0" : /* no output */ :"r" (value)); break;
	case 1: asm("movl %0,%%db1" : /* no output */ :"r" (value)); break;
	case 2: asm("movl %0,%%db2" : /* no output */ :"r" (value)); break;
	case 3: asm("movl %0,%%db3" : /* no output */ :"r" (value)); break;
	case 6: asm("movl %0,%%db6" : /* no output */ :"r" (value)); break;
	case 7: asm("movl %0,%%db7" : /* no output */ :"r" (value)); break;
	default: BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0; andl %1, %0; orl %2, %0;"
			      "pushl %0; popfl"
			      : "=&r" (reg)
			      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_enabled() 0

static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)			\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)			\
	native_set_debugreg(register, value)
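
/*
 * Illustrative sketch (hypothetical values): arming hardware
 * breakpoint slot 0 on a linear address and enabling it in DR7 as a
 * local execute breakpoint (DR7 bit 0 = L0 enable; type/len fields of
 * zero mean "execute, 1 byte"):
 *
 *	set_debugreg(addr, 0);
 *	set_debugreg(1, 7);
 */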

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7

#define K8_NOP1	GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1

/* uses eax dependencies (Intel-recommended choice) */
#define P6_NOP1	GENERIC_NOP1
#define P6_NOP2	".byte 0x66,0x90\n"
#define P6_NOP3	".byte 0x0f,0x1f,0x00\n"
#define P6_NOP4	".byte 0x0f,0x1f,0x40,0\n"
#define P6_NOP5	".byte 0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP6	".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
	defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
	defined(CONFIG_MCORE2) || defined(CONFIG_MPENTIUM4)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't currently do prefetches for pre-XP Athlons.
   That should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)
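
/*
 * Example (illustrative): warming the next list node's cache line in
 * exclusive state while the current node is still being processed
 * ("pos", "head" and "do_something" are placeholders):
 *
 *	for (pos = head->next; pos != head; pos = pos->next) {
 *		prefetchw(pos->next);
 *		do_something(pos);
 *	}
 */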

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);

extern int force_mwait;

#endif /* __ASM_I386_PROCESSOR_H */