#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}
static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;
	unsigned long cr4;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}
static inline void __flush_tlb_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}
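/*
 * TLB_FLUSH_ALL is the sentinel "virtual address" that the SMP flush
 * code passes around when the whole address space, rather than a
 * single page, has to go.
 */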
#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL	0xffffffff
#else
# define TLB_FLUSH_ALL	-1ULL
#endif
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. For small ranges it might be worth testing
 * whether a few INVLPGs in a row are a win.
 */
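/*
 * Illustrative use (a sketch, not code from this header): after
 * clearing a user PTE, the stale translation is shot down with the
 * page-granular primitive:
 *
 *	pte_t old = ptep_get_and_clear(vma->vm_mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 *
 * On SMP, flush_tlb_page() also sends flush IPIs to the other CPUs
 * currently running this mm.
 */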
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long va)
{
}

static inline void reset_lazy_tlbstate(void)
{
}
#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va);
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2
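/*
 * cpu_tlbstate.state is TLBSTATE_OK while the CPU is actively using
 * its active_mm; TLBSTATE_LAZY means a kernel thread is merely
 * borrowing the mm, so the CPU may skip user-space TLB flushes for it.
 */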
struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
static inline void reset_lazy_tlbstate(void)
{
	percpu_write(cpu_tlbstate.state, 0);
	percpu_write(cpu_tlbstate.active_mm, &init_mm);
}
#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
#endif
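/*
 * Kernel-range flushes fall back to a full flush: kernel mappings are
 * typically global pages, which a plain CR3 reload does not invalidate.
 */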
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
#endif /* _ASM_X86_TLBFLUSH_H */