#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>
static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
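
/*
 * Aside (not part of the original header): on a toolchain new enough to
 * know the mnemonic (binutils 2.22+), the .byte sequence above could be
 * written directly. A minimal sketch, assuming such an assembler; the
 * helper name is hypothetical:
 */
#if 0	/* illustration only */
static inline void __invpcid_mnemonic(unsigned long pcid, unsigned long addr,
				      unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * INVPCID r64, m128: the register operand selects the invalidation
	 * type, the memory operand is the 128-bit descriptor built above.
	 */
	asm volatile("invpcid %[desc], %[type]"
		     : : [desc] "m" (desc), [type] "r" (type) : "memory");
}
#endif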
#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
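
/*
 * Usage sketch (not part of the original header): how a caller might pick
 * among the four wrappers above. USER_PCID and the helper are hypothetical;
 * real callers must first check static_cpu_has(X86_FEATURE_INVPCID).
 */
#if 0	/* illustration only */
#define USER_PCID	1UL	/* hypothetical PCID assignment */

static inline void example_invpcid_usage(unsigned long addr)
{
	invpcid_flush_one(USER_PCID, addr);	 /* one page, one PCID */
	invpcid_flush_single_context(USER_PCID); /* whole PCID, no globals */
	invpcid_flush_all_nonglobals();		 /* every PCID, no globals */
	invpcid_flush_all();			 /* everything, incl. globals */
}
#endif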

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4;
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}

#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL	0xffffffff
#else
# define TLB_FLUSH_ALL	-1ULL
#endif

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. Might be worth trying if for a small
 * range a few INVLPGs in a row are a win.
 */
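
/*
 * Usage sketch (not part of the original header): the typical caller
 * pattern for the interfaces listed above. The helper and its
 * PTE-rewriting step are hypothetical; only the final flush call is
 * from this file.
 */
#if 0	/* illustration only */
static void example_change_protection(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
{
	/* ... modify the page tables for [start, end) here ... */

	/*
	 * Stale translations may still be cached; flush the range so the
	 * CPU refetches the updated PTEs.
	 */
	flush_tlb_range(vma, start, end);
}
#endif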

#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long va)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
	percpu_write(cpu_tlbstate.state, 0);
	percpu_write(cpu_tlbstate.active_mm, &init_mm);
}
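
/*
 * Sketch (not part of the original header): what the lazy-TLB state is
 * for. A CPU that switches to a kernel thread keeps the previous mm
 * mapped and marks itself TLBSTATE_LAZY, so the remote-flush path can
 * skip it or drop it from the flush mask on its first flush IPI. The
 * helper name below is hypothetical.
 */
#if 0	/* illustration only */
static inline void example_enter_lazy_tlb(struct mm_struct *prev_mm)
{
	percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
	percpu_write(cpu_tlbstate.active_mm, prev_mm);
}
#endif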

#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
#endif

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
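
/*
 * Usage sketch (not part of the original header): flushing kernel
 * addresses after tearing down a vmalloc-style mapping. The helper is
 * hypothetical; note that on this kernel the range call degrades to a
 * full flush anyway, as seen above.
 */
#if 0	/* illustration only */
static void example_unmap_kernel_range(unsigned long start, unsigned long end)
{
	/* ... clear the kernel PTEs for [start, end) here ... */
	flush_tlb_kernel_range(start, end);
}
#endif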

#endif /* _ASM_X86_TLBFLUSH_H */