#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
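
/*
 * Layout note: desc above matches the architectural INVPCID descriptor --
 * bits 11:0 of d[0] hold the PCID and d[1] holds the linear address.
 * For example, invalidating a single mapping of `addr` tagged with
 * PCID 5 would be:
 *
 *	__invpcid(5, addr, INVPCID_TYPE_INDIV_ADDR);
 */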

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3
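
/*
 * These are the four architectural INVPCID types: invalidate one
 * address in one PCID, invalidate one PCID's non-global mappings,
 * invalidate everything including globals, and invalidate everything
 * except globals.  Each is wrapped by one helper below.
 */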

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
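
/*
 * The flush helpers further down choose between these based on CPU
 * features (X86_FEATURE_INVPCID, X86_FEATURE_INVPCID_SINGLE) and fall
 * back to CR3/CR4 writes or invlpg when INVPCID is unavailable.
 */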

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

/*
 * Declare a couple of kaiser interfaces here for convenience,
 * to avoid the need for asm/kaiser.h in unexpected places.
 */
#ifdef CONFIG_KAISER
extern void kaiser_setup_pcid(void);
extern void kaiser_flush_tlb_on_return_to_user(void);
#else
static inline void kaiser_setup_pcid(void)
{
}
static inline void kaiser_flush_tlb_on_return_to_user(void)
{
}
#endif

static inline void __native_flush_tlb(void)
{
	if (this_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all_nonglobals();
		return;
	}

	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	if (this_cpu_has(X86_FEATURE_PCID))
		kaiser_flush_tlb_on_return_to_user();
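	/*
	 * Reloading CR3 with its current value invalidates all
	 * non-global TLB entries (those of the active PCID when
	 * CR4.PCIDE=1).
	 */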
	native_write_cr3(native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global(void)
{
#ifdef CONFIG_KAISER
	/* Globals are not used at all */
	__native_flush_tlb();
#else
	unsigned long flags;
	unsigned long cr4;

	if (this_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
#endif
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	/*
	 * SIMICS #GP's if you run INVPCID with type 2/3
	 * and X86_CR4_PCIDE clear.  Shame!
	 *
	 * The ASIDs used below are hard-coded.  But, we must not
	 * call invpcid(type=1/2) before CR4.PCIDE=1.  Just call
	 * invlpg in the case we are called early.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) {
		if (this_cpu_has(X86_FEATURE_PCID))
			kaiser_flush_tlb_on_return_to_user();
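		/*
		 * invlpg needs no PCID support: it drops the TLB entry
		 * for this one linear address (global or not) in the
		 * current context.
		 */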
		asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
		return;
	}
	/* Flush the address out of both PCIDs. */
	/*
	 * An optimization here might be to determine addresses
	 * that are only kernel-mapped and only flush the kernel
	 * ASID.  But, userspace flushes are probably much more
	 * important performance-wise.
	 *
	 * Make sure to do only a single invpcid when KAISER is
	 * disabled and we have only a single ASID.
	 */
	if (X86_CR3_PCID_ASID_KERN != X86_CR3_PCID_ASID_USER)
		invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr);
	invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr);
}

static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();

	/*
	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
	 * we'd end up flushing kernel translations for the current ASID but
	 * we might fail to flush kernel translations for other cached ASIDs.
	 *
	 * To avoid this issue, we force PCID off if PGE is off.
	 */
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}
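
/*
 * Sentinel "address" meaning "flush everything" rather than one page,
 * sized to the native word width:
 */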
#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL	0xffffffff
#else
# define TLB_FLUSH_ALL	-1ULL
#endif

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

#define local_flush_tlb() __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb() flush_tlb_current_task()
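
/*
 * There is no ranged-invalidation primitive here (see the note above
 * on i386's limited flushing), so a range flush degrades to flushing
 * the whole mm:
 */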
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va);
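
/*
 * Per-cpu TLB state: TLBSTATE_OK means this CPU is actively using an
 * mm and must honor flush requests; TLBSTATE_LAZY means it is in lazy
 * TLB mode (e.g. running a kernel thread) and can leave the borrowed
 * mm instead of servicing further flushes.
 */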
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};

DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
	percpu_write(cpu_tlbstate.state, 0);
	percpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
#endif

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ASM_X86_TLBFLUSH_H */