#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
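/*
 * Per-CPU TLB state: the mm this CPU's page tables currently belong to
 * (active_mm) and whether the CPU is up to date (TLBSTATE_OK) or running
 * lazily on a borrowed mm (TLBSTATE_LAZY, see leave_mm() below).
 */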
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };
/*
 *	TLB flushing, formerly SMP-only
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future when interrupts are split into per-CPU domains this could be
 *	fixed, at the cost of triggering multiple IPIs in some cases.
 */
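/*
 * When there are more CPUs than vectors, several CPUs end up sharing a
 * flush_state[] slot; the sender side then serializes on that slot's
 * tlbstate_lock (see flush_tlb_others_ipi() below). The per-node
 * distribution of vectors is done by calculate_tlb_offset().
 */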
union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		raw_spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;
/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
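/*
 * flush_state[] holds the per-vector "mailboxes" written by the sender in
 * flush_tlb_others_ipi() and read by the receivers in
 * smp_invalidate_interrupt(). tlb_vector_offset is each CPU's preferred
 * slot, set up by calculate_tlb_offset() at boot and on CPU hotplug.
 */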
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpumask_clear_cpu(cpu,
			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
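/*
 * switch_mm() above is only the irq-safe wrapper; switch_mm_irqs_off()
 * does the real work and carries the ordering requirements documented
 * inside it.
 */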
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask. (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd. TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_cr3(next->pgd);
		/* stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	} else {
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
}
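/*
 * Note: the serializing load_cr3() calls above pair with the implicit or
 * explicit full barriers that flush_tlb_current_task(), flush_tlb_mm() and
 * flush_tlb_page() below execute before reading mm_cpumask(); see the
 * "Synchronize with switch_mm" comments in those functions.
 */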
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 *
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */
/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */
/*
 * FIXME: use of asmlinkage is not consistent. On x86_64 it's a no-op
 * but it is still used for documentation purposes, and the usage is
 * slightly inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt
 * entry calls in with the first parameter in %eax. Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];
	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */
	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	/* Clearing our bit releases the sender spinning in flush_tlb_others_ipi() */
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
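/*
 * Sender side of the handshake above: pick this CPU's flush vector, fill
 * in flush_mm/flush_va, send INVALIDATE_TLB_VECTOR_START + sender to the
 * target CPUs, then spin until every target has cleared its bit in
 * flush_cpumask from smp_invalidate_interrupt().
 */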
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long va)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			      INVALIDATE_TLB_VECTOR_START + sender);

		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_va = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}
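/*
 * Worked example for the vector spreading below (illustration only):
 * with NUM_INVALIDATE_TLB_VECTORS == 8 and two online nodes, each node
 * gets nr_node_vecs == 4 vectors; node 0's CPUs get offsets 0,1,2,3,0,...
 * and node 1's CPUs get 4,5,6,7,4,... With more online nodes than
 * vectors, every node falls back to a single vector (nr_node_vecs == 1).
 */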
static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, nr_node_vecs, idx = 0;
	/*
	 * We are changing tlb_vector_offset for each CPU at runtime, but this
	 * will not cause inconsistency, as the write is atomic under x86. We
	 * might see more lock contention in a short time, but after all CPUs'
	 * tlb_vector_offset are changed, everything should go back to normal.
	 *
	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we might
	 * waste some vectors.
	 */
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		nr_node_vecs = 1;
	else
		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
	for_each_online_node(node) {
		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;

		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = node_offset +
				cpu_offset;
			cpu_offset++;
			cpu_offset = cpu_offset % nr_node_vecs;
		}
		idx++;
	}
}
static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	switch (action & 0xf) {
	case CPU_ONLINE:
	case CPU_DEAD:
		calculate_tlb_offset();
	}
	return NOTIFY_OK;
}
static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		raw_spin_lock_init(&flush_state[i].tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);
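/*
 * The entry points below are what the generic mm code calls. Each one
 * executes a full barrier (an implicit one from the local flush, or an
 * explicit smp_mb()) that pairs with switch_mm_irqs_off(), and then hands
 * the remaining CPUs in mm_cpumask() to flush_tlb_others().
 */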
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
	preempt_enable();
}
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * This is an implicit full barrier (MOV to CR) that
			 * synchronizes with switch_mm.
			 */
			local_flush_tlb();
		} else {
			leave_mm(smp_processor_id());
			/* Synchronize with switch_mm. */
			smp_mb();
		}
	} else {
		/* Synchronize with switch_mm. */
		smp_mb();
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);

	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(va);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, va);

	preempt_enable();
}
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}