#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */
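
/*
 * Note on the !CONFIG_PARAVIRT path above: asm-generic/mm_hooks.h
 * supplies empty arch_dup_mmap()/arch_exit_mmap() stubs, and the no-op
 * paravirt_activate_mm() stands in for the paravirt hook.  With
 * CONFIG_PARAVIRT enabled, asm/paravirt.h provides the real
 * implementations instead.
 */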
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};
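
/*
 * Note: 'size' counts descriptors, not bytes; the 'entries' allocation
 * is size * LDT_ENTRY_SIZE bytes.  Readers must follow the protocol in
 * load_mm_ldt() below rather than dereferencing mm->context.ldt
 * directly, since a concurrent modify_ldt() can swap in a new struct.
 */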
static inline void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* smp_read_barrier_depends synchronizes with barrier in install_ldt */
	ldt = ACCESS_ONCE(mm->context.ldt);
	smp_read_barrier_depends();

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();

	DEBUG_LOCKS_WARN_ON(preemptible());
}
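
/*
 * For context, a minimal sketch of the write side that the barrier in
 * load_mm_ldt() pairs with (simplified, not the exact code; see
 * install_ldt() in arch/x86/kernel/ldt.c):
 *
 *	barrier();	(orders ldt_struct init before publication)
 *	ACCESS_ONCE(mm->context.ldt) = new_ldt;
 *	(then IPI all CPUs running this mm, so each reloads the LDT
 *	 before the old ldt_struct can be freed)
 */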
/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
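
/*
 * Lazy TLB, in brief: when a kernel thread is scheduled, we keep the
 * previous task's page tables loaded and mark this CPU TLBSTATE_LAZY
 * above.  A later flush IPI sees the lazy state and calls leave_mm(),
 * which drops this CPU from mm_cpumask(), so further flush IPIs for
 * that mm skip us entirely.
 */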
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));
		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_cr3(next->pgd);

		/* stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/*
		 * Load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
	}
#ifdef CONFIG_SMP
	else {
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
			/*
			 * We were in lazy TLB mode and leave_mm() disabled
			 * TLB flush IPI delivery.  We must reload CR3 to
			 * make sure we don't use freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			load_mm_ldt(next);
		}
	}
#endif
}
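
/*
 * Caller context for switch_mm(): the scheduler invokes it from
 * context_switch() with IRQs disabled, and activate_mm() below invokes
 * it on the exec path.  Either way preemption is off, which is what the
 * DEBUG_LOCKS_WARN_ON(preemptible()) in load_mm_ldt() checks.
 */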
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
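
/*
 * deactivate_mm() runs on the exec path: 32-bit clears gs (where user
 * TLS segments live), 64-bit clears both the gs and fs selectors, so
 * the new program does not start with stale segment state from the old
 * image.
 */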
#endif /* _ASM_X86_MMU_CONTEXT_H */