x86/mm: Disable preemption during CR3 read+write
[pandora-kernel.git] / arch/x86/include/asm/tlbflush.h
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}
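/*
 * Editor's note (illustrative sketch, not part of the upstream file): the
 * preempt_disable()/preempt_enable() pair above closes the following race
 * for a kernel thread running on a borrowed mm (current->mm == NULL):
 *
 *	cr3 = native_read_cr3();
 *		<-- preempted here; a task switch can change the borrowed
 *		    mm and load a different CR3
 *	native_write_cr3(cr3);		<-- would reload the stale CR3
 *
 * Keeping preemption off makes the CR3 read and write atomic with respect
 * to the scheduler.
 */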

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;
	unsigned long cr4;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}
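/*
 * Editor's note (sketch): a plain CR3 reload does not evict TLB entries
 * marked global (PTE.G), so __native_flush_tlb_global() toggles CR4.PGE
 * instead:
 *
 *	native_write_cr4(cr4 & ~X86_CR4_PGE);	- PGE 1->0 flushes everything
 *	native_write_cr4(cr4);			- then global pages are re-enabled
 *
 * Interrupts stay off for the whole sequence so no other CR4 update can
 * slip in between the two writes.
 */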

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}
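/*
 * Illustrative example only (helper name invented by the editor): a caller
 * that has just rewritten a single kernel PTE on this CPU, e.g. a fixmap
 * slot, would evict the stale local translation with __flush_tlb_one():
 */
static inline void example_update_local_mapping(unsigned long vaddr)
{
	/* ... install the new PTE for vaddr here ... */
	__flush_tlb_one(vaddr);	/* INVLPG if available, else a full flush */
}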

#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL	0xffffffff
#else
# define TLB_FLUSH_ALL	-1ULL
#endif
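/*
 * Editor's note: TLB_FLUSH_ALL is an out-of-band sentinel for the 'va'
 * argument (e.g. to flush_tlb_others()) meaning "flush the whole TLB for
 * this mm" rather than a single page.
 */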

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. Might be worth trying if for a small
 * range a few INVLPGs in a row are a win.
 */
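/*
 * Editor's sketch (not part of the original header): a typical user of this
 * interface, after changing a single user-space PTE, pairs the page-table
 * update with a page-granular flush:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *	flush_tlb_page(vma, addr);
 *
 * whereas a change to kernel mappings (e.g. vmalloc/vfree) pairs with
 * flush_tlb_kernel_range(start, end).
 */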

#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long va)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
	percpu_write(cpu_tlbstate.state, 0);
	percpu_write(cpu_tlbstate.active_mm, &init_mm);
}
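/*
 * Editor's sketch (assumption about the companion code in arch/x86/mm/tlb.c,
 * not part of this header): the remote-flush IPI handler consults
 * cpu_tlbstate roughly like this:
 *
 *	if (percpu_read(cpu_tlbstate.active_mm) == mm) {
 *		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 *			local_flush_tlb();	(or one INVLPG for a single va)
 *		else
 *			leave_mm(cpu);		(lazy mode: drop the mm instead of flushing)
 *	}
 */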

#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
#endif

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
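/*
 * Illustrative example only (function name invented by the editor): kernel
 * range flushes currently just do a full flush on every CPU, so a caller
 * tearing down kernel mappings would use the helper like this:
 */
static inline void example_unmap_kernel_range(unsigned long start,
					      unsigned long end)
{
	/* ... clear the kernel page-table entries for [start, end) ... */
	flush_tlb_kernel_range(start, end);
}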

#endif /* _ASM_X86_TLBFLUSH_H */