arch/x86/kernel/paravirt.c
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>

/* nop stub */
void _paravirt_nop(void)
{
}

/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
{
        return x;
}

u64 _paravirt_ident_64(u64 x)
{
        return x;
}

void __init default_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
}

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[], end_##ops##_##name[]; \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

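/*
 * For illustration (a hedged sketch, not code from this file): the
 * per-arch patch files use DEF_NATIVE to capture the raw native
 * instruction bytes for an op, roughly like
 *
 *      DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *
 * which emits the bytes for "cli" bracketed by the symbols
 * start_pv_irq_ops_irq_disable / end_pv_irq_ops_irq_disable, so that
 * paravirt_patch_insns() below can copy them over the call site.
 */
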
/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

unsigned paravirt_patch_nop(void)
{
        return 0;
}

unsigned paravirt_patch_ignore(unsigned len)
{
        return len;
}

struct branch {
        unsigned char opcode;
        u32 delta;
} __attribute__((packed));

unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (tgt_clobbers & ~site_clobbers)
                return len;     /* target would clobber too much for this site */
        if (len < 5)
                return len;     /* call too long for patch site */

        b->opcode = 0xe8; /* call */
        b->delta = delta;
        BUILD_BUG_ON(sizeof(*b) != 5);

        return 5;
}

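/*
 * What paravirt_patch_call() above emits, concretely: a 5-byte direct
 * near call, 0xe8 + rel32, with rel32 taken relative to the end of
 * the instruction (addr + 5).  E.g. (addresses hypothetical) a site
 * at 0x1000 calling a target at 0x2000 gets
 * delta = 0x2000 - 0x1005 = 0xffb, i.e. the bytes e8 fb 0f 00 00.
 */
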
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (len < 5)
                return len;     /* jmp too long for patch site */

        b->opcode = 0xe9;       /* jmp */
        b->delta = delta;

        return 5;
}

/* Neat trick to map patch type back to the call within the
 * corresponding structure. */
static void *get_call_destination(u8 type)
{
        struct paravirt_patch_template tmpl = {
                .pv_init_ops = pv_init_ops,
                .pv_time_ops = pv_time_ops,
                .pv_cpu_ops = pv_cpu_ops,
                .pv_irq_ops = pv_irq_ops,
                .pv_apic_ops = pv_apic_ops,
                .pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
                .pv_lock_ops = pv_lock_ops,
#endif
        };
        return *((void **)&tmpl + type);
}

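/*
 * Why the trick works: struct paravirt_patch_template mirrors the
 * pv_*_ops structs laid out back to back, and each patch-site type
 * is the pointer-sized index of its op within that template;
 * paravirt_types.h derives it roughly as
 *
 *      #define PARAVIRT_PATCH(x) \
 *              (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
 *
 * so indexing the local template copy by type yields the function
 * pointer currently installed for that op.
 */
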
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len)
{
        void *opfunc = get_call_destination(type);
        unsigned ret;

        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
        else if (opfunc == _paravirt_nop)
                /* If the operation is a nop, then nop the callsite */
                ret = paravirt_patch_nop();

        /* identity functions just return their single argument */
        else if (opfunc == _paravirt_ident_32)
                ret = paravirt_patch_ident_32(insnbuf, len);
        else if (opfunc == _paravirt_ident_64)
                ret = paravirt_patch_ident_64(insnbuf, len);

        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
                /* If operation requires a jmp, then jmp */
                ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
        else
                /* Otherwise call the function; assume target could
                   clobber any caller-save reg */
                ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
                                          addr, clobbers, len);

        return ret;
}

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end)
{
        unsigned insn_len = end - start;

        if (insn_len > len || start == NULL)
                insn_len = len;
        else
                memcpy(insnbuf, start, insn_len);

        return insn_len;
}

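/*
 * How this slots together (a sketch, not code from this file): the
 * per-arch native_patch() matches known site types against their
 * DEF_NATIVE snippets and falls back to paravirt_patch_default(),
 * along the lines of
 *
 *      case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
 *              return paravirt_patch_insns(insnbuf, len,
 *                              start_pv_irq_ops_irq_disable,
 *                              end_pv_irq_ops_irq_disable);
 */
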
static void native_flush_tlb(void)
{
        __native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
        __native_flush_tlb_global();
}

static void native_flush_tlb_single(unsigned long addr)
{
        __native_flush_tlb_single(addr);
}

struct jump_label_key paravirt_steal_enabled;
struct jump_label_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
        return 0;
}

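/*
 * steal_clock reports per-cpu time stolen by the hypervisor (in ns);
 * bare hardware steals nothing, hence the constant 0.  A guest port
 * installs its own hook during setup, e.g. (illustrative) the KVM
 * guest code does roughly
 *
 *      pv_time_ops.steal_clock = kvm_steal_clock;
 */
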
/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);
extern void native_usergs_sysret32(void);
extern void native_usergs_sysret64(void);

static struct resource reserve_ioports = {
        .start = 0,
        .end = IO_SPACE_LIMIT,
        .name = "paravirt-ioport",
        .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
        return request_resource(&ioport_resource, &reserve_ioports);
}

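/*
 * Usage (an illustrative sketch): a guest with no legacy ISA devices
 * calls this from its early platform setup, before any driver probes
 * run, and only warns on failure:
 *
 *      if (paravirt_disable_iospace())
 *              printk(KERN_WARNING "paravirt: could not reserve IO space\n");
 */
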
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(percpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

        percpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(percpu_read(paravirt_lazy_mode) != mode);

        percpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
        enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
        leave_lazy(PARAVIRT_LAZY_MMU);
}

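/*
 * Typical lazy-MMU batching pattern (a minimal sketch; names as in
 * the generic pagetable API): bracket a run of updates so a
 * hypervisor back end can queue them and flush once on leave,
 * instead of trapping on every set_pte:
 *
 *      arch_enter_lazy_mmu_mode();
 *      for (; addr < end; addr += PAGE_SIZE, pte++)
 *              set_pte_at(mm, addr, pte, pfn_pte(pfn++, prot));
 *      arch_leave_lazy_mmu_mode();
 */
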
void paravirt_start_context_switch(struct task_struct *prev)
{
        BUG_ON(preemptible());

        if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
        }
        enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
        BUG_ON(preemptible());

        leave_lazy(PARAVIRT_LAZY_CPU);

        if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
                arch_enter_lazy_mmu_mode();
}

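/*
 * Note the handshake between the two halves above: if the outgoing
 * task was inside a lazy-MMU section, start_context_switch() leaves
 * that mode (flushing any batched updates) and records the fact in
 * the task's thread flags; end_context_switch() then re-enters lazy
 * MMU mode on the incoming task so its batching resumes seamlessly.
 */
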
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
        if (in_interrupt())
                return PARAVIRT_LAZY_NONE;

        return percpu_read(paravirt_lazy_mode);
}

void arch_flush_lazy_mmu_mode(void)
{
        preempt_disable();

        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }

        preempt_enable();
}

struct pv_info pv_info = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
        .kernel_rpl = 0,
        .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */

#ifdef CONFIG_X86_64
        .extra_user_64bit_cs = __USER_CS,
#endif
};

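/*
 * pv_info is the guest's self-description; a hypervisor port
 * overwrites these fields during early boot.  Generic code tests it
 * through helpers in paravirt.h along the lines of
 *
 *      #define paravirt_enabled()      (pv_info.paravirt_enabled)
 */
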
struct pv_init_ops pv_init_ops = {
        .patch = native_patch,
};

struct pv_time_ops pv_time_ops = {
        .sched_clock = native_sched_clock,
        .steal_clock = native_steal_clock,
};

struct pv_irq_ops pv_irq_ops = {
        .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
        .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
        .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
        .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .safe_halt = native_safe_halt,
        .halt = native_halt,
#ifdef CONFIG_X86_64
        .adjust_exception_frame = paravirt_nop,
#endif
};

struct pv_cpu_ops pv_cpu_ops = {
        .cpuid = native_cpuid,
        .get_debugreg = native_get_debugreg,
        .set_debugreg = native_set_debugreg,
        .clts = native_clts,
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
        .read_cr8 = native_read_cr8,
        .write_cr8 = native_write_cr8,
#endif
        .wbinvd = native_wbinvd,
        .read_msr = native_read_msr_safe,
        .rdmsr_regs = native_rdmsr_safe_regs,
        .write_msr = native_write_msr_safe,
        .wrmsr_regs = native_wrmsr_safe_regs,
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
        .read_tscp = native_read_tscp,
        .load_tr_desc = native_load_tr_desc,
        .set_ldt = native_set_ldt,
        .load_gdt = native_load_gdt,
        .load_idt = native_load_idt,
        .store_gdt = native_store_gdt,
        .store_idt = native_store_idt,
        .store_tr = native_store_tr,
        .load_tls = native_load_tls,
#ifdef CONFIG_X86_64
        .load_gs_index = native_load_gs_index,
#endif
        .write_ldt_entry = native_write_ldt_entry,
        .write_gdt_entry = native_write_gdt_entry,
        .write_idt_entry = native_write_idt_entry,

        .alloc_ldt = paravirt_nop,
        .free_ldt = paravirt_nop,

        .load_sp0 = native_load_sp0,

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
        .irq_enable_sysexit = native_irq_enable_sysexit,
#endif
#ifdef CONFIG_X86_64
#ifdef CONFIG_IA32_EMULATION
        .usergs_sysret32 = native_usergs_sysret32,
#endif
        .usergs_sysret64 = native_usergs_sysret64,
#endif
        .iret = native_iret,
        .swapgs = native_swapgs,

        .set_iopl_mask = native_set_iopl_mask,
        .io_delay = native_io_delay,

        .start_context_switch = paravirt_nop,
        .end_context_switch = paravirt_nop,
};

struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
        .startup_ipi_hook = paravirt_nop,
#endif
};

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT       __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
#else
/* 64-bit pagetable entries */
#define PTE_IDENT       __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif

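/*
 * On bare hardware pte_val/make_pte and friends are pure identity
 * conversions, so binding them to _paravirt_ident_32/64 lets
 * paravirt_patch_default() recognize them by address and shrink the
 * call site to (on 64-bit, roughly) a single "mov %rdi, %rax",
 * making them effectively free.
 */
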
struct pv_mmu_ops pv_mmu_ops = {

        .read_cr2 = native_read_cr2,
        .write_cr2 = native_write_cr2,
        .read_cr3 = native_read_cr3,
        .write_cr3 = native_write_cr3,

        .flush_tlb_user = native_flush_tlb,
        .flush_tlb_kernel = native_flush_tlb_global,
        .flush_tlb_single = native_flush_tlb_single,
        .flush_tlb_others = native_flush_tlb_others,

        .pgd_alloc = __paravirt_pgd_alloc,
        .pgd_free = paravirt_nop,

        .alloc_pte = paravirt_nop,
        .alloc_pmd = paravirt_nop,
        .alloc_pud = paravirt_nop,
        .release_pte = paravirt_nop,
        .release_pmd = paravirt_nop,
        .release_pud = paravirt_nop,

        .set_pte = native_set_pte,
        .set_pte_at = native_set_pte_at,
        .set_pmd = native_set_pmd,
        .set_pmd_at = native_set_pmd_at,
        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,
        .pmd_update = paravirt_nop,
        .pmd_update_defer = paravirt_nop,

        .ptep_modify_prot_start = __ptep_modify_prot_start,
        .ptep_modify_prot_commit = __ptep_modify_prot_commit,

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        .set_pte_atomic = native_set_pte_atomic,
        .pte_clear = native_pte_clear,
        .pmd_clear = native_pmd_clear,
#endif
        .set_pud = native_set_pud,

        .pmd_val = PTE_IDENT,
        .make_pmd = PTE_IDENT,

#if PAGETABLE_LEVELS == 4
        .pud_val = PTE_IDENT,
        .make_pud = PTE_IDENT,

        .set_pgd = native_set_pgd,
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

        .pte_val = PTE_IDENT,
        .pgd_val = PTE_IDENT,

        .make_pte = PTE_IDENT,
        .make_pgd = PTE_IDENT,

        .dup_mmap = paravirt_nop,
        .exit_mmap = paravirt_nop,
        .activate_mm = paravirt_nop,

        .lazy_mode = {
                .enter = paravirt_nop,
                .leave = paravirt_nop,
        },

        .set_fixmap = native_set_fixmap,
};

EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL    (pv_cpu_ops);
EXPORT_SYMBOL    (pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_apic_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL    (pv_irq_ops);