arch/x86/include/asm/paravirt.h
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

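/*
 * The PVOP_CALL*() / PVOP_VCALL*() wrappers used throughout this file
 * come from asm/paravirt_types.h: each one emits an indirect call
 * through the named pv_*_ops slot at a patchable site, so the common
 * (native) cases can be replaced with inline code at boot.
 */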
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP                      pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

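/*
 * Unlike raw_safe_halt(), halt() does not enable interrupts before
 * halting; it goes through the dedicated halt hook (a plain "hlt" on
 * native hardware).
 */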
static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})

#define rdmsr_safe_regs(regs)   paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)   paravirt_wrmsr_regs(regs)
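
/*
 * Illustrative use of the error-checking variants (MSR constant from
 * asm/msr-index.h):
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_IA32_APERF, &lo, &hi) == 0)
 *		pr_info("APERF: 0x%08x%08x\n", hi, lo);
 */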

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
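/*
 * The *_amd_safe() variants below go through the full-GPR MSR ops:
 * gprs[1] carries the MSR number, gprs[0]/gprs[2] the low/high value
 * halves, and gprs[7] the 0x9c5a203a password that AMD CPUs require
 * for these protected accesses.
 */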
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = paravirt_rdmsr_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return paravirt_wrmsr_regs(gprs);
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())
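
/*
 * Illustrative use of the TSC wrappers (do_work() is a placeholder):
 *
 *	unsigned long long t0, t1;
 *
 *	rdtscll(t0);
 *	do_work();
 *	rdtscll(t1);
 *	pr_debug("took %llu cycles\n", t1 - t0);
 */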

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        int __aux;                                      \
        unsigned long __val = paravirt_rdtscp(&__aux);  \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned long __aux;                            \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
                                            unsigned long start, unsigned long count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}
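
/*
 * With 32-bit PAE, pteval_t is 64 bits while a long is 32, so page
 * table values are passed to the ops as two register-sized halves;
 * everywhere else the sizeof() test folds to the single-argument call
 * at compile time.
 */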

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}
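
/*
 * With 32-bit PAE, a 64-bit pte plus (mm, addr, ptep) needs five
 * argument words, more than the PVOP_*CALL macros can pass, so those
 * ops are invoked directly in that case (above and in set_pte_at()
 * below).
 */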

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
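/*
 * These hooks replace the ticket-lock fast paths so a hypervisor-aware
 * implementation (e.g. one that yields the VCPU instead of spinning)
 * can be substituted at run time.
 */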

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/*
 * We save some registers, but not all of them; that would be too
 * much.  We clobber all caller-saved registers except the argument
 * register.
 */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer registers than normal are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
        static void *__##func##__ __used = func;                        \
                                                                        \
        asm(".pushsection .text;"                                       \
            "__raw_callee_save_" #func ": "                             \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            "ret;"                                                      \
            ".popsection")
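
/*
 * Illustrative use (my_save_fl() is a hypothetical C implementation
 * of the save_fl hook):
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *	...
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */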

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })

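/*
 * The irq-flag helpers below open-code the patch site instead of using
 * PVOP_CALL so that only %eax/%rax is clobbered: the underlying ops
 * are callee-save thunks, which keeps interrupt enable/disable cheap
 * on hot paths.
 */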
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection
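
/*
 * Each _PVSITE expands to the native instruction sequence plus a
 * .parainstructions record (site address, op type, length, clobber
 * set) that apply_paravirt() uses to patch the site at boot.
 */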


#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif
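
/*
 * A register is pushed/popped around the call only when its bit is
 * clear in the clobber set, i.e. when the call site needs it to be
 * preserved.
 */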

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

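/* Leaves the faulting address in %rcx and zeroes %rax (read_cr2
 * returns its result in %rax). */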
#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */