arch/x86/include/asm/paravirt.h [pandora-kernel.git]
1 #ifndef _ASM_X86_PARAVIRT_H
2 #define _ASM_X86_PARAVIRT_H
3 /* Various instructions on x86 need to be replaced for
4  * para-virtualization: those hooks are defined here. */
5
6 #ifdef CONFIG_PARAVIRT
7 #include <asm/pgtable_types.h>
8 #include <asm/asm.h>
9
10 /* Bitmask of what can be clobbered: usually at least eax. */
11 #define CLBR_NONE 0
12 #define CLBR_EAX  (1 << 0)
13 #define CLBR_ECX  (1 << 1)
14 #define CLBR_EDX  (1 << 2)
15 #define CLBR_EDI  (1 << 3)
16
17 #ifdef CONFIG_X86_32
18 /* CLBR_ANY should match all the registers the platform has. For i386, that's all of them. */
19 #define CLBR_ANY  ((1 << 4) - 1)
20
21 #define CLBR_ARG_REGS   (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22 #define CLBR_RET_REG    (CLBR_EAX | CLBR_EDX)
23 #define CLBR_SCRATCH    (0)
24 #else
25 #define CLBR_RAX  CLBR_EAX
26 #define CLBR_RCX  CLBR_ECX
27 #define CLBR_RDX  CLBR_EDX
28 #define CLBR_RDI  CLBR_EDI
29 #define CLBR_RSI  (1 << 4)
30 #define CLBR_R8   (1 << 5)
31 #define CLBR_R9   (1 << 6)
32 #define CLBR_R10  (1 << 7)
33 #define CLBR_R11  (1 << 8)
34
35 #define CLBR_ANY  ((1 << 9) - 1)
36
37 #define CLBR_ARG_REGS   (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
38                          CLBR_RCX | CLBR_R8 | CLBR_R9)
39 #define CLBR_RET_REG    (CLBR_RAX)
40 #define CLBR_SCRATCH    (CLBR_R10 | CLBR_R11)
41
42 #include <asm/desc_defs.h>
43 #endif /* X86_64 */
44
45 #define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
46
47 #ifndef __ASSEMBLY__
48 #include <linux/types.h>
49 #include <linux/cpumask.h>
50 #include <asm/kmap_types.h>
51 #include <asm/desc_defs.h>
52
53 struct page;
54 struct thread_struct;
55 struct desc_ptr;
56 struct tss_struct;
57 struct mm_struct;
58 struct desc_struct;
59
60 /*
61  * Wrapper type for pointers to code which uses the non-standard
62  * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
63  */
64 struct paravirt_callee_save {
65         void *func;
66 };
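/*
 * Illustrative sketch (not part of this header): a backend typically
 * fills such a slot with a thunk generated by PV_CALLEE_SAVE_REGS_THUNK()
 * (see the end of this file), e.g. something along the lines of
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(native_save_fl);
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(native_save_fl);
 *
 * where PV_CALLEE_SAVE() is assumed to package the generated
 * __raw_callee_save_ variant into a struct paravirt_callee_save.
 */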
67
68 /* general info */
69 struct pv_info {
70         unsigned int kernel_rpl;
71         int shared_kernel_pmd;
72         int paravirt_enabled;
73         const char *name;
74 };
75
76 struct pv_init_ops {
77         /*
78          * Patch may replace one of the defined code sequences with
79          * arbitrary code, subject to the same register constraints.
80          * This generally means the code is not free to clobber any
81          * registers other than EAX.  The patch function should return
82          * the number of bytes of code generated, as we nop pad the
83          * rest in generic code.
84          */
85         unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
86                           unsigned long addr, unsigned len);
87
88         /* Basic arch-specific setup */
89         void (*arch_setup)(void);
90         char *(*memory_setup)(void);
91         void (*post_allocator_init)(void);
92
93         /* Print a banner to identify the environment */
94         void (*banner)(void);
95 };
96
97
98 struct pv_lazy_ops {
99         /* Set deferred update mode, used for batching operations. */
100         void (*enter)(void);
101         void (*leave)(void);
102 };
103
104 struct pv_time_ops {
105         void (*time_init)(void);
106
107         /* Get and set the time of day */
108         unsigned long (*get_wallclock)(void);
109         int (*set_wallclock)(unsigned long);
110
111         unsigned long long (*sched_clock)(void);
112         unsigned long (*get_tsc_khz)(void);
113 };
114
115 struct pv_cpu_ops {
116         /* hooks for various privileged instructions */
117         unsigned long (*get_debugreg)(int regno);
118         void (*set_debugreg)(int regno, unsigned long value);
119
120         void (*clts)(void);
121
122         unsigned long (*read_cr0)(void);
123         void (*write_cr0)(unsigned long);
124
125         unsigned long (*read_cr4_safe)(void);
126         unsigned long (*read_cr4)(void);
127         void (*write_cr4)(unsigned long);
128
129 #ifdef CONFIG_X86_64
130         unsigned long (*read_cr8)(void);
131         void (*write_cr8)(unsigned long);
132 #endif
133
134         /* Segment descriptor handling */
135         void (*load_tr_desc)(void);
136         void (*load_gdt)(const struct desc_ptr *);
137         void (*load_idt)(const struct desc_ptr *);
138         void (*store_gdt)(struct desc_ptr *);
139         void (*store_idt)(struct desc_ptr *);
140         void (*set_ldt)(const void *desc, unsigned entries);
141         unsigned long (*store_tr)(void);
142         void (*load_tls)(struct thread_struct *t, unsigned int cpu);
143 #ifdef CONFIG_X86_64
144         void (*load_gs_index)(unsigned int idx);
145 #endif
146         void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
147                                 const void *desc);
148         void (*write_gdt_entry)(struct desc_struct *,
149                                 int entrynum, const void *desc, int size);
150         void (*write_idt_entry)(gate_desc *,
151                                 int entrynum, const gate_desc *gate);
152         void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
153         void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
154
155         void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
156
157         void (*set_iopl_mask)(unsigned mask);
158
159         void (*wbinvd)(void);
160         void (*io_delay)(void);
161
162         /* cpuid emulation, mostly so that caps bits can be disabled */
163         void (*cpuid)(unsigned int *eax, unsigned int *ebx,
164                       unsigned int *ecx, unsigned int *edx);
165
166         /* MSR, PMC and TSC operations.
167            read_msr sets err to 0/-EFAULT; wrmsr returns 0/-EFAULT. */
168         u64 (*read_msr_amd)(unsigned int msr, int *err);
169         u64 (*read_msr)(unsigned int msr, int *err);
170         int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
171
172         u64 (*read_tsc)(void);
173         u64 (*read_pmc)(int counter);
174         unsigned long long (*read_tscp)(unsigned int *aux);
175
176         /*
177          * Atomically enable interrupts and return to userspace.  This
178          * is only ever used to return to 32-bit processes; in a
179          * 64-bit kernel, it's used for 32-on-64 compat processes, but
180          * never native 64-bit processes.  (Jump, not call.)
181          */
182         void (*irq_enable_sysexit)(void);
183
184         /*
185          * Switch to usermode gs and return to 64-bit usermode using
186          * sysret.  Only used in 64-bit kernels to return to 64-bit
187          * processes.  Usermode register state, including %rsp, must
188          * already be restored.
189          */
190         void (*usergs_sysret64)(void);
191
192         /*
193          * Switch to usermode gs and return to 32-bit usermode using
194          * sysret.  Used to return to 32-on-64 compat processes.
195          * Other usermode register state, including %esp, must already
196          * be restored.
197          */
198         void (*usergs_sysret32)(void);
199
200         /* Normal iret.  Jump to this with the standard iret stack
201            frame set up. */
202         void (*iret)(void);
203
204         void (*swapgs)(void);
205
206         struct pv_lazy_ops lazy_mode;
207 };
208
209 struct pv_irq_ops {
210         void (*init_IRQ)(void);
211
212         /*
213          * Get/set interrupt state.  save_fl and restore_fl are only
214          * expected to use X86_EFLAGS_IF; all other bits
215          * returned from save_fl are undefined, and may be ignored by
216          * restore_fl.
217          *
218          * NOTE: the callers of these functions expect the callee to preserve
219          * more registers than the standard C calling convention requires.
220          */
221         struct paravirt_callee_save save_fl;
222         struct paravirt_callee_save restore_fl;
223         struct paravirt_callee_save irq_disable;
224         struct paravirt_callee_save irq_enable;
225
226         void (*safe_halt)(void);
227         void (*halt)(void);
228
229 #ifdef CONFIG_X86_64
230         void (*adjust_exception_frame)(void);
231 #endif
232 };
233
234 struct pv_apic_ops {
235 #ifdef CONFIG_X86_LOCAL_APIC
236         void (*setup_boot_clock)(void);
237         void (*setup_secondary_clock)(void);
238
239         void (*startup_ipi_hook)(int phys_apicid,
240                                  unsigned long start_eip,
241                                  unsigned long start_esp);
242 #endif
243 };
244
245 struct pv_mmu_ops {
246         /*
247          * Called before/after init_mm pagetable setup. setup_start
248          * may reset %cr3, and may pre-install parts of the pagetable;
249          * pagetable setup is expected to preserve any existing
250          * mapping.
251          */
252         void (*pagetable_setup_start)(pgd_t *pgd_base);
253         void (*pagetable_setup_done)(pgd_t *pgd_base);
254
255         unsigned long (*read_cr2)(void);
256         void (*write_cr2)(unsigned long);
257
258         unsigned long (*read_cr3)(void);
259         void (*write_cr3)(unsigned long);
260
261         /*
262          * Hooks for intercepting the creation/use/destruction of an
263          * mm_struct.
264          */
265         void (*activate_mm)(struct mm_struct *prev,
266                             struct mm_struct *next);
267         void (*dup_mmap)(struct mm_struct *oldmm,
268                          struct mm_struct *mm);
269         void (*exit_mmap)(struct mm_struct *mm);
270
271
272         /* TLB operations */
273         void (*flush_tlb_user)(void);
274         void (*flush_tlb_kernel)(void);
275         void (*flush_tlb_single)(unsigned long addr);
276         void (*flush_tlb_others)(const struct cpumask *cpus,
277                                  struct mm_struct *mm,
278                                  unsigned long va);
279
280         /* Hooks for allocating and freeing a pagetable top-level */
281         int  (*pgd_alloc)(struct mm_struct *mm);
282         void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
283
284         /*
285          * Hooks for allocating/releasing pagetable pages when they're
286          * attached to a pagetable
287          */
288         void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
289         void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
290         void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
291         void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
292         void (*release_pte)(unsigned long pfn);
293         void (*release_pmd)(unsigned long pfn);
294         void (*release_pud)(unsigned long pfn);
295
296         /* Pagetable manipulation functions */
297         void (*set_pte)(pte_t *ptep, pte_t pteval);
298         void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
299                            pte_t *ptep, pte_t pteval);
300         void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
301         void (*pte_update)(struct mm_struct *mm, unsigned long addr,
302                            pte_t *ptep);
303         void (*pte_update_defer)(struct mm_struct *mm,
304                                  unsigned long addr, pte_t *ptep);
305
306         pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
307                                         pte_t *ptep);
308         void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
309                                         pte_t *ptep, pte_t pte);
310
311         struct paravirt_callee_save pte_val;
312         struct paravirt_callee_save make_pte;
313
314         struct paravirt_callee_save pgd_val;
315         struct paravirt_callee_save make_pgd;
316
317 #if PAGETABLE_LEVELS >= 3
318 #ifdef CONFIG_X86_PAE
319         void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
320         void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
321                           pte_t *ptep);
322         void (*pmd_clear)(pmd_t *pmdp);
323
324 #endif  /* CONFIG_X86_PAE */
325
326         void (*set_pud)(pud_t *pudp, pud_t pudval);
327
328         struct paravirt_callee_save pmd_val;
329         struct paravirt_callee_save make_pmd;
330
331 #if PAGETABLE_LEVELS == 4
332         struct paravirt_callee_save pud_val;
333         struct paravirt_callee_save make_pud;
334
335         void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
336 #endif  /* PAGETABLE_LEVELS == 4 */
337 #endif  /* PAGETABLE_LEVELS >= 3 */
338
339 #ifdef CONFIG_HIGHPTE
340         void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
341 #endif
342
343         struct pv_lazy_ops lazy_mode;
344
345         /* dom0 ops */
346
347         /* Sometimes the physical address is a pfn, and sometimes it's
348            an mfn.  We can tell which is which from the index. */
349         void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
350                            phys_addr_t phys, pgprot_t flags);
351 };
352
353 struct raw_spinlock;
354 struct pv_lock_ops {
355         int (*spin_is_locked)(struct raw_spinlock *lock);
356         int (*spin_is_contended)(struct raw_spinlock *lock);
357         void (*spin_lock)(struct raw_spinlock *lock);
358         void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
359         int (*spin_trylock)(struct raw_spinlock *lock);
360         void (*spin_unlock)(struct raw_spinlock *lock);
361 };
362
363 /* This contains all the paravirt structures: each function gets a
364  * convenient number from its offset within the template, which we use
365  * to indicate what to patch. */
366 struct paravirt_patch_template {
367         struct pv_init_ops pv_init_ops;
368         struct pv_time_ops pv_time_ops;
369         struct pv_cpu_ops pv_cpu_ops;
370         struct pv_irq_ops pv_irq_ops;
371         struct pv_apic_ops pv_apic_ops;
372         struct pv_mmu_ops pv_mmu_ops;
373         struct pv_lock_ops pv_lock_ops;
374 };
375
376 extern struct pv_info pv_info;
377 extern struct pv_init_ops pv_init_ops;
378 extern struct pv_time_ops pv_time_ops;
379 extern struct pv_cpu_ops pv_cpu_ops;
380 extern struct pv_irq_ops pv_irq_ops;
381 extern struct pv_apic_ops pv_apic_ops;
382 extern struct pv_mmu_ops pv_mmu_ops;
383 extern struct pv_lock_ops pv_lock_ops;
384
385 #define PARAVIRT_PATCH(x)                                       \
386         (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
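/*
 * Worked example (illustrative): on x86_64, where sizeof(void *) == 8,
 * PARAVIRT_PATCH(pv_irq_ops.irq_disable) is
 * offsetof(struct paravirt_patch_template, pv_irq_ops.irq_disable) / 8,
 * i.e. the index of that function pointer within the template.  That
 * index is what ends up as the "type" byte of a .parainstructions entry.
 */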
387
388 #define paravirt_type(op)                               \
389         [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
390         [paravirt_opptr] "i" (&(op))
391 #define paravirt_clobber(clobber)               \
392         [paravirt_clobber] "i" (clobber)
393
394 /*
395  * Generate some code, and mark it as patchable by the
396  * apply_paravirt() alternate instruction patcher.
397  */
398 #define _paravirt_alt(insn_string, type, clobber)       \
399         "771:\n\t" insn_string "\n" "772:\n"            \
400         ".pushsection .parainstructions,\"a\"\n"        \
401         _ASM_ALIGN "\n"                                 \
402         _ASM_PTR " 771b\n"                              \
403         "  .byte " type "\n"                            \
404         "  .byte 772b-771b\n"                           \
405         "  .short " clobber "\n"                        \
406         ".popsection\n"
407
408 /* Generate patchable code, with the default asm parameters. */
409 #define paravirt_alt(insn_string)                                       \
410         _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
411
412 /* Simple instruction patching code. */
413 #define DEF_NATIVE(ops, name, code)                                     \
414         extern const char start_##ops##_##name[], end_##ops##_##name[]; \
415         asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
416
417 unsigned paravirt_patch_nop(void);
418 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
419 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
420 unsigned paravirt_patch_ignore(unsigned len);
421 unsigned paravirt_patch_call(void *insnbuf,
422                              const void *target, u16 tgt_clobbers,
423                              unsigned long addr, u16 site_clobbers,
424                              unsigned len);
425 unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
426                             unsigned long addr, unsigned len);
427 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
428                                 unsigned long addr, unsigned len);
429
430 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
431                               const char *start, const char *end);
432
433 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
434                       unsigned long addr, unsigned len);
435
436 int paravirt_disable_iospace(void);
437
438 /*
439  * This generates an indirect call based on the operation type number.
440  * The type number, computed in PARAVIRT_PATCH, is derived from the
441  * offset into the paravirt_patch_template structure, and can therefore be
442  * freely converted back into a structure offset.
443  */
444 #define PARAVIRT_CALL   "call *%c[paravirt_opptr];"
445
446 /*
447  * These macros are intended to wrap calls through one of the paravirt
448  * ops structs, so that they can be later identified and patched at
449  * runtime.
450  *
451  * Normally, a call to a pv_op function is a simple indirect call:
452  * (pv_op_struct.operations)(args...).
453  *
454  * Unfortunately, this is a relatively slow operation for modern CPUs,
455  * because it cannot necessarily determine what the destination
456  * address is.  In this case, the address is a runtime constant, so at
457  * the very least we can patch the call to be a simple direct call, or
458  * ideally, patch an inline implementation into the callsite.  (Direct
459  * calls are essentially free, because the call and return addresses
460  * are completely predictable.)
461  *
462  * For i386, these macros rely on the standard gcc "regparm(3)" calling
463  * convention, in which the first three arguments are placed in %eax,
464  * %edx, %ecx (in that order), and the remaining arguments are placed
465  * on the stack.  All caller-save registers (eax,edx,ecx) are expected
466  * to be modified (either clobbered or used for return values).
467  * x86_64, on the other hand, already specifies a register-based calling
468  * convention, returning in %rax, with parameters passed in %rdi, %rsi,
469  * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
470  * special handling for dealing with 4 arguments, unlike i386.
471  * However, x86_64 also has to clobber all caller-saved registers, which
472  * unfortunately is quite a few of them (r8 - r11).
473  *
474  * The call instruction itself is marked by placing its start address
475  * and size into the .parainstructions section, so that
476  * apply_paravirt() in arch/i386/kernel/alternative.c can do the
477  * appropriate patching under the control of the backend pv_init_ops
478  * implementation.
479  *
480  * Unfortunately there's no way to get gcc to generate the args setup
481  * for the call, and then allow the call itself to be generated by an
482  * inline asm.  Because of this, we must do the complete arg setup and
483  * return value handling from within these macros.  This is fairly
484  * cumbersome.
485  *
486  * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
487  * It could be extended to more arguments, but there would be little
488  * to be gained from that.  For each number of arguments, there are
489  * the two VCALL and CALL variants for void and non-void functions.
490  *
491  * When there is a return value, the invoker of the macro must specify
492  * the return type.  The macro then uses sizeof() on that type to
493  * determine whether it's a 32 or 64 bit value, and places the return
494  * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
495  * 64-bit). For x86_64 machines, it just returns in %rax regardless of
496  * the return value size.
497  *
498  * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
499  * arguments, in low,high order; on x86_64 a 64-bit argument fits in a
500  * single register.
501  *
502  * Small structures are passed and returned in registers.  The macro
503  * calling convention can't directly deal with this, so the wrapper
504  * functions must do this.
505  *
506  * These PVOP_* macros are only defined within this header.  This
507  * means that all uses must be wrapped in inline functions.  This also
508  * makes sure the incoming and outgoing types are always correct.
509  */
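/*
 * A minimal sketch of what these macros are used for (the real wrappers
 * appear further down in this header):
 *
 *	static inline void write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 *	}
 *
 * On i386 this loads x into %eax and emits an indirect
 * "call *pv_mmu_ops.write_cr3" recorded in .parainstructions; on x86_64
 * the argument goes in %rdi instead.
 */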
510 #ifdef CONFIG_X86_32
511 #define PVOP_VCALL_ARGS                         \
512         unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
513 #define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS
514
515 #define PVOP_CALL_ARG1(x)               "a" ((unsigned long)(x))
516 #define PVOP_CALL_ARG2(x)               "d" ((unsigned long)(x))
517 #define PVOP_CALL_ARG3(x)               "c" ((unsigned long)(x))
518
519 #define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
520                                         "=c" (__ecx)
521 #define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS
522
523 #define PVOP_VCALLEE_CLOBBERS           "=a" (__eax), "=d" (__edx)
524 #define PVOP_CALLEE_CLOBBERS            PVOP_VCALLEE_CLOBBERS
525
526 #define EXTRA_CLOBBERS
527 #define VEXTRA_CLOBBERS
528 #else  /* CONFIG_X86_64 */
529 #define PVOP_VCALL_ARGS                                 \
530         unsigned long __edi = __edi, __esi = __esi,     \
531                 __edx = __edx, __ecx = __ecx
532 #define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
533
534 #define PVOP_CALL_ARG1(x)               "D" ((unsigned long)(x))
535 #define PVOP_CALL_ARG2(x)               "S" ((unsigned long)(x))
536 #define PVOP_CALL_ARG3(x)               "d" ((unsigned long)(x))
537 #define PVOP_CALL_ARG4(x)               "c" ((unsigned long)(x))
538
539 #define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
540                                 "=S" (__esi), "=d" (__edx),             \
541                                 "=c" (__ecx)
542 #define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)
543
544 #define PVOP_VCALLEE_CLOBBERS   "=a" (__eax)
545 #define PVOP_CALLEE_CLOBBERS    PVOP_VCALLEE_CLOBBERS
546
547 #define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
548 #define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
549 #endif  /* CONFIG_X86_32 */
550
551 #ifdef CONFIG_PARAVIRT_DEBUG
552 #define PVOP_TEST_NULL(op)      BUG_ON(op == NULL)
553 #else
554 #define PVOP_TEST_NULL(op)      ((void)op)
555 #endif
556
557 #define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,         \
558                       pre, post, ...)                                   \
559         ({                                                              \
560                 rettype __ret;                                          \
561                 PVOP_CALL_ARGS;                                         \
562                 PVOP_TEST_NULL(op);                                     \
563                 /* This is 32-bit specific, but is okay in 64-bit */    \
564                 /* since this condition will never hold */              \
565                 if (sizeof(rettype) > sizeof(unsigned long)) {          \
566                         asm volatile(pre                                \
567                                      paravirt_alt(PARAVIRT_CALL)        \
568                                      post                               \
569                                      : call_clbr                        \
570                                      : paravirt_type(op),               \
571                                        paravirt_clobber(clbr),          \
572                                        ##__VA_ARGS__                    \
573                                      : "memory", "cc" extra_clbr);      \
574                         __ret = (rettype)((((u64)__edx) << 32) | __eax); \
575                 } else {                                                \
576                         asm volatile(pre                                \
577                                      paravirt_alt(PARAVIRT_CALL)        \
578                                      post                               \
579                                      : call_clbr                        \
580                                      : paravirt_type(op),               \
581                                        paravirt_clobber(clbr),          \
582                                        ##__VA_ARGS__                    \
583                                      : "memory", "cc" extra_clbr);      \
584                         __ret = (rettype)__eax;                         \
585                 }                                                       \
586                 __ret;                                                  \
587         })
588
589 #define __PVOP_CALL(rettype, op, pre, post, ...)                        \
590         ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
591                       EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
592
593 #define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                  \
594         ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
595                       PVOP_CALLEE_CLOBBERS, ,                           \
596                       pre, post, ##__VA_ARGS__)
597
598
599 #define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
600         ({                                                              \
601                 PVOP_VCALL_ARGS;                                        \
602                 PVOP_TEST_NULL(op);                                     \
603                 asm volatile(pre                                        \
604                              paravirt_alt(PARAVIRT_CALL)                \
605                              post                                       \
606                              : call_clbr                                \
607                              : paravirt_type(op),                       \
608                                paravirt_clobber(clbr),                  \
609                                ##__VA_ARGS__                            \
610                              : "memory", "cc" extra_clbr);              \
611         })
612
613 #define __PVOP_VCALL(op, pre, post, ...)                                \
614         ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
615                        VEXTRA_CLOBBERS,                                 \
616                        pre, post, ##__VA_ARGS__)
617
618 #define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)                 \
619         ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
620                       PVOP_VCALLEE_CLOBBERS, ,                          \
621                       pre, post, ##__VA_ARGS__)
622
623
624
625 #define PVOP_CALL0(rettype, op)                                         \
626         __PVOP_CALL(rettype, op, "", "")
627 #define PVOP_VCALL0(op)                                                 \
628         __PVOP_VCALL(op, "", "")
629
630 #define PVOP_CALLEE0(rettype, op)                                       \
631         __PVOP_CALLEESAVE(rettype, op, "", "")
632 #define PVOP_VCALLEE0(op)                                               \
633         __PVOP_VCALLEESAVE(op, "", "")
634
635
636 #define PVOP_CALL1(rettype, op, arg1)                                   \
637         __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
638 #define PVOP_VCALL1(op, arg1)                                           \
639         __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
640
641 #define PVOP_CALLEE1(rettype, op, arg1)                                 \
642         __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
643 #define PVOP_VCALLEE1(op, arg1)                                         \
644         __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
645
646
647 #define PVOP_CALL2(rettype, op, arg1, arg2)                             \
648         __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
649                     PVOP_CALL_ARG2(arg2))
650 #define PVOP_VCALL2(op, arg1, arg2)                                     \
651         __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
652                      PVOP_CALL_ARG2(arg2))
653
654 #define PVOP_CALLEE2(rettype, op, arg1, arg2)                           \
655         __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
656                           PVOP_CALL_ARG2(arg2))
657 #define PVOP_VCALLEE2(op, arg1, arg2)                                   \
658         __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
659                            PVOP_CALL_ARG2(arg2))
660
661
662 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
663         __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
664                     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
665 #define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
666         __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
667                      PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
668
669 /* The 4-argument case is the only one that differs on x86_64, where it is much simpler. */
670 #ifdef CONFIG_X86_32
671 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
672         __PVOP_CALL(rettype, op,                                        \
673                     "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
674                     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
675                     PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
676 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
677         __PVOP_VCALL(op,                                                \
678                     "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
679                     "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
680                     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
681 #else
682 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
683         __PVOP_CALL(rettype, op, "", "",                                \
684                     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
685                     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
686 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
687         __PVOP_VCALL(op, "", "",                                        \
688                      PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
689                      PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
690 #endif
691
692 static inline int paravirt_enabled(void)
693 {
694         return pv_info.paravirt_enabled;
695 }
696
697 static inline void load_sp0(struct tss_struct *tss,
698                              struct thread_struct *thread)
699 {
700         PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
701 }
702
703 #define ARCH_SETUP                      pv_init_ops.arch_setup();
704 static inline unsigned long get_wallclock(void)
705 {
706         return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
707 }
708
709 static inline int set_wallclock(unsigned long nowtime)
710 {
711         return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
712 }
713
714 static inline void (*choose_time_init(void))(void)
715 {
716         return pv_time_ops.time_init;
717 }
718
719 /* The paravirtualized CPUID instruction. */
720 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
721                            unsigned int *ecx, unsigned int *edx)
722 {
723         PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
724 }
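/*
 * Usage sketch (illustrative): the caller preloads the leaf in *eax (and
 * the sub-leaf in *ecx where that matters) before the call, e.g.
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *
 * after which the leaf-1 feature bits are in ecx and edx.
 */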
725
726 /*
727  * These special macros can be used to get or set a debugging register
728  */
729 static inline unsigned long paravirt_get_debugreg(int reg)
730 {
731         return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
732 }
733 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
734 static inline void set_debugreg(unsigned long val, int reg)
735 {
736         PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
737 }
738
739 static inline void clts(void)
740 {
741         PVOP_VCALL0(pv_cpu_ops.clts);
742 }
743
744 static inline unsigned long read_cr0(void)
745 {
746         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
747 }
748
749 static inline void write_cr0(unsigned long x)
750 {
751         PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
752 }
753
754 static inline unsigned long read_cr2(void)
755 {
756         return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
757 }
758
759 static inline void write_cr2(unsigned long x)
760 {
761         PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
762 }
763
764 static inline unsigned long read_cr3(void)
765 {
766         return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
767 }
768
769 static inline void write_cr3(unsigned long x)
770 {
771         PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
772 }
773
774 static inline unsigned long read_cr4(void)
775 {
776         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
777 }
778 static inline unsigned long read_cr4_safe(void)
779 {
780         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
781 }
782
783 static inline void write_cr4(unsigned long x)
784 {
785         PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
786 }
787
788 #ifdef CONFIG_X86_64
789 static inline unsigned long read_cr8(void)
790 {
791         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
792 }
793
794 static inline void write_cr8(unsigned long x)
795 {
796         PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
797 }
798 #endif
799
800 static inline void raw_safe_halt(void)
801 {
802         PVOP_VCALL0(pv_irq_ops.safe_halt);
803 }
804
805 static inline void halt(void)
806 {
807         PVOP_VCALL0(pv_irq_ops.halt);
808 }
809
810 static inline void wbinvd(void)
811 {
812         PVOP_VCALL0(pv_cpu_ops.wbinvd);
813 }
814
815 #define get_kernel_rpl()  (pv_info.kernel_rpl)
816
817 static inline u64 paravirt_read_msr(unsigned msr, int *err)
818 {
819         return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
820 }
821 static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
822 {
823         return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
824 }
825 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
826 {
827         return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
828 }
829
830 /* These should all do BUG_ON(_err), but our headers are too tangled. */
831 #define rdmsr(msr, val1, val2)                  \
832 do {                                            \
833         int _err;                               \
834         u64 _l = paravirt_read_msr(msr, &_err); \
835         val1 = (u32)_l;                         \
836         val2 = _l >> 32;                        \
837 } while (0)
838
839 #define wrmsr(msr, val1, val2)                  \
840 do {                                            \
841         paravirt_write_msr(msr, val1, val2);    \
842 } while (0)
843
844 #define rdmsrl(msr, val)                        \
845 do {                                            \
846         int _err;                               \
847         val = paravirt_read_msr(msr, &_err);    \
848 } while (0)
849
850 #define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
851 #define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)
852
853 /* rdmsr with exception handling */
854 #define rdmsr_safe(msr, a, b)                   \
855 ({                                              \
856         int _err;                               \
857         u64 _l = paravirt_read_msr(msr, &_err); \
858         (*a) = (u32)_l;                         \
859         (*b) = _l >> 32;                        \
860         _err;                                   \
861 })
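/*
 * Usage sketch (illustrative): the _safe variants hand back the fault as
 * an error code instead of oopsing on a missing MSR, e.g.
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
 *		printk(KERN_INFO "MSR not readable on this CPU\n");
 */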
862
863 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
864 {
865         int err;
866
867         *p = paravirt_read_msr(msr, &err);
868         return err;
869 }
870 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
871 {
872         int err;
873
874         *p = paravirt_read_msr_amd(msr, &err);
875         return err;
876 }
877
878 static inline u64 paravirt_read_tsc(void)
879 {
880         return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
881 }
882
883 #define rdtscl(low)                             \
884 do {                                            \
885         u64 _l = paravirt_read_tsc();           \
886         low = (int)_l;                          \
887 } while (0)
888
889 #define rdtscll(val) (val = paravirt_read_tsc())
890
891 static inline unsigned long long paravirt_sched_clock(void)
892 {
893         return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
894 }
895 #define calibrate_tsc() (pv_time_ops.get_tsc_khz())
896
897 static inline unsigned long long paravirt_read_pmc(int counter)
898 {
899         return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
900 }
901
902 #define rdpmc(counter, low, high)               \
903 do {                                            \
904         u64 _l = paravirt_read_pmc(counter);    \
905         low = (u32)_l;                          \
906         high = _l >> 32;                        \
907 } while (0)
908
909 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
910 {
911         return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
912 }
913
914 #define rdtscp(low, high, aux)                          \
915 do {                                                    \
916         int __aux;                                      \
917         unsigned long __val = paravirt_rdtscp(&__aux);  \
918         (low) = (u32)__val;                             \
919         (high) = (u32)(__val >> 32);                    \
920         (aux) = __aux;                                  \
921 } while (0)
922
923 #define rdtscpll(val, aux)                              \
924 do {                                                    \
925         unsigned long __aux;                            \
926         val = paravirt_rdtscp(&__aux);                  \
927         (aux) = __aux;                                  \
928 } while (0)
929
930 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
931 {
932         PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
933 }
934
935 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
936 {
937         PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
938 }
939
940 static inline void load_TR_desc(void)
941 {
942         PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
943 }
944 static inline void load_gdt(const struct desc_ptr *dtr)
945 {
946         PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
947 }
948 static inline void load_idt(const struct desc_ptr *dtr)
949 {
950         PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
951 }
952 static inline void set_ldt(const void *addr, unsigned entries)
953 {
954         PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
955 }
956 static inline void store_gdt(struct desc_ptr *dtr)
957 {
958         PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
959 }
960 static inline void store_idt(struct desc_ptr *dtr)
961 {
962         PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
963 }
964 static inline unsigned long paravirt_store_tr(void)
965 {
966         return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
967 }
968 #define store_tr(tr)    ((tr) = paravirt_store_tr())
969 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
970 {
971         PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
972 }
973
974 #ifdef CONFIG_X86_64
975 static inline void load_gs_index(unsigned int gs)
976 {
977         PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
978 }
979 #endif
980
981 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
982                                    const void *desc)
983 {
984         PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
985 }
986
987 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
988                                    void *desc, int type)
989 {
990         PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
991 }
992
993 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
994 {
995         PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
996 }
997 static inline void set_iopl_mask(unsigned mask)
998 {
999         PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
1000 }
1001
1002 /* The paravirtualized I/O functions */
1003 static inline void slow_down_io(void)
1004 {
1005         pv_cpu_ops.io_delay();
1006 #ifdef REALLY_SLOW_IO
1007         pv_cpu_ops.io_delay();
1008         pv_cpu_ops.io_delay();
1009         pv_cpu_ops.io_delay();
1010 #endif
1011 }
1012
1013 #ifdef CONFIG_X86_LOCAL_APIC
1014 static inline void setup_boot_clock(void)
1015 {
1016         PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
1017 }
1018
1019 static inline void setup_secondary_clock(void)
1020 {
1021         PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
1022 }
1023 #endif
1024
1025 static inline void paravirt_post_allocator_init(void)
1026 {
1027         if (pv_init_ops.post_allocator_init)
1028                 (*pv_init_ops.post_allocator_init)();
1029 }
1030
1031 static inline void paravirt_pagetable_setup_start(pgd_t *base)
1032 {
1033         (*pv_mmu_ops.pagetable_setup_start)(base);
1034 }
1035
1036 static inline void paravirt_pagetable_setup_done(pgd_t *base)
1037 {
1038         (*pv_mmu_ops.pagetable_setup_done)(base);
1039 }
1040
1041 #ifdef CONFIG_SMP
1042 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
1043                                     unsigned long start_esp)
1044 {
1045         PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
1046                     phys_apicid, start_eip, start_esp);
1047 }
1048 #endif
1049
1050 static inline void paravirt_activate_mm(struct mm_struct *prev,
1051                                         struct mm_struct *next)
1052 {
1053         PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
1054 }
1055
1056 static inline void arch_dup_mmap(struct mm_struct *oldmm,
1057                                  struct mm_struct *mm)
1058 {
1059         PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
1060 }
1061
1062 static inline void arch_exit_mmap(struct mm_struct *mm)
1063 {
1064         PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
1065 }
1066
1067 static inline void __flush_tlb(void)
1068 {
1069         PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
1070 }
1071 static inline void __flush_tlb_global(void)
1072 {
1073         PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
1074 }
1075 static inline void __flush_tlb_single(unsigned long addr)
1076 {
1077         PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
1078 }
1079
1080 static inline void flush_tlb_others(const struct cpumask *cpumask,
1081                                     struct mm_struct *mm,
1082                                     unsigned long va)
1083 {
1084         PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
1085 }
1086
1087 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
1088 {
1089         return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
1090 }
1091
1092 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1093 {
1094         PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1095 }
1096
1097 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1098 {
1099         PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
1100 }
1101 static inline void paravirt_release_pte(unsigned long pfn)
1102 {
1103         PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
1104 }
1105
1106 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1107 {
1108         PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
1109 }
1110
1111 static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
1112                                             unsigned long start, unsigned long count)
1113 {
1114         PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
1115 }
1116 static inline void paravirt_release_pmd(unsigned long pfn)
1117 {
1118         PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
1119 }
1120
1121 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1122 {
1123         PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1124 }
1125 static inline void paravirt_release_pud(unsigned long pfn)
1126 {
1127         PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1128 }
1129
1130 #ifdef CONFIG_HIGHPTE
1131 static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
1132 {
1133         unsigned long ret;
1134         ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
1135         return (void *)ret;
1136 }
1137 #endif
1138
1139 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
1140                               pte_t *ptep)
1141 {
1142         PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
1143 }
1144
1145 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
1146                                     pte_t *ptep)
1147 {
1148         PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
1149 }
1150
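/*
 * Note on the helpers below: when pteval_t (or pmdval_t, pudval_t,
 * pgdval_t) is wider than a long -- i.e. 32-bit PAE -- the 64-bit value
 * must be split across two 32-bit argument registers, hence the sizeof()
 * checks selecting the two-argument PVOP variants.
 */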
1151 static inline pte_t __pte(pteval_t val)
1152 {
1153         pteval_t ret;
1154
1155         if (sizeof(pteval_t) > sizeof(long))
1156                 ret = PVOP_CALLEE2(pteval_t,
1157                                    pv_mmu_ops.make_pte,
1158                                    val, (u64)val >> 32);
1159         else
1160                 ret = PVOP_CALLEE1(pteval_t,
1161                                    pv_mmu_ops.make_pte,
1162                                    val);
1163
1164         return (pte_t) { .pte = ret };
1165 }
1166
1167 static inline pteval_t pte_val(pte_t pte)
1168 {
1169         pteval_t ret;
1170
1171         if (sizeof(pteval_t) > sizeof(long))
1172                 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1173                                    pte.pte, (u64)pte.pte >> 32);
1174         else
1175                 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1176                                    pte.pte);
1177
1178         return ret;
1179 }
1180
1181 static inline pgd_t __pgd(pgdval_t val)
1182 {
1183         pgdval_t ret;
1184
1185         if (sizeof(pgdval_t) > sizeof(long))
1186                 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1187                                    val, (u64)val >> 32);
1188         else
1189                 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1190                                    val);
1191
1192         return (pgd_t) { ret };
1193 }
1194
1195 static inline pgdval_t pgd_val(pgd_t pgd)
1196 {
1197         pgdval_t ret;
1198
1199         if (sizeof(pgdval_t) > sizeof(long))
1200                 ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1201                                     pgd.pgd, (u64)pgd.pgd >> 32);
1202         else
1203                 ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1204                                     pgd.pgd);
1205
1206         return ret;
1207 }
1208
1209 #define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1210 static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
1211                                            pte_t *ptep)
1212 {
1213         pteval_t ret;
1214
1215         ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1216                          mm, addr, ptep);
1217
1218         return (pte_t) { .pte = ret };
1219 }
1220
1221 static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1222                                            pte_t *ptep, pte_t pte)
1223 {
1224         if (sizeof(pteval_t) > sizeof(long))
1225                 /* 5 arg words */
1226                 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1227         else
1228                 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1229                             mm, addr, ptep, pte.pte);
1230 }
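/*
 * Sketch of the intended start/commit pairing (illustrative; the real
 * callers live in generic mm code):
 *
 *	pte_t pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_mkdirty(pte);
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 *
 * which lets a hypervisor turn the read-modify-write into a single
 * update instead of trapping on both halves.
 */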
1231
1232 static inline void set_pte(pte_t *ptep, pte_t pte)
1233 {
1234         if (sizeof(pteval_t) > sizeof(long))
1235                 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
1236                             pte.pte, (u64)pte.pte >> 32);
1237         else
1238                 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
1239                             pte.pte);
1240 }
1241
1242 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1243                               pte_t *ptep, pte_t pte)
1244 {
1245         if (sizeof(pteval_t) > sizeof(long))
1246                 /* 5 arg words */
1247                 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
1248         else
1249                 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
1250 }
1251
1252 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
1253 {
1254         pmdval_t val = native_pmd_val(pmd);
1255
1256         if (sizeof(pmdval_t) > sizeof(long))
1257                 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
1258         else
1259                 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
1260 }
1261
1262 #if PAGETABLE_LEVELS >= 3
1263 static inline pmd_t __pmd(pmdval_t val)
1264 {
1265         pmdval_t ret;
1266
1267         if (sizeof(pmdval_t) > sizeof(long))
1268                 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1269                                    val, (u64)val >> 32);
1270         else
1271                 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1272                                    val);
1273
1274         return (pmd_t) { ret };
1275 }
1276
1277 static inline pmdval_t pmd_val(pmd_t pmd)
1278 {
1279         pmdval_t ret;
1280
1281         if (sizeof(pmdval_t) > sizeof(long))
1282                 ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1283                                     pmd.pmd, (u64)pmd.pmd >> 32);
1284         else
1285                 ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1286                                     pmd.pmd);
1287
1288         return ret;
1289 }
1290
1291 static inline void set_pud(pud_t *pudp, pud_t pud)
1292 {
1293         pudval_t val = native_pud_val(pud);
1294
1295         if (sizeof(pudval_t) > sizeof(long))
1296                 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
1297                             val, (u64)val >> 32);
1298         else
1299                 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
1300                             val);
1301 }
1302 #if PAGETABLE_LEVELS == 4
1303 static inline pud_t __pud(pudval_t val)
1304 {
1305         pudval_t ret;
1306
1307         if (sizeof(pudval_t) > sizeof(long))
1308                 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1309                                    val, (u64)val >> 32);
1310         else
1311                 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1312                                    val);
1313
1314         return (pud_t) { ret };
1315 }
1316
1317 static inline pudval_t pud_val(pud_t pud)
1318 {
1319         pudval_t ret;
1320
1321         if (sizeof(pudval_t) > sizeof(long))
1322                 ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
1323                                     pud.pud, (u64)pud.pud >> 32);
1324         else
1325                 ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
1326                                     pud.pud);
1327
1328         return ret;
1329 }
1330
1331 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1332 {
1333         pgdval_t val = native_pgd_val(pgd);
1334
1335         if (sizeof(pgdval_t) > sizeof(long))
1336                 PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
1337                             val, (u64)val >> 32);
1338         else
1339                 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
1340                             val);
1341 }
1342
1343 static inline void pgd_clear(pgd_t *pgdp)
1344 {
1345         set_pgd(pgdp, __pgd(0));
1346 }
1347
1348 static inline void pud_clear(pud_t *pudp)
1349 {
1350         set_pud(pudp, __pud(0));
1351 }
1352
1353 #endif  /* PAGETABLE_LEVELS == 4 */
1354
1355 #endif  /* PAGETABLE_LEVELS >= 3 */
1356
1357 #ifdef CONFIG_X86_PAE
1358 /* Special-case pte-setting operations for PAE, which can't update a
1359    64-bit pte atomically */
1360 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1361 {
1362         PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
1363                     pte.pte, pte.pte >> 32);
1364 }
1365
1366 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1367                              pte_t *ptep)
1368 {
1369         PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
1370 }
1371
1372 static inline void pmd_clear(pmd_t *pmdp)
1373 {
1374         PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
1375 }
1376 #else  /* !CONFIG_X86_PAE */
1377 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1378 {
1379         set_pte(ptep, pte);
1380 }
1381
1382 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1383                              pte_t *ptep)
1384 {
1385         set_pte_at(mm, addr, ptep, __pte(0));
1386 }
1387
1388 static inline void pmd_clear(pmd_t *pmdp)
1389 {
1390         set_pmd(pmdp, __pmd(0));
1391 }
1392 #endif  /* CONFIG_X86_PAE */
1393
1394 /* Lazy mode for batching updates / context switch */
1395 enum paravirt_lazy_mode {
1396         PARAVIRT_LAZY_NONE,
1397         PARAVIRT_LAZY_MMU,
1398         PARAVIRT_LAZY_CPU,
1399 };
1400
1401 enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1402 void paravirt_enter_lazy_cpu(void);
1403 void paravirt_leave_lazy_cpu(void);
1404 void paravirt_enter_lazy_mmu(void);
1405 void paravirt_leave_lazy_mmu(void);
1406 void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
1407
1408 #define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
1409 static inline void arch_enter_lazy_cpu_mode(void)
1410 {
1411         PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
1412 }
1413
1414 static inline void arch_leave_lazy_cpu_mode(void)
1415 {
1416         PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
1417 }
1418
1419 void arch_flush_lazy_cpu_mode(void);
1420
1421 #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1422 static inline void arch_enter_lazy_mmu_mode(void)
1423 {
1424         PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
1425 }
1426
1427 static inline void arch_leave_lazy_mmu_mode(void)
1428 {
1429         PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
1430 }
1431
1432 void arch_flush_lazy_mmu_mode(void);
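/*
 * Rough usage pattern for lazy MMU mode (illustrative):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 *
 * A paravirt backend may queue the individual updates and flush them as
 * one batch when the lazy section is left.
 */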
1433
1434 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1435                                 phys_addr_t phys, pgprot_t flags)
1436 {
1437         pv_mmu_ops.set_fixmap(idx, phys, flags);
1438 }
1439
1440 void _paravirt_nop(void);
1441 u32 _paravirt_ident_32(u32);
1442 u64 _paravirt_ident_64(u64);
1443
1444 #define paravirt_nop    ((void *)_paravirt_nop)
1445
1446 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
1447
1448 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1449 {
1450         return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1451 }
1452
1453 static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1454 {
1455         return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1456 }
1457 #define __raw_spin_is_contended __raw_spin_is_contended
1458
1459 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1460 {
1461         PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1462 }
1463
1464 static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
1465                                                   unsigned long flags)
1466 {
1467         PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
1468 }
1469
1470 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1471 {
1472         return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1473 }
1474
1475 static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1476 {
1477         PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1478 }
1479
1480 #endif
1481
1482 /* These all sit in the .parainstructions section to tell us what to patch. */
1483 struct paravirt_patch_site {
1484         u8 *instr;              /* original instructions */
1485         u8 instrtype;           /* type of this instruction */
1486         u8 len;                 /* length of original instruction */
1487         u16 clobbers;           /* what registers you may clobber */
1488 };
1489
1490 extern struct paravirt_patch_site __parainstructions[],
1491         __parainstructions_end[];
1492
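/*
 * Sketch of how these records are consumed at boot (the actual patching
 * loop lives elsewhere; add_nops() below stands in for whatever
 * nop-padding helper that loop uses):
 *
 *	struct paravirt_patch_site *p;
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used;
 *
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 p->instr, (unsigned long)p->instr,
 *					 p->len);
 *		add_nops(p->instr + used, p->len - used);
 *	}
 */
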
1493 #ifdef CONFIG_X86_32
1494 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1495 #define PV_RESTORE_REGS "popl %edx; popl %ecx;"
1496
1497 /* save and restore all caller-save registers, except return value */
1498 #define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
1499 #define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"
1500
1501 #define PV_FLAGS_ARG "0"
1502 #define PV_EXTRA_CLOBBERS
1503 #define PV_VEXTRA_CLOBBERS
1504 #else
1505 /* save and restore all caller-save registers, except return value */
1506 #define PV_SAVE_ALL_CALLER_REGS                                         \
1507         "push %rcx;"                                                    \
1508         "push %rdx;"                                                    \
1509         "push %rsi;"                                                    \
1510         "push %rdi;"                                                    \
1511         "push %r8;"                                                     \
1512         "push %r9;"                                                     \
1513         "push %r10;"                                                    \
1514         "push %r11;"
1515 #define PV_RESTORE_ALL_CALLER_REGS                                      \
1516         "pop %r11;"                                                     \
1517         "pop %r10;"                                                     \
1518         "pop %r9;"                                                      \
1519         "pop %r8;"                                                      \
1520         "pop %rdi;"                                                     \
1521         "pop %rsi;"                                                     \
1522         "pop %rdx;"                                                     \
1523         "pop %rcx;"
1524
1525 /* We save some registers, but not all of them; saving them all would be
1526  * too much.  We clobber all caller-saved registers except the argument register. */
1527 #define PV_SAVE_REGS "pushq %%rdi;"
1528 #define PV_RESTORE_REGS "popq %%rdi;"
1529 #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
1530 #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
1531 #define PV_FLAGS_ARG "D"
1532 #endif
1533
1534 /*
1535  * Generate a thunk around a function which saves all caller-save
1536  * registers except for the return value.  This allows C functions to
1537  * be called from assembler code where fewer registers than normal are
1538  * available.  It may also help code generation around calls from C
1539  * code if the common case doesn't use many registers.
1540  *
1541  * When a callee is wrapped in a thunk, the caller can assume that all
1542  * arg regs and all scratch registers are preserved across the
1543  * call. The return value in rax/eax will not be saved, even for void
1544  * functions.
1545  */
1546 #define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
1547         extern typeof(func) __raw_callee_save_##func;                   \
1548         static void *__##func##__ __used = func;                        \
1549                                                                         \
1550         asm(".pushsection .text;"                                       \
1551             "__raw_callee_save_" #func ": "                             \
1552             PV_SAVE_ALL_CALLER_REGS                                     \
1553             "call " #func ";"                                           \
1554             PV_RESTORE_ALL_CALLER_REGS                                  \
1555             "ret;"                                                      \
1556             ".popsection")
1557
1558 /* Get a reference to a callee-save function */
1559 #define PV_CALLEE_SAVE(func)                                            \
1560         ((struct paravirt_callee_save) { __raw_callee_save_##func })
1561
1562 /* Promise that "func" already uses the right calling convention */
1563 #define __PV_IS_CALLEE_SAVE(func)                       \
1564         ((struct paravirt_callee_save) { func })
1565
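/*
 * Example wiring (illustrative only; "demo_save_fl" is a made-up backend
 * function, not something defined in this tree):
 *
 *	static unsigned long demo_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(demo_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(demo_save_fl);
 *
 * If demo_save_fl() already obeys the restricted convention (clobbers
 * nothing but the return register), __PV_IS_CALLEE_SAVE(demo_save_fl)
 * can be used instead and no thunk is needed.
 */
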
1566 static inline unsigned long __raw_local_save_flags(void)
1567 {
1568         unsigned long f;
1569
1570         asm volatile(paravirt_alt(PARAVIRT_CALL)
1571                      : "=a"(f)
1572                      : paravirt_type(pv_irq_ops.save_fl),
1573                        paravirt_clobber(CLBR_EAX)
1574                      : "memory", "cc");
1575         return f;
1576 }
1577
1578 static inline void raw_local_irq_restore(unsigned long f)
1579 {
1580         asm volatile(paravirt_alt(PARAVIRT_CALL)
1581                      : "=a"(f)
1582                      : PV_FLAGS_ARG(f),
1583                        paravirt_type(pv_irq_ops.restore_fl),
1584                        paravirt_clobber(CLBR_EAX)
1585                      : "memory", "cc");
1586 }
1587
1588 static inline void raw_local_irq_disable(void)
1589 {
1590         asm volatile(paravirt_alt(PARAVIRT_CALL)
1591                      :
1592                      : paravirt_type(pv_irq_ops.irq_disable),
1593                        paravirt_clobber(CLBR_EAX)
1594                      : "memory", "eax", "cc");
1595 }
1596
1597 static inline void raw_local_irq_enable(void)
1598 {
1599         asm volatile(paravirt_alt(PARAVIRT_CALL)
1600                      :
1601                      : paravirt_type(pv_irq_ops.irq_enable),
1602                        paravirt_clobber(CLBR_EAX)
1603                      : "memory", "eax", "cc");
1604 }
1605
1606 static inline unsigned long __raw_local_irq_save(void)
1607 {
1608         unsigned long f;
1609
1610         f = __raw_local_save_flags();
1611         raw_local_irq_disable();
1612         return f;
1613 }
1614
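/*
 * Typical pairing of the helpers above (a sketch; the local_irq_save()/
 * local_irq_restore() wrappers that normally drive these live in the
 * generic irqflags code):
 *
 *	unsigned long flags;
 *
 *	flags = __raw_local_irq_save();
 *	... critical section with interrupts off ...
 *	raw_local_irq_restore(flags);
 */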
1615
1616 /* Make sure as little as possible of this mess escapes. */
1617 #undef PARAVIRT_CALL
1618 #undef __PVOP_CALL
1619 #undef __PVOP_VCALL
1620 #undef PVOP_VCALL0
1621 #undef PVOP_CALL0
1622 #undef PVOP_VCALL1
1623 #undef PVOP_CALL1
1624 #undef PVOP_VCALL2
1625 #undef PVOP_CALL2
1626 #undef PVOP_VCALL3
1627 #undef PVOP_CALL3
1628 #undef PVOP_VCALL4
1629 #undef PVOP_CALL4
1630
1631 #else  /* __ASSEMBLY__ */
1632
1633 #define _PVSITE(ptype, clobbers, ops, word, algn)       \
1634 771:;                                           \
1635         ops;                                    \
1636 772:;                                           \
1637         .pushsection .parainstructions,"a";     \
1638          .align algn;                           \
1639          word 771b;                             \
1640          .byte ptype;                           \
1641          .byte 772b-771b;                       \
1642          .short clobbers;                       \
1643         .popsection
1644
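/*
 * Each _PVSITE expansion records one patch-site entry in
 * .parainstructions, matching struct paravirt_patch_site field for
 * field: "word 771b" is the address of the original code (instr),
 * ".byte ptype" its type, ".byte 772b-771b" its length and
 * ".short clobbers" the clobber mask.
 */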
1645
1646 #define COND_PUSH(set, mask, reg)                       \
1647         .if ((~(set)) & mask); push %reg; .endif
1648 #define COND_POP(set, mask, reg)                        \
1649         .if ((~(set)) & mask); pop %reg; .endif
1650
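/*
 * Reading of COND_PUSH/COND_POP above: a register is saved only if its
 * CLBR_* bit is NOT already in "set", i.e. PV_SAVE_REGS(set) preserves
 * exactly those registers the call site has not declared clobbered.
 * For example (expected expansion, shown for illustration):
 *
 *	PV_SAVE_REGS(CLBR_NONE | CLBR_CALLEE_SAVE)
 *
 * should reduce to a single "push %rax" on 64-bit, since the
 * callee-save thunk already preserves the argument and scratch
 * registers and %rax is the only remaining clobberable register.
 */
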
1651 #ifdef CONFIG_X86_64
1652
1653 #define PV_SAVE_REGS(set)                       \
1654         COND_PUSH(set, CLBR_RAX, rax);          \
1655         COND_PUSH(set, CLBR_RCX, rcx);          \
1656         COND_PUSH(set, CLBR_RDX, rdx);          \
1657         COND_PUSH(set, CLBR_RSI, rsi);          \
1658         COND_PUSH(set, CLBR_RDI, rdi);          \
1659         COND_PUSH(set, CLBR_R8, r8);            \
1660         COND_PUSH(set, CLBR_R9, r9);            \
1661         COND_PUSH(set, CLBR_R10, r10);          \
1662         COND_PUSH(set, CLBR_R11, r11)
1663 #define PV_RESTORE_REGS(set)                    \
1664         COND_POP(set, CLBR_R11, r11);           \
1665         COND_POP(set, CLBR_R10, r10);           \
1666         COND_POP(set, CLBR_R9, r9);             \
1667         COND_POP(set, CLBR_R8, r8);             \
1668         COND_POP(set, CLBR_RDI, rdi);           \
1669         COND_POP(set, CLBR_RSI, rsi);           \
1670         COND_POP(set, CLBR_RDX, rdx);           \
1671         COND_POP(set, CLBR_RCX, rcx);           \
1672         COND_POP(set, CLBR_RAX, rax)
1673
1674 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
1675 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1676 #define PARA_INDIRECT(addr)     *addr(%rip)
1677 #else
1678 #define PV_SAVE_REGS(set)                       \
1679         COND_PUSH(set, CLBR_EAX, eax);          \
1680         COND_PUSH(set, CLBR_EDI, edi);          \
1681         COND_PUSH(set, CLBR_ECX, ecx);          \
1682         COND_PUSH(set, CLBR_EDX, edx)
1683 #define PV_RESTORE_REGS(set)                    \
1684         COND_POP(set, CLBR_EDX, edx);           \
1685         COND_POP(set, CLBR_ECX, ecx);           \
1686         COND_POP(set, CLBR_EDI, edi);           \
1687         COND_POP(set, CLBR_EAX, eax)
1688
1689 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
1690 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1691 #define PARA_INDIRECT(addr)     *%cs:addr
1692 #endif
1693
1694 #define INTERRUPT_RETURN                                                \
1695         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
1696                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1697
1698 #define DISABLE_INTERRUPTS(clobbers)                                    \
1699         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1700                   PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
1701                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
1702                   PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1703
1704 #define ENABLE_INTERRUPTS(clobbers)                                     \
1705         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
1706                   PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
1707                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
1708                   PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
1709
1710 #define USERGS_SYSRET32                                                 \
1711         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
1712                   CLBR_NONE,                                            \
1713                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
1714
1715 #ifdef CONFIG_X86_32
1716 #define GET_CR0_INTO_EAX                                \
1717         push %ecx; push %edx;                           \
1718         call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1719         pop %edx; pop %ecx
1720
1721 #define ENABLE_INTERRUPTS_SYSEXIT                                       \
1722         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
1723                   CLBR_NONE,                                            \
1724                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1725
1726
1727 #else   /* !CONFIG_X86_32 */
1728
1729 /*
1730  * If swapgs is used while the userspace stack is still current,
1731  * there's no way to call a pvop.  The PV replacement *must* be
1732  * inlined, or the swapgs instruction must be trapped and emulated.
1733  */
1734 #define SWAPGS_UNSAFE_STACK                                             \
1735         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
1736                   swapgs)
1737
1738 /*
1739  * Note: swapgs is very special, and in practice is either going to be
1740  * implemented with a single "swapgs" instruction or something very
1741  * special.  Either way, we don't need to save any registers for
1742  * it.
1743  */
1744 #define SWAPGS                                                          \
1745         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
1746                   call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
1747                  )
1748
1749 #define GET_CR2_INTO_RCX                                \
1750         call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1751         movq %rax, %rcx;                                \
1752         xorq %rax, %rax;
1753
1754 #define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
1755         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1756                   CLBR_NONE,                                            \
1757                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1758
1759 #define USERGS_SYSRET64                                                 \
1760         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
1761                   CLBR_NONE,                                            \
1762                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1763
1764 #define ENABLE_INTERRUPTS_SYSEXIT32                                     \
1765         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
1766                   CLBR_NONE,                                            \
1767                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1768 #endif  /* CONFIG_X86_32 */
1769
1770 #endif /* __ASSEMBLY__ */
1771 #endif /* CONFIG_PARAVIRT */
1772 #endif /* _ASM_X86_PARAVIRT_H */