/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
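
/*
 * Editorial note (sketch, not from the original file): "direct" here
 * means each CPU's Xen vcpu_info structure has been copied into percpu
 * data as xen_vcpu_info, so the mask and pending bytes can be reached
 * with a single percpu memory operand such as
 *
 *	PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 *
 * rather than by first loading a vcpu_info pointer (the indirect form,
 * which stays in C).
 */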

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#include "xen-asm.h"	/* ENDPATCH/RELOC patching macros used below */

/*
 * Enable events.  This clears the event mask and then tests the
 * pending-event status with a single and-based test.  If there are
 * pending events, enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f			/* nothing pending: done */

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
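
/*
 * Roughly equivalent C for the above (illustrative sketch only, not
 * kernel code; the field names come from Xen's public struct
 * vcpu_info):
 *
 *	vcpu_info->evtchn_upcall_mask = 0;
 *	barrier();
 *	if (vcpu_info->evtchn_upcall_pending)
 *		xen_force_evtchn_callback();
 */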

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)
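
/*
 * C-level sketch (illustrative only): disabling is just
 *
 *	vcpu_info->evtchn_upcall_mask = 1;
 *
 * and needs no pending-event check afterwards, since nothing can be
 * delivered while the mask is set.
 */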

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah		/* %ah = 1 iff mask is clear (enabled) */
	addb %ah, %ah		/* 1 -> 2, placing the bit at 0x200 in
				   %eax, i.e. X86_EFLAGS_IF */
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)
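
/*
 * Illustrative C sketch of the above (not kernel code):
 *
 *	unsigned long flags = 0;
 *	if (vcpu_info->evtchn_upcall_mask == 0)
 *		flags = X86_EFLAGS_IF;
 *	return flags;
 *
 * Only the X86_EFLAGS_IF bit of the return value is meaningful.
 */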

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and
 * enter the hypervisor to get them delivered if there are any.
 */
ENTRY(xen_restore_fl_direct)
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di	/* 64-bit: flags argument in %rdi */
#else
	testb $X86_EFLAGS_IF>>8, %ah	/* 32-bit: argument in %eax; IF is
					   bit 1 of %ah */
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending: the pending and mask fields
	 * are adjacent bytes, so a single 16-bit compare sees both;
	 * the word is 0x0001 exactly when pending == 1 and mask == 0.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f

2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)
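
/*
 * Illustrative C sketch of restore (not kernel code; vcpu_info field
 * names from Xen's public interface):
 *
 *	vcpu_info->evtchn_upcall_mask = (flags & X86_EFLAGS_IF) ? 0 : 1;
 *	barrier();
 *	if (!vcpu_info->evtchn_upcall_mask &&
 *	    vcpu_info->evtchn_upcall_pending)
 *		xen_force_evtchn_callback();
 */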

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
check_events:
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	ret
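
/*
 * Editorial note (assumption, not from the original file): check_events
 * can be reached from patched-inline call sites that expect no
 * registers beyond the normal pvops clobbers to change, which is why
 * every caller-save register is saved around the call above;
 * xen_force_evtchn_callback() itself is ordinary C and follows the
 * usual ABI.
 */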