/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel on a particular CPU:
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/ptrace.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 */
struct irq_info {
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)

static struct irq_info *irq_info;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}
static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
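
/*
 * Worked example for active_evtchns(): with sh->evtchn_pending[idx] == 0x15
 * (ports 0, 2 and 4 of this word pending), cpu_evtchn_mask(cpu)[idx] == 0x05
 * (ports 0 and 2 bound to this cpu) and sh->evtchn_mask[idx] == 0x01 (port 0
 * masked), the result is 0x15 & 0x05 & ~0x01 == 0x04: only port 2 is
 * pending, bound to this cpu and unmasked.
 */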
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/* Use the size of the whole bitmap, not of the pointer returned
	 * by cpu_evtchn_mask(), so every channel is marked CPU0-bound. */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}
static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;

	for (irq = 0; irq < nr_irqs; irq++) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;

		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;
}
static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}
static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = irq };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}
static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}
static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}
static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}
static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}
static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}
static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}
/*
 * Allocate a physical irq, along with a vector.  We don't assign an
 * event channel until the irq is actually started up.  Return an
 * existing irq if we've already got one for the gsi.
 */
int xen_allocate_pirq(unsigned gsi, char *name)
{
	int irq;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_allocate_pirq: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	if (identity_mapped_irq(gsi)) {
		irq = gsi;
		irq_to_desc_alloc_node(irq, 0);
		dynamic_irq_init(irq);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		dynamic_irq_cleanup(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, gsi, irq_op.vector);

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
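
/*
 * Illustrative sketch only, not part of this file: platform setup code
 * would map a fixed GSI to a pirq-backed irq and then request it as usual.
 * The handler and names below are hypothetical; the event channel itself
 * is bound lazily by startup_pirq() when the irq is first enabled.
 */
static irqreturn_t example_gsi_handler(int irq, void *dev_id)
{
	/* Handle the hardware interrupt; end_pirq() re-enables delivery. */
	return IRQ_HANDLED;
}

static int __init example_map_gsi(unsigned gsi)
{
	int irq = xen_allocate_pirq(gsi, "example-gsi");

	if (irq < 0)
		return irq;

	return request_irq(irq, example_gsi_handler, 0, "example-gsi", NULL);
}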
int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
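
/*
 * Illustrative sketch only, not part of this file: a frontend driver that
 * has learned its backend's event channel port (e.g. via xenstore) binds
 * it like any other interrupt, then kicks the backend through the same
 * irq.  All names here are hypothetical.
 */
static irqreturn_t example_frontend_interrupt(int irq, void *dev_id)
{
	/* Consume responses from the shared ring here. */
	return IRQ_HANDLED;
}

static int example_frontend_connect(unsigned int evtchn, void *priv)
{
	int irq = bind_evtchn_to_irqhandler(evtchn, example_frontend_interrupt,
					    0, "example-front", priv);
	if (irq < 0)
		return irq;

	/* Tell the backend we are ready; safe even across save/restore. */
	notify_remote_via_irq(irq);

	return irq;
}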
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
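
/*
 * Illustrative sketch only, not part of this file: per-cpu VIRQs such as
 * VIRQ_TIMER are bound separately on each cpu, much as the Xen time code
 * does.  The handler name is hypothetical.
 */
static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static void example_setup_timer(int cpu)
{
	int irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu,
					  example_timer_interrupt,
					  IRQF_PERCPU | IRQF_NOBALANCING,
					  "timer", NULL);
	WARN_ON(irq < 0);
}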
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
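
/*
 * Illustrative sketch only, not part of this file: the smp code fans a
 * cross-cpu function call out to each target vcpu with xen_send_IPI_one(),
 * assuming XEN_CALL_FUNCTION_VECTOR was bound on every cpu at boot.
 */
static void example_send_call_function_ipi(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		xen_send_IPI_one(cpu, XEN_CALL_FUNCTION_VECTOR);
}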
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long flags;
	int i;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nmasks:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nunmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}
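
/*
 * Worked example of the two-level decode above: if
 * vcpu_info->evtchn_pending_sel is 0x9, words 0 and 3 of the pending array
 * need scanning.  The outer loop picks word_idx = __ffs(0x9) = 0 first and
 * clears that selector bit; if active_evtchns(cpu, s, 0) then returns 0x80,
 * the inner loop computes bit_idx = 7 and port = 0 * BITS_PER_LONG + 7 = 7,
 * looks up evtchn_to_irq[7] and hands the irq to the generic layer.  Word 3
 * is scanned next, where a pending bit b corresponds to port
 * 3 * BITS_PER_LONG + b.
 */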
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}
void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}
static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}
/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
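
/*
 * Illustrative sketch only, not part of this file: the pattern the pv
 * spinlock code uses with the three helpers above.  The irq stays
 * disabled, so the pending event ends the SCHEDOP_poll block without ever
 * running a handler; the function name is hypothetical.
 */
static void example_wait_for_kick(int irq)
{
	/* Discard any stale kick from a previous wait. */
	xen_clear_irq_pending(irq);

	/* Block in Xen until the irq's event channel becomes pending. */
	xen_poll_irq(irq);

	/* A genuine kick leaves the event pending; ack it before returning. */
	if (xen_test_irq_pending(irq))
		xen_clear_irq_pending(irq);
}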
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}