4 * Xen models interrupts with abstract event channels. Because each
5 * domain gets 1024 event channels, but NR_IRQS is not that large, so we
6 * must dynamically map irqs<->event channels. The event channels
7 * interface with the rest of the kernel by defining a xen interrupt
8 * chip. When an event is received, it is mapped to an irq and sent
9 * through the normal interrupt processing path.
11 * There are four kinds of events which can be mapped to an event
12 * channel:
14 * 1. Inter-domain notifications. This includes all the virtual
15 * device events, since they're driven by front-ends in another domain
16 * (typically dom0).
17 * 2. VIRQs, typically used for timers. These are per-cpu events.
18 * 3. IPIs.
19 * 4. PIRQs - Hardware interrupts.
21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
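/*
 * Illustrative usage sketch (an assumption about a typical caller, not part
 * of this file): a frontend driver binds its event channel to a Linux irq
 * and a handler roughly like this, where 'evtchn', 'my_handler' and 'my_dev'
 * are hypothetical driver-side names:
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	notify_remote_via_irq(irq);
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */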
24 #include <linux/linkage.h>
25 #include <linux/interrupt.h>
26 #include <linux/irq.h>
27 #include <linux/module.h>
28 #include <linux/string.h>
29 #include <linux/bootmem.h>
30 #include <linux/slab.h>
31 #include <linux/irqnr.h>
32 #include <linux/pci.h>
35 #include <asm/ptrace.h>
38 #include <asm/io_apic.h>
39 #include <asm/sync_bitops.h>
40 #include <asm/xen/pci.h>
41 #include <asm/xen/hypercall.h>
42 #include <asm/xen/hypervisor.h>
46 #include <xen/xen-ops.h>
47 #include <xen/events.h>
48 #include <xen/interface/xen.h>
49 #include <xen/interface/event_channel.h>
50 #include <xen/interface/hvm/hvm_op.h>
51 #include <xen/interface/hvm/params.h>
54 * This lock protects updates to the following mapping and reference-count
55 * arrays. The lock does not need to be acquired to read the mapping tables.
57 static DEFINE_SPINLOCK(irq_mapping_update_lock);
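/*
 * Sketch of the update-side pattern used by the bind/unbind paths below
 * (readers of the mapping tables deliberately take no lock):
 *
 *	spin_lock(&irq_mapping_update_lock);
 *	... allocate the irq, update evtchn_to_irq[] and the per-cpu tables ...
 *	spin_unlock(&irq_mapping_update_lock);
 */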
59 static LIST_HEAD(xen_irq_list_head);
61 /* IRQ <-> VIRQ mapping. */
62 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
64 /* IRQ <-> IPI mapping */
65 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
67 /* Interrupt types. */
77 * Packed IRQ information:
78 * type - enum xen_irq_type
79 * event channel - irq->event channel mapping
80 * cpu - cpu this event channel is bound to
81 * index - type-specific information:
82 * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
83 * guest, or GSI (real passthrough IRQ) of the device.
90 struct list_head list;
91 enum xen_irq_type type; /* type */
93 unsigned short evtchn; /* event channel */
94 unsigned short cpu; /* cpu bound */
102 unsigned char vector;
108 #define PIRQ_NEEDS_EOI (1 << 0)
109 #define PIRQ_SHAREABLE (1 << 1)
111 static int *evtchn_to_irq;
113 static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
116 /* Xen will never allocate port zero for any purpose. */
117 #define VALID_EVTCHN(chn) ((chn) != 0)
119 static struct irq_chip xen_dynamic_chip;
120 static struct irq_chip xen_percpu_chip;
121 static struct irq_chip xen_pirq_chip;
123 /* Get info for IRQ */
124 static struct irq_info *info_for_irq(unsigned irq)
126 return irq_get_handler_data(irq);
129 /* Constructors for packed IRQ information. */
130 static void xen_irq_info_common_init(struct irq_info *info,
132 enum xen_irq_type type,
133 unsigned short evtchn,
137 BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
141 info->evtchn = evtchn;
144 evtchn_to_irq[evtchn] = irq;
147 static void xen_irq_info_evtchn_init(unsigned irq,
148 unsigned short evtchn)
150 struct irq_info *info = info_for_irq(irq);
152 xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
155 static void xen_irq_info_ipi_init(unsigned cpu,
157 unsigned short evtchn,
160 struct irq_info *info = info_for_irq(irq);
162 xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
166 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
169 static void xen_irq_info_virq_init(unsigned cpu,
171 unsigned short evtchn,
174 struct irq_info *info = info_for_irq(irq);
176 xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
180 per_cpu(virq_to_irq, cpu)[virq] = irq;
183 static void xen_irq_info_pirq_init(unsigned irq,
184 unsigned short evtchn,
187 unsigned short vector,
191 struct irq_info *info = info_for_irq(irq);
193 xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
195 info->u.pirq.pirq = pirq;
196 info->u.pirq.gsi = gsi;
197 info->u.pirq.vector = vector;
198 info->u.pirq.domid = domid;
199 info->u.pirq.flags = flags;
203 * Accessors for packed IRQ information.
205 static unsigned int evtchn_from_irq(unsigned irq)
207 if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
210 return info_for_irq(irq)->evtchn;
213 unsigned irq_from_evtchn(unsigned int evtchn)
215 return evtchn_to_irq[evtchn];
217 EXPORT_SYMBOL_GPL(irq_from_evtchn);
219 static enum ipi_vector ipi_from_irq(unsigned irq)
221 struct irq_info *info = info_for_irq(irq);
223 BUG_ON(info == NULL);
224 BUG_ON(info->type != IRQT_IPI);
229 static unsigned virq_from_irq(unsigned irq)
231 struct irq_info *info = info_for_irq(irq);
233 BUG_ON(info == NULL);
234 BUG_ON(info->type != IRQT_VIRQ);
239 static unsigned pirq_from_irq(unsigned irq)
241 struct irq_info *info = info_for_irq(irq);
243 BUG_ON(info == NULL);
244 BUG_ON(info->type != IRQT_PIRQ);
246 return info->u.pirq.pirq;
249 static enum xen_irq_type type_from_irq(unsigned irq)
251 return info_for_irq(irq)->type;
254 static unsigned cpu_from_irq(unsigned irq)
256 return info_for_irq(irq)->cpu;
259 static unsigned int cpu_from_evtchn(unsigned int evtchn)
261 int irq = evtchn_to_irq[evtchn];
265 ret = cpu_from_irq(irq);
270 static bool pirq_needs_eoi(unsigned irq)
272 struct irq_info *info = info_for_irq(irq);
274 BUG_ON(info->type != IRQT_PIRQ);
276 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
279 static inline unsigned long active_evtchns(unsigned int cpu,
280 struct shared_info *sh,
283 return (sh->evtchn_pending[idx] &
284 per_cpu(cpu_evtchn_mask, cpu)[idx] &
285 ~sh->evtchn_mask[idx]);
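/*
 * Worked example (illustrative values) for one word 'idx' on a 64-bit build:
 *
 *	sh->evtchn_pending[idx]            = 0x6  (ports idx*64+1, idx*64+2 pending)
 *	per_cpu(cpu_evtchn_mask, cpu)[idx] = 0x4  (only idx*64+2 bound to this cpu)
 *	sh->evtchn_mask[idx]               = 0x2  (idx*64+1 globally masked)
 *
 * active_evtchns() then returns 0x4, so only port idx*64+2 is handled here.
 */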
288 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
290 int irq = evtchn_to_irq[chn];
294 cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
297 clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
298 set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
300 info_for_irq(irq)->cpu = cpu;
303 static void init_evtchn_cpu_bindings(void)
307 struct irq_info *info;
309 /* By default all event channels notify CPU#0. */
310 list_for_each_entry(info, &xen_irq_list_head, list) {
311 struct irq_desc *desc = irq_to_desc(info->irq);
312 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
316 for_each_possible_cpu(i)
317 memset(per_cpu(cpu_evtchn_mask, i),
318 (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
321 static inline void clear_evtchn(int port)
323 struct shared_info *s = HYPERVISOR_shared_info;
324 sync_clear_bit(port, &s->evtchn_pending[0]);
327 static inline void set_evtchn(int port)
329 struct shared_info *s = HYPERVISOR_shared_info;
330 sync_set_bit(port, &s->evtchn_pending[0]);
333 static inline int test_evtchn(int port)
335 struct shared_info *s = HYPERVISOR_shared_info;
336 return sync_test_bit(port, &s->evtchn_pending[0]);
341 * notify_remote_via_irq - send event to remote end of event channel via irq
342 * @irq: irq of event channel to send event to
344 * Unlike notify_remote_via_evtchn(), this is safe to use across
345 * save/restore. Notifications on a broken connection are silently
346 * dropped.
348 void notify_remote_via_irq(int irq)
350 int evtchn = evtchn_from_irq(irq);
352 if (VALID_EVTCHN(evtchn))
353 notify_remote_via_evtchn(evtchn);
355 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
357 static void mask_evtchn(int port)
359 struct shared_info *s = HYPERVISOR_shared_info;
360 sync_set_bit(port, &s->evtchn_mask[0]);
363 static void unmask_evtchn(int port)
365 struct shared_info *s = HYPERVISOR_shared_info;
366 unsigned int cpu = get_cpu();
368 BUG_ON(!irqs_disabled());
370 /* Slow path (hypercall) if this is a non-local port. */
371 if (unlikely(cpu != cpu_from_evtchn(port))) {
372 struct evtchn_unmask unmask = { .port = port };
373 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
375 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
377 sync_clear_bit(port, &s->evtchn_mask[0]);
380 * The following is basically the equivalent of
381 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
382 * the interrupt edge' if the channel is masked.
384 if (sync_test_bit(port, &s->evtchn_pending[0]) &&
385 !sync_test_and_set_bit(port / BITS_PER_LONG,
386 &vcpu_info->evtchn_pending_sel))
387 vcpu_info->evtchn_upcall_pending = 1;
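/*
 * Sketch of the reinjection above (illustrative, 64-bit): unmasking pending
 * port 70 sets bit 70 / 64 == 1 of evtchn_pending_sel and raises
 * evtchn_upcall_pending, so the next upcall rescans word 1 and delivers the
 * event even though its "edge" arrived while the channel was masked.
 */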
393 static void xen_irq_init(unsigned irq)
395 struct irq_info *info;
396 struct irq_desc *desc = irq_to_desc(irq);
399 /* By default all event channels notify CPU#0. */
400 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
403 info = kzalloc(sizeof(*info), GFP_KERNEL);
405 panic("Unable to allocate metadata for IRQ%d\n", irq);
407 info->type = IRQT_UNBOUND;
409 irq_set_handler_data(irq, info);
411 list_add_tail(&info->list, &xen_irq_list_head);
414 static int __must_check xen_allocate_irq_dynamic(void)
419 #ifdef CONFIG_X86_IO_APIC
421 * For an HVM guest or domain 0 which see "real" (emulated or
422 * actual respectively) GSIs we allocate dynamic IRQs
423 * e.g. those corresponding to event channels or MSIs
424 * etc. from the range above those "real" GSIs to avoid
425 * collisions.
427 if (xen_initial_domain() || xen_hvm_domain())
428 first = get_nr_irqs_gsi();
431 irq = irq_alloc_desc_from(first, -1);
438 static int __must_check xen_allocate_irq_gsi(unsigned gsi)
443 * A PV guest has no concept of a GSI (since it has no ACPI
444 * nor access to/knowledge of the physical APICs). Therefore
445 * all IRQs are dynamically allocated from the entire IRQ
448 if (xen_pv_domain() && !xen_initial_domain())
449 return xen_allocate_irq_dynamic();
451 /* Legacy IRQ descriptors are already allocated by the arch. */
452 if (gsi < NR_IRQS_LEGACY)
455 irq = irq_alloc_desc_at(gsi, -1);
462 static void xen_free_irq(unsigned irq)
464 struct irq_info *info = irq_get_handler_data(irq);
466 list_del(&info->list);
468 irq_set_handler_data(irq, NULL);
472 /* Legacy IRQ descriptors are managed by the arch. */
473 if (irq < NR_IRQS_LEGACY)
479 static void pirq_unmask_notify(int irq)
481 struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };
483 if (unlikely(pirq_needs_eoi(irq))) {
484 int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
489 static void pirq_query_unmask(int irq)
491 struct physdev_irq_status_query irq_status;
492 struct irq_info *info = info_for_irq(irq);
494 BUG_ON(info->type != IRQT_PIRQ);
496 irq_status.irq = pirq_from_irq(irq);
497 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
498 irq_status.flags = 0;
500 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
501 if (irq_status.flags & XENIRQSTAT_needs_eoi)
502 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
505 static bool probing_irq(int irq)
507 struct irq_desc *desc = irq_to_desc(irq);
509 return desc && desc->action == NULL;
512 static unsigned int __startup_pirq(unsigned int irq)
514 struct evtchn_bind_pirq bind_pirq;
515 struct irq_info *info = info_for_irq(irq);
516 int evtchn = evtchn_from_irq(irq);
519 BUG_ON(info->type != IRQT_PIRQ);
521 if (VALID_EVTCHN(evtchn))
524 bind_pirq.pirq = pirq_from_irq(irq);
525 /* NB. We are happy to share unless we are probing. */
526 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
527 BIND_PIRQ__WILL_SHARE : 0;
528 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
530 if (!probing_irq(irq))
531 printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
535 evtchn = bind_pirq.port;
537 pirq_query_unmask(irq);
539 evtchn_to_irq[evtchn] = irq;
540 bind_evtchn_to_cpu(evtchn, 0);
541 info->evtchn = evtchn;
544 unmask_evtchn(evtchn);
545 pirq_unmask_notify(irq);
550 static unsigned int startup_pirq(struct irq_data *data)
552 return __startup_pirq(data->irq);
555 static void shutdown_pirq(struct irq_data *data)
557 struct evtchn_close close;
558 unsigned int irq = data->irq;
559 struct irq_info *info = info_for_irq(irq);
560 int evtchn = evtchn_from_irq(irq);
562 BUG_ON(info->type != IRQT_PIRQ);
564 if (!VALID_EVTCHN(evtchn))
570 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
573 bind_evtchn_to_cpu(evtchn, 0);
574 evtchn_to_irq[evtchn] = -1;
578 static void enable_pirq(struct irq_data *data)
583 static void disable_pirq(struct irq_data *data)
587 static void ack_pirq(struct irq_data *data)
589 int evtchn = evtchn_from_irq(data->irq);
593 if (VALID_EVTCHN(evtchn)) {
595 clear_evtchn(evtchn);
599 static int find_irq_by_gsi(unsigned gsi)
601 struct irq_info *info;
603 list_for_each_entry(info, &xen_irq_list_head, list) {
604 if (info->type != IRQT_PIRQ)
607 if (info->u.pirq.gsi == gsi)
614 int xen_allocate_pirq_gsi(unsigned gsi)
620 * Do not make any assumptions regarding the relationship between the
621 * IRQ number returned here and the Xen pirq argument.
623 * Note: We don't assign an event channel until the irq is actually started
624 * up. Return an existing irq if we've already got one for the gsi.
626 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
627 unsigned pirq, int shareable, char *name)
630 struct physdev_irq irq_op;
632 spin_lock(&irq_mapping_update_lock);
634 irq = find_irq_by_gsi(gsi);
636 printk(KERN_INFO "xen_bind_pirq_gsi_to_irq: returning irq %d for gsi %u\n",
638 goto out; /* XXX need refcount? */
641 irq = xen_allocate_irq_gsi(gsi);
645 irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
651 /* Only the privileged domain can do this. For non-priv, the pcifront
652 * driver provides a PCI bus that does the call to do exactly
653 * this in the priv domain. */
654 if (xen_initial_domain() &&
655 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
661 xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
662 shareable ? PIRQ_SHAREABLE : 0);
665 spin_unlock(&irq_mapping_update_lock);
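/*
 * Illustrative caller sequence (an assumption, not taken from this file):
 * dom0 interrupt setup code typically pairs the two GSI helpers above,
 * roughly:
 *
 *	pirq = xen_allocate_pirq_gsi(gsi);
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, "ioapic");
 */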
670 #ifdef CONFIG_PCI_MSI
671 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
674 struct physdev_get_free_pirq op_get_free_pirq;
676 op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
677 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
679 WARN_ONCE(rc == -ENOSYS,
680 "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
682 return rc ? -1 : op_get_free_pirq.pirq;
685 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
686 int pirq, int vector, const char *name,
691 spin_lock(&irq_mapping_update_lock);
693 irq = xen_allocate_irq_dynamic();
697 irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
700 xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
701 ret = irq_set_msi_desc(irq, msidesc);
705 spin_unlock(&irq_mapping_update_lock);
708 spin_unlock(&irq_mapping_update_lock);
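/*
 * Illustrative MSI flow (an assumption about the Xen PCI setup code): ask
 * Xen for a free pirq, then bind it to an irq, roughly:
 *
 *	pirq = xen_allocate_pirq_msi(dev, msidesc);
 *	if (pirq < 0)
 *		return pirq;
 *	irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
 *				       "pcifront-msi", DOMID_SELF);
 */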
714 int xen_destroy_irq(int irq)
716 struct irq_desc *desc;
717 struct physdev_unmap_pirq unmap_irq;
718 struct irq_info *info = info_for_irq(irq);
721 spin_lock(&irq_mapping_update_lock);
723 desc = irq_to_desc(irq);
727 if (xen_initial_domain()) {
728 unmap_irq.pirq = info->u.pirq.pirq;
729 unmap_irq.domid = info->u.pirq.domid;
730 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
732 printk(KERN_WARNING "unmap irq failed %d\n", rc);
740 spin_unlock(&irq_mapping_update_lock);
744 int xen_irq_from_pirq(unsigned pirq)
748 struct irq_info *info;
750 spin_lock(&irq_mapping_update_lock);
752 list_for_each_entry(info, &xen_irq_list_head, list) {
753 if (info == NULL || info->type != IRQT_PIRQ)
756 if (info->u.pirq.pirq == pirq)
761 spin_unlock(&irq_mapping_update_lock);
766 int bind_evtchn_to_irq(unsigned int evtchn)
770 spin_lock(&irq_mapping_update_lock);
772 irq = evtchn_to_irq[evtchn];
775 irq = xen_allocate_irq_dynamic();
779 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
780 handle_fasteoi_irq, "event");
782 xen_irq_info_evtchn_init(irq, evtchn);
786 spin_unlock(&irq_mapping_update_lock);
790 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
792 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
794 struct evtchn_bind_ipi bind_ipi;
797 spin_lock(&irq_mapping_update_lock);
799 irq = per_cpu(ipi_to_irq, cpu)[ipi];
802 irq = xen_allocate_irq_dynamic();
806 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
807 handle_percpu_irq, "ipi");
810 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
813 evtchn = bind_ipi.port;
815 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
817 bind_evtchn_to_cpu(evtchn, cpu);
821 spin_unlock(&irq_mapping_update_lock);
825 static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
826 unsigned int remote_port)
828 struct evtchn_bind_interdomain bind_interdomain;
831 bind_interdomain.remote_dom = remote_domain;
832 bind_interdomain.remote_port = remote_port;
834 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
837 return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
841 int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
843 struct evtchn_bind_virq bind_virq;
846 spin_lock(&irq_mapping_update_lock);
848 irq = per_cpu(virq_to_irq, cpu)[virq];
851 irq = xen_allocate_irq_dynamic();
855 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
856 handle_percpu_irq, "virq");
858 bind_virq.virq = virq;
859 bind_virq.vcpu = cpu;
860 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
863 evtchn = bind_virq.port;
865 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
867 bind_evtchn_to_cpu(evtchn, cpu);
871 spin_unlock(&irq_mapping_update_lock);
876 static void unbind_from_irq(unsigned int irq)
878 struct evtchn_close close;
879 int evtchn = evtchn_from_irq(irq);
881 spin_lock(&irq_mapping_update_lock);
883 if (VALID_EVTCHN(evtchn)) {
885 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
888 switch (type_from_irq(irq)) {
890 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
891 [virq_from_irq(irq)] = -1;
894 per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
895 [ipi_from_irq(irq)] = -1;
901 /* Closed ports are implicitly re-bound to VCPU0. */
902 bind_evtchn_to_cpu(evtchn, 0);
904 evtchn_to_irq[evtchn] = -1;
907 BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
911 spin_unlock(&irq_mapping_update_lock);
914 int bind_evtchn_to_irqhandler(unsigned int evtchn,
915 irq_handler_t handler,
916 unsigned long irqflags,
917 const char *devname, void *dev_id)
922 irq = bind_evtchn_to_irq(evtchn);
925 retval = request_irq(irq, handler, irqflags, devname, dev_id);
927 unbind_from_irq(irq);
933 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
935 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
936 unsigned int remote_port,
937 irq_handler_t handler,
938 unsigned long irqflags,
944 irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
948 retval = request_irq(irq, handler, irqflags, devname, dev_id);
950 unbind_from_irq(irq);
956 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
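/*
 * Illustrative backend-side use (an assumption; 'otherend_id', 'remote_port',
 * 'be_handler' and 'be' are hypothetical names): a backend reads the remote
 * domain id and event-channel port from xenstore, then calls e.g.
 *
 *	irq = bind_interdomain_evtchn_to_irqhandler(otherend_id, remote_port,
 *						    be_handler, 0, "blkback", be);
 */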
958 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
959 irq_handler_t handler,
960 unsigned long irqflags, const char *devname, void *dev_id)
965 irq = bind_virq_to_irq(virq, cpu);
968 retval = request_irq(irq, handler, irqflags, devname, dev_id);
970 unbind_from_irq(irq);
976 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
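/*
 * Illustrative per-cpu VIRQ use (an assumption about the Xen timer code):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
 */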
978 int bind_ipi_to_irqhandler(enum ipi_vector ipi,
980 irq_handler_t handler,
981 unsigned long irqflags,
987 irq = bind_ipi_to_irq(ipi, cpu);
991 irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
992 retval = request_irq(irq, handler, irqflags, devname, dev_id);
994 unbind_from_irq(irq);
1001 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1003 free_irq(irq, dev_id);
1004 unbind_from_irq(irq);
1006 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1008 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1010 int irq = per_cpu(ipi_to_irq, cpu)[vector];
1012 notify_remote_via_irq(irq);
1015 irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
1017 struct shared_info *sh = HYPERVISOR_shared_info;
1018 int cpu = smp_processor_id();
1019 unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
1021 unsigned long flags;
1022 static DEFINE_SPINLOCK(debug_lock);
1023 struct vcpu_info *v;
1025 spin_lock_irqsave(&debug_lock, flags);
1027 printk("\nvcpu %d\n ", cpu);
1029 for_each_online_cpu(i) {
1031 v = per_cpu(xen_vcpu, i);
1032 pending = (get_irq_regs() && i == cpu)
1033 ? xen_irqs_disabled(get_irq_regs())
1034 : v->evtchn_upcall_mask;
1035 printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
1036 pending, v->evtchn_upcall_pending,
1037 (int)(sizeof(v->evtchn_pending_sel)*2),
1038 v->evtchn_pending_sel);
1040 v = per_cpu(xen_vcpu, cpu);
1042 printk("\npending:\n ");
1043 for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
1044 printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
1045 sh->evtchn_pending[i],
1046 i % 8 == 0 ? "\n " : " ");
1047 printk("\nglobal mask:\n ");
1048 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
1050 (int)(sizeof(sh->evtchn_mask[0])*2),
1052 i % 8 == 0 ? "\n " : " ");
1054 printk("\nglobally unmasked:\n ");
1055 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
1056 printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
1057 sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
1058 i % 8 == 0 ? "\n " : " ");
1060 printk("\nlocal cpu%d mask:\n ", cpu);
1061 for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
1062 printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
1064 i % 8 == 0 ? "\n " : " ");
1066 printk("\nlocally unmasked:\n ");
1067 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
1068 unsigned long pending = sh->evtchn_pending[i]
1069 & ~sh->evtchn_mask[i]
1071 printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
1072 pending, i % 8 == 0 ? "\n " : " ");
1075 printk("\npending list:\n");
1076 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
1077 if (sync_test_bit(i, sh->evtchn_pending)) {
1078 int word_idx = i / BITS_PER_LONG;
1079 printk(" %d: event %d -> irq %d%s%s%s\n",
1080 cpu_from_evtchn(i), i,
1082 sync_test_bit(word_idx, &v->evtchn_pending_sel)
1084 !sync_test_bit(i, sh->evtchn_mask)
1085 ? "" : " globally-masked",
1086 sync_test_bit(i, cpu_evtchn)
1087 ? "" : " locally-masked");
1091 spin_unlock_irqrestore(&debug_lock, flags);
1096 static DEFINE_PER_CPU(unsigned, xed_nesting_count);
1097 static DEFINE_PER_CPU(unsigned int, current_word_idx);
1098 static DEFINE_PER_CPU(unsigned int, current_bit_idx);
1101 * Mask out the i least significant bits of w
1103 #define MASK_LSBS(w, i) (w & ((~0UL) << i))
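/*
 * Example: MASK_LSBS(0xb6, 4) == 0xb0 -- bits 0..3 are cleared, so a
 * following __ffs() finds the lowest set bit at or above position 4.
 */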
1106 * Search the CPU's pending events bitmasks. For each one found, map
1107 * the event number to an irq, and feed it into do_IRQ() for
1108 * handling.
1110 * Xen uses a two-level bitmap to speed searching. The first level is
1111 * a bitset of words which contain pending event bits. The second
1112 * level is a bitset of pending events themselves.
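/*
 * Worked example (64-bit): pending port 131 sets bit 131 % 64 == 3 of
 * evtchn_pending[2] and bit 2 of evtchn_pending_sel; the loop below
 * recovers it as port = word_idx * BITS_PER_LONG + bit_idx = 2*64 + 3.
 */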
1114 static void __xen_evtchn_do_upcall(void)
1116 int start_word_idx, start_bit_idx;
1117 int word_idx, bit_idx;
1119 int cpu = get_cpu();
1120 struct shared_info *s = HYPERVISOR_shared_info;
1121 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
1125 unsigned long pending_words;
1127 vcpu_info->evtchn_upcall_pending = 0;
1129 if (__this_cpu_inc_return(xed_nesting_count) - 1)
1132 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
1133 /* Clear master flag /before/ clearing selector flag. */
1136 pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
1138 start_word_idx = __this_cpu_read(current_word_idx);
1139 start_bit_idx = __this_cpu_read(current_bit_idx);
1141 word_idx = start_word_idx;
1143 for (i = 0; pending_words != 0; i++) {
1144 unsigned long pending_bits;
1145 unsigned long words;
1147 words = MASK_LSBS(pending_words, word_idx);
1150 * If we masked out all events, wrap to beginning.
1157 word_idx = __ffs(words);
1159 pending_bits = active_evtchns(cpu, s, word_idx);
1160 bit_idx = 0; /* usually scan entire word from start */
1161 if (word_idx == start_word_idx) {
1162 /* We scan the starting word in two parts */
1164 /* 1st time: start in the middle */
1165 bit_idx = start_bit_idx;
1167 /* 2nd time: mask bits done already */
1168 bit_idx &= (1UL << start_bit_idx) - 1;
1174 struct irq_desc *desc;
1176 bits = MASK_LSBS(pending_bits, bit_idx);
1178 /* If we masked out all events, move on. */
1182 bit_idx = __ffs(bits);
1185 port = (word_idx * BITS_PER_LONG) + bit_idx;
1186 irq = evtchn_to_irq[port];
1192 desc = irq_to_desc(irq);
1194 generic_handle_irq_desc(irq, desc);
1197 bit_idx = (bit_idx + 1) % BITS_PER_LONG;
1199 /* Next caller starts at last processed + 1 */
1200 __this_cpu_write(current_word_idx,
1201 bit_idx ? word_idx :
1202 (word_idx+1) % BITS_PER_LONG);
1203 __this_cpu_write(current_bit_idx, bit_idx);
1204 } while (bit_idx != 0);
1206 /* Scan start_word_idx twice; all others once. */
1207 if ((word_idx != start_word_idx) || (i != 0))
1208 pending_words &= ~(1UL << word_idx);
1210 word_idx = (word_idx + 1) % BITS_PER_LONG;
1213 BUG_ON(!irqs_disabled());
1215 count = __this_cpu_read(xed_nesting_count);
1216 __this_cpu_write(xed_nesting_count, 0);
1217 } while (count != 1 || vcpu_info->evtchn_upcall_pending);
1224 void xen_evtchn_do_upcall(struct pt_regs *regs)
1226 struct pt_regs *old_regs = set_irq_regs(regs);
1231 __xen_evtchn_do_upcall();
1234 set_irq_regs(old_regs);
1237 void xen_hvm_evtchn_do_upcall(void)
1239 __xen_evtchn_do_upcall();
1241 EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
1243 /* Rebind a new event channel to an existing irq. */
1244 void rebind_evtchn_irq(int evtchn, int irq)
1246 struct irq_info *info = info_for_irq(irq);
1248 /* Make sure the irq is masked, since the new event channel
1249 will also be masked. */
1252 spin_lock(&irq_mapping_update_lock);
1254 /* After resume the irq<->evtchn mappings are all cleared out */
1255 BUG_ON(evtchn_to_irq[evtchn] != -1);
1256 /* Expect irq to have been bound before,
1257 so there should be a proper type */
1258 BUG_ON(info->type == IRQT_UNBOUND);
1260 xen_irq_info_evtchn_init(irq, evtchn);
1262 spin_unlock(&irq_mapping_update_lock);
1264 /* new event channels are always bound to cpu 0 */
1265 irq_set_affinity(irq, cpumask_of(0));
1267 /* Unmask the event channel. */
1271 /* Rebind an evtchn so that it gets delivered to a specific cpu */
1272 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1274 struct evtchn_bind_vcpu bind_vcpu;
1275 int evtchn = evtchn_from_irq(irq);
1277 if (!VALID_EVTCHN(evtchn))
1281 * Events delivered via platform PCI interrupts are always
1282 * routed to vcpu 0 and hence cannot be rebound.
1284 if (xen_hvm_domain() && !xen_have_vector_callback)
1287 /* Send future instances of this interrupt to other vcpu. */
1288 bind_vcpu.port = evtchn;
1289 bind_vcpu.vcpu = tcpu;
1292 * If this fails, it usually just indicates that we're dealing with a
1293 * virq or IPI channel, which don't actually need to be rebound. Ignore
1294 * it, but don't do the xenlinux-level rebind in that case.
1296 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1297 bind_evtchn_to_cpu(evtchn, tcpu);
1302 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1305 unsigned tcpu = cpumask_first(dest);
1307 return rebind_irq_to_cpu(data->irq, tcpu);
1310 int resend_irq_on_evtchn(unsigned int irq)
1312 int masked, evtchn = evtchn_from_irq(irq);
1313 struct shared_info *s = HYPERVISOR_shared_info;
1315 if (!VALID_EVTCHN(evtchn))
1318 masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
1319 sync_set_bit(evtchn, s->evtchn_pending);
1321 unmask_evtchn(evtchn);
1326 static void enable_dynirq(struct irq_data *data)
1328 int evtchn = evtchn_from_irq(data->irq);
1330 if (VALID_EVTCHN(evtchn))
1331 unmask_evtchn(evtchn);
1334 static void disable_dynirq(struct irq_data *data)
1336 int evtchn = evtchn_from_irq(data->irq);
1338 if (VALID_EVTCHN(evtchn))
1339 mask_evtchn(evtchn);
1342 static void ack_dynirq(struct irq_data *data)
1344 int evtchn = evtchn_from_irq(data->irq);
1346 irq_move_masked_irq(data);
1348 if (VALID_EVTCHN(evtchn))
1349 unmask_evtchn(evtchn);
1352 static int retrigger_dynirq(struct irq_data *data)
1354 int evtchn = evtchn_from_irq(data->irq);
1355 struct shared_info *sh = HYPERVISOR_shared_info;
1358 if (VALID_EVTCHN(evtchn)) {
1361 masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
1362 sync_set_bit(evtchn, sh->evtchn_pending);
1364 unmask_evtchn(evtchn);
1371 static void restore_pirqs(void)
1373 int pirq, rc, irq, gsi;
1374 struct physdev_map_pirq map_irq;
1375 struct irq_info *info;
1377 list_for_each_entry(info, &xen_irq_list_head, list) {
1378 if (info->type != IRQT_PIRQ)
1381 pirq = info->u.pirq.pirq;
1382 gsi = info->u.pirq.gsi;
1385 /* save/restore of PT devices doesn't work, so at this point the
1386 * only devices present are GSI based emulated devices */
1390 map_irq.domid = DOMID_SELF;
1391 map_irq.type = MAP_PIRQ_TYPE_GSI;
1392 map_irq.index = gsi;
1393 map_irq.pirq = pirq;
1395 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1397 printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1398 gsi, irq, pirq, rc);
1403 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1405 __startup_pirq(irq);
1409 static void restore_cpu_virqs(unsigned int cpu)
1411 struct evtchn_bind_virq bind_virq;
1412 int virq, irq, evtchn;
1414 for (virq = 0; virq < NR_VIRQS; virq++) {
1415 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1418 BUG_ON(virq_from_irq(irq) != virq);
1420 /* Get a new binding from Xen. */
1421 bind_virq.virq = virq;
1422 bind_virq.vcpu = cpu;
1423 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1426 evtchn = bind_virq.port;
1428 /* Record the new mapping. */
1429 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
1430 bind_evtchn_to_cpu(evtchn, cpu);
1434 static void restore_cpu_ipis(unsigned int cpu)
1436 struct evtchn_bind_ipi bind_ipi;
1437 int ipi, irq, evtchn;
1439 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1440 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1443 BUG_ON(ipi_from_irq(irq) != ipi);
1445 /* Get a new binding from Xen. */
1446 bind_ipi.vcpu = cpu;
1447 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1450 evtchn = bind_ipi.port;
1452 /* Record the new mapping. */
1453 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
1454 bind_evtchn_to_cpu(evtchn, cpu);
1458 /* Clear an irq's pending state, in preparation for polling on it */
1459 void xen_clear_irq_pending(int irq)
1461 int evtchn = evtchn_from_irq(irq);
1463 if (VALID_EVTCHN(evtchn))
1464 clear_evtchn(evtchn);
1466 EXPORT_SYMBOL(xen_clear_irq_pending);
1467 void xen_set_irq_pending(int irq)
1469 int evtchn = evtchn_from_irq(irq);
1471 if (VALID_EVTCHN(evtchn))
1475 bool xen_test_irq_pending(int irq)
1477 int evtchn = evtchn_from_irq(irq);
1480 if (VALID_EVTCHN(evtchn))
1481 ret = test_evtchn(evtchn);
1486 /* Poll waiting for an irq to become pending with timeout. In the usual case,
1487 * the irq will be disabled so it won't deliver an interrupt. */
1488 void xen_poll_irq_timeout(int irq, u64 timeout)
1490 evtchn_port_t evtchn = evtchn_from_irq(irq);
1492 if (VALID_EVTCHN(evtchn)) {
1493 struct sched_poll poll;
1496 poll.timeout = timeout;
1497 set_xen_guest_handle(poll.ports, &evtchn);
1499 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
1503 EXPORT_SYMBOL(xen_poll_irq_timeout);
1504 /* Poll waiting for an irq to become pending. In the usual case, the
1505 * irq will be disabled so it won't deliver an interrupt. */
1506 void xen_poll_irq(int irq)
1508 xen_poll_irq_timeout(irq, 0 /* no timeout */);
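/*
 * Illustrative polling pattern (an assumption, as used by e.g. the Xen
 * spinlock code; 'condition_is_true' is a hypothetical caller-side check):
 * clear the pending state, re-check the wait condition, then block in Xen
 * until the (normally disabled) irq's channel fires:
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition_is_true())
 *		xen_poll_irq(irq);
 */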
1511 /* Check whether the IRQ line is shared with other guests. */
1512 int xen_test_irq_shared(int irq)
1514 struct irq_info *info = info_for_irq(irq);
1515 struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
1517 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
1519 return !(irq_status.flags & XENIRQSTAT_shared);
1521 EXPORT_SYMBOL_GPL(xen_test_irq_shared);
1523 void xen_irq_resume(void)
1525 unsigned int cpu, evtchn;
1526 struct irq_info *info;
1528 init_evtchn_cpu_bindings();
1530 /* New event-channel space is not 'live' yet. */
1531 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1532 mask_evtchn(evtchn);
1534 /* No IRQ <-> event-channel mappings. */
1535 list_for_each_entry(info, &xen_irq_list_head, list)
1536 info->evtchn = 0; /* zap event-channel binding */
1538 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1539 evtchn_to_irq[evtchn] = -1;
1541 for_each_possible_cpu(cpu) {
1542 restore_cpu_virqs(cpu);
1543 restore_cpu_ipis(cpu);
1549 static struct irq_chip xen_dynamic_chip __read_mostly = {
1552 .irq_disable = disable_dynirq,
1553 .irq_mask = disable_dynirq,
1554 .irq_unmask = enable_dynirq,
1556 .irq_eoi = ack_dynirq,
1557 .irq_set_affinity = set_affinity_irq,
1558 .irq_retrigger = retrigger_dynirq,
1561 static struct irq_chip xen_pirq_chip __read_mostly = {
1564 .irq_startup = startup_pirq,
1565 .irq_shutdown = shutdown_pirq,
1567 .irq_enable = enable_pirq,
1568 .irq_unmask = enable_pirq,
1570 .irq_disable = disable_pirq,
1571 .irq_mask = disable_pirq,
1573 .irq_ack = ack_pirq,
1575 .irq_set_affinity = set_affinity_irq,
1577 .irq_retrigger = retrigger_dynirq,
1580 static struct irq_chip xen_percpu_chip __read_mostly = {
1581 .name = "xen-percpu",
1583 .irq_disable = disable_dynirq,
1584 .irq_mask = disable_dynirq,
1585 .irq_unmask = enable_dynirq,
1587 .irq_ack = ack_dynirq,
1590 int xen_set_callback_via(uint64_t via)
1592 struct xen_hvm_param a;
1593 a.domid = DOMID_SELF;
1594 a.index = HVM_PARAM_CALLBACK_IRQ;
1596 return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
1598 EXPORT_SYMBOL_GPL(xen_set_callback_via);
1600 #ifdef CONFIG_XEN_PVHVM
1601 /* Vector callbacks are better than PCI interrupts to receive event
1602 * channel notifications because we can receive vector callbacks on any
1603 * vcpu and we don't need PCI support or APIC interactions. */
1604 void xen_callback_vector(void)
1607 uint64_t callback_via;
1608 if (xen_have_vector_callback) {
1609 callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
1610 rc = xen_set_callback_via(callback_via);
1612 printk(KERN_ERR "Request for Xen HVM callback vector"
1614 xen_have_vector_callback = 0;
1617 printk(KERN_INFO "Xen HVM callback vector for event delivery is "
1619 /* in the restore case the vector has already been allocated */
1620 if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
1621 alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
1625 void xen_callback_vector(void) {}
1628 void __init xen_init_IRQ(void)
1632 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
1634 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1635 evtchn_to_irq[i] = -1;
1637 init_evtchn_cpu_bindings();
1639 /* No event channels are 'live' right now. */
1640 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1643 if (xen_hvm_domain()) {
1644 xen_callback_vector();
1646 /* pci_xen_hvm_init must be called after native_init_IRQ so that
1647 * __acpi_register_gsi can point at the right function */
1650 irq_ctx_init(smp_processor_id());
1651 if (xen_initial_domain())