xen/events: Handle VIRQ_TIMER before any other hardirq in event loop.
pandora-kernel.git: drivers/xen/events.c
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
        IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
        struct list_head list;
        enum xen_irq_type type; /* type */
        unsigned irq;
        unsigned short evtchn;  /* event channel */
        unsigned short cpu;     /* cpu bound */

        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct {
                        unsigned short pirq;
                        unsigned short gsi;
                        unsigned char vector;
                        unsigned char flags;
                        uint16_t domid;
                } pirq;
        } u;
};
#define PIRQ_NEEDS_EOI  (1 << 0)
#define PIRQ_SHAREABLE  (1 << 1)
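
/*
 * Illustrative sketch (not built here; numbers are made up): after
 * binding, say, VIRQ_TIMER on CPU 2 to IRQ 17 via event channel 42,
 * the packed information above would read roughly:
 *
 *      info->type   = IRQT_VIRQ;
 *      info->irq    = 17;
 *      info->evtchn = 42;
 *      info->cpu    = 2;
 *      info->u.virq = VIRQ_TIMER;
 */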

static int *evtchn_to_irq;

static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
                      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
        return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
                                     unsigned irq,
                                     enum xen_irq_type type,
                                     unsigned short evtchn,
                                     unsigned short cpu)
{
        BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

        info->type = type;
        info->irq = irq;
        info->evtchn = evtchn;
        info->cpu = cpu;

        evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
                                     unsigned short evtchn)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
                                  unsigned irq,
                                  unsigned short evtchn,
                                  enum ipi_vector ipi)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

        info->u.ipi = ipi;

        per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
                                   unsigned irq,
                                   unsigned short evtchn,
                                   unsigned short virq)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

        info->u.virq = virq;

        per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
                                   unsigned short evtchn,
                                   unsigned short pirq,
                                   unsigned short gsi,
                                   unsigned short vector,
                                   uint16_t domid,
                                   unsigned char flags)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

        info->u.pirq.pirq = pirq;
        info->u.pirq.gsi = gsi;
        info->u.pirq.vector = vector;
        info->u.pirq.domid = domid;
        info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
        if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
                return 0;

        return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
        return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);

        return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_VIRQ);

        return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
        return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
        return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        int irq = evtchn_to_irq[evtchn];
        unsigned ret = 0;

        if (irq != -1)
                ret = cpu_from_irq(irq);

        return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return sh->evtchn_pending[idx] &
                per_cpu(cpu_evtchn_mask, cpu)[idx] &
                ~sh->evtchn_mask[idx];
}
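
/*
 * Worked example (illustrative only): on a 64-bit build, event channel
 * port 67 lives in word idx = 67 / 64 = 1 at bit 67 % 64 = 3.  The port
 * is "active" for this cpu when it is pending in the shared page, not
 * globally masked, and selected in this cpu's cpu_evtchn_mask; that is,
 * when bit 3 of active_evtchns(cpu, sh, 1) is set.
 */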

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

        clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
        set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

        info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;
#ifdef CONFIG_SMP
        struct irq_info *info;

        /* By default all event channels notify CPU#0. */
        list_for_each_entry(info, &xen_irq_list_head, list) {
                struct irq_desc *desc = irq_to_desc(info->irq);
                cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
        }
#endif

        for_each_possible_cpu(i)
                memset(per_cpu(cpu_evtchn_mask, i),
                       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
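
/*
 * Usage sketch (illustrative only; ring_irq is hypothetical): a
 * front-end that has queued requests on a shared ring would kick the
 * back-end like so:
 *
 *      RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *      if (notify)
 *              notify_remote_via_irq(ring_irq);
 *
 * where ring_irq came from bind_evtchn_to_irqhandler() below.
 */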

static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static void xen_irq_init(unsigned irq)
{
        struct irq_info *info;
#ifdef CONFIG_SMP
        struct irq_desc *desc = irq_to_desc(irq);

        /* By default all event channels notify CPU#0. */
        cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                panic("Unable to allocate metadata for IRQ%d\n", irq);

        info->type = IRQT_UNBOUND;

        irq_set_handler_data(irq, info);

        list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
        int first = 0;
        int irq;

#ifdef CONFIG_X86_IO_APIC
        /*
         * For an HVM guest or domain 0, which sees "real" (emulated
         * or actual, respectively) GSIs, we allocate dynamic IRQs
         * (e.g. those corresponding to event channels or MSIs) from
         * the range above those "real" GSIs to avoid collisions.
         */
        if (xen_initial_domain() || xen_hvm_domain())
                first = get_nr_irqs_gsi();
#endif

        irq = irq_alloc_desc_from(first, -1);

        if (irq >= 0)
                xen_irq_init(irq);

        return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
        int irq;

        /*
         * A PV guest has no concept of a GSI (since it has no ACPI
         * nor access to/knowledge of the physical APICs). Therefore
         * all IRQs are dynamically allocated from the entire IRQ
         * space.
         */
        if (xen_pv_domain() && !xen_initial_domain())
                return xen_allocate_irq_dynamic();

        /* Legacy IRQ descriptors are already allocated by the arch. */
        if (gsi < NR_IRQS_LEGACY)
                irq = gsi;
        else
                irq = irq_alloc_desc_at(gsi, -1);

        xen_irq_init(irq);

        return irq;
}

static void xen_free_irq(unsigned irq)
{
        struct irq_info *info = irq_get_handler_data(irq);

        list_del(&info->list);

        irq_set_handler_data(irq, NULL);

        kfree(info);

        /* Legacy IRQ descriptors are managed by the arch. */
        if (irq < NR_IRQS_LEGACY)
                return;

        irq_free_desc(irq);
}

static void pirq_query_unmask(int irq)
{
        struct physdev_irq_status_query irq_status;
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        irq_status.irq = pirq_from_irq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;

        info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);
        struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
        int rc = 0;

        irq_move_irq(data);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);

        if (pirq_needs_eoi(data->irq)) {
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                WARN_ON(rc);
        }
}

static void mask_ack_pirq(struct irq_data *data)
{
        disable_dynirq(data);
        eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);
        int rc;

        BUG_ON(info->type != IRQT_PIRQ);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = pirq_from_irq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
                                        BIND_PIRQ__WILL_SHARE : 0;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (rc != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq);

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        info->evtchn = evtchn;

out:
        unmask_evtchn(evtchn);
        eoi_pirq(irq_get_irq_data(irq));

        return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
        return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
        struct evtchn_close close;
        unsigned int irq = data->irq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
        startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
        disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
        struct irq_info *info;

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;

                if (info->u.pirq.gsi == gsi)
                        return info->irq;
        }

        return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq has actually
 * been started up.  Return an existing irq if we've already got one
 * for the gsi.
 *
 * Shareable implies level triggered; not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                             unsigned pirq, int shareable, char *name)
{
        int irq = -1;
        struct physdev_irq irq_op;

        mutex_lock(&irq_mapping_update_lock);

        irq = xen_irq_from_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
                       irq, gsi);
                goto out;       /* XXX need refcount? */
        }

        irq = xen_allocate_irq_gsi(gsi);
        if (irq < 0)
                goto out;

        irq_op.irq = irq;
        irq_op.vector = 0;

        /* Only the privileged domain can do this. For non-priv, the pcifront
         * driver provides a PCI bus that does the call to do exactly
         * this in the priv domain. */
        if (xen_initial_domain() &&
            HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
                xen_free_irq(irq);
                irq = -ENOSPC;
                goto out;
        }

        xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
                               shareable ? PIRQ_SHAREABLE : 0);

        pirq_query_unmask(irq);
        /* We try to use the handler with the appropriate semantic for the
         * type of interrupt: if the interrupt is an edge triggered
         * interrupt we use handle_edge_irq.
         *
         * On the other hand if the interrupt is level triggered we use
         * handle_fasteoi_irq like the native code does for this kind of
         * interrupt.
         *
         * Depending on the Xen version, pirq_needs_eoi might return true
         * not only for level triggered interrupts but for edge triggered
         * interrupts too. In any case Xen always honors the eoi mechanism,
         * not injecting any more pirqs of the same kind if the first one
         * hasn't received an eoi yet. Therefore using the fasteoi handler
         * is the right choice either way.
         */
        if (shareable)
                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
                                handle_fasteoi_irq, name);
        else
                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
                                handle_edge_irq, name);

out:
        mutex_unlock(&irq_mapping_update_lock);

        return irq;
}
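
/*
 * Usage sketch (illustrative only): the x86 arch code registers an
 * ACPI/IO-APIC interrupt roughly like this, with pirq typically equal
 * to the gsi for dom0:
 *
 *      irq = xen_bind_pirq_gsi_to_irq(gsi, gsi,
 *                                     trigger == ACPI_LEVEL_SENSITIVE,
 *                                     "ioapic-level");
 *
 * The event channel itself is only bound when the irq is started up.
 */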

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
        int rc;
        struct physdev_get_free_pirq op_get_free_pirq;

        op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

        WARN_ONCE(rc == -ENOSYS,
                  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

        return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                             int pirq, int vector, const char *name,
                             domid_t domid)
{
        int irq, ret;

        mutex_lock(&irq_mapping_update_lock);

        irq = xen_allocate_irq_dynamic();
        if (irq < 0)
                goto out;

        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
                        name);

        xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
        ret = irq_set_msi_desc(irq, msidesc);
        if (ret < 0)
                goto error_irq;
out:
        mutex_unlock(&irq_mapping_update_lock);
        return irq;
error_irq:
        mutex_unlock(&irq_mapping_update_lock);
        xen_free_irq(irq);
        return ret;
}
#endif

int xen_destroy_irq(int irq)
{
        struct irq_desc *desc;
        struct physdev_unmap_pirq unmap_irq;
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;

        mutex_lock(&irq_mapping_update_lock);

        desc = irq_to_desc(irq);
        if (!desc)
                goto out;

        if (xen_initial_domain()) {
                unmap_irq.pirq = info->u.pirq.pirq;
                unmap_irq.domid = info->u.pirq.domid;
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
                /* If another domain quits without making the pci_disable_msix
                 * call, the Xen hypervisor takes care of freeing the PIRQs
                 * (free_domain_pirqs).
                 */
                if (rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)
                        printk(KERN_INFO "domain %d does not have %d anymore\n",
                                info->u.pirq.domid, info->u.pirq.pirq);
                else if (rc) {
                        printk(KERN_WARNING "unmap irq failed %d\n", rc);
                        goto out;
                }
        }

        xen_free_irq(irq);

out:
        mutex_unlock(&irq_mapping_update_lock);
        return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
        int irq;
        struct irq_info *info;

        mutex_lock(&irq_mapping_update_lock);

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;
                irq = info->irq;
                if (info->u.pirq.pirq == pirq)
                        goto out;
        }
        irq = -1;
out:
        mutex_unlock(&irq_mapping_update_lock);

        return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
        return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        mutex_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_edge_irq, "event");

                xen_irq_info_evtchn_init(irq, evtchn);
        }

out:
        mutex_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        mutex_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        mutex_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                          unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

static int find_virq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_status status;
        int port, rc = -ENOENT;

        memset(&status, 0, sizeof(status));
        for (port = 0; port < NR_EVENT_CHANNELS; port++) {
                status.dom = DOMID_SELF;
                status.port = port;
                rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
                if (rc < 0)
                        continue;
                rc = -ENOENT;   /* a successful query is not yet a match */
                if (status.status != EVTCHNSTAT_virq)
                        continue;
                if (status.u.virq == virq && status.vcpu == cpu) {
                        rc = port;
                        break;
                }
        }
        return rc;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq, ret;

        mutex_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                  &bind_virq);
                if (ret == 0)
                        evtchn = bind_virq.port;
                else {
                        if (ret == -EEXIST)
                                ret = find_virq(virq, cpu);
                        BUG_ON(ret < 0);
                        evtchn = ret;
                }

                xen_irq_info_virq_init(cpu, irq, evtchn, virq);

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        mutex_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        mutex_lock(&irq_mapping_update_lock);

        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
        }

        BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

        xen_free_irq(irq);

        mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        int irq, retval;

        irq = bind_evtchn_to_irq(evtchn);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
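
/*
 * Usage sketch (illustrative only; names are hypothetical): a
 * front-end driver that learned its event channel from xenstore
 * would do something like:
 *
 *      static irqreturn_t blkfront_interrupt(int irq, void *dev_id)
 *      {
 *              // drain responses from the shared ring here
 *              return IRQ_HANDLED;
 *      }
 *
 *      err = bind_evtchn_to_irqhandler(evtchn, blkfront_interrupt,
 *                                      0, "blkfront", dev);
 *      if (err < 0)
 *              goto fail;
 *      dev->irq = err;         // on success the Linux irq is returned
 */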

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
                                          unsigned int remote_port,
                                          irq_handler_t handler,
                                          unsigned long irqflags,
                                          const char *devname,
                                          void *dev_id)
{
        int irq, retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        int irq, retval;

        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
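
/*
 * Usage sketch (illustrative only): this is essentially how the debug
 * interrupt below gets wired up per cpu elsewhere in the Xen code:
 *
 *      rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
 *                                   IRQF_PERCPU | IRQF_NOBALANCING,
 *                                   "xen-dbg", NULL);
 */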

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
        struct vcpu_info *v;

        spin_lock_irqsave(&debug_lock, flags);

        printk("\nvcpu %d\n  ", cpu);

        for_each_online_cpu(i) {
                int pending;
                v = per_cpu(xen_vcpu, i);
                pending = (get_irq_regs() && i == cpu)
                        ? xen_irqs_disabled(get_irq_regs())
                        : v->evtchn_upcall_mask;
                printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
                       pending, v->evtchn_upcall_pending,
                       (int)(sizeof(v->evtchn_pending_sel)*2),
                       v->evtchn_pending_sel);
        }
        v = per_cpu(xen_vcpu, cpu);

        printk("\npending:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
                       sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n   " : " ");
        printk("\nglobal mask:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s",
                       (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nglobally unmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nlocal cpu%d mask:\n   ", cpu);
        for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
                       cpu_evtchn[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nlocally unmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
                unsigned long pending = sh->evtchn_pending[i]
                        & ~sh->evtchn_mask[i]
                        & cpu_evtchn[i];
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       pending, i % 8 == 0 ? "\n   " : " ");
        }

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        int word_idx = i / BITS_PER_LONG;
                        printk("  %d: event %d -> irq %d%s%s%s\n",
                               cpu_from_evtchn(i), i,
                               evtchn_to_irq[i],
                               sync_test_bit(word_idx, &v->evtchn_pending_sel)
                                             ? "" : " l2-clear",
                               !sync_test_bit(i, sh->evtchn_mask)
                                             ? "" : " globally-masked",
                               sync_test_bit(i, cpu_evtchn)
                                             ? "" : " locally-masked");
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) ((w) & ((~0UL) << (i)))
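
/*
 * Worked example (illustrative only): MASK_LSBS(0xb6, 3) == 0xb0;
 * bits 0..2 are cleared, so __ffs() on the result finds the first set
 * bit at or above position 3.  The scan loops below use this to resume
 * a search part-way through a word.
 */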

/*
 * Search the CPU's pending-event bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
        int start_word_idx, start_bit_idx;
        int word_idx, bit_idx;
        int i, irq;
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
        unsigned count;

        do {
                unsigned long pending_words;
                unsigned long pending_bits;
                struct irq_desc *desc;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__this_cpu_inc_return(xed_nesting_count) - 1)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                /*
                 * Handle the timer VIRQ before any other pending event,
                 * so that all hardirq handlers see an up-to-date system
                 * time even if we have just woken from a long idle period.
                 */
                if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
                        int evtchn = evtchn_from_irq(irq);
                        word_idx = evtchn / BITS_PER_LONG;
                        pending_bits = evtchn % BITS_PER_LONG;
                        if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
                                desc = irq_to_desc(irq);
                                if (desc)
                                        generic_handle_irq_desc(irq, desc);
                        }
                }

                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

                start_word_idx = __this_cpu_read(current_word_idx);
                start_bit_idx = __this_cpu_read(current_bit_idx);

                word_idx = start_word_idx;

                for (i = 0; pending_words != 0; i++) {
                        unsigned long words;

                        words = MASK_LSBS(pending_words, word_idx);

                        /*
                         * If we masked out all events, wrap to beginning.
                         */
                        if (words == 0) {
                                word_idx = 0;
                                bit_idx = 0;
                                continue;
                        }
                        word_idx = __ffs(words);

                        pending_bits = active_evtchns(cpu, s, word_idx);
                        bit_idx = 0; /* usually scan entire word from start */
                        if (word_idx == start_word_idx) {
                                /* We scan the starting word in two parts */
                                if (i == 0)
                                        /* 1st time: start in the middle */
                                        bit_idx = start_bit_idx;
                                else
                                        /* 2nd time: mask bits done already */
                                        bit_idx &= (1UL << start_bit_idx) - 1;
                        }

                        do {
                                unsigned long bits;
                                int port;

                                bits = MASK_LSBS(pending_bits, bit_idx);

                                /* If we masked out all events, move on. */
                                if (bits == 0)
                                        break;

                                bit_idx = __ffs(bits);

                                /* Process port. */
                                port = (word_idx * BITS_PER_LONG) + bit_idx;
                                irq = evtchn_to_irq[port];

                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
                                                generic_handle_irq_desc(irq, desc);
                                }

                                bit_idx = (bit_idx + 1) % BITS_PER_LONG;

                                /* Next caller starts at last processed + 1 */
                                __this_cpu_write(current_word_idx,
                                                 bit_idx ? word_idx :
                                                 (word_idx+1) % BITS_PER_LONG);
                                __this_cpu_write(current_bit_idx, bit_idx);
                        } while (bit_idx != 0);

                        /* Scan start_l1i twice; all others once. */
                        if ((word_idx != start_word_idx) || (i != 0))
                                pending_words &= ~(1UL << word_idx);

                        word_idx = (word_idx + 1) % BITS_PER_LONG;
                }

                BUG_ON(!irqs_disabled());

                count = __this_cpu_read(xed_nesting_count);
                __this_cpu_write(xed_nesting_count, 0);
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

        put_cpu();
}
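
/*
 * Worked example (illustrative only): on a 64-bit build, a pending
 * event on port 130 sets bit 130 % 64 = 2 in evtchn_pending[2] and bit
 * 130 / 64 = 2 in this vcpu's evtchn_pending_sel.  The outer loop above
 * finds word_idx = 2 from the selector, the inner loop finds
 * bit_idx = 2, and port = 2 * 64 + 2 = 130 is handed to its irq
 * handler.  The saved current_word_idx/current_bit_idx make the next
 * upcall resume just past port 130, so busy low-numbered ports cannot
 * starve higher ones.
 */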

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        exit_idle();
        irq_enter();

        __xen_evtchn_do_upcall();

        irq_exit();
        set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
        __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        struct irq_info *info = info_for_irq(irq);

        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        mutex_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

        xen_irq_info_evtchn_init(irq, evtchn);

        mutex_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return -1;

        /*
         * Events delivered via platform PCI interrupts are always
         * routed to vcpu 0 and hence cannot be rebound.
         */
        if (xen_hvm_domain() && !xen_have_vector_callback)
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                            bool force)
{
        unsigned tcpu = cpumask_first(dest);

        return rebind_irq_to_cpu(data->irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

static void enable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        irq_move_irq(data);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
        disable_dynirq(data);
        ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_pirqs(void)
{
        int pirq, rc, irq, gsi;
        struct physdev_map_pirq map_irq;
        struct irq_info *info;

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;

                pirq = info->u.pirq.pirq;
                gsi = info->u.pirq.gsi;
                irq = info->irq;

                /* save/restore of PT devices doesn't work, so at this point the
                 * only devices present are GSI based emulated devices */
                if (!gsi)
                        continue;

                map_irq.domid = DOMID_SELF;
                map_irq.type = MAP_PIRQ_TYPE_GSI;
                map_irq.index = gsi;
                map_irq.pirq = pirq;

                rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
                if (rc) {
                        printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
                                        gsi, irq, pirq, rc);
                        xen_free_irq(irq);
                        continue;
                }

                printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

                __startup_pirq(irq);
        }
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                xen_irq_info_virq_init(cpu, irq, evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = timeout;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
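
/*
 * Usage sketch (illustrative only): the Xen spinlock slowpath parks a
 * waiting vcpu on a dedicated "lock kicker" irq in roughly this way:
 *
 *      xen_clear_irq_pending(irq);
 *      if (still_contended(lock))      // hypothetical recheck
 *              xen_poll_irq(irq);      // blocks until the irq is kicked
 *
 * Clearing first and then rechecking avoids sleeping through a kick
 * that arrived in between.
 */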

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
        struct irq_info *info = info_for_irq(irq);
        struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                return 0;
        return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
        unsigned int cpu, evtchn;
        struct irq_info *info;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        list_for_each_entry(info, &xen_irq_list_head, list)
                info->evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }

        restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name                   = "xen-dyn",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
        .irq_mask_ack           = mask_ack_dynirq,

        .irq_set_affinity       = set_affinity_irq,
        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
        .name                   = "xen-pirq",

        .irq_startup            = startup_pirq,
        .irq_shutdown           = shutdown_pirq,
        .irq_enable             = enable_pirq,
        .irq_disable            = disable_pirq,

        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = eoi_pirq,
        .irq_eoi                = eoi_pirq,
        .irq_mask_ack           = mask_ack_pirq,

        .irq_set_affinity       = set_affinity_irq,

        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
        .name                   = "xen-percpu",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;
        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
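
/*
 * Illustrative note: the 'via' value encodes the delivery method in
 * its top bits.  xen_callback_vector() below builds it with
 * HVM_CALLBACK_VECTOR(vector), requesting delivery as a direct vector
 * callback, e.g.
 *
 *      callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
 *      rc = xen_set_callback_via(callback_via);
 */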

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;
        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        printk(KERN_ERR "Request for Xen HVM callback vector"
                                        " failed.\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                printk(KERN_INFO "Xen HVM callback vector for event delivery is "
                                "enabled\n");
                /* in the restore case the vector has already been allocated */
                if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
                        alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
        int i;

        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                GFP_KERNEL);
        BUG_ON(!evtchn_to_irq);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
                /* pci_xen_hvm_init must be called after native_init_IRQ so that
                 * __acpi_register_gsi can point at the right function */
                pci_xen_hvm_init();
        } else {
                irq_ctx_init(smp_processor_id());
                if (xen_initial_domain())
                        pci_xen_initial_domain();
        }
}