/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs <-> event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping tables.  The
 * lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
        IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
        struct list_head list;
        enum xen_irq_type type; /* type */
        unsigned irq;
        unsigned short evtchn;  /* event channel */
        unsigned short cpu;     /* cpu bound */

        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct {
                        unsigned short pirq;
                        unsigned short gsi;
                        unsigned char vector;
                        unsigned char flags;
                        uint16_t domid;
                } pirq;
        } u;
};
#define PIRQ_NEEDS_EOI  (1 << 0)
#define PIRQ_SHAREABLE  (1 << 1)

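/*
 * Event channel port -> Linux irq mapping; -1 marks an unbound port.
 * The table is allocated in xen_init_IRQ().
 */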
static int *evtchn_to_irq;

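/*
 * Per-cpu bitmap of event channels bound to each cpu; an event is
 * delivered locally only if its bit is set here (see active_evtchns()).
 */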
static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
                      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
        return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
                                     unsigned irq,
                                     enum xen_irq_type type,
                                     unsigned short evtchn,
                                     unsigned short cpu)
{
        BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

        info->type = type;
        info->irq = irq;
        info->evtchn = evtchn;
        info->cpu = cpu;

        evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
                                     unsigned short evtchn)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
                                  unsigned irq,
                                  unsigned short evtchn,
                                  enum ipi_vector ipi)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

        info->u.ipi = ipi;

        per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
                                   unsigned irq,
                                   unsigned short evtchn,
                                   unsigned short virq)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

        info->u.virq = virq;

        per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
                                   unsigned short evtchn,
                                   unsigned short pirq,
                                   unsigned short gsi,
                                   unsigned short vector,
                                   uint16_t domid,
                                   unsigned char flags)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

        info->u.pirq.pirq = pirq;
        info->u.pirq.gsi = gsi;
        info->u.pirq.vector = vector;
        info->u.pirq.domid = domid;
        info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
        if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %u!\n", irq)))
                return 0;

        return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
        return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);

        return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_VIRQ);

        return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
        return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
        return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        int irq = evtchn_to_irq[evtchn];
        unsigned ret = 0;

        if (irq != -1)
                ret = cpu_from_irq(irq);

        return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

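/*
 * A port is returned as active on this cpu only if it is pending,
 * not globally masked, and bound to this cpu: the pending word is
 * ANDed with the cpu's binding mask and the complement of the global
 * mask.
 */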
static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                per_cpu(cpu_evtchn_mask, cpu)[idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

        clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
        set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

        info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;
#ifdef CONFIG_SMP
        struct irq_info *info;

        /* By default all event channels notify CPU#0. */
        list_for_each_entry(info, &xen_irq_list_head, list) {
                struct irq_desc *desc = irq_to_desc(info->irq);
                cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
        }
#endif

        for_each_possible_cpu(i)
                memset(per_cpu(cpu_evtchn_mask, i),
                       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
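
/*
 * Illustrative sketch (not part of this file): a typical frontend
 * kicks its backend with notify_remote_via_irq() after publishing
 * work on a shared ring.  The struct and field names below are
 * hypothetical.
 */
#if 0
static void example_frontend_kick(struct example_front_info *info)
{
        int notify;

        /* Publish queued requests; Xen's ring macro tells us if the
         * remote end needs an event. */
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
        if (notify)
                notify_remote_via_irq(info->irq);
}
#endif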

static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static void xen_irq_init(unsigned irq)
{
        struct irq_info *info;
        struct irq_desc *desc = irq_to_desc(irq);

#ifdef CONFIG_SMP
        /* By default all event channels notify CPU#0. */
        cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                panic("Unable to allocate metadata for IRQ%d\n", irq);

        info->type = IRQT_UNBOUND;

        irq_set_handler_data(irq, info);

        list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
        int first = 0;
        int irq;

#ifdef CONFIG_X86_IO_APIC
        /*
         * For an HVM guest or domain 0, which see "real" (emulated or
         * actual, respectively) GSIs, we allocate dynamic IRQs (e.g.
         * those corresponding to event channels or MSIs) from the
         * range above those "real" GSIs to avoid collisions.
         */
        if (xen_initial_domain() || xen_hvm_domain())
                first = get_nr_irqs_gsi();
#endif

        irq = irq_alloc_desc_from(first, -1);

        if (irq >= 0)
                xen_irq_init(irq);

        return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
        int irq;

        /*
         * A PV guest has no concept of a GSI (since it has no ACPI
         * nor access to/knowledge of the physical APICs). Therefore
         * all IRQs are dynamically allocated from the entire IRQ
         * space.
         */
        if (xen_pv_domain() && !xen_initial_domain())
                return xen_allocate_irq_dynamic();

        /* Legacy IRQ descriptors are already allocated by the arch. */
        if (gsi < NR_IRQS_LEGACY)
                irq = gsi;
        else
                irq = irq_alloc_desc_at(gsi, -1);

        if (irq >= 0)
                xen_irq_init(irq);

        return irq;
}

static void xen_free_irq(unsigned irq)
{
        struct irq_info *info = irq_get_handler_data(irq);

        list_del(&info->list);

        irq_set_handler_data(irq, NULL);

        kfree(info);

        /* Legacy IRQ descriptors are managed by the arch. */
        if (irq < NR_IRQS_LEGACY)
                return;

        irq_free_desc(irq);
}

static void pirq_unmask_notify(int irq)
{
        struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

        if (unlikely(pirq_needs_eoi(irq))) {
                int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                WARN_ON(rc);
        }
}

static void pirq_query_unmask(int irq)
{
        struct physdev_irq_status_query irq_status;
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        irq_status.irq = pirq_from_irq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;

        info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->action == NULL;
}

static unsigned int __startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);
        int rc;

        BUG_ON(info->type != IRQT_PIRQ);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = pirq_from_irq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
                                        BIND_PIRQ__WILL_SHARE : 0;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (rc != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq);

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        info->evtchn = evtchn;

out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq);

        return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
        return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
        struct evtchn_close close;
        unsigned int irq = data->irq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
        startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
}

static void ack_pirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        irq_move_irq(data);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static int find_irq_by_gsi(unsigned gsi)
{
        struct irq_info *info;

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;

                if (info->u.pirq.gsi == gsi)
                        return info->irq;
        }

        return -1;
}

int xen_allocate_pirq_gsi(unsigned gsi)
{
        return gsi;
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up.  Return an existing irq if we've already got one for
 * the gsi.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                             unsigned pirq, int shareable, char *name)
{
        int irq = -1;
        struct physdev_irq irq_op;

        spin_lock(&irq_mapping_update_lock);

        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "%s: returning irq %d for gsi %u\n",
                       __func__, irq, gsi);
                goto out;       /* XXX need refcount? */
        }

        irq = xen_allocate_irq_gsi(gsi);
        if (irq < 0)
                goto out;

        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
                                      name);

        irq_op.irq = irq;
        irq_op.vector = 0;

        /* Only the privileged domain can do this. For non-priv, the pcifront
         * driver provides a PCI bus that does the call to do exactly
         * this in the priv domain. */
        if (xen_initial_domain() &&
            HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
                xen_free_irq(irq);
                irq = -ENOSPC;
                goto out;
        }

        xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
                               shareable ? PIRQ_SHAREABLE : 0);

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
        int rc;
        struct physdev_get_free_pirq op_get_free_pirq;

        op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

        WARN_ONCE(rc == -ENOSYS,
                  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

        return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                             int pirq, int vector, const char *name,
                             domid_t domid)
{
        int irq, ret;

        spin_lock(&irq_mapping_update_lock);

        irq = xen_allocate_irq_dynamic();
        if (irq < 0)
                goto out;

        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
                                      name);

        xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
        ret = irq_set_msi_desc(irq, msidesc);
        if (ret < 0)
                goto error_irq;
out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
error_irq:
        spin_unlock(&irq_mapping_update_lock);
        xen_free_irq(irq);
        return -1;
}
#endif

int xen_destroy_irq(int irq)
{
        struct irq_desc *desc;
        struct physdev_unmap_pirq unmap_irq;
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;

        spin_lock(&irq_mapping_update_lock);

        desc = irq_to_desc(irq);
        if (!desc)
                goto out;

        if (xen_initial_domain()) {
                unmap_irq.pirq = info->u.pirq.pirq;
                unmap_irq.domid = info->u.pirq.domid;
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
                if (rc) {
                        printk(KERN_WARNING "unmap irq failed %d\n", rc);
                        goto out;
                }
        }

        xen_free_irq(irq);
        rc = 0;

out:
        spin_unlock(&irq_mapping_update_lock);
        return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
        int irq;

        struct irq_info *info;

        spin_lock(&irq_mapping_update_lock);

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;
                irq = info->irq;
                if (info->u.pirq.pirq == pirq)
                        goto out;
        }
        irq = -1;
out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_fasteoi_irq, "event");

                xen_irq_info_evtchn_init(irq, evtchn);
        }

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                          unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}


int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                xen_irq_info_virq_init(cpu, irq, evtchn, virq);

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
        }

        BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

        xen_free_irq(irq);

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        int irq, retval;

        irq = bind_evtchn_to_irq(evtchn);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
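
/*
 * Illustrative sketch (not part of this file): a frontend typically
 * reads the event-channel port from xenstore and binds it with
 * bind_evtchn_to_irqhandler().  The function and parameter names
 * below are hypothetical.
 */
#if 0
static int example_front_connect(struct xenbus_device *dev,
                                 unsigned int evtchn,
                                 irq_handler_t handler, void *priv)
{
        int irq;

        irq = bind_evtchn_to_irqhandler(evtchn, handler, 0,
                                        dev->nodename, priv);
        if (irq < 0)
                return irq;     /* allocation or request_irq failed */

        /* Keep irq so it can be torn down with unbind_from_irqhandler(). */
        return irq;
}
#endif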

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
                                          unsigned int remote_port,
                                          irq_handler_t handler,
                                          unsigned long irqflags,
                                          const char *devname,
                                          void *dev_id)
{
        int irq, retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        int irq, retval;

        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
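
/*
 * Illustrative sketch (not part of this file): per-cpu VIRQs such as
 * VIRQ_TIMER or VIRQ_DEBUG are bound this way on each cpu.  For
 * example, the debug handler defined later in this file could be
 * attached with something like the following.
 */
#if 0
static void example_bind_debug_virq(unsigned int cpu)
{
        int irq = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
                                          xen_debug_interrupt,
                                          IRQF_PERCPU | IRQF_NOBALANCING,
                                          "xen-dbg", NULL);
        WARN_ON(irq < 0);
}
#endif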

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
        struct vcpu_info *v;

        spin_lock_irqsave(&debug_lock, flags);

        printk("\nvcpu %d\n  ", cpu);

        for_each_online_cpu(i) {
                int pending;
                v = per_cpu(xen_vcpu, i);
                pending = (get_irq_regs() && i == cpu)
                        ? xen_irqs_disabled(get_irq_regs())
                        : v->evtchn_upcall_mask;
                printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
                       pending, v->evtchn_upcall_pending,
                       (int)(sizeof(v->evtchn_pending_sel)*2),
                       v->evtchn_pending_sel);
        }
        v = per_cpu(xen_vcpu, cpu);

        printk("\npending:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
                       sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n   " : " ");
        printk("\nglobal mask:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s",
                       (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nglobally unmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nlocal cpu%d mask:\n   ", cpu);
        for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
                       cpu_evtchn[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nlocally unmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
                unsigned long pending = sh->evtchn_pending[i]
                        & ~sh->evtchn_mask[i]
                        & cpu_evtchn[i];
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       pending, i % 8 == 0 ? "\n   " : " ");
        }

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        int word_idx = i / BITS_PER_LONG;
                        printk("  %d: event %d -> irq %d%s%s%s\n",
                               cpu_from_evtchn(i), i,
                               evtchn_to_irq[i],
                               sync_test_bit(word_idx, &v->evtchn_pending_sel)
                                             ? "" : " l2-clear",
                               !sync_test_bit(i, sh->evtchn_mask)
                                             ? "" : " globally-masked",
                               sync_test_bit(i, cpu_evtchn)
                                             ? "" : " locally-masked");
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) ((w) & ((~0UL) << (i)))
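
/*
 * For example, MASK_LSBS(0x2d, 3) == 0x28: bits 0-2 are cleared so a
 * subsequent __ffs() skips positions that were already handled.
 */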

/*
 * Search the cpu's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
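/*
 * For example, on a 64-bit build, pending port 67 sets bit 1 of
 * evtchn_pending_sel (word_idx 1) and bit 3 of evtchn_pending[1]
 * (bit_idx 3); the port is recovered as 1 * BITS_PER_LONG + 3 = 67.
 */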
static void __xen_evtchn_do_upcall(void)
{
        int start_word_idx, start_bit_idx;
        int word_idx, bit_idx;
        int i;
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__this_cpu_inc_return(xed_nesting_count) - 1)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

                start_word_idx = __this_cpu_read(current_word_idx);
                start_bit_idx = __this_cpu_read(current_bit_idx);

                word_idx = start_word_idx;

                for (i = 0; pending_words != 0; i++) {
                        unsigned long pending_bits;
                        unsigned long words;

                        words = MASK_LSBS(pending_words, word_idx);

                        /*
                         * If we masked out all events, wrap to beginning.
                         */
                        if (words == 0) {
                                word_idx = 0;
                                bit_idx = 0;
                                continue;
                        }
                        word_idx = __ffs(words);

                        pending_bits = active_evtchns(cpu, s, word_idx);
                        bit_idx = 0; /* usually scan entire word from start */
                        if (word_idx == start_word_idx) {
                                /* We scan the starting word in two parts */
                                if (i == 0)
                                        /* 1st time: start in the middle */
                                        bit_idx = start_bit_idx;
                                else
                                        /* 2nd time: mask bits done already */
                                        bit_idx &= (1UL << start_bit_idx) - 1;
                        }

                        do {
                                unsigned long bits;
                                int port, irq;
                                struct irq_desc *desc;

                                bits = MASK_LSBS(pending_bits, bit_idx);

                                /* If we masked out all events, move on. */
                                if (bits == 0)
                                        break;

                                bit_idx = __ffs(bits);

                                /* Process port. */
                                port = (word_idx * BITS_PER_LONG) + bit_idx;
                                irq = evtchn_to_irq[port];

                                mask_evtchn(port);
                                clear_evtchn(port);

                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
                                                generic_handle_irq_desc(irq, desc);
                                }

                                bit_idx = (bit_idx + 1) % BITS_PER_LONG;

                                /* Next caller starts at last processed + 1 */
                                __this_cpu_write(current_word_idx,
                                                 bit_idx ? word_idx :
                                                 (word_idx+1) % BITS_PER_LONG);
                                __this_cpu_write(current_bit_idx, bit_idx);
                        } while (bit_idx != 0);

                        /* Scan start_l1i twice; all others once. */
                        if ((word_idx != start_word_idx) || (i != 0))
                                pending_words &= ~(1UL << word_idx);

                        word_idx = (word_idx + 1) % BITS_PER_LONG;
                }

                BUG_ON(!irqs_disabled());

                count = __this_cpu_read(xed_nesting_count);
                __this_cpu_write(xed_nesting_count, 0);
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

        put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        exit_idle();
        irq_enter();

        __xen_evtchn_do_upcall();

        irq_exit();
        set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
        __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        struct irq_info *info = info_for_irq(irq);

        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

        xen_irq_info_evtchn_init(irq, evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return -1;

        /*
         * Events delivered via platform PCI interrupts are always
         * routed to vcpu 0 and hence cannot be rebound.
         */
        if (xen_hvm_domain() && !xen_have_vector_callback)
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                            bool force)
{
        unsigned tcpu = cpumask_first(dest);

        return rebind_irq_to_cpu(data->irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

static void enable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        irq_move_masked_irq(data);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static int retrigger_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_pirqs(void)
{
        int pirq, rc, irq, gsi;
        struct physdev_map_pirq map_irq;
        struct irq_info *info;

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;

                pirq = info->u.pirq.pirq;
                gsi = info->u.pirq.gsi;
                irq = info->irq;

                /* save/restore of PT devices doesn't work, so at this point the
                 * only devices present are GSI based emulated devices */
                if (!gsi)
                        continue;

                map_irq.domid = DOMID_SELF;
                map_irq.type = MAP_PIRQ_TYPE_GSI;
                map_irq.index = gsi;
                map_irq.pirq = pirq;

                rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
                if (rc) {
                        printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
                                        gsi, irq, pirq, rc);
                        xen_free_irq(irq);
                        continue;
                }

                printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

                __startup_pirq(irq);
        }
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                xen_irq_info_virq_init(cpu, irq, evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = timeout;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
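
/*
 * Illustrative sketch (not part of this file): the poll interface
 * suits busy-wait style code such as a spinlock slowpath.  Clear the
 * pending state, re-check the condition, then block in the hypervisor
 * until the (otherwise disabled) irq becomes pending.  The names below
 * are hypothetical.
 */
#if 0
static void example_wait_for_kick(int poll_irq, atomic_t *kicked)
{
        do {
                xen_clear_irq_pending(poll_irq);
                if (atomic_read(kicked))
                        break;
                xen_poll_irq(poll_irq); /* returns once irq is pending */
        } while (!atomic_read(kicked));
}
#endif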

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
        struct irq_info *info = info_for_irq(irq);
        struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                return 0;
        return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
        unsigned int cpu, evtchn;
        struct irq_info *info;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        list_for_each_entry(info, &xen_irq_list_head, list)
                info->evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }

        restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name                   = "xen-dyn",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_eoi                = ack_dynirq,
        .irq_set_affinity       = set_affinity_irq,
        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
        .name                   = "xen-pirq",

        .irq_startup            = startup_pirq,
        .irq_shutdown           = shutdown_pirq,

        .irq_enable             = enable_pirq,
        .irq_unmask             = enable_pirq,

        .irq_disable            = disable_pirq,
        .irq_mask               = disable_pirq,

        .irq_ack                = ack_pirq,

        .irq_set_affinity       = set_affinity_irq,

        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
        .name                   = "xen-percpu",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;
        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
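
/*
 * Note: the 'via' value encodes the delivery method in its top bits
 * (see HVM_PARAM_CALLBACK_IRQ in the Xen interface headers); the
 * HVM_CALLBACK_VECTOR() macro used below builds the vector-type
 * encoding.
 */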

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;
        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        printk(KERN_ERR "Request for Xen HVM callback vector"
                                        " failed.\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                printk(KERN_INFO "Xen HVM callback vector for event delivery is "
                                "enabled\n");
                /* in the restore case the vector has already been allocated */
                if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
                        alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
        int i;

        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                GFP_KERNEL);
        BUG_ON(evtchn_to_irq == NULL);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
                /* pci_xen_hvm_init must be called after native_init_IRQ so that
                 * __acpi_register_gsi can point at the right function */
                pci_xen_hvm_init();
        } else {
                irq_ctx_init(smp_processor_id());
                if (xen_initial_domain())
                        xen_setup_pirqs();
        }
}