Merge tag 'usb-3.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
[pandora-kernel.git] / virt / kvm / irq_comm.c
1 /*
2  * irq_comm.c: Common API for in kernel interrupt controller
3  * Copyright (c) 2007, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  * Authors:
18  *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
19  *
20  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
21  */
22
23 #include <linux/kvm_host.h>
24 #include <linux/slab.h>
25 #include <linux/export.h>
26 #include <trace/events/kvm.h>
27
28 #include <asm/msidef.h>
29 #ifdef CONFIG_IA64
30 #include <asm/iosapic.h>
31 #endif
32
33 #include "irq.h"
34
35 #include "ioapic.h"
36
/*
 * Route an irqchip routing entry to the in-kernel PIC.
 * Only meaningful on x86; other architectures have no PIC, so the
 * interrupt is reported as not delivered there.
 */
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level)
{
#ifndef CONFIG_X86
	return -1;
#else
	return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin,
			       irq_source_id, level);
#endif
}
47
48 static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
49                               struct kvm *kvm, int irq_source_id, int level)
50 {
51         struct kvm_ioapic *ioapic = kvm->arch.vioapic;
52         return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level);
53 }
54
55 inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
56 {
57 #ifdef CONFIG_IA64
58         return irq->delivery_mode ==
59                 (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
60 #else
61         return irq->delivery_mode == APIC_DM_LOWEST;
62 #endif
63 }
64
/*
 * kvm_irq_delivery_to_apic - deliver an interrupt to guest local APIC(s).
 * @kvm: the VM to deliver into
 * @src: source lapic, may be NULL (e.g. MSI injection)
 * @irq: the interrupt; its delivery_mode may be rewritten (see below)
 *
 * Return: -1 if no APIC accepted the interrupt, otherwise the sum of
 * kvm_apic_set_irq() results for the vcpus it was delivered to.
 */
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq)
{
	int i, r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;

	/*
	 * Lowest-priority combined with a physical-mode broadcast
	 * (dest_id 0xff) is ill-defined; degrade it to fixed delivery.
	 */
	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
			kvm_is_dm_lowest_prio(irq)) {
		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
		irq->delivery_mode = APIC_DM_FIXED;
	}

	/* Fast path: resolve the destination without scanning all vcpus. */
	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r))
		return r;

	/* Slow path: scan every vcpu for a destination match. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_is_dm_lowest_prio(irq)) {
			/* Fixed-style delivery: inject into every match. */
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq);
		} else if (kvm_lapic_enabled(vcpu)) {
			/* Lowest-priority: track the lowest-priority match. */
			if (!lowest)
				lowest = vcpu;
			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
				lowest = vcpu;
		}
	}

	/* Lowest-priority mode delivers to the single selected vcpu. */
	if (lowest)
		r = kvm_apic_set_irq(lowest, irq);

	return r;
}
105
106 static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
107                                    struct kvm_lapic_irq *irq)
108 {
109         trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
110
111         irq->dest_id = (e->msi.address_lo &
112                         MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
113         irq->vector = (e->msi.data &
114                         MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
115         irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
116         irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
117         irq->delivery_mode = e->msi.data & 0x700;
118         irq->level = 1;
119         irq->shorthand = 0;
120         /* TODO Deal with RH bit of MSI message address */
121 }
122
123 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
124                 struct kvm *kvm, int irq_source_id, int level)
125 {
126         struct kvm_lapic_irq irq;
127
128         if (!level)
129                 return -1;
130
131         kvm_set_msi_irq(e, &irq);
132
133         return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
134 }
135
136
137 static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
138                          struct kvm *kvm)
139 {
140         struct kvm_lapic_irq irq;
141         int r;
142
143         kvm_set_msi_irq(e, &irq);
144
145         if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r))
146                 return r;
147         else
148                 return -EWOULDBLOCK;
149 }
150
151 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
152 {
153         struct kvm_kernel_irq_routing_entry route;
154
155         if (!irqchip_in_kernel(kvm) || msi->flags != 0)
156                 return -EINVAL;
157
158         route.msi.address_lo = msi->address_lo;
159         route.msi.address_hi = msi->address_hi;
160         route.msi.data = msi->data;
161
162         return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
163 }
164
/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
	int ret = -1, i = 0;
	struct kvm_irq_routing_table *irq_rt;

	trace_kvm_set_irq(irq, level, irq_source_id);

	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
	/*
	 * Snapshot the routing entries for this GSI under RCU so the
	 * ->set callbacks can be invoked after dropping the read lock.
	 * At most one entry per irqchip exists (enforced by
	 * setup_routing_entry), so irq_set[] cannot overflow.
	 */
	rcu_read_lock();
	irq_rt = rcu_dereference(kvm->irq_routing);
	if (irq < irq_rt->nr_rt_entries)
		hlist_for_each_entry(e, &irq_rt->map[irq], link)
			irq_set[i++] = *e;
	rcu_read_unlock();

	while(i--) {
		int r;
		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
		if (r < 0)
			continue;

		/* Accumulate delivery counts; chips that ignored it (r < 0)
		 * do not turn a success into a failure. */
		ret = r + ((ret < 0) ? 0 : ret);
	}

	return ret;
}
201
/*
 * Deliver an IRQ in an atomic context if we can, or return a failure,
 * user can retry in a process context.
 * Return value:
 *  -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
 *  Other values - No need to retry.
 */
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e;
	int ret = -EINVAL;
	struct kvm_irq_routing_table *irq_rt;

	trace_kvm_set_irq(irq, level, irq_source_id);

	/*
	 * Injection into either PIC or IOAPIC might need to scan all CPUs,
	 * which would need to be retried from thread context;  when same GSI
	 * is connected to both PIC and IOAPIC, we'd have to report a
	 * partial failure here.
	 * Since there's no easy way to do this, we only support injecting MSI
	 * which is limited to 1:1 GSI mapping.
	 */
	rcu_read_lock();
	irq_rt = rcu_dereference(kvm->irq_routing);
	if (irq < irq_rt->nr_rt_entries)
		hlist_for_each_entry(e, &irq_rt->map[irq], link) {
			/* Only the first entry matters: an MSI GSI maps 1:1,
			 * anything else must be retried from thread context. */
			if (likely(e->type == KVM_IRQ_ROUTING_MSI))
				ret = kvm_set_msi_inatomic(e, kvm);
			else
				ret = -EWOULDBLOCK;
			break;
		}
	rcu_read_unlock();
	return ret;
}
238
239 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
240 {
241         struct kvm_irq_ack_notifier *kian;
242         int gsi;
243
244         rcu_read_lock();
245         gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
246         if (gsi != -1)
247                 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
248                                          link)
249                         if (kian->gsi == gsi) {
250                                 rcu_read_unlock();
251                                 return true;
252                         }
253
254         rcu_read_unlock();
255
256         return false;
257 }
258 EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
259
260 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
261 {
262         struct kvm_irq_ack_notifier *kian;
263         int gsi;
264
265         trace_kvm_ack_irq(irqchip, pin);
266
267         rcu_read_lock();
268         gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
269         if (gsi != -1)
270                 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
271                                          link)
272                         if (kian->gsi == gsi)
273                                 kian->irq_acked(kian);
274         rcu_read_unlock();
275 }
276
/*
 * Register an ack notifier: kian->irq_acked() will run when the guest
 * acks kian->gsi (see kvm_notify_acked_irq).  Afterwards request a
 * recomputation of the ioapic EOI bitmap so the new GSI is covered.
 * List mutation is serialized by kvm->irq_lock; readers use RCU.
 */
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_ioapic_make_eoibitmap_request(kvm);
}
285
/*
 * Unregister an ack notifier.  synchronize_rcu() guarantees no RCU
 * walker (kvm_notify_acked_irq / kvm_irq_has_notifier) still references
 * kian when this returns, so the caller may free it.
 */
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_rcu();
	kvm_ioapic_make_eoibitmap_request(kvm);
}
295
/*
 * Allocate a free irq source id from kvm->arch.irq_sources_bitmap.
 * Source ids let different injectors (userspace, irqfd, device
 * assignment) assert the same GSI independently.
 *
 * Returns the id (>= 0) on success, or -EFAULT when all BITS_PER_LONG
 * slots are already taken.  Serialized by kvm->irq_lock.
 */
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

	if (irq_source_id >= BITS_PER_LONG) {
		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
		irq_source_id = -EFAULT;
		goto unlock;
	}

	/* The reserved well-known ids must already be marked in the bitmap,
	 * so a fresh allocation can never collide with them. */
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
#ifdef CONFIG_X86
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif
	set_bit(irq_source_id, bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);

	return irq_source_id;
}
320
/*
 * Release an irq source id previously obtained from
 * kvm_request_irq_source_id(), and clear any interrupts that source
 * still has asserted in the in-kernel ioapic (and PIC on x86).
 * Out-of-range or reserved ids are rejected with a log message.
 */
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
#ifdef CONFIG_X86
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif

	mutex_lock(&kvm->irq_lock);
	if (irq_source_id < 0 ||
	    irq_source_id >= BITS_PER_LONG) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		goto unlock;
	}
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	/* Without an in-kernel irqchip there is no pin state to clear. */
	if (!irqchip_in_kernel(kvm))
		goto unlock;

	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
#ifdef CONFIG_X86
	kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
#endif
unlock:
	mutex_unlock(&kvm->irq_lock);
}
345
/*
 * Register a mask notifier for @irq: kimn->func() is called from
 * kvm_fire_mask_notifiers() whenever the matching GSI is masked or
 * unmasked.  Serialized by kvm->irq_lock; readers walk the list
 * under RCU.
 */
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}
354
/*
 * Unregister a mask notifier.  synchronize_rcu() ensures no concurrent
 * kvm_fire_mask_notifiers() walker still references kimn when this
 * returns, so the caller may free it.
 */
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_rcu();
}
363
364 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
365                              bool mask)
366 {
367         struct kvm_irq_mask_notifier *kimn;
368         int gsi;
369
370         rcu_read_lock();
371         gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
372         if (gsi != -1)
373                 hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
374                         if (kimn->irq == gsi)
375                                 kimn->func(kimn, mask);
376         rcu_read_unlock();
377 }
378
/*
 * Free the routing table.  Called only during vm destruction; no reader
 * can hold the pointer at this point, so no RCU grace period is needed.
 */
void kvm_free_irq_routing(struct kvm *kvm)
{
	/* Called only during vm destruction. Nobody can use the pointer
	   at this stage */
	kfree(kvm->irq_routing);
}
385
/*
 * setup_routing_entry - validate one userspace routing entry and link
 * the corresponding kernel entry @e into @rt.
 * @rt: routing table under construction
 * @e:  pre-allocated kernel entry to fill in
 * @ue: userspace-supplied entry
 *
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int setup_routing_entry(struct kvm_irq_routing_table *rt,
			       struct kvm_kernel_irq_routing_entry *e,
			       const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;
	int delta;
	unsigned max_pin;
	struct kvm_kernel_irq_routing_entry *ei;

	/*
	 * Do not allow GSI to be mapped to the same irqchip more than once.
	 * Allow only one to one mapping between GSI and MSI.
	 */
	hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
		if (ei->type == KVM_IRQ_ROUTING_MSI ||
		    ue->type == KVM_IRQ_ROUTING_MSI ||
		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
			return r;

	e->gsi = ue->gsi;
	e->type = ue->type;
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
			max_pin = PIC_NUM_PINS;
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
			e->set = kvm_set_pic_irq;
			max_pin = PIC_NUM_PINS;
			/* Slave PIC pins live at offset 8 in the
			 * combined master+slave pin space. */
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
			max_pin = KVM_IOAPIC_NUM_PINS;
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
		if (e->irqchip.pin >= max_pin)
			goto out;
		/* Record the reverse (chip, pin) -> gsi mapping used by
		 * the ack/mask notifier lookups. */
		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
	default:
		goto out;
	}

	hlist_add_head(&e->link, &rt->map[e->gsi]);
	r = 0;
out:
	return r;
}
448
449
450 int kvm_set_irq_routing(struct kvm *kvm,
451                         const struct kvm_irq_routing_entry *ue,
452                         unsigned nr,
453                         unsigned flags)
454 {
455         struct kvm_irq_routing_table *new, *old;
456         u32 i, j, nr_rt_entries = 0;
457         int r;
458
459         for (i = 0; i < nr; ++i) {
460                 if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
461                         return -EINVAL;
462                 nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
463         }
464
465         nr_rt_entries += 1;
466
467         new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
468                       + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
469                       GFP_KERNEL);
470
471         if (!new)
472                 return -ENOMEM;
473
474         new->rt_entries = (void *)&new->map[nr_rt_entries];
475
476         new->nr_rt_entries = nr_rt_entries;
477         for (i = 0; i < 3; i++)
478                 for (j = 0; j < KVM_IOAPIC_NUM_PINS; j++)
479                         new->chip[i][j] = -1;
480
481         for (i = 0; i < nr; ++i) {
482                 r = -EINVAL;
483                 if (ue->flags)
484                         goto out;
485                 r = setup_routing_entry(new, &new->rt_entries[i], ue);
486                 if (r)
487                         goto out;
488                 ++ue;
489         }
490
491         mutex_lock(&kvm->irq_lock);
492         old = kvm->irq_routing;
493         kvm_irq_routing_update(kvm, new);
494         mutex_unlock(&kvm->irq_lock);
495
496         synchronize_rcu();
497
498         new = old;
499         r = 0;
500
501 out:
502         kfree(new);
503         return r;
504 }
505
/*
 * Helpers for building default_routing[] below:
 * ROUTING_ENTRY1(irq) routes a GSI to the IOAPIC pin of the same number.
 * ROUTING_ENTRY2(irq) additionally routes it to the PIC on x86
 * (chip chosen by SELECT_PIC(), pin is irq modulo 8).
 */
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
#  define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif
521
/*
 * Default identity routing: GSIs 0-15 go to both PIC (x86) and IOAPIC,
 * GSIs 16-23 to the IOAPIC only; ia64 extends the IOAPIC range to 47.
 */
static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};
550
/*
 * Install default_routing[] as the VM's GSI routing table.  Used when
 * an in-kernel irqchip is created and userspace has not supplied a
 * custom table yet.  Returns 0 or a negative errno from
 * kvm_set_irq_routing().
 */
int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}