virt/kvm/assigned-dev.c
/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include "irq.h"

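/* Look up an assigned device on @head by its user-supplied device id. */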
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
        struct list_head *ptr;
        struct kvm_assigned_dev_kernel *match;

        list_for_each(ptr, head) {
                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
                if (match->assigned_dev_id == assigned_dev_id)
                        return match;
        }
        return NULL;
}

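/*
 * Map a host MSI-X vector back to its index in host_msix_entries.
 * Returns -1 if no entry matches, which callers must check for.
 */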
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
                                    *assigned_dev, int irq)
{
        int i, index;
        struct msix_entry *host_msix_entries;

        host_msix_entries = assigned_dev->host_msix_entries;

        index = -1;
        for (i = 0; i < assigned_dev->entries_nr; i++)
                if (irq == host_msix_entries[i].vector) {
                        index = i;
                        break;
                }
        if (index < 0) {
                printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
                return -1;
        }

        return index;
}

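/*
 * Threaded handler for an assigned device's host interrupt.  For
 * level-triggered INTx we mask the host line until the guest acks the
 * interrupt (see kvm_assigned_dev_ack_irq), then raise the guest irq.
 */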
static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
{
        struct kvm_assigned_dev_kernel *assigned_dev = dev_id;

        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
                spin_lock(&assigned_dev->intx_lock);
                disable_irq_nosync(irq);
                assigned_dev->host_irq_disabled = true;
                spin_unlock(&assigned_dev->intx_lock);
        }

        kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                    assigned_dev->guest_irq, 1);

        return IRQ_HANDLED;
}

#ifdef __KVM_HAVE_MSIX
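/*
 * Per-vector threaded handler for MSI-X: translate the host vector to the
 * matching guest MSI-X entry and inject it.  No masking is needed since
 * message-signaled interrupts are edge-triggered.
 */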
static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
{
        struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
        int index = find_index_from_host_irq(assigned_dev, irq);
        u32 vector;

        if (index >= 0) {
                vector = assigned_dev->guest_msix_entries[index].vector;
                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                            vector, 1);
        }

        return IRQ_HANDLED;
}
#endif

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_assigned_dev_kernel *dev =
                container_of(kian, struct kvm_assigned_dev_kernel,
                             ack_notifier);

        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

        /* The guest irq may be shared so this ack may be
         * from another device.
         */
        spin_lock(&dev->intx_lock);
        if (dev->host_irq_disabled) {
                enable_irq(dev->host_irq);
                dev->host_irq_disabled = false;
        }
        spin_unlock(&dev->intx_lock);
}

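/*
 * Drop the guest side of an irq assignment: lower the guest line, remove
 * the ack notifier and release the irq source id.
 */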
static void deassign_guest_irq(struct kvm *kvm,
                               struct kvm_assigned_dev_kernel *assigned_dev)
{
        if (assigned_dev->ack_notifier.gsi != -1)
                kvm_unregister_irq_ack_notifier(kvm,
                                                &assigned_dev->ack_notifier);

        kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                    assigned_dev->guest_irq, 0);

        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        assigned_dev->irq_source_id = -1;
        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
                              struct kvm_assigned_dev_kernel *assigned_dev)
{
        /*
         * We disable the irq here to prevent further events.
         *
         * Note that this may result in a nested disable if the interrupt
         * type is INTx, but that is fine since we are about to free it.
         *
         * If this function is part of VM destruction, make sure that the
         * kvm state is still valid up to this point, since we may have to
         * wait on a currently running IRQ handler.
         */
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                int i;
                for (i = 0; i < assigned_dev->entries_nr; i++)
                        disable_irq(assigned_dev->host_msix_entries[i].vector);

                for (i = 0; i < assigned_dev->entries_nr; i++)
                        free_irq(assigned_dev->host_msix_entries[i].vector,
                                 assigned_dev);

                assigned_dev->entries_nr = 0;
                kfree(assigned_dev->host_msix_entries);
                kfree(assigned_dev->guest_msix_entries);
                pci_disable_msix(assigned_dev->dev);
        } else {
                /* Deal with MSI and INTx */
                disable_irq(assigned_dev->host_irq);

                free_irq(assigned_dev->host_irq, assigned_dev);

                if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
                        pci_disable_msi(assigned_dev->dev);
        }

        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *assigned_dev,
                            unsigned long irq_requested_type)
{
        unsigned long guest_irq_type, host_irq_type;

        if (!irqchip_in_kernel(kvm))
                return -EINVAL;
        /* no irq assignment to deassign */
        if (!assigned_dev->irq_requested_type)
                return -ENXIO;

        host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
        guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

        if (host_irq_type)
                deassign_host_irq(kvm, assigned_dev);
        if (guest_irq_type)
                deassign_guest_irq(kvm, assigned_dev);

        return 0;
}

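/* Tear down whatever host and guest irq state is currently assigned. */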
static void kvm_free_assigned_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

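/*
 * Release an assigned device entirely: free its irqs, restore the saved
 * PCI state, hand the device back to the host and free the tracking
 * structure.
 */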
static void kvm_free_assigned_device(struct kvm *kvm,
                                     struct kvm_assigned_dev_kernel
                                     *assigned_dev)
{
        kvm_free_assigned_irq(kvm, assigned_dev);

        pci_reset_function(assigned_dev->dev);
        if (pci_load_and_free_saved_state(assigned_dev->dev,
                                          &assigned_dev->pci_saved_state))
                printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
                       __func__, dev_name(&assigned_dev->dev->dev));
        else
                pci_restore_state(assigned_dev->dev);

        assigned_dev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;

        pci_release_regions(assigned_dev->dev);
        pci_disable_device(assigned_dev->dev);
        pci_dev_put(assigned_dev->dev);

        list_del(&assigned_dev->list);
        kfree(assigned_dev);
}

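/* Called on VM destruction: release every device still assigned to @kvm. */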
void kvm_free_all_assigned_devices(struct kvm *kvm)
{
        struct list_head *ptr, *ptr2;
        struct kvm_assigned_dev_kernel *assigned_dev;

        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                assigned_dev = list_entry(ptr,
                                          struct kvm_assigned_dev_kernel,
                                          list);

                kvm_free_assigned_device(kvm, assigned_dev);
        }
}

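/* Request the device's INTx line as an exclusive, threaded host irq. */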
static int assigned_device_enable_host_intx(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        dev->host_irq = dev->dev->irq;
        /* Even though this is PCI, we don't want to use shared
         * interrupts. Sharing host devices with guest-assigned devices
         * on the same interrupt line is not a happy situation: there
         * are going to be long delays in accepting, acking, etc.
         */
        if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
                                 IRQF_ONESHOT, dev->irq_name, dev))
                return -EIO;
        return 0;
}

#ifdef __KVM_HAVE_MSI
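/* Switch the device to MSI and request the resulting vector. */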
static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
{
        int r;

        if (!dev->dev->msi_enabled) {
                r = pci_enable_msi(dev->dev);
                if (r)
                        return r;
        }

        dev->host_irq = dev->dev->irq;
        if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
                                 0, dev->irq_name, dev)) {
                pci_disable_msi(dev->dev);
                return -EIO;
        }

        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
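/*
 * Enable MSI-X and request one threaded handler per vector.  On failure,
 * unwind the vectors already requested.
 */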
static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        int i, r = -EINVAL;

        /* host_msix_entries and guest_msix_entries should have been
         * initialized */
        if (dev->entries_nr == 0)
                return r;

        r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
        if (r)
                return r;

        for (i = 0; i < dev->entries_nr; i++) {
                r = request_threaded_irq(dev->host_msix_entries[i].vector,
                                         NULL, kvm_assigned_dev_thread_msix,
                                         0, dev->irq_name, dev);
                if (r)
                        goto err;
        }

        return 0;
err:
        for (i -= 1; i >= 0; i--)
                free_irq(dev->host_msix_entries[i].vector, dev);
        pci_disable_msix(dev->dev);
        return r;
}

#endif

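/*
 * The guest-side enable helpers only record the guest irq and, for INTx,
 * the gsi on which to expect acks; injection happens from the irq
 * handlers above.
 */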
static int assigned_device_enable_guest_intx(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = irq->guest_irq;
        return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *dev,
                        struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *dev,
                        struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

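/* Dispatch to the host-side enable helper for the requested irq type. */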
static int assign_host_irq(struct kvm *kvm,
                           struct kvm_assigned_dev_kernel *dev,
                           __u32 host_irq_type)
{
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
                return r;

        snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
                 pci_name(dev->dev));

        switch (host_irq_type) {
        case KVM_DEV_IRQ_HOST_INTX:
                r = assigned_device_enable_host_intx(kvm, dev);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_HOST_MSI:
                r = assigned_device_enable_host_msi(kvm, dev);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_HOST_MSIX:
                r = assigned_device_enable_host_msix(kvm, dev);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r)
                dev->irq_requested_type |= host_irq_type;

        return r;
}

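/*
 * Dispatch to the guest-side enable helper and, for INTx, register the
 * ack notifier.  An irq source id is allocated here and freed on failure.
 */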
static int assign_guest_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *dev,
                            struct kvm_assigned_irq *irq,
                            unsigned long guest_irq_type)
{
        int id;
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
                return r;

        id = kvm_request_irq_source_id(kvm);
        if (id < 0)
                return id;

        dev->irq_source_id = id;

        switch (guest_irq_type) {
        case KVM_DEV_IRQ_GUEST_INTX:
                r = assigned_device_enable_guest_intx(kvm, dev, irq);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_GUEST_MSI:
                r = assigned_device_enable_guest_msi(kvm, dev, irq);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_GUEST_MSIX:
                r = assigned_device_enable_guest_msix(kvm, dev, irq);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r) {
                dev->irq_requested_type |= guest_irq_type;
                if (dev->ack_notifier.gsi != -1)
                        kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
        } else
                kvm_free_irq_source_id(kvm, dev->irq_source_id);

        return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq *assigned_irq)
{
        int r = -EINVAL;
        struct kvm_assigned_dev_kernel *match;
        unsigned long host_irq_type, guest_irq_type;

        if (!irqchip_in_kernel(kvm))
                return r;

        mutex_lock(&kvm->lock);
        r = -ENODEV;
        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
        guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

        r = -EINVAL;
        /* can only assign one type at a time */
        if (hweight_long(host_irq_type) > 1)
                goto out;
        if (hweight_long(guest_irq_type) > 1)
                goto out;
        if (host_irq_type == 0 && guest_irq_type == 0)
                goto out;

        r = 0;
        if (host_irq_type)
                r = assign_host_irq(kvm, match, host_irq_type);
        if (r)
                goto out;

        if (guest_irq_type)
                r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

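/* Undo an irq assignment; the flags select which halves to tear down. */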
static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
                                         struct kvm_assigned_irq
                                         *assigned_irq)
{
        int r = -ENODEV;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

/*
 * We want to test whether the caller has been granted permissions to
 * use this device.  To be able to configure and control the device,
 * the user needs access to PCI configuration space and BAR resources.
 * These are accessed through PCI sysfs.  PCI config space is often
 * passed to the process calling this ioctl via file descriptor, so we
 * can't rely on access to that file.  We can check for permissions
 * on each of the BAR resource files, which is a pretty clear
 * indicator that the user has been granted access to the device.
 */
static int probe_sysfs_permissions(struct pci_dev *dev)
{
#ifdef CONFIG_SYSFS
        int i;
        bool bar_found = false;

        for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
                char *kpath, *syspath;
                struct path path;
                struct inode *inode;
                int r;

                if (!pci_resource_len(dev, i))
                        continue;

                kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
                if (!kpath)
                        return -ENOMEM;

                /* Per sysfs-rules, sysfs is always at /sys */
                syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
                kfree(kpath);
                if (!syspath)
                        return -ENOMEM;

                r = kern_path(syspath, LOOKUP_FOLLOW, &path);
                kfree(syspath);
                if (r)
                        return r;

                inode = path.dentry->d_inode;

                r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
                path_put(&path);
                if (r)
                        return r;

                bar_found = true;
        }

        /* If no resources, probably something special */
        if (!bar_found)
                return -EPERM;

        return 0;
#else
        return -EINVAL; /* No way to control the device without sysfs */
#endif
}

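/*
 * KVM_ASSIGN_PCI_DEVICE: claim a host PCI device for a guest.  The device
 * is reset, its config state saved, and it is attached to the VM's IOMMU
 * domain; the error paths unwind each step in reverse order.
 */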
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;
        u8 header_type;

        if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        idx = srcu_read_lock(&kvm->srcu);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (match) {
                /* device already assigned */
                r = -EEXIST;
                goto out;
        }

        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
        if (match == NULL) {
                printk(KERN_INFO "%s: Couldn't allocate memory\n",
                       __func__);
                r = -ENOMEM;
                goto out;
        }
        dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
                                   assigned_dev->busnr,
                                   assigned_dev->devfn);
        if (!dev) {
                printk(KERN_INFO "%s: host device not found\n", __func__);
                r = -EINVAL;
                goto out_free;
        }

        /* Don't allow bridges to be assigned */
        pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
        if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
                r = -EPERM;
                goto out_put;
        }

        r = probe_sysfs_permissions(dev);
        if (r)
                goto out_put;

        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
                goto out_put;
        }
        r = pci_request_regions(dev, "kvm_assigned_device");
        if (r) {
                printk(KERN_INFO "%s: Could not get access to device regions\n",
                       __func__);
                goto out_disable;
        }

        pci_reset_function(dev);
        pci_save_state(dev);
        match->pci_saved_state = pci_store_saved_state(dev);
        if (!match->pci_saved_state)
                printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
                       __func__, dev_name(&dev->dev));
        match->assigned_dev_id = assigned_dev->assigned_dev_id;
        match->host_segnr = assigned_dev->segnr;
        match->host_busnr = assigned_dev->busnr;
        match->host_devfn = assigned_dev->devfn;
        match->flags = assigned_dev->flags;
        match->dev = dev;
        spin_lock_init(&match->intx_lock);
        match->irq_source_id = -1;
        match->kvm = kvm;
        match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;

        list_add(&match->list, &kvm->arch.assigned_dev_head);

        if (!kvm->arch.iommu_domain) {
                r = kvm_iommu_map_guest(kvm);
                if (r)
                        goto out_list_del;
        }
        r = kvm_assign_device(kvm, match);
        if (r)
                goto out_list_del;

out:
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
out_list_del:
        if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
                printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
                       __func__, dev_name(&dev->dev));
        list_del(&match->list);
        pci_release_regions(dev);
out_disable:
        pci_disable_device(dev);
out_put:
        pci_dev_put(dev);
out_free:
        kfree(match);
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
}

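/* KVM_DEASSIGN_PCI_DEVICE: detach a device from the IOMMU and release it. */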
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (!match) {
                printk(KERN_INFO "%s: device hasn't been assigned before, "
                  "so cannot be deassigned\n", __func__);
                r = -EINVAL;
                goto out;
        }

        kvm_deassign_device(kvm, match);

        kvm_free_assigned_device(kvm, match);

out:
        mutex_unlock(&kvm->lock);
        return r;
}

#ifdef __KVM_HAVE_MSIX
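/*
 * KVM_ASSIGN_SET_MSIX_NR: allocate the host and guest MSI-X entry arrays.
 * The count may only be set once per device and is capped at
 * KVM_MAX_MSIX_PER_DEV.
 */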
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
                                    struct kvm_assigned_msix_nr *entry_nr)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      entry_nr->assigned_dev_id);
        if (!adev) {
                r = -EINVAL;
                goto msix_nr_out;
        }

        if (adev->entries_nr == 0) {
                adev->entries_nr = entry_nr->entry_nr;
                if (adev->entries_nr == 0 ||
                    adev->entries_nr > KVM_MAX_MSIX_PER_DEV) {
                        r = -EINVAL;
                        goto msix_nr_out;
                }

                adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
                                                entry_nr->entry_nr,
                                                GFP_KERNEL);
                if (!adev->host_msix_entries) {
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
                adev->guest_msix_entries =
                        kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
                                GFP_KERNEL);
                if (!adev->guest_msix_entries) {
                        kfree(adev->host_msix_entries);
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
        } else /* Setting the MSI-X entry count twice is not allowed */
                r = -EINVAL;
msix_nr_out:
        mutex_unlock(&kvm->lock);
        return r;
}

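/*
 * KVM_ASSIGN_SET_MSIX_ENTRY: fill in one MSI-X routing entry, reusing the
 * slot if the entry was set before, otherwise taking the first free one.
 */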
static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
                                       struct kvm_assigned_msix_entry *entry)
{
        int r = 0, i;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      entry->assigned_dev_id);

        if (!adev) {
                r = -EINVAL;
                goto msix_entry_out;
        }

        for (i = 0; i < adev->entries_nr; i++)
                if (adev->guest_msix_entries[i].vector == 0 ||
                    adev->guest_msix_entries[i].entry == entry->entry) {
                        adev->guest_msix_entries[i].entry = entry->entry;
                        adev->guest_msix_entries[i].vector = entry->gsi;
                        adev->host_msix_entries[i].entry = entry->entry;
                        break;
                }
        if (i == adev->entries_nr) {
                r = -ENOSPC;
                goto msix_entry_out;
        }

msix_entry_out:
        mutex_unlock(&kvm->lock);

        return r;
}
#endif

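/* Top-level dispatcher for the device assignment ioctls on a VM fd. */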
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_ASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_IRQ: {
                r = -EOPNOTSUPP;
                break;
        }
        case KVM_ASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
        case KVM_DEASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
        case KVM_DEASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
#ifdef KVM_CAP_IRQ_ROUTING
        case KVM_SET_GSI_ROUTING: {
                struct kvm_irq_routing routing;
                struct kvm_irq_routing __user *urouting;
                struct kvm_irq_routing_entry *entries;

                r = -EFAULT;
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
                if (routing.nr >= KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
                r = -ENOMEM;
                entries = vmalloc(routing.nr * sizeof(*entries));
                if (!entries)
                        goto out;
                r = -EFAULT;
                urouting = argp;
                if (copy_from_user(entries, urouting->entries,
                                   routing.nr * sizeof(*entries)))
                        goto out_free_irq_routing;
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
        out_free_irq_routing:
                vfree(entries);
                break;
        }
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
        case KVM_ASSIGN_SET_MSIX_NR: {
                struct kvm_assigned_msix_nr entry_nr;
                r = -EFAULT;
                if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
                        goto out;
                r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_SET_MSIX_ENTRY: {
                struct kvm_assigned_msix_entry entry;
                r = -EFAULT;
                if (copy_from_user(&entry, argp, sizeof entry))
                        goto out;
                r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
                if (r)
                        goto out;
                break;
        }
#endif
        default:
                r = -ENOTTY;
                break;
        }
out:
        return r;
}