/*
 * Virtio PCI driver
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
struct virtio_pci_vq_info {
        /* the actual virtqueue */
        struct virtqueue *vq;

        /* the number of entries in the queue */
        int num;

        /* the virtual address of the ring queue */
        void *queue;

        /* the list node for the virtqueues list */
        struct list_head node;

        /* MSI-X vector (or none) */
        unsigned msix_vector;
};
/* Our device structure */
struct virtio_pci_device {
        struct virtio_device vdev;
        struct pci_dev *pci_dev;

        /* the IO mapping for the PCI config space */
        void __iomem *ioaddr;

        /* the IO mapping for ISR operation */
        void __iomem *isr;

        /* a list of queues so we can dispatch IRQs */
        spinlock_t lock;
        struct list_head virtqueues;

        /* array of all queues for house-keeping */
        struct virtio_pci_vq_info **vqs;

        /* MSI-X support */
        int msix_enabled;
        int intx_enabled;
        struct msix_entry *msix_entries;
        cpumask_var_t *msix_affinity_masks;
        /* Name strings for interrupts. This size should be enough,
         * and I'm too lazy to allocate each name separately. */
        char (*msix_names)[256];
        /* Number of available vectors */
        unsigned msix_vectors;
        /* Vectors allocated, excluding per-vq vectors if any */
        unsigned msix_used_vectors;

        /* Whether we have vector per vq */
        bool per_vq_vectors;

        struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
                                      struct virtio_pci_vq_info *info,
                                      unsigned index,
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name,
                                      u16 msix_vec);
        void (*del_vq)(struct virtio_pci_vq_info *info);
};
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
 * virtqueues.  Thus, we need at least 2 vectors for MSI. */
enum {
        VP_MSIX_CONFIG_VECTOR = 0,
        VP_MSIX_VQ_VECTOR = 1,
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
        { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
        return container_of(vdev, struct virtio_pci_device, vdev);
}
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* When someone needs more than 32 feature bits, we'll need to
         * steal a bit to indicate that the rest are somewhere else. */
        return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        /* Make sure we don't have any features > 32 bits! */
        BUG_ON((u32)vdev->features != vdev->features);

        /* We only support 32 feature bits. */
        iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

        return 0;
}
/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
                   void *buf, unsigned len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ioaddr +
                               VIRTIO_PCI_CONFIG(vp_dev) + offset;
        u8 *ptr = buf;
        int i;

        for (i = 0; i < len; i++)
                ptr[i] = ioread8(ioaddr + i);
}
/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
                   const void *buf, unsigned len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ioaddr +
                               VIRTIO_PCI_CONFIG(vp_dev) + offset;
        const u8 *ptr = buf;
        int i;

        for (i = 0; i < len; i++)
                iowrite8(ptr[i], ioaddr + i);
}
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* We should never be setting status to 0. */
        BUG_ON(status == 0);
        iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
/* wait for pending irq handlers */
static void vp_synchronize_vectors(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;

        if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);

        for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(vp_dev->msix_entries[i].vector);
}
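
/* config->reset() implementation: writing 0 to the status register tells the
 * device to reset itself. */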
static void vp_reset(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* 0 status means a reset. */
        iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
        /* Flush out the status write, and flush in device writes,
         * including MSI-X interrupts, if any. */
        ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
        /* Flush pending VQ/configuration callbacks. */
        vp_synchronize_vectors(vdev);
}
/* the notify function used when creating a virt queue */
static bool vp_notify(struct virtqueue *vq)
{
        /* we write the queue's selector into the notification register to
         * signal the other end */
        iowrite16(vq->index, (void __iomem *)vq->priv);
        return true;
}
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;

        virtio_config_changed(&vp_dev->vdev);
        return IRQ_HANDLED;
}
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_for_each_entry(info, &vp_dev->virtqueues, node) {
                if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        return ret;
}
/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;

        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);

        /* It's definitely not us if the ISR was not high */
        if (!isr)
                return IRQ_NONE;

        /* Configuration change? Tell driver if it wants to know. */
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                vp_config_changed(irq, opaque);

        return vp_vring_interrupt(irq, opaque);
}
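
/* Free every IRQ we own, whether INTx or MSI-X, and disable MSI-X on the
 * device so it is back in a vector-free state. */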
static void vp_free_vectors(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;

        if (vp_dev->intx_enabled) {
                free_irq(vp_dev->pci_dev->irq, vp_dev);
                vp_dev->intx_enabled = 0;
        }

        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(vp_dev->msix_entries[i].vector, vp_dev);

        for (i = 0; i < vp_dev->msix_vectors; i++)
                if (vp_dev->msix_affinity_masks[i])
                        free_cpumask_var(vp_dev->msix_affinity_masks[i]);

        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                iowrite16(VIRTIO_MSI_NO_VECTOR,
                          vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
                /* Flush the write out to device */
                ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);

                pci_disable_msix(vp_dev->pci_dev);
                vp_dev->msix_enabled = 0;
        }

        vp_dev->msix_vectors = 0;
        vp_dev->msix_used_vectors = 0;
        kfree(vp_dev->msix_names);
        vp_dev->msix_names = NULL;
        kfree(vp_dev->msix_entries);
        vp_dev->msix_entries = NULL;
        kfree(vp_dev->msix_affinity_masks);
        vp_dev->msix_affinity_masks = NULL;
}
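
/* Enable nvectors MSI-X vectors and claim the first one for configuration
 * change interrupts; when per_vq_vectors is false, a second vector shared by
 * all virtqueues is requested here as well. */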
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                                   bool per_vq_vectors)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
        unsigned i, v;
        int err = -ENOMEM;

        vp_dev->msix_vectors = nvectors;

        vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
                                       GFP_KERNEL);
        if (!vp_dev->msix_entries)
                goto error;
        vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
                                     GFP_KERNEL);
        if (!vp_dev->msix_names)
                goto error;
        vp_dev->msix_affinity_masks
                = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
                          GFP_KERNEL);
        if (!vp_dev->msix_affinity_masks)
                goto error;
        for (i = 0; i < nvectors; ++i)
                if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
                                       GFP_KERNEL))
                        goto error;

        for (i = 0; i < nvectors; ++i)
                vp_dev->msix_entries[i].entry = i;

        err = pci_enable_msix_exact(vp_dev->pci_dev,
                                    vp_dev->msix_entries, nvectors);
        if (err)
                goto error;
        vp_dev->msix_enabled = 1;

        /* Set the vector used for configuration */
        v = vp_dev->msix_used_vectors;
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(vp_dev->msix_entries[v].vector,
                          vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
        ++vp_dev->msix_used_vectors;

        iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
        /* Verify we had enough resources to assign the vector */
        v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
        if (v == VIRTIO_MSI_NO_VECTOR) {
                err = -EBUSY;
                goto error;
        }

        if (!per_vq_vectors) {
                /* Shared vector for all VQs */
                v = vp_dev->msix_used_vectors;
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(vp_dev->msix_entries[v].vector,
                                  vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
                ++vp_dev->msix_used_vectors;
        }
        return 0;
error:
        vp_free_vectors(vdev);
        return err;
}
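
/* Last resort: a single shared INTx interrupt for config changes and all
 * virtqueues. */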
static int vp_request_intx(struct virtio_device *vdev)
{
        int err;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
                          IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
        if (!err)
                vp_dev->intx_enabled = 1;
        return err;
}
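
/* Legacy ring setup: allocate the ring in guest memory and hand its page
 * frame number to the device via VIRTIO_PCI_QUEUE_PFN. */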
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
                                  u16 msix_vec)
{
        struct virtqueue *vq;
        unsigned long size;
        u16 num;
        int err;

        /* Select the queue we're interested in */
        iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

        /* Check if queue is either not available or already active. */
        num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
        if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
                return ERR_PTR(-ENOENT);

        info->num = num;
        info->msix_vector = msix_vec;

        size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
        info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
        if (info->queue == NULL)
                return ERR_PTR(-ENOMEM);

        /* activate the queue */
        iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
                  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

        /* create the vring */
        vq = vring_new_virtqueue(index, info->num,
                                 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
                                 true, info->queue, vp_notify, callback, name);
        if (!vq) {
                err = -ENOMEM;
                goto out_activate_queue;
        }

        vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;

        if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
                iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
                        err = -EBUSY;
                        goto out_assign;
                }
        }

        return vq;

out_assign:
        vring_del_virtqueue(vq);
out_activate_queue:
        iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
        free_pages_exact(info->queue, size);
        return ERR_PTR(err);
}
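
/* Allocate the per-vq bookkeeping, delegate ring creation to the transport's
 * setup_vq hook, and register the result for IRQ dispatch. */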
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name,
                                     u16 msix_vec)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
        struct virtqueue *vq;
        unsigned long flags;

        /* fill out our structure that represents an active queue */
        if (!info)
                return ERR_PTR(-ENOMEM);

        vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec);
        if (IS_ERR(vq))
                goto out_info;

        info->vq = vq;
        if (callback) {
                spin_lock_irqsave(&vp_dev->lock, flags);
                list_add(&info->node, &vp_dev->virtqueues);
                spin_unlock_irqrestore(&vp_dev->lock, flags);
        } else {
                INIT_LIST_HEAD(&info->node);
        }

        vp_dev->vqs[index] = info;
        return vq;

out_info:
        kfree(info);
        return vq;
}
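
/* Legacy ring teardown: detach the MSI-X vector, delete the vring and return
 * the ring pages. */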
static void del_vq(struct virtio_pci_vq_info *info)
{
        struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        unsigned long size;

        iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

        if (vp_dev->msix_enabled) {
                iowrite16(VIRTIO_MSI_NO_VECTOR,
                          vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                /* Flush the write out to device */
                ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
        }

        vring_del_virtqueue(vq);

        /* Select and deactivate the queue */
        iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

        size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
        free_pages_exact(info->queue, size);
}
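
/* Unlink the vq from the IRQ dispatch list before tearing it down. */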
static void vp_del_vq(struct virtqueue *vq)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        vp_dev->del_vq(info);
        kfree(info);
}
/* the config->del_vqs() implementation */
static void vp_del_vqs(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq, *n;
        struct virtio_pci_vq_info *info;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                info = vp_dev->vqs[vq->index];
                if (vp_dev->per_vq_vectors &&
                    info->msix_vector != VIRTIO_MSI_NO_VECTOR)
                        free_irq(vp_dev->msix_entries[info->msix_vector].vector,
                                 vq);
                vp_del_vq(vq);
        }
        vp_dev->per_vq_vectors = false;

        vp_free_vectors(vdev);
        kfree(vp_dev->vqs);
        vp_dev->vqs = NULL;
}
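
/* Try to set up interrupts and virtqueues with one strategy: use_msix selects
 * MSI-X over legacy INTx, and per_vq_vectors requests a dedicated MSI-X
 * vector per virtqueue instead of one shared by all of them. */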
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                              struct virtqueue *vqs[],
                              vq_callback_t *callbacks[],
                              const char *names[],
                              bool use_msix,
                              bool per_vq_vectors)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        u16 msix_vec;
        int i, err, nvectors, allocated_vectors;

        vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        if (!use_msix) {
                /* Old style: one normal interrupt for change and all vqs. */
                err = vp_request_intx(vdev);
                if (err)
                        goto error_find;
        } else {
                if (per_vq_vectors) {
                        /* Best option: one for change interrupt, one per vq. */
                        nvectors = 1;
                        for (i = 0; i < nvqs; ++i)
                                if (callbacks[i])
                                        ++nvectors;
                } else {
                        /* Second best: one for change, shared for all vqs. */
                        nvectors = 2;
                }

                err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
                if (err)
                        goto error_find;
        }

        vp_dev->per_vq_vectors = per_vq_vectors;
        allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                } else if (!callbacks[i] || !vp_dev->msix_enabled)
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
                else if (vp_dev->per_vq_vectors)
                        msix_vec = allocated_vectors++;
                else
                        msix_vec = VP_MSIX_VQ_VECTOR;
                vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error_find;
                }

                if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;

                /* allocate per-vq irq if available and necessary */
                snprintf(vp_dev->msix_names[msix_vec],
                         sizeof *vp_dev->msix_names,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(vp_dev->msix_entries[msix_vec].vector,
                                  vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err) {
                        vp_del_vq(vqs[i]);
                        goto error_find;
                }
        }
        return 0;

error_find:
        vp_del_vqs(vdev);
        return err;
}
/* the config->find_vqs() implementation */
static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                       struct virtqueue *vqs[],
                       vq_callback_t *callbacks[],
                       const char *names[])
{
        int err;

        /* Try MSI-X with one vector per queue. */
        err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
        if (!err)
                return 0;
        /* Fallback: MSI-X with one vector for config, one shared for queues. */
        err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
                                 true, false);
        if (!err)
                return 0;
        /* Finally fall back to regular interrupts. */
        return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
                                  false, false);
}
static const char *vp_bus_name(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return pci_name(vp_dev->pci_dev);
}
/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        struct cpumask *mask;
        unsigned int irq;

        if (!vq->callback)
                return -EINVAL;

        if (vp_dev->msix_enabled) {
                mask = vp_dev->msix_affinity_masks[info->msix_vector];
                irq = vp_dev->msix_entries[info->msix_vector].vector;
                if (cpu == -1)
                        irq_set_affinity_hint(irq, NULL);
                else {
                        cpumask_set_cpu(cpu, mask);
                        irq_set_affinity_hint(irq, mask);
                }
        }
        return 0;
}
static const struct virtio_config_ops virtio_pci_config_ops = {
        .get            = vp_get,
        .set            = vp_set,
        .get_status     = vp_get_status,
        .set_status     = vp_set_status,
        .reset          = vp_reset,
        .find_vqs       = vp_find_vqs,
        .del_vqs        = vp_del_vqs,
        .get_features   = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name       = vp_bus_name,
        .set_vq_affinity = vp_set_vq_affinity,
};
static void virtio_pci_release_dev(struct device *_d)
{
        /*
         * No need for a release method as we allocate/free
         * all devices together with the pci devices.
         * Provide an empty one to avoid getting a warning from core.
         */
}
/* the PCI probing function */
static int virtio_pci_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
{
        struct virtio_pci_device *vp_dev;
        int err;

        /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
        if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
                return -ENODEV;

        if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
                printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
                       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
                return -ENODEV;
        }

        /* allocate our structure and fill it out */
        vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
        if (!vp_dev)
                return -ENOMEM;

        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->vdev.config = &virtio_pci_config_ops;
        vp_dev->pci_dev = pci_dev;
        INIT_LIST_HEAD(&vp_dev->virtqueues);
        spin_lock_init(&vp_dev->lock);

        /* Disable MSI/MSIX to bring device to a known good state. */
        pci_msi_off(pci_dev);

        /* enable the device */
        err = pci_enable_device(pci_dev);
        if (err)
                goto out;

        err = pci_request_regions(pci_dev, "virtio-pci");
        if (err)
                goto out_enable_device;

        vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
        if (vp_dev->ioaddr == NULL) {
                err = -ENOMEM;
                goto out_req_regions;
        }

        vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

        pci_set_drvdata(pci_dev, vp_dev);
        pci_set_master(pci_dev);

        /* we use the subsystem vendor/device id as the virtio vendor/device
         * id.  this allows us to use the same PCI vendor/device id for all
         * virtio devices and to identify the particular virtio driver by
         * the subsystem ids */
        vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
        vp_dev->vdev.id.device = pci_dev->subsystem_device;
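        /* e.g. the virtio block device uses PCI device id 0x1001 and
         * subsystem device id 2 (VIRTIO_ID_BLOCK), so the virtio block
         * driver binds to it. */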

        vp_dev->setup_vq = setup_vq;
        vp_dev->del_vq = del_vq;

        /* finally register the virtio device */
        err = register_virtio_device(&vp_dev->vdev);
        if (err)
                goto out_set_drvdata;

        return 0;

out_set_drvdata:
        pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
        pci_release_regions(pci_dev);
out_enable_device:
        pci_disable_device(pci_dev);
out:
        kfree(vp_dev);
        return err;
}
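
/* the PCI removal function: unwind everything done in virtio_pci_probe() */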
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);

        unregister_virtio_device(&vp_dev->vdev);

        vp_del_vqs(&vp_dev->vdev);
        pci_iounmap(pci_dev, vp_dev->ioaddr);
        pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
        kfree(vp_dev);
}
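
/* Sleep/resume support: freeze the virtio device before powering down the
 * PCI device, and bring both back on restore. */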
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = virtio_device_freeze(&vp_dev->vdev);

        if (!ret)
                pci_disable_device(pci_dev);
        return ret;
}

static int virtio_pci_restore(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = pci_enable_device(pci_dev);
        if (ret)
                return ret;

        pci_set_master(pci_dev);
        return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif
static struct pci_driver virtio_pci_driver = {
        .name           = "virtio-pci",
        .id_table       = virtio_pci_id_table,
        .probe          = virtio_pci_probe,
        .remove         = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
        .driver.pm      = &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);