KVM: use the new intel iommu APIs
author Weidong Han <weidong.han@intel.com>
Tue, 2 Dec 2008 13:03:39 +0000 (21:03 +0800)
committer Joerg Roedel <joerg.roedel@amd.com>
Sat, 3 Jan 2009 13:02:19 +0000 (14:02 +0100)
The Intel IOMMU APIs have been updated; switch KVM over to the new APIs.

In addition, change kvm_iommu_map_guest() to only create the domain, and let kvm_assign_device() handle the actual device assignment.
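
A minimal call-flow sketch of the resulting assignment path. It mirrors the
kvm_vm_ioctl_assign_device() hunk in virt/kvm/kvm_main.c below; error
unwinding is abbreviated and the locals (r, match, assigned_dev) are assumed
from that function:

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		/* First device with IOMMU enabled: create the per-VM
		 * dmar_domain and map all guest memslots into it. */
		if (!kvm->arch.intel_iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		/* Each assigned device is then attached to that
		 * already-existing domain individually. */
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}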

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
include/linux/kvm_host.h
virt/kvm/kvm_main.c
virt/kvm/vtd.c

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eafabd5..c96739b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -330,9 +330,10 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
                        unsigned long npages);
-int kvm_iommu_map_guest(struct kvm *kvm,
-                       struct kvm_assigned_dev_kernel *assigned_dev);
+int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
+int kvm_assign_device(struct kvm *kvm,
+                     struct kvm_assigned_dev_kernel *assigned_dev);
 #else /* CONFIG_DMAR */
 static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      gfn_t base_gfn,
@@ -341,9 +342,7 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
        return 0;
 }
 
-static inline int kvm_iommu_map_guest(struct kvm *kvm,
-                                     struct kvm_assigned_dev_kernel
-                                     *assigned_dev)
+static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
        return -ENODEV;
 }
@@ -352,6 +351,12 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
 {
        return 0;
 }
+
+static inline int kvm_assign_device(struct kvm *kvm,
+               struct kvm_assigned_dev_kernel *assigned_dev)
+{
+       return 0;
+}
 #endif /* CONFIG_DMAR */
 
 static inline void kvm_guest_enter(void)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fc6127c..c92b634 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -503,7 +503,12 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        list_add(&match->list, &kvm->arch.assigned_dev_head);
 
        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-               r = kvm_iommu_map_guest(kvm, match);
+               if (!kvm->arch.intel_iommu_domain) {
+                       r = kvm_iommu_map_guest(kvm);
+                       if (r)
+                               goto out_list_del;
+               }
+               r = kvm_assign_device(kvm, match);
                if (r)
                        goto out_list_del;
        }
diff --git a/virt/kvm/vtd.c b/virt/kvm/vtd.c
index a770874..44bb58a 100644
--- a/virt/kvm/vtd.c
+++ b/virt/kvm/vtd.c
@@ -45,20 +45,18 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 
        for (i = 0; i < npages; i++) {
                /* check if already mapped */
-               pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-                                                    gfn_to_gpa(gfn));
-               if (pfn)
+               if (intel_iommu_iova_to_phys(domain,
+                                            gfn_to_gpa(gfn)))
                        continue;
 
                pfn = gfn_to_pfn(kvm, gfn);
-               r = intel_iommu_page_mapping(domain,
-                                            gfn_to_gpa(gfn),
-                                            pfn_to_hpa(pfn),
-                                            PAGE_SIZE,
-                                            DMA_PTE_READ |
-                                            DMA_PTE_WRITE);
+               r = intel_iommu_map_address(domain,
+                                           gfn_to_gpa(gfn),
+                                           pfn_to_hpa(pfn),
+                                           PAGE_SIZE,
+                                           DMA_PTE_READ | DMA_PTE_WRITE);
                if (r) {
-                       printk(KERN_ERR "kvm_iommu_map_pages:"
+                       printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%lx\n", pfn);
                        goto unmap_pages;
                }
@@ -86,50 +84,55 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
        return r;
 }
 
-int kvm_iommu_map_guest(struct kvm *kvm,
-                       struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_assign_device(struct kvm *kvm,
+                     struct kvm_assigned_dev_kernel *assigned_dev)
 {
        struct pci_dev *pdev = NULL;
+       struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
        int r;
 
-       if (!intel_iommu_found()) {
-               printk(KERN_ERR "%s: intel iommu not found\n", __func__);
+       /* check if iommu exists and in use */
+       if (!domain)
+               return 0;
+
+       pdev = assigned_dev->dev;
+       if (pdev == NULL)
                return -ENODEV;
+
+       r = intel_iommu_attach_device(domain, pdev);
+       if (r) {
+               printk(KERN_ERR "assign device %x:%x.%x failed",
+                       pdev->bus->number,
+                       PCI_SLOT(pdev->devfn),
+                       PCI_FUNC(pdev->devfn));
+               return r;
        }
 
-       printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
-              assigned_dev->host_busnr,
-              PCI_SLOT(assigned_dev->host_devfn),
-              PCI_FUNC(assigned_dev->host_devfn));
+       printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
+               assigned_dev->host_busnr,
+               PCI_SLOT(assigned_dev->host_devfn),
+               PCI_FUNC(assigned_dev->host_devfn));
 
-       pdev = assigned_dev->dev;
+       return 0;
+}
 
-       if (pdev == NULL) {
-               if (kvm->arch.intel_iommu_domain) {
-                       intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
-                       kvm->arch.intel_iommu_domain = NULL;
-               }
+int kvm_iommu_map_guest(struct kvm *kvm)
+{
+       int r;
+
+       if (!intel_iommu_found()) {
+               printk(KERN_ERR "%s: intel iommu not found\n", __func__);
                return -ENODEV;
        }
 
-       kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);
+       kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain();
        if (!kvm->arch.intel_iommu_domain)
-               return -ENODEV;
+               return -ENOMEM;
 
        r = kvm_iommu_map_memslots(kvm);
        if (r)
                goto out_unmap;
 
-       intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
-                              pdev->bus->number, pdev->devfn);
-
-       r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
-                                       pdev);
-       if (r) {
-               printk(KERN_ERR "Domain context map for %s failed",
-                      pci_name(pdev));
-               goto out_unmap;
-       }
        return 0;
 
 out_unmap:
@@ -138,19 +141,29 @@ out_unmap:
 }
 
 static void kvm_iommu_put_pages(struct kvm *kvm,
-                              gfn_t base_gfn, unsigned long npages)
+                               gfn_t base_gfn, unsigned long npages)
 {
        gfn_t gfn = base_gfn;
        pfn_t pfn;
        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
-       int i;
+       unsigned long i;
+       u64 phys;
+
+       /* check if iommu exists and in use */
+       if (!domain)
+               return;
 
        for (i = 0; i < npages; i++) {
-               pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-                                                    gfn_to_gpa(gfn));
+               phys = intel_iommu_iova_to_phys(domain,
+                                               gfn_to_gpa(gfn));
+               pfn = phys >> PAGE_SHIFT;
                kvm_release_pfn_clean(pfn);
                gfn++;
        }
+
+       intel_iommu_unmap_address(domain,
+                                 gfn_to_gpa(base_gfn),
+                                 PAGE_SIZE * npages);
 }
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -182,10 +195,9 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
                       PCI_FUNC(entry->host_devfn));
 
                /* detach kvm dmar domain */
-               intel_iommu_detach_dev(domain, entry->host_busnr,
-                                      entry->host_devfn);
+               intel_iommu_detach_device(domain, entry->dev);
        }
        kvm_iommu_unmap_memslots(kvm);
-       intel_iommu_domain_exit(domain);
+       intel_iommu_free_domain(domain);
        return 0;
 }
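
For reference, a non-authoritative sketch of how the renamed VT-d calls line
up with the old ones after this patch. The old-to-new pairing and the argument
shapes are taken only from the removed/added lines above; the function name
and the pdev/gpa/hpa parameters are placeholders, not code from the patch:

	/* Hypothetical example function, for illustration only. */
	static int vtd_new_api_sketch(struct pci_dev *pdev, u64 gpa, u64 hpa)
	{
		struct dmar_domain *domain;
		u64 phys;
		int r;

		if (!intel_iommu_found())                    /* unchanged */
			return -ENODEV;

		domain = intel_iommu_alloc_domain();         /* was intel_iommu_domain_alloc(pdev) */
		if (!domain)
			return -ENOMEM;

		r = intel_iommu_attach_device(domain, pdev); /* was intel_iommu_context_mapping(domain, pdev) */
		if (r)
			goto out_free;

		r = intel_iommu_map_address(domain, gpa, hpa, /* was intel_iommu_page_mapping() */
					    PAGE_SIZE,
					    DMA_PTE_READ | DMA_PTE_WRITE);
		if (r)
			goto out_detach;

		/* was intel_iommu_iova_to_pfn(); the new call returns a
		 * physical address, so callers shift by PAGE_SHIFT to get a pfn */
		phys = intel_iommu_iova_to_phys(domain, gpa);
		WARN_ON(phys != hpa);

		intel_iommu_unmap_address(domain, gpa, PAGE_SIZE); /* new, used by kvm_iommu_put_pages() */

	out_detach:
		intel_iommu_detach_device(domain, pdev);     /* was intel_iommu_detach_dev(domain, busnr, devfn) */
	out_free:
		intel_iommu_free_domain(domain);             /* was intel_iommu_domain_exit(domain) */
		return r;
	}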