path: root/virt/kvm/vtd.c
author	Weidong Han <weidong.han@intel.com>	2008-12-02 08:03:39 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-01-03 08:02:19 -0500
commit	260782bcfdaaa7850f29d6bb2ec6603019168c57 (patch)
tree	6f0b72f07ee04ad1210aca861dd4ee8c51846609 /virt/kvm/vtd.c
parent	fe40f1e020d0923f5f35ca15f02a206c75a28053 (diff)
KVM: use the new intel iommu APIs

The intel iommu APIs have been updated; switch to the new APIs. In addition, change kvm_iommu_map_guest() to just create the domain, and let the new kvm_assign_device() attach the device.

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
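[Editor's note] In practice the split means a caller now creates the IOMMU domain once per VM and attaches each assigned device separately. A minimal sketch of the resulting call sequence; the wrapper below is hypothetical, and only kvm_iommu_map_guest() and kvm_assign_device() come from this patch:

/* Hypothetical caller, for illustration only: allocate the dmar
 * domain on first use, then attach the assigned device to it. */
static int example_assign_device(struct kvm *kvm,
				 struct kvm_assigned_dev_kernel *adev)
{
	int r;

	if (!kvm->arch.intel_iommu_domain) {
		/* Creates the domain and maps all memslots into it. */
		r = kvm_iommu_map_guest(kvm);
		if (r)
			return r;
	}

	/* Attaches adev->dev to the VM's dmar domain. */
	return kvm_assign_device(kvm, adev);
}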
Diffstat (limited to 'virt/kvm/vtd.c')
-rw-r--r--	virt/kvm/vtd.c	98
1 file changed, 55 insertions(+), 43 deletions(-)
diff --git a/virt/kvm/vtd.c b/virt/kvm/vtd.c
index a770874f3a3a..44bb58a395a5 100644
--- a/virt/kvm/vtd.c
+++ b/virt/kvm/vtd.c
@@ -45,20 +45,18 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 
 	for (i = 0; i < npages; i++) {
 		/* check if already mapped */
-		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-						     gfn_to_gpa(gfn));
-		if (pfn)
+		if (intel_iommu_iova_to_phys(domain,
+					     gfn_to_gpa(gfn)))
 			continue;
 
 		pfn = gfn_to_pfn(kvm, gfn);
-		r = intel_iommu_page_mapping(domain,
-					     gfn_to_gpa(gfn),
-					     pfn_to_hpa(pfn),
-					     PAGE_SIZE,
-					     DMA_PTE_READ |
-					     DMA_PTE_WRITE);
+		r = intel_iommu_map_address(domain,
+					    gfn_to_gpa(gfn),
+					    pfn_to_hpa(pfn),
+					    PAGE_SIZE,
+					    DMA_PTE_READ | DMA_PTE_WRITE);
 		if (r) {
-			printk(KERN_ERR "kvm_iommu_map_pages:"
+			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%lx\n", pfn);
 			goto unmap_pages;
 		}
@@ -86,50 +84,55 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 	return r;
 }
 
-int kvm_iommu_map_guest(struct kvm *kvm,
-			struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_assign_device(struct kvm *kvm,
+		      struct kvm_assigned_dev_kernel *assigned_dev)
 {
 	struct pci_dev *pdev = NULL;
+	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
 	int r;
 
-	if (!intel_iommu_found()) {
-		printk(KERN_ERR "%s: intel iommu not found\n", __func__);
+	/* check if iommu exists and in use */
+	if (!domain)
+		return 0;
+
+	pdev = assigned_dev->dev;
+	if (pdev == NULL)
 		return -ENODEV;
+
+	r = intel_iommu_attach_device(domain, pdev);
+	if (r) {
+		printk(KERN_ERR "assign device %x:%x.%x failed",
+			pdev->bus->number,
+			PCI_SLOT(pdev->devfn),
+			PCI_FUNC(pdev->devfn));
+		return r;
 	}
 
-	printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
+	printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
 	       assigned_dev->host_busnr,
 	       PCI_SLOT(assigned_dev->host_devfn),
 	       PCI_FUNC(assigned_dev->host_devfn));
 
-	pdev = assigned_dev->dev;
+	return 0;
+}
 
-	if (pdev == NULL) {
-		if (kvm->arch.intel_iommu_domain) {
-			intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
-			kvm->arch.intel_iommu_domain = NULL;
-		}
+int kvm_iommu_map_guest(struct kvm *kvm)
+{
+	int r;
+
+	if (!intel_iommu_found()) {
+		printk(KERN_ERR "%s: intel iommu not found\n", __func__);
 		return -ENODEV;
 	}
 
-	kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);
+	kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain();
 	if (!kvm->arch.intel_iommu_domain)
-		return -ENODEV;
+		return -ENOMEM;
 
 	r = kvm_iommu_map_memslots(kvm);
 	if (r)
 		goto out_unmap;
 
-	intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
-			       pdev->bus->number, pdev->devfn);
-
-	r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
-					pdev);
-	if (r) {
-		printk(KERN_ERR "Domain context map for %s failed",
-		       pci_name(pdev));
-		goto out_unmap;
-	}
 	return 0;
 
 out_unmap:
@@ -138,19 +141,29 @@ out_unmap:
 }
 
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages)
 {
 	gfn_t gfn = base_gfn;
 	pfn_t pfn;
 	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
-	int i;
+	unsigned long i;
+	u64 phys;
+
+	/* check if iommu exists and in use */
+	if (!domain)
+		return;
 
 	for (i = 0; i < npages; i++) {
-		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-						     gfn_to_gpa(gfn));
+		phys = intel_iommu_iova_to_phys(domain,
+						gfn_to_gpa(gfn));
+		pfn = phys >> PAGE_SHIFT;
 		kvm_release_pfn_clean(pfn);
 		gfn++;
 	}
+
+	intel_iommu_unmap_address(domain,
+				  gfn_to_gpa(base_gfn),
+				  PAGE_SIZE * npages);
 }
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -182,10 +195,9 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
 		       PCI_FUNC(entry->host_devfn));
 
 		/* detach kvm dmar domain */
-		intel_iommu_detach_dev(domain, entry->host_busnr,
-				       entry->host_devfn);
+		intel_iommu_detach_device(domain, entry->dev);
 	}
 	kvm_iommu_unmap_memslots(kvm);
-	intel_iommu_domain_exit(domain);
+	intel_iommu_free_domain(domain);
 	return 0;
 }
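
[Editor's note] The teardown path mirrors the split: devices are detached from the domain first, then the mappings and the domain itself are released, all inside kvm_iommu_unmap_guest(). A hedged sketch of a hypothetical caller; only kvm_iommu_unmap_guest() and the kvm->arch.intel_iommu_domain field come from this file:

/* Hypothetical teardown caller: kvm_iommu_unmap_guest() detaches
 * every assigned device (intel_iommu_detach_device), unmaps the
 * memslots, and frees the domain (intel_iommu_free_domain). */
static void example_release_iommu(struct kvm *kvm)
{
	if (!kvm->arch.intel_iommu_domain)
		return;

	kvm_iommu_unmap_guest(kvm);
	kvm->arch.intel_iommu_domain = NULL;	/* illustrative cleanup */
}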