author     Joerg Roedel <joerg.roedel@amd.com>    2008-12-03 08:43:34 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>    2009-01-03 08:11:07 -0500
commit     19de40a8472fa64693eab844911eec277d489f6c
tree       502a8df560341ad715965ed39db33c720c657066 /virt
parent     1aaf118352b85bb359ce28070bcc478f659a7031
KVM: change KVM to use IOMMU API
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/iommu.c     45
-rw-r--r--  virt/kvm/kvm_main.c   2
2 files changed, 22 insertions, 25 deletions
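
The hunks below replace each intel_iommu_* call with its counterpart from the generic IOMMU API (linux/iommu.h). As a quick orientation, here is a minimal, self-contained sketch of the call sequence the converted code relies on. It is not part of the patch: the function kvm_example_map_one(), its parameters and the goto labels are hypothetical, only API calls that actually appear in the diff are used, and the signatures are those of the 2.6.29-era API (later kernels renamed several of these, e.g. iommu_map_range() eventually became iommu_map()).

/*
 * Illustrative only -- not part of the commit.  A minimal sketch of the
 * generic IOMMU API sequence that kvm_iommu_map_guest(), kvm_assign_device(),
 * kvm_iommu_map_pages() and kvm_iommu_unmap_guest() use after this patch.
 * kvm_example_map_one() and its parameters are hypothetical.
 */
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int kvm_example_map_one(struct pci_dev *pdev, unsigned long iova,
                               phys_addr_t paddr)
{
        struct iommu_domain *domain;
        int r;

        if (!iommu_found())                     /* any IOMMU driver registered? */
                return -ENODEV;

        domain = iommu_domain_alloc();          /* was intel_iommu_alloc_domain() */
        if (!domain)
                return -ENOMEM;

        r = iommu_attach_device(domain, &pdev->dev);    /* takes a struct device now */
        if (r)
                goto free_domain;

        /* generic protection flags replace the Intel-specific DMA_PTE_* bits */
        r = iommu_map_range(domain, iova, paddr, PAGE_SIZE,
                            IOMMU_READ | IOMMU_WRITE);
        if (r)
                goto detach;

        /* the "already mapped?" lookup used in kvm_iommu_map_pages() */
        if (iommu_iova_to_phys(domain, iova) != paddr)
                r = -EFAULT;

        iommu_unmap_range(domain, iova, PAGE_SIZE);     /* was intel_iommu_unmap_address() */
detach:
        iommu_detach_device(domain, &pdev->dev);
free_domain:
        iommu_domain_free(domain);              /* was intel_iommu_free_domain() */
        return r;
}

The design point of the conversion is visible in the sketch: KVM now goes through the generic IOMMU layer using struct iommu_domain, struct device and IOMMU_READ/IOMMU_WRITE, instead of depending directly on struct dmar_domain, struct pci_dev and the Intel-only DMA_PTE_* bits.
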
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index d46de9af838b..d0bebaa5bf0a 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -25,6 +25,7 @@
 #include <linux/kvm_host.h>
 #include <linux/pci.h>
 #include <linux/dmar.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm);
@@ -37,7 +38,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
         gfn_t gfn = base_gfn;
         pfn_t pfn;
         int i, r = 0;
-        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+        struct iommu_domain *domain = kvm->arch.iommu_domain;
 
         /* check if iommu exists and in use */
         if (!domain)
@@ -45,16 +46,15 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 
         for (i = 0; i < npages; i++) {
                 /* check if already mapped */
-                if (intel_iommu_iova_to_phys(domain,
-                        gfn_to_gpa(gfn)))
+                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
                         continue;
 
                 pfn = gfn_to_pfn(kvm, gfn);
-                r = intel_iommu_map_address(domain,
+                r = iommu_map_range(domain,
                         gfn_to_gpa(gfn),
                         pfn_to_hpa(pfn),
                         PAGE_SIZE,
-                        DMA_PTE_READ | DMA_PTE_WRITE);
+                        IOMMU_READ | IOMMU_WRITE);
                 if (r) {
                         printk(KERN_ERR "kvm_iommu_map_address:"
                                 "iommu failed to map pfn=%lx\n", pfn);
@@ -88,7 +88,7 @@ int kvm_assign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev)
 {
         struct pci_dev *pdev = NULL;
-        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+        struct iommu_domain *domain = kvm->arch.iommu_domain;
         int r;
 
         /* check if iommu exists and in use */
@@ -99,7 +99,7 @@ int kvm_assign_device(struct kvm *kvm,
         if (pdev == NULL)
                 return -ENODEV;
 
-        r = intel_iommu_attach_device(domain, pdev);
+        r = iommu_attach_device(domain, &pdev->dev);
         if (r) {
                 printk(KERN_ERR "assign device %x:%x.%x failed",
                         pdev->bus->number,
@@ -119,7 +119,7 @@ int kvm_assign_device(struct kvm *kvm,
 int kvm_deassign_device(struct kvm *kvm,
                         struct kvm_assigned_dev_kernel *assigned_dev)
 {
-        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+        struct iommu_domain *domain = kvm->arch.iommu_domain;
         struct pci_dev *pdev = NULL;
 
         /* check if iommu exists and in use */
@@ -130,7 +130,7 @@ int kvm_deassign_device(struct kvm *kvm,
         if (pdev == NULL)
                 return -ENODEV;
 
-        intel_iommu_detach_device(domain, pdev);
+        iommu_detach_device(domain, &pdev->dev);
 
         printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n",
                 assigned_dev->host_busnr,
@@ -144,13 +144,13 @@ int kvm_iommu_map_guest(struct kvm *kvm)
 {
         int r;
 
-        if (!intel_iommu_found()) {
-                printk(KERN_ERR "%s: intel iommu not found\n", __func__);
+        if (!iommu_found()) {
+                printk(KERN_ERR "%s: iommu not found\n", __func__);
                 return -ENODEV;
         }
 
-        kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain();
-        if (!kvm->arch.intel_iommu_domain)
+        kvm->arch.iommu_domain = iommu_domain_alloc();
+        if (!kvm->arch.iommu_domain)
                 return -ENOMEM;
 
         r = kvm_iommu_map_memslots(kvm);
@@ -169,7 +169,7 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 {
         gfn_t gfn = base_gfn;
         pfn_t pfn;
-        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+        struct iommu_domain *domain = kvm->arch.iommu_domain;
         unsigned long i;
         u64 phys;
 
@@ -178,16 +178,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
                 return;
 
         for (i = 0; i < npages; i++) {
-                phys = intel_iommu_iova_to_phys(domain,
-                        gfn_to_gpa(gfn));
+                phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
                 pfn = phys >> PAGE_SHIFT;
                 kvm_release_pfn_clean(pfn);
                 gfn++;
         }
 
-        intel_iommu_unmap_address(domain,
-                gfn_to_gpa(base_gfn),
-                PAGE_SIZE * npages);
+        iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
 }
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -205,13 +202,13 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 
 int kvm_iommu_unmap_guest(struct kvm *kvm)
 {
-        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
+        struct iommu_domain *domain = kvm->arch.iommu_domain;
 
         /* check if iommu exists and in use */
         if (!domain)
                 return 0;
 
         kvm_iommu_unmap_memslots(kvm);
-        intel_iommu_free_domain(domain);
+        iommu_domain_free(domain);
         return 0;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4ef0fb43d1f9..3a5a08298aab 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -504,7 +504,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
         list_add(&match->list, &kvm->arch.assigned_dev_head);
 
         if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-                if (!kvm->arch.intel_iommu_domain) {
+                if (!kvm->arch.iommu_domain) {
                         r = kvm_iommu_map_guest(kvm);
                         if (r)
                                 goto out_list_del;