author     Weidong Han <weidong.han@intel.com>      2008-12-02 08:03:39 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>      2009-01-03 08:02:19 -0500
commit     260782bcfdaaa7850f29d6bb2ec6603019168c57
tree       6f0b72f07ee04ad1210aca861dd4ee8c51846609
parent     fe40f1e020d0923f5f35ca15f02a206c75a28053
KVM: use the new intel iommu APIs
The Intel IOMMU APIs have been updated; use the new APIs.
In addition, change kvm_iommu_map_guest() to just create the domain, and let kvm_assign_device() do the device assignment.
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--  include/linux/kvm_host.h | 15
-rw-r--r--  virt/kvm/kvm_main.c      |  7
-rw-r--r--  virt/kvm/vtd.c           | 98
3 files changed, 71 insertions(+), 49 deletions(-)
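For orientation, a minimal sketch of the call sequence this patch establishes in kvm_vm_ioctl_assign_device(); the wrapper name example_assign_device() and the abbreviated error handling are mine, not part of the patch:

/*
 * Sketch only: the per-VM DMAR domain is created lazily on the first
 * IOMMU-enabled assignment; every assigned device is then attached to
 * that existing domain. example_assign_device() is hypothetical.
 */
static int example_assign_device(struct kvm *kvm,
                                 struct kvm_assigned_dev_kernel *dev)
{
        int r;

        /* Create the domain and map all memslots only once per VM. */
        if (!kvm->arch.intel_iommu_domain) {
                r = kvm_iommu_map_guest(kvm);
                if (r)
                        return r;
        }

        /* Attach this device to the VM's existing domain. */
        return kvm_assign_device(kvm, dev);
}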
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eafabd5c66b2..c96739b4b7a3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -330,9 +330,10 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
                         unsigned long npages);
-int kvm_iommu_map_guest(struct kvm *kvm,
-                        struct kvm_assigned_dev_kernel *assigned_dev);
+int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
+int kvm_assign_device(struct kvm *kvm,
+                      struct kvm_assigned_dev_kernel *assigned_dev);
 #else /* CONFIG_DMAR */
 static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                       gfn_t base_gfn,
@@ -341,9 +342,7 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
        return 0;
 }
 
-static inline int kvm_iommu_map_guest(struct kvm *kvm,
-                                      struct kvm_assigned_dev_kernel
-                                      *assigned_dev)
+static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
        return -ENODEV;
 }
@@ -352,6 +351,12 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
 {
        return 0;
 }
+
+static inline int kvm_assign_device(struct kvm *kvm,
+                                    struct kvm_assigned_dev_kernel *assigned_dev)
+{
+       return 0;
+}
 #endif /* CONFIG_DMAR */
 
 static inline void kvm_guest_enter(void)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fc6127cbea1f..c92b63462b79 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -503,7 +503,12 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        list_add(&match->list, &kvm->arch.assigned_dev_head);
 
        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-               r = kvm_iommu_map_guest(kvm, match);
+               if (!kvm->arch.intel_iommu_domain) {
+                       r = kvm_iommu_map_guest(kvm);
+                       if (r)
+                               goto out_list_del;
+               }
+               r = kvm_assign_device(kvm, match);
                if (r)
                        goto out_list_del;
        }
diff --git a/virt/kvm/vtd.c b/virt/kvm/vtd.c
index a770874f3a3a..44bb58a395a5 100644
--- a/virt/kvm/vtd.c
+++ b/virt/kvm/vtd.c
@@ -45,20 +45,18 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 
        for (i = 0; i < npages; i++) {
                /* check if already mapped */
-               pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-                                                    gfn_to_gpa(gfn));
-               if (pfn)
+               if (intel_iommu_iova_to_phys(domain,
+                                            gfn_to_gpa(gfn)))
                        continue;
 
                pfn = gfn_to_pfn(kvm, gfn);
-               r = intel_iommu_page_mapping(domain,
-                                            gfn_to_gpa(gfn),
-                                            pfn_to_hpa(pfn),
-                                            PAGE_SIZE,
-                                            DMA_PTE_READ |
-                                            DMA_PTE_WRITE);
+               r = intel_iommu_map_address(domain,
+                                           gfn_to_gpa(gfn),
+                                           pfn_to_hpa(pfn),
+                                           PAGE_SIZE,
+                                           DMA_PTE_READ | DMA_PTE_WRITE);
                if (r) {
-                       printk(KERN_ERR "kvm_iommu_map_pages:"
+                       printk(KERN_ERR "kvm_iommu_map_address:"
                               "iommu failed to map pfn=%lx\n", pfn);
                        goto unmap_pages;
                }
@@ -86,50 +84,55 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
        return r;
 }
 
-int kvm_iommu_map_guest(struct kvm *kvm,
-                       struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_assign_device(struct kvm *kvm,
+                     struct kvm_assigned_dev_kernel *assigned_dev)
 {
        struct pci_dev *pdev = NULL;
+       struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
        int r;
 
-       if (!intel_iommu_found()) {
-               printk(KERN_ERR "%s: intel iommu not found\n", __func__);
+       /* check if iommu exists and in use */
+       if (!domain)
+               return 0;
+
+       pdev = assigned_dev->dev;
+       if (pdev == NULL)
                return -ENODEV;
+
+       r = intel_iommu_attach_device(domain, pdev);
+       if (r) {
+               printk(KERN_ERR "assign device %x:%x.%x failed",
+                       pdev->bus->number,
+                       PCI_SLOT(pdev->devfn),
+                       PCI_FUNC(pdev->devfn));
+               return r;
        }
 
-       printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
+       printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
               assigned_dev->host_busnr,
               PCI_SLOT(assigned_dev->host_devfn),
               PCI_FUNC(assigned_dev->host_devfn));
 
-       pdev = assigned_dev->dev;
+       return 0;
+}
 
-       if (pdev == NULL) {
-               if (kvm->arch.intel_iommu_domain) {
-                       intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
-                       kvm->arch.intel_iommu_domain = NULL;
-               }
+int kvm_iommu_map_guest(struct kvm *kvm)
+{
+       int r;
+
+       if (!intel_iommu_found()) {
+               printk(KERN_ERR "%s: intel iommu not found\n", __func__);
                return -ENODEV;
        }
 
-       kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);
+       kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain();
        if (!kvm->arch.intel_iommu_domain)
-               return -ENODEV;
+               return -ENOMEM;
 
        r = kvm_iommu_map_memslots(kvm);
        if (r)
                goto out_unmap;
 
-       intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
-                              pdev->bus->number, pdev->devfn);
-
-       r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
-                                       pdev);
-       if (r) {
-               printk(KERN_ERR "Domain context map for %s failed",
-                      pci_name(pdev));
-               goto out_unmap;
-       }
        return 0;
 
 out_unmap:
@@ -138,19 +141,29 @@ out_unmap:
 }
 
 static void kvm_iommu_put_pages(struct kvm *kvm,
                                gfn_t base_gfn, unsigned long npages)
 {
        gfn_t gfn = base_gfn;
        pfn_t pfn;
        struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
-       int i;
+       unsigned long i;
+       u64 phys;
+
+       /* check if iommu exists and in use */
+       if (!domain)
+               return;
 
        for (i = 0; i < npages; i++) {
-               pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-                                                    gfn_to_gpa(gfn));
+               phys = intel_iommu_iova_to_phys(domain,
+                                               gfn_to_gpa(gfn));
+               pfn = phys >> PAGE_SHIFT;
                kvm_release_pfn_clean(pfn);
                gfn++;
        }
+
+       intel_iommu_unmap_address(domain,
+                                 gfn_to_gpa(base_gfn),
+                                 PAGE_SIZE * npages);
 }
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -182,10 +195,9 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
                       PCI_FUNC(entry->host_devfn));
 
                /* detach kvm dmar domain */
-               intel_iommu_detach_dev(domain, entry->host_busnr,
-                                      entry->host_devfn);
+               intel_iommu_detach_device(domain, entry->dev);
        }
        kvm_iommu_unmap_memslots(kvm);
-       intel_iommu_domain_exit(domain);
+       intel_iommu_free_domain(domain);
        return 0;
 }
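As a reading aid (my summary, inferred from the hunks above; not part of the patch), the old-to-new intel-iommu API correspondence this patch adopts:

/*
 * intel_iommu_domain_alloc(pdev)         ->  intel_iommu_alloc_domain()
 * intel_iommu_domain_exit(domain)        ->  intel_iommu_free_domain(domain)
 * intel_iommu_context_mapping(d, pdev)   ->  intel_iommu_attach_device(d, pdev)
 * intel_iommu_detach_dev(d, bus, devfn)  ->  intel_iommu_detach_device(d, dev)
 * intel_iommu_page_mapping(d, iova, hpa, size, prot)
 *                                        ->  intel_iommu_map_address(d, iova, hpa, size, prot),
 *                                            with intel_iommu_unmap_address(d, iova, size) to undo
 * intel_iommu_iova_to_pfn(d, iova)       ->  intel_iommu_iova_to_phys(d, iova)
 */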