author		Weidong Han <weidong.han@intel.com>	2008-09-25 11:32:02 -0400
committer	Avi Kivity <avi@redhat.com>	2008-10-15 08:25:29 -0400
commit		e5fcfc821a467bd0827635db8fd39ab1213987e5 (patch)
tree		c0b4247f84dc8382bd1e5312aa49430499b16615 /arch/x86/kvm/vtd.c
parent		e48258009d941891fca35348986b8d280caf31cd (diff)
KVM: Device Assignment: Map mmio pages into VT-d page table
An assigned device could DMA to mmio pages, so mmio pages also need to be mapped into the VT-d page table.

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
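For reference, here is a minimal sketch of the mapping loop as it reads once this patch is applied, consolidated from the diff below. The function signature, the two includes, and the simplified error path are assumptions about this era of the tree rather than a verbatim copy of the file (the real function unwinds already-mapped pages via an unmap_pages label, and it bumps gfn at the bottom of the loop body).

#include <linux/kvm_host.h>
#include <linux/intel-iommu.h>

/* Sketch only: map npages guest frames, RAM or mmio, into the VT-d domain. */
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages)
{
	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
	gfn_t gfn = base_gfn;
	pfn_t pfn;
	int i, r = 0;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	for (i = 0; i < npages; i++, gfn++) {
		/* skip gfns that already have a VT-d mapping */
		if (intel_iommu_iova_to_pfn(domain, gfn_to_gpa(gfn)))
			continue;

		/* no is_mmio_pfn() filter any more: mmio pfns are mapped too */
		pfn = gfn_to_pfn(kvm, gfn);
		r = intel_iommu_page_mapping(domain, gfn_to_gpa(gfn),
					     pfn_to_hpa(pfn), PAGE_SIZE,
					     DMA_PTE_READ | DMA_PTE_WRITE);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_pages: "
			       "iommu failed to map pfn=%lx\n", pfn);
			break;	/* real code jumps to unmap_pages here */
		}
	}
	return r;
}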
Diffstat (limited to 'arch/x86/kvm/vtd.c')
-rw-r--r--	arch/x86/kvm/vtd.c	29
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/vtd.c b/arch/x86/kvm/vtd.c
index 667bf3fb64bf..a770874f3a3a 100644
--- a/arch/x86/kvm/vtd.c
+++ b/arch/x86/kvm/vtd.c
@@ -36,37 +36,30 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 {
 	gfn_t gfn = base_gfn;
 	pfn_t pfn;
-	int i, r;
+	int i, r = 0;
 	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
 
 	/* check if iommu exists and in use */
 	if (!domain)
 		return 0;
 
-	r = -EINVAL;
 	for (i = 0; i < npages; i++) {
 		/* check if already mapped */
 		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
 						     gfn_to_gpa(gfn));
-		if (pfn && !is_mmio_pfn(pfn))
+		if (pfn)
 			continue;
 
 		pfn = gfn_to_pfn(kvm, gfn);
-		if (!is_mmio_pfn(pfn)) {
-			r = intel_iommu_page_mapping(domain,
-						     gfn_to_gpa(gfn),
-						     pfn_to_hpa(pfn),
-						     PAGE_SIZE,
-						     DMA_PTE_READ |
-						     DMA_PTE_WRITE);
-			if (r) {
-				printk(KERN_DEBUG "kvm_iommu_map_pages:"
-				       "iommu failed to map pfn=%lx\n", pfn);
-				goto unmap_pages;
-			}
-		} else {
-			printk(KERN_DEBUG "kvm_iommu_map_page:"
-			       "invalid pfn=%lx\n", pfn);
+		r = intel_iommu_page_mapping(domain,
+					     gfn_to_gpa(gfn),
+					     pfn_to_hpa(pfn),
+					     PAGE_SIZE,
+					     DMA_PTE_READ |
+					     DMA_PTE_WRITE);
+		if (r) {
+			printk(KERN_ERR "kvm_iommu_map_pages:"
+			       "iommu failed to map pfn=%lx\n", pfn);
 			goto unmap_pages;
 		}
 		gfn++;