author		Weidong Han <weidong.han@intel.com>	2008-09-25 11:32:02 -0400
committer	Avi Kivity <avi@redhat.com>		2008-10-15 08:25:29 -0400
commit		e5fcfc821a467bd0827635db8fd39ab1213987e5 (patch)
tree		c0b4247f84dc8382bd1e5312aa49430499b16615
parent		e48258009d941891fca35348986b8d280caf31cd (diff)
KVM: Device Assignment: Map mmio pages into VT-d page table
An assigned device could DMA to mmio pages, so mmio pages also need to be
mapped into the VT-d page table.
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--	arch/x86/kvm/vtd.c		| 29
-rw-r--r--	include/asm-x86/kvm_host.h	|  2
-rw-r--r--	virt/kvm/kvm_main.c		|  2
3 files changed, 12 insertions, 21 deletions
diff --git a/arch/x86/kvm/vtd.c b/arch/x86/kvm/vtd.c
index 667bf3fb64bf..a770874f3a3a 100644
--- a/arch/x86/kvm/vtd.c
+++ b/arch/x86/kvm/vtd.c
@@ -36,37 +36,30 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 {
 	gfn_t gfn = base_gfn;
 	pfn_t pfn;
-	int i, r;
+	int i, r = 0;
 	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
 
 	/* check if iommu exists and in use */
 	if (!domain)
 		return 0;
 
-	r = -EINVAL;
 	for (i = 0; i < npages; i++) {
 		/* check if already mapped */
 		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
 						     gfn_to_gpa(gfn));
-		if (pfn && !is_mmio_pfn(pfn))
+		if (pfn)
 			continue;
 
 		pfn = gfn_to_pfn(kvm, gfn);
-		if (!is_mmio_pfn(pfn)) {
-			r = intel_iommu_page_mapping(domain,
-						     gfn_to_gpa(gfn),
-						     pfn_to_hpa(pfn),
-						     PAGE_SIZE,
-						     DMA_PTE_READ |
-						     DMA_PTE_WRITE);
-			if (r) {
-				printk(KERN_DEBUG "kvm_iommu_map_pages:"
-				       "iommu failed to map pfn=%lx\n", pfn);
-				goto unmap_pages;
-			}
-		} else {
-			printk(KERN_DEBUG "kvm_iommu_map_page:"
-			       "invalid pfn=%lx\n", pfn);
+		r = intel_iommu_page_mapping(domain,
+					     gfn_to_gpa(gfn),
+					     pfn_to_hpa(pfn),
+					     PAGE_SIZE,
+					     DMA_PTE_READ |
+					     DMA_PTE_WRITE);
+		if (r) {
+			printk(KERN_ERR "kvm_iommu_map_pages:"
+			       "iommu failed to map pfn=%lx\n", pfn);
 			goto unmap_pages;
 		}
 		gfn++;
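
For context, kvm_iommu_map_pages() in the hunk above is driven from the device-assignment setup path, which walks the guest's memory slots; with this patch, slots backed by mmio pages (such as an assigned device's BARs) receive VT-d mappings like any other slot. The sketch below is illustrative only: it assumes the 2008-era struct kvm layout (nmemslots, memslots[]) and struct kvm_memory_slot fields (base_gfn, npages), and the helper name itself is hypothetical, not part of this patch.

/* Illustrative sketch: install VT-d mappings for every memory slot.
 * After this patch, mmio-backed gfns within a slot are no longer skipped,
 * so an assigned device can DMA to them.
 */
static int example_map_all_memslots(struct kvm *kvm)
{
	int i, r = 0;

	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *slot = &kvm->memslots[i];

		r = kvm_iommu_map_pages(kvm, slot->base_gfn, slot->npages);
		if (r)
			break;
	}
	return r;
}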
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 0992d721c5f7..ca6bbc0bd97c 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -502,8 +502,6 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 		  gpa_t addr, unsigned long *ret);
 
-int is_mmio_pfn(pfn_t pfn);
-
 extern bool tdp_enabled;
 
 enum emulation_result {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6cf042789ad1..98cd916448a8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -76,7 +76,7 @@ static inline int valid_vcpu(int n)
 	return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
-inline int is_mmio_pfn(pfn_t pfn)
+static inline int is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
 		return PageReserved(pfn_to_page(pfn));