author		Joerg Roedel <joerg.roedel@amd.com>	2010-01-11 10:38:18 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2010-03-07 12:01:11 -0500
commit		fcd95807fb61e67d602610e7ff7129ed769e9fee (patch)
tree		19f83180b75184a5b711cc064f4d9c7d8d93713f /virt
parent		b146a1c9f7f1feeacf840fa1ba197a99593cea15 (diff)
kvm: Change kvm_iommu_map_pages to map large pages
This patch changes the implementation of kvm_iommu_map_pages
to map the pages into the IO virtual address space with the
host page size.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt')

-rw-r--r--	virt/kvm/iommu.c | 113
1 file changed, 91 insertions(+), 22 deletions(-)
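The interesting part of the patch is the page size selection in the new mapping loop: start from the size of the host page backing the gfn and halve it until the mapping both fits inside the memslot and is naturally aligned. Below is a minimal standalone sketch of that computation, not code from the patch; the inputs are hypothetical stand-ins for kvm_host_page_size() and the memslot bounds:

#include <stdio.h>

#define PAGE_SHIFT 12UL

/*
 * Sketch of the size selection in the new kvm_iommu_map_pages()
 * loop: halve page_size until the mapping fits in the memslot
 * and gfn is aligned to it. Inputs are hypothetical, not the
 * KVM API.
 */
static unsigned long pick_page_size(unsigned long gfn,
				    unsigned long end_gfn,
				    unsigned long host_page_size)
{
	unsigned long page_size = host_page_size;

	/* Make sure the page_size does not exceed the memslot */
	while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
		page_size >>= 1;

	/* Make sure gfn is aligned to the page size we want to map */
	while ((gfn << PAGE_SHIFT) & (page_size - 1))
		page_size >>= 1;

	return page_size;
}

int main(void)
{
	/* 2 MiB host page, gfn 2 MiB aligned, slot has room: keep 2 MiB */
	printf("%lu\n", pick_page_size(0x200, 0x400, 2UL << 20));
	/* same host page but an odd gfn: degrades to a 4 KiB mapping */
	printf("%lu\n", pick_page_size(0x201, 0x400, 2UL << 20));
	return 0;
}

With an unaligned gfn the selection degrades all the way down to 4kb mappings, which is exactly the old per-page behaviour.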
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 80fd3ad3b2de..11692b9e8830 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -32,12 +32,30 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages);
 
+static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
+			   gfn_t gfn, unsigned long size)
+{
+	gfn_t end_gfn;
+	pfn_t pfn;
+
+	pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
+	end_gfn = gfn + (size >> PAGE_SHIFT);
+	gfn += 1;
+
+	if (is_error_pfn(pfn))
+		return pfn;
+
+	while (gfn < end_gfn)
+		gfn_to_pfn_memslot(kvm, slot, gfn++);
+
+	return pfn;
+}
+
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-	gfn_t gfn = slot->base_gfn;
-	unsigned long npages = slot->npages;
+	gfn_t gfn, end_gfn;
 	pfn_t pfn;
-	int i, r = 0;
+	int r = 0;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
 	int flags;
 
@@ -45,31 +63,62 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 	if (!domain)
 		return 0;
 
+	gfn = slot->base_gfn;
+	end_gfn = gfn + slot->npages;
+
 	flags = IOMMU_READ | IOMMU_WRITE;
 	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
 		flags |= IOMMU_CACHE;
 
-	for (i = 0; i < npages; i++) {
-		/* check if already mapped */
-		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
+
+	while (gfn < end_gfn) {
+		unsigned long page_size;
+
+		/* Check if already mapped */
+		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
+			gfn += 1;
+			continue;
+		}
+
+		/* Get the page size we could use to map */
+		page_size = kvm_host_page_size(kvm, gfn);
+
+		/* Make sure the page_size does not exceed the memslot */
+		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
+			page_size >>= 1;
+
+		/* Make sure gfn is aligned to the page size we want to map */
+		while ((gfn << PAGE_SHIFT) & (page_size - 1))
+			page_size >>= 1;
+
+		/*
+		 * Pin all pages we are about to map in memory. This is
+		 * important because we unmap and unpin in 4kb steps later.
+		 */
+		pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
+		if (is_error_pfn(pfn)) {
+			gfn += 1;
 			continue;
+		}
 
-		pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
-		r = iommu_map_range(domain,
-				    gfn_to_gpa(gfn),
-				    pfn_to_hpa(pfn),
-				    PAGE_SIZE, flags);
+		/* Map into IO address space */
+		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
+			      get_order(page_size), flags);
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%lx\n", pfn);
 			goto unmap_pages;
 		}
-		gfn++;
+
+		gfn += page_size >> PAGE_SHIFT;
+
+
 	}
+
 	return 0;
 
 unmap_pages:
-	kvm_iommu_put_pages(kvm, slot->base_gfn, i);
+	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
 	return r;
 }
 
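Note the pairing the comment in the hunk above spells out: kvm_pin_pages() takes a reference on every 4kb frame covered by a large mapping, because teardown later unmaps and unpins in 4kb steps. A rough standalone model of that bookkeeping, with hypothetical values rather than KVM calls:

#include <stdio.h>

#define PAGE_SHIFT 12UL

/*
 * Rough model of the pin/advance bookkeeping in the new loop:
 * one large iommu_map() call still pins every constituent 4 KiB
 * frame, and gfn then jumps past all of them. All values here
 * are hypothetical.
 */
int main(void)
{
	unsigned long gfn = 0x200, end_gfn = 0x400; /* a 2 MiB slot */
	unsigned long page_size = 2UL << 20;        /* 2 MiB host pages */
	unsigned long pinned = 0, mappings = 0;

	while (gfn < end_gfn) {
		unsigned long npages = page_size >> PAGE_SHIFT;

		pinned += npages; /* kvm_pin_pages() pins each 4 KiB frame */
		mappings++;       /* but only one IOMMU mapping is created */
		gfn += npages;
	}

	/* prints "mappings=1 pinned=512" */
	printf("mappings=%lu pinned=%lu\n", mappings, pinned);
	return 0;
}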
@@ -189,27 +238,47 @@ out_unmap:
 	return r;
 }
 
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+	unsigned long i;
+
+	for (i = 0; i < npages; ++i)
+		kvm_release_pfn_clean(pfn + i);
+}
+
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages)
 {
-	gfn_t gfn = base_gfn;
+	struct iommu_domain *domain;
+	gfn_t end_gfn, gfn;
 	pfn_t pfn;
-	struct iommu_domain *domain = kvm->arch.iommu_domain;
-	unsigned long i;
 	u64 phys;
 
+	domain = kvm->arch.iommu_domain;
+	end_gfn = base_gfn + npages;
+	gfn = base_gfn;
+
 	/* check if iommu exists and in use */
 	if (!domain)
 		return;
 
-	for (i = 0; i < npages; i++) {
+	while (gfn < end_gfn) {
+		unsigned long unmap_pages;
+		int order;
+
+		/* Get physical address */
 		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
 		pfn = phys >> PAGE_SHIFT;
-		kvm_release_pfn_clean(pfn);
-		gfn++;
-	}
+
+		/* Unmap address from IO address space */
+		order = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
+		unmap_pages = 1ULL << order;
 
-	iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
+		/* Unpin all pages we just unmapped to not leak any memory */
+		kvm_unpin_pages(kvm, pfn, unmap_pages);
+
+		gfn += unmap_pages;
+	}
 }
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
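On the teardown side, iommu_unmap() reports the order of whatever mapping the IOMMU driver actually finds at that IO virtual address, and the loop unpins exactly 1 << order small pages before advancing. A minimal standalone model of that walk; get_mapping_order() is a hypothetical stand-in for the order iommu_unmap() returns:

#include <stdio.h>

/*
 * Minimal model of the new kvm_iommu_put_pages() walk: each
 * unmap reports an order, 1UL << order small pages get
 * unpinned, and gfn advances by the same amount. The helper
 * below is hypothetical, not an IOMMU API.
 */
static int get_mapping_order(unsigned long gfn)
{
	/* pretend gfn 0x200 holds a 2 MiB (order-9) mapping, the rest 4 KiB */
	return gfn == 0x200 ? 9 : 0;
}

int main(void)
{
	unsigned long gfn = 0x200, end_gfn = 0x403;
	unsigned long unpinned = 0;

	while (gfn < end_gfn) {
		unsigned long unmap_pages = 1UL << get_mapping_order(gfn);

		unpinned += unmap_pages; /* kvm_unpin_pages() releases each pfn */
		gfn += unmap_pages;
	}

	/* prints "unpinned=515": 512 for the large page plus 3 small ones */
	printf("unpinned=%lu\n", unpinned);
	return 0;
}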