 arch/powerpc/kvm/e500_tlb.c |  2 +-
 arch/x86/kvm/mmu.c          |  2 +-
 include/linux/kvm_host.h    |  5 ++---
 virt/kvm/iommu.c            | 10 +++++-----
 virt/kvm/kvm_main.c         | 15 +++++++--------
 5 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c510fc961302..c8f6c5826742 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -520,7 +520,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 	if (likely(!pfnmap)) {
 		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
-		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
+		pfn = gfn_to_pfn_memslot(slot, gfn);
 		if (is_error_pfn(pfn)) {
 			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
 					(long)gfn);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f85cc21ae95d..4f77f7ac6d25 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2518,7 +2518,7 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 	hva = gfn_to_hva_memslot(slot, gfn);
 
-	return hva_to_pfn_atomic(vcpu->kvm, hva);
+	return hva_to_pfn_atomic(hva);
 }
 
 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e8d13a072d24..db9aa917840a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -418,15 +418,14 @@ void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
+pfn_t hva_to_pfn_atomic(unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
 		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-			 struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_pfn_dirty(pfn_t);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
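
With struct kvm dropped from the prototype, a caller that already holds (or can look up) the memslot no longer needs to thread the VM handle through the pfn lookup. A minimal sketch of the updated calling pattern follows; the wrapper function and its logging are hypothetical, and only gfn_to_memslot(), gfn_to_pfn_memslot() and is_error_pfn() are taken from the interfaces above.

/* Hypothetical caller: resolve a guest frame to a host pfn via its memslot.
 * Assumes the gfn is backed by a valid memslot. */
static pfn_t example_gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	pfn_t pfn;

	/* The VM pointer is no longer needed once the memslot is known. */
	pfn = gfn_to_pfn_memslot(slot, gfn);
	if (is_error_pfn(pfn))
		printk(KERN_ERR "example: no backing page for gfn %llx\n",
		       (unsigned long long)gfn);

	return pfn;
}
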
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index e9fff9830bf0..c03f1fb26701 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -42,13 +42,13 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages);
 
-static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
-			   gfn_t gfn, unsigned long size)
+static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+			   unsigned long size)
 {
 	gfn_t end_gfn;
 	pfn_t pfn;
 
-	pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
+	pfn = gfn_to_pfn_memslot(slot, gfn);
 	end_gfn = gfn + (size >> PAGE_SHIFT);
 	gfn += 1;
 
@@ -56,7 +56,7 @@ static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
 		return pfn;
 
 	while (gfn < end_gfn)
-		gfn_to_pfn_memslot(kvm, slot, gfn++);
+		gfn_to_pfn_memslot(slot, gfn++);
 
 	return pfn;
 }
@@ -105,7 +105,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 	 * Pin all pages we are about to map in memory. This is
 	 * important because we unmap and unpin in 4kb steps later.
 	 */
-	pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
+	pfn = kvm_pin_pages(slot, gfn, page_size);
 	if (is_error_pfn(pfn)) {
 		gfn += 1;
 		continue;
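
The pin/unpin symmetry is what makes the one-page-at-a-time accounting above work: every pfn referenced by kvm_pin_pages() is later released page by page. A rough sketch of that matching release step, using only kvm_release_pfn_clean() from the header above; the helper name and signature here are illustrative, not part of this patch.

/* Illustrative counterpart to kvm_pin_pages(): drop the reference taken
 * on each 4kb page of a contiguous pfn range. */
static void example_unpin_pages(pfn_t pfn, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++)
		kvm_release_pfn_clean(pfn + i);
}
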
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f955eee92aa9..68dda513cd72 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1062,8 +1062,8 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 	return rc == -EHWPOISON;
 }
 
-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
-			bool *async, bool write_fault, bool *writable)
+static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
+			bool write_fault, bool *writable)
 {
 	struct page *page[1];
 	int npages = 0;
@@ -1143,9 +1143,9 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 	return pfn;
 }
 
-pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
+pfn_t hva_to_pfn_atomic(unsigned long addr)
 {
-	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
+	return hva_to_pfn(addr, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
 
@@ -1163,7 +1163,7 @@ static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
 		return page_to_pfn(bad_page);
 	}
 
-	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
+	return hva_to_pfn(addr, atomic, async, write_fault, writable);
 }
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
@@ -1192,11 +1192,10 @@ pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
-pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
-			 struct kvm_memory_slot *slot, gfn_t gfn)
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
+	return hva_to_pfn(addr, false, NULL, true, NULL);
 }
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
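
The reason the kvm argument can go away at every level is that the gfn-to-hva step needs nothing beyond the memslot itself. A hedged sketch of that translation follows, assuming the usual base_gfn and userspace_addr memslot fields; gfn_to_hva_memslot() already exists in kvm_host.h, so the body below is a paraphrase for illustration rather than code from this patch.

/* Sketch of why a memslot alone suffices: the host virtual address is
 * just an offset into the slot's userspace mapping. */
static inline unsigned long example_gfn_to_hva(struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

gfn_to_pfn_memslot() then hands that address straight to hva_to_pfn(), as the final hunk above shows, so no part of the memslot-based lookup path touches struct kvm anymore.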