author	Dan Williams <dan.j.williams@intel.com>	2016-01-15 19:56:11 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 20:56:32 -0500
commit	ba049e93aef7e8c571567088b1b73f4f5b99272a
tree	ad6c02eca447f57f5787a5357290895e68e9463e
parent	16da306849d0335af7c353ec14121cf422433d33
kvm: rename pfn_t to kvm_pfn_t
To date, we have implemented two I/O usage models for persistent memory, PMEM (a persistent "ram disk") and DAX (mmap persistent memory into userspace). This series adds a third, DAX-GUP, that allows DAX mappings to be the target of direct I/O. It allows userspace to coordinate DMA/RDMA from/to persistent memory.

The implementation leverages the ZONE_DEVICE mm-zone that went into 4.3-rc1 (also discussed at kernel summit) to flag pages that are owned and dynamically mapped by a device driver. The pmem driver, after mapping a persistent memory range into the system memmap via devm_memremap_pages(), arranges for DAX to distinguish pfn-only versus page-backed pmem-pfns via flags in the new pfn_t type. The DAX code, upon seeing a PFN_DEV+PFN_MAP flagged pfn, flags the resulting pte(s) inserted into the process page tables with a new _PAGE_DEVMAP flag. Later, when get_user_pages() walks the ptes, it keys off _PAGE_DEVMAP to pin the device hosting the page range, keeping it active. Finally, get_page() and put_page() are modified to take references against the device-driver-established page mapping.

This need for "struct page" for persistent memory requires memory capacity to store the memmap array. Given that the memmap array for a large pool of persistent memory may exhaust available DRAM, the series introduces a mechanism to allocate the memmap from persistent memory itself. The new "struct vmem_altmap *" parameter to devm_memremap_pages() enables arch_add_memory() to use reserved pmem capacity rather than the page allocator.

This patch (of 18):

The core has developed a need for a "pfn_t" type [1]. Move the existing pfn_t in KVM to kvm_pfn_t [2].

[1]: https://lists.01.org/pipermail/linux-nvdimm/2015-September/002199.html
[2]: https://lists.01.org/pipermail/linux-nvdimm/2015-September/002218.html

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
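For KVM, the whole patch reduces to one mechanical substitution. A minimal sketch of the net effect, condensed from the include/linux/kvm_types.h and include/linux/kvm_host.h hunks below (all names are taken from those hunks; nothing here is new API):

	/* include/linux/kvm_types.h: KVM's private pfn type gains a kvm_
	 * prefix, freeing the bare pfn_t name for the new core type. */
	typedef u64 hfn_t;
	typedef hfn_t kvm_pfn_t;	/* was: typedef hfn_t pfn_t; */

	/* include/linux/kvm_host.h: helpers change only the spelling of the type. */
	static inline bool is_noslot_pfn(kvm_pfn_t pfn)
	{
		return pfn == KVM_PFN_NOSLOT;	/* logic unchanged */
	}

Every hunk that follows applies the same rename to function signatures, local declarations, and casts; there are no behavioral changes, which is why the patch touches 23 files yet remains trivially reviewable.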
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h         |  5
-rw-r--r--  arch/arm/kvm/mmu.c                     | 10
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h       |  3
-rw-r--r--  arch/mips/include/asm/kvm_host.h       |  6
-rw-r--r--  arch/mips/kvm/emulate.c                |  2
-rw-r--r--  arch/mips/kvm/tlb.c                    | 14
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h  |  4
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h     |  2
-rw-r--r--  arch/powerpc/kvm/book3s.c              |  6
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c  |  2
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c  |  2
-rw-r--r--  arch/powerpc/kvm/e500.h                |  2
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c       |  8
-rw-r--r--  arch/powerpc/kvm/trace_pr.h            |  2
-rw-r--r--  arch/x86/kvm/iommu.c                   | 11
-rw-r--r--  arch/x86/kvm/mmu.c                     | 37
-rw-r--r--  arch/x86/kvm/mmu_audit.c               |  2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h             |  6
-rw-r--r--  arch/x86/kvm/vmx.c                     |  2
-rw-r--r--  arch/x86/kvm/x86.c                     |  2
-rw-r--r--  include/linux/kvm_host.h               | 37
-rw-r--r--  include/linux/kvm_types.h              |  2
-rw-r--r--  virt/kvm/kvm_main.c                    | 47
23 files changed, 110 insertions, 104 deletions
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 9203c21b4673..a520b7987a29 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -182,7 +182,8 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+					       kvm_pfn_t pfn,
 					       unsigned long size,
 					       bool ipa_uncached)
 {
@@ -246,7 +247,7 @@ static inline void __kvm_flush_dcache_pte(pte_t pte)
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
 {
 	unsigned long size = PMD_SIZE;
-	pfn_t pfn = pmd_pfn(pmd);
+	kvm_pfn_t pfn = pmd_pfn(pmd);
 
 	while (size) {
 		void *va = kmap_atomic_pfn(pfn);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 22f7fa0124ec..aba61fd3697a 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -992,9 +992,9 @@ out:
 	return ret;
 }
 
-static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
-	pfn_t pfn = *pfnp;
+	kvm_pfn_t pfn = *pfnp;
 	gfn_t gfn = *ipap >> PAGE_SHIFT;
 
 	if (PageTransCompound(pfn_to_page(pfn))) {
@@ -1201,7 +1201,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
 				      unsigned long size, bool uncached)
 {
 	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
@@ -1218,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 	struct vm_area_struct *vma;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
 	bool fault_ipa_uncached;
 	bool logging_active = memslot_is_logging(memslot);
@@ -1346,7 +1346,7 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
 	pmd_t *pmd;
 	pte_t *pte;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	bool pfn_valid = false;
 
 	trace_kvm_access_fault(fault_ipa);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0bf8b4320a91..736433912a1e 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -230,7 +230,8 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+					       kvm_pfn_t pfn,
 					       unsigned long size,
 					       bool ipa_uncached)
 {
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6ded8d347af9..7c191443c7ea 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -101,9 +101,9 @@
 #define CAUSEF_DC			(_ULCAST_(1) << 27)
 
 extern atomic_t kvm_mips_instance;
-extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
-extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
-extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
+extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
+extern bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
 
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 41b1b090f56f..1b675c7ce89f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1525,7 +1525,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long pa;
 	gfn_t gfn;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 
 	gfn = va >> PAGE_SHIFT;
 
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index aed0ac2a4972..570479c03bdc 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -38,13 +38,13 @@ atomic_t kvm_mips_instance;
 EXPORT_SYMBOL(kvm_mips_instance);
 
 /* These function pointers are initialized once the KVM module is loaded */
-pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
+kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
 EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
 
-void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
+void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
 EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
 
-bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
+bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
 EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
@@ -144,7 +144,7 @@ EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
 	int srcu_idx, err = 0;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 
 	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
 		return 0;
@@ -262,7 +262,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 				    struct kvm_vcpu *vcpu)
 {
 	gfn_t gfn;
-	pfn_t pfn0, pfn1;
+	kvm_pfn_t pfn0, pfn1;
 	unsigned long vaddr = 0;
 	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 	int even;
@@ -313,7 +313,7 @@ EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
 int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	struct kvm_vcpu *vcpu)
 {
-	pfn_t pfn0, pfn1;
+	kvm_pfn_t pfn0, pfn1;
 	unsigned long flags, old_entryhi = 0, vaddr = 0;
 	unsigned long entrylo0 = 0, entrylo1 = 0;
 
@@ -360,7 +360,7 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 {
 	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 	struct kvm *kvm = vcpu->kvm;
-	pfn_t pfn0, pfn1;
+	kvm_pfn_t pfn0, pfn1;
 
 	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
 		pfn0 = 0;
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 9fac01cb89c1..8f39796c9da8 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -154,8 +154,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
-			bool *writable);
+extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
+			bool writing, bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize);
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c6ef05bd0765..2241d5357129 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -515,7 +515,7 @@ void kvmppc_claim_lpid(long lpid);
 void kvmppc_free_lpid(long lpid);
 void kvmppc_init_lpid(unsigned long nr_lpids);
 
-static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
+static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
 {
 	struct page *page;
 	/*
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 099c79d8c160..638c6d9be9e0 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -366,7 +366,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
 
-pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
+kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
 			bool *writable)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
@@ -379,9 +379,9 @@ pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
 	gpa &= ~0xFFFULL;
 	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
 		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
-		pfn_t pfn;
+		kvm_pfn_t pfn;
 
-		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
+		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
 		get_page(pfn_to_page(pfn));
 		if (writable)
 			*writable = true;
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index d5c9bfeb0c9c..55c4d51ea3e2 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -142,7 +142,7 @@ extern char etext[];
 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 			bool iswrite)
 {
-	pfn_t hpaddr;
+	kvm_pfn_t hpaddr;
 	u64 vpn;
 	u64 vsid;
 	struct kvmppc_sid_map *map;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 79ad35abd196..913cd2198fa6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -83,7 +83,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 			bool iswrite)
 {
 	unsigned long vpn;
-	pfn_t hpaddr;
+	kvm_pfn_t hpaddr;
 	ulong hash, hpteg;
 	u64 vsid;
 	int ret;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 72920bed3ac6..94f04fcb373e 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -41,7 +41,7 @@ enum vcpu_ftr {
 #define E500_TLB_MAS2_ATTR	(0x7f)
 
 struct tlbe_ref {
-	pfn_t pfn;		/* valid only for TLB0, except briefly */
+	kvm_pfn_t pfn;		/* valid only for TLB0, except briefly */
 	unsigned int flags;	/* E500_TLB_* */
 };
 
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 34c43fff4adb..b0333cc737dd 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -163,9 +163,9 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 	struct kvm_book3e_206_tlb_entry magic;
 	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
 	unsigned int stid;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 
-	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+	pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
 	get_page(pfn_to_page(pfn));
 
 	preempt_disable();
@@ -246,7 +246,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
 
 static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 struct kvm_book3e_206_tlb_entry *gtlbe,
-					 pfn_t pfn, unsigned int wimg)
+					 kvm_pfn_t pfn, unsigned int wimg)
 {
 	ref->pfn = pfn;
 	ref->flags = E500_TLB_VALID;
@@ -309,7 +309,7 @@ static void kvmppc_e500_setup_stlbe(
 		int tsize, struct tlbe_ref *ref, u64 gvaddr,
 		struct kvm_book3e_206_tlb_entry *stlbe)
 {
-	pfn_t pfn = ref->pfn;
+	kvm_pfn_t pfn = ref->pfn;
 	u32 pr = vcpu->arch.shared->msr & MSR_PR;
 
 	BUG_ON(!(ref->flags & E500_TLB_VALID));
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 810507cb688a..d44f324184fb 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -30,7 +30,7 @@ TRACE_EVENT(kvm_book3s_reenter,
 #ifdef CONFIG_PPC_BOOK3S_64
 
 TRACE_EVENT(kvm_book3s_64_mmu_map,
-	TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
+	TP_PROTO(int rflags, ulong hpteg, ulong va, kvm_pfn_t hpaddr,
 		 struct kvmppc_pte *orig_pte),
 	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
 
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index 5c520ebf6343..a22a488b4622 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -43,11 +43,11 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages);
 
-static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+static kvm_pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
 			   unsigned long npages)
 {
 	gfn_t end_gfn;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 
 	pfn     = gfn_to_pfn_memslot(slot, gfn);
 	end_gfn = gfn + npages;
@@ -62,7 +62,8 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
 	return pfn;
 }
 
-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+static void kvm_unpin_pages(struct kvm *kvm, kvm_pfn_t pfn,
+			    unsigned long npages)
 {
 	unsigned long i;
 
@@ -73,7 +74,7 @@ static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
 	gfn_t gfn, end_gfn;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	int r = 0;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
 	int flags;
@@ -275,7 +276,7 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 {
 	struct iommu_domain *domain;
 	gfn_t end_gfn, gfn;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	u64 phys;
 
 	domain  = kvm->arch.iommu_domain;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 420a5ca3c0ee..95a955de5964 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -259,7 +259,7 @@ static unsigned get_mmio_spte_access(u64 spte)
 }
 
 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			  pfn_t pfn, unsigned access)
+			  kvm_pfn_t pfn, unsigned access)
 {
 	if (unlikely(is_noslot_pfn(pfn))) {
 		mark_mmio_spte(vcpu, sptep, gfn, access);
@@ -320,7 +320,7 @@ static int is_last_spte(u64 pte, int level)
 	return 0;
 }
 
-static pfn_t spte_to_pfn(u64 pte)
+static kvm_pfn_t spte_to_pfn(u64 pte)
 {
 	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 }
@@ -582,7 +582,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
  */
 static int mmu_spte_clear_track_bits(u64 *sptep)
 {
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	u64 old_spte = *sptep;
 
 	if (!spte_has_volatile_bits(old_spte))
@@ -1372,7 +1372,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 	int need_flush = 0;
 	u64 new_spte;
 	pte_t *ptep = (pte_t *)data;
-	pfn_t new_pfn;
+	kvm_pfn_t new_pfn;
 
 	WARN_ON(pte_huge(*ptep));
 	new_pfn = pte_pfn(*ptep);
@@ -2450,7 +2450,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return 0;
 }
 
-static bool kvm_is_mmio_pfn(pfn_t pfn)
+static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
 		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
@@ -2460,7 +2460,7 @@ static bool kvm_is_mmio_pfn(pfn_t pfn)
 
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int level,
-		    gfn_t gfn, pfn_t pfn, bool speculative,
+		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
 	u64 spte;
@@ -2539,7 +2539,7 @@ done:
 }
 
 static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
-			 int write_fault, int level, gfn_t gfn, pfn_t pfn,
+			 int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
 			 bool speculative, bool host_writable)
 {
 	int was_rmapped = 0;
@@ -2602,7 +2602,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 	return emulate;
 }
 
-static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
+static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 				     bool no_dirty_log)
 {
 	struct kvm_memory_slot *slot;
@@ -2684,7 +2684,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
-			int level, gfn_t gfn, pfn_t pfn, bool prefault)
+			int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -2732,7 +2732,7 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 {
 	/*
 	 * Do not cache the mmio info caused by writing the readonly gfn
@@ -2752,9 +2752,10 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-					gfn_t *gfnp, pfn_t *pfnp, int *levelp)
+					gfn_t *gfnp, kvm_pfn_t *pfnp,
+					int *levelp)
 {
-	pfn_t pfn = *pfnp;
+	kvm_pfn_t pfn = *pfnp;
 	gfn_t gfn = *gfnp;
 	int level = *levelp;
 
@@ -2793,7 +2794,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 }
 
 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
-				pfn_t pfn, unsigned access, int *ret_val)
+				kvm_pfn_t pfn, unsigned access, int *ret_val)
 {
 	bool ret = true;
 
@@ -2947,7 +2948,7 @@ exit:
 }
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
+			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
 static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
@@ -2956,7 +2957,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	int r;
 	int level;
 	bool force_pt_level = false;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	unsigned long mmu_seq;
 	bool map_writable, write = error_code & PFERR_WRITE_MASK;
 
@@ -3410,7 +3411,7 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
 }
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
+			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
 {
 	struct kvm_memory_slot *slot;
 	bool async;
@@ -3448,7 +3449,7 @@ check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 			  bool prefault)
 {
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	int r;
 	int level;
 	bool force_pt_level;
@@ -4601,7 +4602,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 	u64 *sptep;
 	struct rmap_iterator iter;
 	int need_tlb_flush = 0;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	struct kvm_mmu_page *sp;
 
 restart:
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 1cee3ec20dd2..dcce533d420c 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -97,7 +97,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
 	struct kvm_mmu_page *sp;
 	gfn_t gfn;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	hpa_t hpa;
 
 	sp = page_header(__pa(sptep));
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 91e939b486d1..6c9fed957cce 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -456,7 +456,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
 	unsigned pte_access;
 	gfn_t gfn;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 
 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return false;
@@ -551,7 +551,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
 			 int write_fault, int hlevel,
-			 pfn_t pfn, bool map_writable, bool prefault)
+			 kvm_pfn_t pfn, bool map_writable, bool prefault)
 {
 	struct kvm_mmu_page *sp = NULL;
 	struct kvm_shadow_walk_iterator it;
@@ -694,7 +694,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	int user_fault = error_code & PFERR_USER_MASK;
 	struct guest_walker walker;
 	int r;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
 	bool force_pt_level = false;
 	unsigned long mmu_seq;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04d61d496b14..e2951b6edbbc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4251,7 +4251,7 @@ out:
 static int init_rmode_identity_map(struct kvm *kvm)
 {
 	int i, idx, r = 0;
-	pfn_t identity_map_pfn;
+	kvm_pfn_t identity_map_pfn;
 	u32 tmp;
 
 	if (!enable_ept)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f53f5b13c677..4244c2baf57d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5148,7 +5148,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
 			      int emulation_type)
 {
 	gpa_t gpa = cr2;
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 
 	if (emulation_type & EMULTYPE_NO_REEXECUTE)
 		return false;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f707f74055c3..861f690aa791 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -66,7 +66,7 @@
  * error pfns indicate that the gfn is in slot but faild to
  * translate it to pfn on host.
  */
-static inline bool is_error_pfn(pfn_t pfn)
+static inline bool is_error_pfn(kvm_pfn_t pfn)
 {
 	return !!(pfn & KVM_PFN_ERR_MASK);
 }
@@ -76,13 +76,13 @@ static inline bool is_error_pfn(pfn_t pfn)
  * translated to pfn - it is not in slot or failed to
  * translate it to pfn.
  */
-static inline bool is_error_noslot_pfn(pfn_t pfn)
+static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
 {
 	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
 }
 
 /* noslot pfn indicates that the gfn is not in slot. */
-static inline bool is_noslot_pfn(pfn_t pfn)
+static inline bool is_noslot_pfn(kvm_pfn_t pfn)
 {
 	return pfn == KVM_PFN_NOSLOT;
 }
@@ -591,19 +591,20 @@ void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
-pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
-pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
-pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
-		     bool *async, bool write_fault, bool *writable);
+kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
+			       bool atomic, bool *async, bool write_fault,
+			       bool *writable);
 
-void kvm_release_pfn_clean(pfn_t pfn);
-void kvm_set_pfn_dirty(pfn_t pfn);
-void kvm_set_pfn_accessed(pfn_t pfn);
-void kvm_get_pfn(pfn_t pfn);
+void kvm_release_pfn_clean(kvm_pfn_t pfn);
+void kvm_set_pfn_dirty(kvm_pfn_t pfn);
+void kvm_set_pfn_accessed(kvm_pfn_t pfn);
+void kvm_get_pfn(kvm_pfn_t pfn);
 
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
@@ -629,8 +630,8 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
-pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
-pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
+kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
@@ -811,7 +812,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
-bool kvm_is_reserved_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
@@ -965,7 +966,7 @@ static inline gfn_t gpa_to_gfn(gpa_t gpa)
 	return (gfn_t)(gpa >> PAGE_SHIFT);
 }
 
-static inline hpa_t pfn_to_hpa(pfn_t pfn)
+static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
 {
 	return (hpa_t)pfn << PAGE_SHIFT;
 }
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 1b47a185c2f0..8bf259dae9f6 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -53,7 +53,7 @@ typedef unsigned long hva_t;
 typedef u64 hpa_t;
 typedef u64 hfn_t;
 
-typedef hfn_t pfn_t;
+typedef hfn_t kvm_pfn_t;
 
 struct gfn_to_hva_cache {
 	u64 generation;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 314c7774652e..a11cfd20a6a0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -111,7 +111,7 @@ static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
-static void kvm_release_pfn_dirty(pfn_t pfn);
+static void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
 
 __visible bool kvm_rebooting;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_reserved_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
 		return PageReserved(pfn_to_page(pfn));
@@ -1289,7 +1289,7 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 * true indicates success, otherwise false is returned.
 */
 static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
-			    bool write_fault, bool *writable, pfn_t *pfn)
+			    bool write_fault, bool *writable, kvm_pfn_t *pfn)
 {
 	struct page *page[1];
 	int npages;
@@ -1322,7 +1322,7 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
 * 1 indicates success, -errno is returned if error is detected.
 */
 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
-			   bool *writable, pfn_t *pfn)
+			   bool *writable, kvm_pfn_t *pfn)
 {
 	struct page *page[1];
 	int npages = 0;
@@ -1386,11 +1386,11 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
-static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
+static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 			bool write_fault, bool *writable)
 {
 	struct vm_area_struct *vma;
-	pfn_t pfn = 0;
+	kvm_pfn_t pfn = 0;
 	int npages;
 
 	/* we can do it either atomically or asynchronously, not both */
@@ -1431,8 +1431,9 @@ exit:
 	return pfn;
 }
 
-pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
-			   bool *async, bool write_fault, bool *writable)
+kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
+			       bool atomic, bool *async, bool write_fault,
+			       bool *writable)
 {
 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
 
@@ -1453,7 +1454,7 @@ pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
 }
 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
 
-pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable)
 {
 	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
@@ -1461,37 +1462,37 @@ pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
 
-pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
 
-pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
+kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
 
-pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
+kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
 
-pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
+kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
 
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
-pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
 }
@@ -1514,7 +1515,7 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 }
 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
-static struct page *kvm_pfn_to_page(pfn_t pfn)
+static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
 {
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
@@ -1529,7 +1530,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 
 	pfn = gfn_to_pfn(kvm, gfn);
 
@@ -1539,7 +1540,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	pfn_t pfn;
+	kvm_pfn_t pfn;
 
 	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
 
@@ -1555,7 +1556,7 @@ void kvm_release_page_clean(struct page *page)
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
-void kvm_release_pfn_clean(pfn_t pfn)
+void kvm_release_pfn_clean(kvm_pfn_t pfn)
 {
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
@@ -1570,13 +1571,13 @@ void kvm_release_page_dirty(struct page *page)
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
-static void kvm_release_pfn_dirty(pfn_t pfn)
+static void kvm_release_pfn_dirty(kvm_pfn_t pfn)
 {
 	kvm_set_pfn_dirty(pfn);
 	kvm_release_pfn_clean(pfn);
 }
 
-void kvm_set_pfn_dirty(pfn_t pfn)
+void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 {
 	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
@@ -1587,14 +1588,14 @@ void kvm_set_pfn_dirty(pfn_t pfn)
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
-void kvm_set_pfn_accessed(pfn_t pfn)
+void kvm_set_pfn_accessed(kvm_pfn_t pfn)
 {
 	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
}
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
-void kvm_get_pfn(pfn_t pfn)
+void kvm_get_pfn(kvm_pfn_t pfn)
 {
 	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));