author    David Gibson <david@gibson.dropbear.id.au>    2016-12-20 00:49:00 -0500
committer Paul Mackerras <paulus@ozlabs.org>            2017-01-31 05:59:28 -0500
commit    3f9d4f5a5f35e402e91bedf0c15e29cef187a29d
tree      45322e982a3681578a1187415ec1c28dc9341b0c
parent    db9a290d9c3c596e5325e2a42133594435e5de46
KVM: PPC: Book3S HV: Gather HPT related variables into sub-structure
Currently, the powerpc kvm_arch structure contains a number of variables tracking the state of the guest's hashed page table (HPT) in KVM HV. This patch gathers them all together into a single kvm_hpt_info substructure. This makes life more convenient for the upcoming HPT resizing implementation.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  | 20
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  | 92
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c         |  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c  | 62
4 files changed, 92 insertions(+), 84 deletions(-)
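The change is mechanical: each loose HPT field of kvm_arch moves into the new kvm_hpt_info sub-structure, so kvm->arch.hpt_virt becomes kvm->arch.hpt.virt, kvm->arch.revmap becomes kvm->arch.hpt.rev, and so on, presumably so that the later HPT resizing work can treat the whole HPT state as one unit. A minimal sketch of the new access pattern follows; hpte_ptr is a hypothetical helper for illustration only (the patch itself open-codes this expression at each call site) and it assumes the usual KVM PPC headers:

	/* Field mapping introduced by the patch:
	 *   hpt_virt      -> hpt.virt
	 *   revmap        -> hpt.rev
	 *   hpt_npte      -> hpt.npte
	 *   hpt_mask      -> hpt.mask
	 *   hpt_order     -> hpt.order
	 *   hpt_cma_alloc -> hpt.cma
	 */
	static inline __be64 *hpte_ptr(struct kvm *kvm, unsigned long pte_index)
	{
		/* bounds check against the table size, as in the H-call handlers */
		if (pte_index >= kvm->arch.hpt.npte)
			return NULL;
		/* HPTEs are 2**4 bytes long, hence the << 4 */
		return (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	}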
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index b2dbeac3f450..ea6f0c659936 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -241,12 +241,24 @@ struct kvm_arch_memory_slot {
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
+struct kvm_hpt_info {
+	/* Host virtual (linear mapping) address of guest HPT */
+	unsigned long virt;
+	/* Array of reverse mapping entries for each guest HPTE */
+	struct revmap_entry *rev;
+	unsigned long npte;
+	unsigned long mask;
+	/* Guest HPT size is 2**(order) bytes */
+	u32 order;
+	/* 1 if HPT allocated with CMA, 0 otherwise */
+	int cma;
+};
+
 struct kvm_arch {
 	unsigned int lpid;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned int tlb_sets;
-	unsigned long hpt_virt;
-	struct revmap_entry *revmap;
+	struct kvm_hpt_info hpt;
 	atomic64_t mmio_update;
 	unsigned int host_lpid;
 	unsigned long host_lpcr;
@@ -256,15 +268,11 @@ struct kvm_arch {
 	unsigned long lpcr;
 	unsigned long vrma_slb_v;
 	int hpte_setup_done;
-	u32 hpt_order;
 	atomic_t vcpus_running;
 	u32 online_vcores;
-	unsigned long hpt_npte;
-	unsigned long hpt_mask;
 	atomic_t hpte_mod_interest;
 	cpumask_t need_tlb_flush;
 	cpumask_t cpu_in_guest;
-	int hpt_cma_alloc;
 	u8 radix;
 	pgd_t *pgtable;
 	u64 process_table;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 16f278417c69..2af63ce129bc 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -61,12 +61,12 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 			order = PPC_MIN_HPT_ORDER;
 	}
 
-	kvm->arch.hpt_cma_alloc = 0;
+	kvm->arch.hpt.cma = 0;
 	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
 	if (page) {
 		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 		memset((void *)hpt, 0, (1ul << order));
-		kvm->arch.hpt_cma_alloc = 1;
+		kvm->arch.hpt.cma = 1;
 	}
 
 	/* Lastly try successively smaller sizes from the page allocator */
@@ -81,22 +81,22 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 	if (!hpt)
 		return -ENOMEM;
 
-	kvm->arch.hpt_virt = hpt;
-	kvm->arch.hpt_order = order;
+	kvm->arch.hpt.virt = hpt;
+	kvm->arch.hpt.order = order;
 	/* HPTEs are 2**4 bytes long */
-	kvm->arch.hpt_npte = 1ul << (order - 4);
+	kvm->arch.hpt.npte = 1ul << (order - 4);
 	/* 128 (2**7) bytes in each HPTEG */
-	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
+	kvm->arch.hpt.mask = (1ul << (order - 7)) - 1;
 
 	atomic64_set(&kvm->arch.mmio_update, 0);
 
 	/* Allocate reverse map array */
-	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
+	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt.npte);
 	if (!rev) {
 		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
 		goto out_freehpt;
 	}
-	kvm->arch.revmap = rev;
+	kvm->arch.hpt.rev = rev;
 	kvm->arch.sdr1 = __pa(hpt) | (order - 18);
 
 	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
@@ -107,7 +107,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 	return 0;
 
  out_freehpt:
-	if (kvm->arch.hpt_cma_alloc)
+	if (kvm->arch.hpt.cma)
 		kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
 	else
 		free_pages(hpt, order - PAGE_SHIFT);
@@ -132,10 +132,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
 			goto out;
 		}
 	}
-	if (kvm->arch.hpt_virt) {
-		order = kvm->arch.hpt_order;
+	if (kvm->arch.hpt.virt) {
+		order = kvm->arch.hpt.order;
 		/* Set the entire HPT to 0, i.e. invalid HPTEs */
-		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
+		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
 		/*
 		 * Reset all the reverse-mapping chains for all memslots
 		 */
@@ -155,13 +155,13 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
 
 void kvmppc_free_hpt(struct kvm *kvm)
 {
-	vfree(kvm->arch.revmap);
-	if (kvm->arch.hpt_cma_alloc)
-		kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt_virt),
-				 1 << (kvm->arch.hpt_order - PAGE_SHIFT));
-	else if (kvm->arch.hpt_virt)
-		free_pages(kvm->arch.hpt_virt,
-			   kvm->arch.hpt_order - PAGE_SHIFT);
+	vfree(kvm->arch.hpt.rev);
+	if (kvm->arch.hpt.cma)
+		kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt.virt),
+				 1 << (kvm->arch.hpt.order - PAGE_SHIFT));
+	else if (kvm->arch.hpt.virt)
+		free_pages(kvm->arch.hpt.virt,
+			   kvm->arch.hpt.order - PAGE_SHIFT);
 }
 
 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
@@ -196,8 +196,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	if (npages > 1ul << (40 - porder))
 		npages = 1ul << (40 - porder);
 	/* Can't use more than 1 HPTE per HPTEG */
-	if (npages > kvm->arch.hpt_mask + 1)
-		npages = kvm->arch.hpt_mask + 1;
+	if (npages > kvm->arch.hpt.mask + 1)
+		npages = kvm->arch.hpt.mask + 1;
 
 	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
 		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
@@ -207,7 +207,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	for (i = 0; i < npages; ++i) {
 		addr = i << porder;
 		/* can't use hpt_hash since va > 64 bits */
-		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
+		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt.mask;
 		/*
 		 * We assume that the hash table is empty and no
 		 * vcpus are using it at this stage.  Since we create
@@ -340,11 +340,11 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		preempt_enable();
 		return -ENOENT;
 	}
-	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
 	v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	if (cpu_has_feature(CPU_FTR_ARCH_300))
 		v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
-	gr = kvm->arch.revmap[index].guest_rpte;
+	gr = kvm->arch.hpt.rev[index].guest_rpte;
 
 	unlock_hpte(hptep, orig_v);
 	preempt_enable();
@@ -485,8 +485,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 	}
 	index = vcpu->arch.pgfault_index;
-	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
-	rev = &kvm->arch.revmap[index];
+	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
+	rev = &kvm->arch.hpt.rev[index];
 	preempt_disable();
 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 		cpu_relax();
@@ -748,7 +748,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			   unsigned long gfn)
 {
-	struct revmap_entry *rev = kvm->arch.revmap;
+	struct revmap_entry *rev = kvm->arch.hpt.rev;
 	unsigned long h, i, j;
 	__be64 *hptep;
 	unsigned long ptel, psize, rcbits;
@@ -768,7 +768,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	 * rmap chain lock.
 	 */
 	i = *rmapp & KVMPPC_RMAP_INDEX;
-	hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+	hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
 	if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
 		/* unlock rmap before spinning on the HPTE lock */
 		unlock_rmap(rmapp);
@@ -860,7 +860,7 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			 unsigned long gfn)
 {
-	struct revmap_entry *rev = kvm->arch.revmap;
+	struct revmap_entry *rev = kvm->arch.hpt.rev;
 	unsigned long head, i, j;
 	__be64 *hptep;
 	int ret = 0;
@@ -880,7 +880,7 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
 	i = head = *rmapp & KVMPPC_RMAP_INDEX;
 	do {
-		hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
 		j = rev[i].forw;
 
 		/* If this HPTE isn't referenced, ignore it */
@@ -923,7 +923,7 @@ int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			      unsigned long gfn)
 {
-	struct revmap_entry *rev = kvm->arch.revmap;
+	struct revmap_entry *rev = kvm->arch.hpt.rev;
 	unsigned long head, i, j;
 	unsigned long *hp;
 	int ret = 1;
@@ -940,7 +940,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	if (*rmapp & KVMPPC_RMAP_PRESENT) {
 		i = head = *rmapp & KVMPPC_RMAP_INDEX;
 		do {
-			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
+			hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
 			j = rev[i].forw;
 			if (be64_to_cpu(hp[1]) & HPTE_R_R)
 				goto out;
@@ -980,7 +980,7 @@ static int vcpus_running(struct kvm *kvm)
  */
 static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 {
-	struct revmap_entry *rev = kvm->arch.revmap;
+	struct revmap_entry *rev = kvm->arch.hpt.rev;
 	unsigned long head, i, j;
 	unsigned long n;
 	unsigned long v, r;
@@ -1005,7 +1005,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 	i = head = *rmapp & KVMPPC_RMAP_INDEX;
 	do {
 		unsigned long hptep1;
-		hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
 		j = rev[i].forw;
 
 		/*
@@ -1311,8 +1311,8 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
 	flags = ctx->flags;
 
 	i = ctx->index;
-	hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
-	revp = kvm->arch.revmap + i;
+	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
+	revp = kvm->arch.hpt.rev + i;
 	lbuf = (unsigned long __user *)buf;
 
 	nb = 0;
@@ -1327,7 +1327,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
 
 		/* Skip uninteresting entries, i.e. clean on not-first pass */
 		if (!first_pass) {
-			while (i < kvm->arch.hpt_npte &&
+			while (i < kvm->arch.hpt.npte &&
 			       !hpte_dirty(revp, hptp)) {
 				++i;
 				hptp += 2;
@@ -1337,7 +1337,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
 		hdr.index = i;
 
 		/* Grab a series of valid entries */
-		while (i < kvm->arch.hpt_npte &&
+		while (i < kvm->arch.hpt.npte &&
 		       hdr.n_valid < 0xffff &&
 		       nb + HPTE_SIZE < count &&
 		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
@@ -1353,7 +1353,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
 			++revp;
 		}
 		/* Now skip invalid entries while we can */
-		while (i < kvm->arch.hpt_npte &&
+		while (i < kvm->arch.hpt.npte &&
 		       hdr.n_invalid < 0xffff &&
 		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
 			/* found an invalid entry */
@@ -1374,7 +1374,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
 		}
 
 		/* Check if we've wrapped around the hash table */
-		if (i >= kvm->arch.hpt_npte) {
+		if (i >= kvm->arch.hpt.npte) {
 			i = 0;
 			ctx->first_pass = 0;
 			break;
@@ -1433,11 +1433,11 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 
 		err = -EINVAL;
 		i = hdr.index;
-		if (i >= kvm->arch.hpt_npte ||
-		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
+		if (i >= kvm->arch.hpt.npte ||
+		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt.npte)
 			break;
 
-		hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+		hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
 		lbuf = (unsigned long __user *)buf;
 		for (j = 0; j < hdr.n_valid; ++j) {
 			__be64 hpte_v;
@@ -1624,8 +1624,8 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
 
 	kvm = p->kvm;
 	i = p->hpt_index;
-	hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
-	for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) {
+	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
+	for (; len != 0 && i < kvm->arch.hpt.npte; ++i, hptp += 2) {
 		if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
 			continue;
 
@@ -1635,7 +1635,7 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
 			cpu_relax();
 		v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
 		hr = be64_to_cpu(hptp[1]);
-		gr = kvm->arch.revmap[i].guest_rpte;
+		gr = kvm->arch.hpt.rev[i].guest_rpte;
 		unlock_hpte(hptp, v);
 		preempt_enable();
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index bdf281cc88c0..02607128a4d4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3197,7 +3197,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		goto out;	/* another vcpu beat us to it */
 
 	/* Allocate hashed page table (if not done already) and reset it */
-	if (!kvm->arch.hpt_virt) {
+	if (!kvm->arch.hpt.virt) {
 		err = kvmppc_alloc_hpt(kvm, NULL);
 		if (err) {
 			pr_err("KVM: Couldn't alloc HPT\n");
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index b095afcd4309..175748acc9a1 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -86,10 +86,10 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 
 	if (*rmap & KVMPPC_RMAP_PRESENT) {
 		i = *rmap & KVMPPC_RMAP_INDEX;
-		head = &kvm->arch.revmap[i];
+		head = &kvm->arch.hpt.rev[i];
 		if (realmode)
 			head = real_vmalloc_addr(head);
-		tail = &kvm->arch.revmap[head->back];
+		tail = &kvm->arch.hpt.rev[head->back];
 		if (realmode)
 			tail = real_vmalloc_addr(tail);
 		rev->forw = i;
@@ -154,8 +154,8 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	lock_rmap(rmap);
 
 	head = *rmap & KVMPPC_RMAP_INDEX;
-	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
-	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
+	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
+	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
 	next->back = rev->back;
 	prev->forw = rev->forw;
 	if (head == pte_index) {
@@ -292,11 +292,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 	/* Find and lock the HPTEG slot to use */
  do_insert:
-	if (pte_index >= kvm->arch.hpt_npte)
+	if (pte_index >= kvm->arch.hpt.npte)
 		return H_PARAMETER;
 	if (likely((flags & H_EXACT) == 0)) {
 		pte_index &= ~7UL;
-		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
 		for (i = 0; i < 8; ++i) {
 			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
 			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
@@ -327,7 +327,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		}
 		pte_index += i;
 	} else {
-		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
 		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
 				   HPTE_V_ABSENT)) {
 			/* Lock the slot and check again */
@@ -344,7 +344,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	}
 
 	/* Save away the guest's idea of the second HPTE dword */
-	rev = &kvm->arch.revmap[pte_index];
+	rev = &kvm->arch.hpt.rev[pte_index];
 	if (realmode)
 		rev = real_vmalloc_addr(rev);
 	if (rev) {
@@ -469,9 +469,9 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 
 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt_npte)
+	if (pte_index >= kvm->arch.hpt.npte)
 		return H_PARAMETER;
-	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 		cpu_relax();
 	pte = orig_pte = be64_to_cpu(hpte[0]);
@@ -487,7 +487,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 		return H_NOT_FOUND;
 	}
 
-	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
 	v = pte & ~HPTE_V_HVLOCK;
 	if (v & HPTE_V_VALID) {
 		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
@@ -557,13 +557,13 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			break;
 		}
 		if (req != 1 || flags == 3 ||
-		    pte_index >= kvm->arch.hpt_npte) {
+		    pte_index >= kvm->arch.hpt.npte) {
 			/* parameter error */
 			args[j] = ((0xa0 | flags) << 56) + pte_index;
 			ret = H_PARAMETER;
 			break;
 		}
-		hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
+		hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
 		/* to avoid deadlock, don't spin except for first */
 		if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
 			if (n)
@@ -600,7 +600,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 		}
 
 		args[j] = ((0x80 | flags) << 56) + pte_index;
-		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
 		note_hpte_modification(kvm, rev);
 
 		if (!(hp0 & HPTE_V_VALID)) {
@@ -657,10 +657,10 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt_npte)
+	if (pte_index >= kvm->arch.hpt.npte)
 		return H_PARAMETER;
 
-	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 		cpu_relax();
 	v = pte_v = be64_to_cpu(hpte[0]);
@@ -680,7 +680,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	/* Update guest view of 2nd HPTE dword */
 	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
 		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
-	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
 	if (rev) {
 		r = (rev->guest_rpte & ~mask) | bits;
 		rev->guest_rpte = r;
@@ -728,15 +728,15 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt_npte)
+	if (pte_index >= kvm->arch.hpt.npte)
 		return H_PARAMETER;
 	if (flags & H_READ_4) {
 		pte_index &= ~3;
 		n = 4;
 	}
-	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
 	for (i = 0; i < n; ++i, ++pte_index) {
-		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
 		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
 		r = be64_to_cpu(hpte[1]);
 		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
@@ -769,11 +769,11 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt_npte)
+	if (pte_index >= kvm->arch.hpt.npte)
 		return H_PARAMETER;
 
-	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
+	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 		cpu_relax();
 	v = be64_to_cpu(hpte[0]);
@@ -817,11 +817,11 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt_npte)
+	if (pte_index >= kvm->arch.hpt.npte)
 		return H_PARAMETER;
 
-	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
+	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 		cpu_relax();
 	v = be64_to_cpu(hpte[0]);
@@ -970,7 +970,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		somask = (1UL << 28) - 1;
 		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
 	}
-	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
+	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt.mask;
 	avpn = slb_v & ~(somask >> 16);	/* also includes B */
 	avpn |= (eaddr & somask) >> 16;
 
@@ -981,7 +981,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 	val |= avpn;
 
 	for (;;) {
-		hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
+		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));
 
 		for (i = 0; i < 16; i += 2) {
 			/* Read the PTE racily */
@@ -1017,7 +1017,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		if (val & HPTE_V_SECONDARY)
 			break;
 		val |= HPTE_V_SECONDARY;
-		hash = hash ^ kvm->arch.hpt_mask;
+		hash = hash ^ kvm->arch.hpt.mask;
 	}
 	return -1;
 }
@@ -1066,14 +1066,14 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 			return status;	/* there really was no HPTE */
 		return 0;		/* for prot fault, HPTE disappeared */
 	}
-	hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+	hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
 	v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
 	r = be64_to_cpu(hpte[1]);
 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
 		v = hpte_new_to_old_v(v, r);
 		r = hpte_new_to_old_r(r);
 	}
-	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
 	gr = rev->guest_rpte;
 
 	unlock_hpte(hpte, orig_v);