author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2015-03-20 05:39:43 -0400
committer Alexander Graf <agraf@suse.de>                      2015-04-21 09:21:29 -0400
commit    a4bd6eb07ca72d21a7a34499ad34cfef6f527d4e (patch)
tree      bba74c5424652cc64edb0911da97a670212c418d /arch/powerpc
parent    31037ecad275e9ad9bc671c34f72b495cf708ca3 (diff)
KVM: PPC: Book3S HV: Add helpers for lock/unlock hpte
This adds helper routines for locking and unlocking HPTEs, and uses
them in the rest of the code.  We don't change any locking rules in
this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
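For orientation, here is a hypothetical, self-contained C sketch of the lock/unlock pairing these helpers centralize. The HPTE_V_HVLOCK value matches the kernel's, but the __be64 typedef, the byte-swap stubs, and the generic GCC atomic builtins below are stand-ins for the ppc64 ldarx/stdcx. loop and lwsync (PPC_RELEASE_BARRIER) sequences the real helpers rely on, so treat it as a model of the protocol, not the kernel code:

/*
 * Hypothetical sketch: one 64-bit software lock bit in HPTE dword 0,
 * taken with an atomic OR and dropped with a release store.
 */
#include <stdint.h>
#include <stdio.h>

#define HPTE_V_HVLOCK	0x40UL	/* software lock bit in HPTE dword 0 */

typedef uint64_t __be64;	/* stand-in: no real byte swapping here */
static inline __be64 cpu_to_be64(uint64_t v) { return v; }
static inline uint64_t be64_to_cpu(__be64 v) { return v; }

/* Try to set the lock bit; succeed only if it was previously clear. */
static inline int try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	uint64_t old = __atomic_fetch_or(hpte, cpu_to_be64(bits),
					 __ATOMIC_ACQUIRE);
	return (old & cpu_to_be64(bits)) == 0;
}

/* Clear the lock bit with release semantics, like unlock_hpte(). */
static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	__atomic_store_n(&hpte[0], cpu_to_be64(hpte_v), __ATOMIC_RELEASE);
}

int main(void)
{
	__be64 hpte[2] = { cpu_to_be64(0x1234), 0 };

	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		;	/* the kernel calls cpu_relax() while spinning */

	uint64_t v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
	/* ... examine or update the entry while it is locked ... */

	unlock_hpte(hpte, v);	/* a single store publishes and unlocks */
	printf("hpte[0] = 0x%llx\n",
	       (unsigned long long)be64_to_cpu(hpte[0]));
	return 0;
}

The reason the patch adds two variants is visible in the diff below: unlock_hpte() includes the release barrier, while __unlock_hpte() omits it for call sites that already order the store themselves (for example, paths that immediately follow the unlocking store with ptesync). Each open-coded "hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK)" is converted to whichever variant preserves the existing ordering.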
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h |  14
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c      |  25
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c      |  25
3 files changed, 33 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 2d81e202bdcc..0789a0f50969 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -85,6 +85,20 @@ static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
 	return old == 0;
 }
 
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+	hpte_v &= ~HPTE_V_HVLOCK;
+	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+	hpte[0] = cpu_to_be64(hpte_v);
+}
+
+/* Without barrier */
+static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+{
+	hpte_v &= ~HPTE_V_HVLOCK;
+	hpte[0] = cpu_to_be64(hpte_v);
+}
+
 static inline int __hpte_actual_psize(unsigned int lp, int psize)
 {
 	int i, shift;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index dbf127168ca4..6c6825a7ae49 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -338,9 +338,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
 
-	/* Unlock the HPTE */
-	asm volatile("lwsync" : : : "memory");
-	hptep[0] = cpu_to_be64(v);
+	unlock_hpte(hptep, v);
 	preempt_enable();
 
 	gpte->eaddr = eaddr;
@@ -469,8 +467,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 	hpte[1] = be64_to_cpu(hptep[1]);
 	hpte[2] = r = rev->guest_rpte;
-	asm volatile("lwsync" : : : "memory");
-	hptep[0] = cpu_to_be64(hpte[0]);
+	unlock_hpte(hptep, hpte[0]);
 	preempt_enable();
 
 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
@@ -621,7 +618,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
621 618
622 hptep[1] = cpu_to_be64(r); 619 hptep[1] = cpu_to_be64(r);
623 eieio(); 620 eieio();
624 hptep[0] = cpu_to_be64(hpte[0]); 621 __unlock_hpte(hptep, hpte[0]);
625 asm volatile("ptesync" : : : "memory"); 622 asm volatile("ptesync" : : : "memory");
626 preempt_enable(); 623 preempt_enable();
627 if (page && hpte_is_writable(r)) 624 if (page && hpte_is_writable(r))
@@ -642,7 +639,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return ret;
 
  out_unlock:
-	hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	preempt_enable();
 	goto out_put;
 }
@@ -771,7 +768,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			}
 		}
 		unlock_rmap(rmapp);
-		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	}
 	return 0;
 }
@@ -857,7 +854,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			}
 			ret = 1;
 		}
-		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	} while ((i = j) != head);
 
 	unlock_rmap(rmapp);
@@ -974,8 +971,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 
 		/* Now check and modify the HPTE */
 		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
-			/* unlock and continue */
-			hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 			continue;
 		}
 
@@ -996,9 +992,9 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
 				npages_dirty = n;
 			eieio();
 		}
-		v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
+		v &= ~HPTE_V_ABSENT;
 		v |= HPTE_V_VALID;
-		hptep[0] = cpu_to_be64(v);
+		__unlock_hpte(hptep, v);
 	} while ((i = j) != head);
 
 	unlock_rmap(rmapp);
@@ -1218,8 +1214,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
 			r &= ~HPTE_GR_MODIFIED;
 			revp->guest_rpte = r;
 		}
-		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-		hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		unlock_hpte(hptp, be64_to_cpu(hptp[0]));
 		preempt_enable();
 		if (!(valid == want_valid && (first_pass || dirty)))
 			ok = 0;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 625407e4d3b0..f6bf0b1de6d7 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -150,12 +150,6 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
 	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
 }
 
-static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
-{
-	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-	hpte[0] = cpu_to_be64(hpte_v);
-}
-
 long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		       long pte_index, unsigned long pteh, unsigned long ptel,
 		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
@@ -271,10 +265,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			u64 pte;
 			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 				cpu_relax();
-			pte = be64_to_cpu(*hpte);
+			pte = be64_to_cpu(hpte[0]);
 			if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
 				break;
-			*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+			__unlock_hpte(hpte, pte);
 			hpte += 2;
 		}
 		if (i == 8)
@@ -290,9 +284,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 		while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 			cpu_relax();
-		pte = be64_to_cpu(*hpte);
+		pte = be64_to_cpu(hpte[0]);
 		if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
-			*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+			__unlock_hpte(hpte, pte);
 			return H_PTEG_FULL;
 		}
 	}
@@ -331,7 +325,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
 	eieio();
-	hpte[0] = cpu_to_be64(pteh);
+	__unlock_hpte(hpte, pteh);
 	asm volatile("ptesync" : : : "memory");
 
 	*pte_idx_ret = pte_index;
@@ -412,7 +406,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
 	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
 	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
-		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hpte, pte);
 		return H_NOT_FOUND;
 	}
 
@@ -548,7 +542,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 					be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
 			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
 			args[j] |= rcbits << (56 - 5);
-			hp[0] = 0;
+			__unlock_hpte(hp, 0);
 		}
 	}
 
@@ -574,7 +568,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	pte = be64_to_cpu(hpte[0]);
 	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
 	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
-		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+		__unlock_hpte(hpte, pte);
 		return H_NOT_FOUND;
 	}
 
@@ -755,8 +749,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			/* Return with the HPTE still locked */
 			return (hash << 3) + (i >> 1);
 
-		/* Unlock and move on */
-		hpte[i] = cpu_to_be64(v);
+		__unlock_hpte(&hpte[i], v);
 	}
 
 	if (val & HPTE_V_SECONDARY)