diff options
author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2015-03-20 05:39:43 -0400 |
---|---|---|
committer | Alexander Graf <agraf@suse.de> | 2015-04-21 09:21:29 -0400 |
commit | a4bd6eb07ca72d21a7a34499ad34cfef6f527d4e (patch) | |
tree | bba74c5424652cc64edb0911da97a670212c418d /arch/powerpc/kvm/book3s_hv_rm_mmu.c | |
parent | 31037ecad275e9ad9bc671c34f72b495cf708ca3 (diff) |
KVM: PPC: Book3S HV: Add helpers for lock/unlock hpte
This adds helper routines for locking and unlocking HPTEs, and uses
them in the rest of the code. We don't change any locking rules in
this patch.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rm_mmu.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_rm_mmu.c | 25 |
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 625407e4d3b0..f6bf0b1de6d7 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
@@ -150,12 +150,6 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva, | |||
150 | return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift); | 150 | return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift); |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v) | ||
154 | { | ||
155 | asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); | ||
156 | hpte[0] = cpu_to_be64(hpte_v); | ||
157 | } | ||
158 | |||
159 | long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | 153 | long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, |
160 | long pte_index, unsigned long pteh, unsigned long ptel, | 154 | long pte_index, unsigned long pteh, unsigned long ptel, |
161 | pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret) | 155 | pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret) |
@@ -271,10 +265,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
271 | u64 pte; | 265 | u64 pte; |
272 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) | 266 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) |
273 | cpu_relax(); | 267 | cpu_relax(); |
274 | pte = be64_to_cpu(*hpte); | 268 | pte = be64_to_cpu(hpte[0]); |
275 | if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT))) | 269 | if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT))) |
276 | break; | 270 | break; |
277 | *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK); | 271 | __unlock_hpte(hpte, pte); |
278 | hpte += 2; | 272 | hpte += 2; |
279 | } | 273 | } |
280 | if (i == 8) | 274 | if (i == 8) |
@@ -290,9 +284,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
290 | 284 | ||
291 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) | 285 | while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) |
292 | cpu_relax(); | 286 | cpu_relax(); |
293 | pte = be64_to_cpu(*hpte); | 287 | pte = be64_to_cpu(hpte[0]); |
294 | if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) { | 288 | if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) { |
295 | *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK); | 289 | __unlock_hpte(hpte, pte); |
296 | return H_PTEG_FULL; | 290 | return H_PTEG_FULL; |
297 | } | 291 | } |
298 | } | 292 | } |
@@ -331,7 +325,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
331 | 325 | ||
332 | /* Write the first HPTE dword, unlocking the HPTE and making it valid */ | 326 | /* Write the first HPTE dword, unlocking the HPTE and making it valid */ |
333 | eieio(); | 327 | eieio(); |
334 | hpte[0] = cpu_to_be64(pteh); | 328 | __unlock_hpte(hpte, pteh); |
335 | asm volatile("ptesync" : : : "memory"); | 329 | asm volatile("ptesync" : : : "memory"); |
336 | 330 | ||
337 | *pte_idx_ret = pte_index; | 331 | *pte_idx_ret = pte_index; |
@@ -412,7 +406,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, | |||
412 | if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || | 406 | if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || |
413 | ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) || | 407 | ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) || |
414 | ((flags & H_ANDCOND) && (pte & avpn) != 0)) { | 408 | ((flags & H_ANDCOND) && (pte & avpn) != 0)) { |
415 | hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); | 409 | __unlock_hpte(hpte, pte); |
416 | return H_NOT_FOUND; | 410 | return H_NOT_FOUND; |
417 | } | 411 | } |
418 | 412 | ||
@@ -548,7 +542,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | |||
548 | be64_to_cpu(hp[0]), be64_to_cpu(hp[1])); | 542 | be64_to_cpu(hp[0]), be64_to_cpu(hp[1])); |
549 | rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); | 543 | rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); |
550 | args[j] |= rcbits << (56 - 5); | 544 | args[j] |= rcbits << (56 - 5); |
551 | hp[0] = 0; | 545 | __unlock_hpte(hp, 0); |
552 | } | 546 | } |
553 | } | 547 | } |
554 | 548 | ||
@@ -574,7 +568,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, | |||
574 | pte = be64_to_cpu(hpte[0]); | 568 | pte = be64_to_cpu(hpte[0]); |
575 | if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || | 569 | if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || |
576 | ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) { | 570 | ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) { |
577 | hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); | 571 | __unlock_hpte(hpte, pte); |
578 | return H_NOT_FOUND; | 572 | return H_NOT_FOUND; |
579 | } | 573 | } |
580 | 574 | ||
@@ -755,8 +749,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | |||
755 | /* Return with the HPTE still locked */ | 749 | /* Return with the HPTE still locked */ |
756 | return (hash << 3) + (i >> 1); | 750 | return (hash << 3) + (i >> 1); |
757 | 751 | ||
758 | /* Unlock and move on */ | 752 | __unlock_hpte(&hpte[i], v); |
759 | hpte[i] = cpu_to_be64(v); | ||
760 | } | 753 | } |
761 | 754 | ||
762 | if (val & HPTE_V_SECONDARY) | 755 | if (val & HPTE_V_SECONDARY) |