author     Paul Mackerras <paulus@samba.org>   2011-12-12 07:30:16 -0500
committer  Avi Kivity <avi@redhat.com>         2012-03-05 07:52:36 -0500
commit     075295dd322b0c0de0c9ecf8e0cb19ee813438ed (patch)
tree       5e87ae30e3d74feaf2f39f919529f21518e6e17a /arch
parent     93e602490c1da83162a8b6ba86b4b48a7a0f0c9e (diff)
KVM: PPC: Make the H_ENTER hcall more reliable
At present, our implementation of H_ENTER only makes one try at locking each slot that it looks at, and doesn't even retry the ldarx/stdcx. atomic update sequence that it uses to attempt to lock the slot. Thus it can return the H_PTEG_FULL error unnecessarily, particularly when the H_EXACT flag is set, meaning that the caller wants a specific PTEG slot.

This improves the situation by making a second pass when no free HPTE slot is found, where we spin until we succeed in locking each slot in turn and then check whether it is full while we hold the lock. If the second pass fails, then we return H_PTEG_FULL.

This also moves lock_hpte to a header file (since later commits in this series will need to use it from other source files) and renames it to try_lock_hpte, which is a somewhat less misleading name.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
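To make the two-pass idea concrete, here is a small, self-contained C sketch, not the kernel code: the slot array, the SLOT_VALID/SLOT_LOCK bits, try_lock_slot() and find_free_slot() below are hypothetical stand-ins for the HPTE group, HPTE_V_VALID/HPTE_V_HVLOCK, try_lock_hpte() and the H_ENTER slot search, with GCC atomic builtins standing in for the ldarx/stdcx. sequence and sched_yield() for cpu_relax().

#include <stdbool.h>
#include <stdint.h>
#include <sched.h>

#define SLOT_VALID	0x1UL
#define SLOT_LOCK	0x2UL
#define NSLOTS		8

/* One attempt to set the lock bit, provided none of 'busy_bits' are set. */
bool try_lock_slot(uint64_t *slot, uint64_t busy_bits)
{
	uint64_t old = __atomic_load_n(slot, __ATOMIC_RELAXED);

	if (old & busy_bits)
		return false;
	/* Weak CAS: may fail transiently, like a single un-retried stdcx. */
	return __atomic_compare_exchange_n(slot, &old, old | SLOT_LOCK,
					   true, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}

/* Returns a locked free slot index, or -1 if the group is genuinely full. */
int find_free_slot(uint64_t slots[NSLOTS])
{
	int i;

	/* First pass: a single lock attempt per apparently-free slot. */
	for (i = 0; i < NSLOTS; ++i)
		if (try_lock_slot(&slots[i], SLOT_LOCK | SLOT_VALID))
			return i;

	/*
	 * Second pass: a free slot may have been missed because its lock
	 * attempt failed transiently, so take each lock unconditionally
	 * and re-check the valid bit while holding it.
	 */
	for (i = 0; i < NSLOTS; ++i) {
		while (!try_lock_slot(&slots[i], SLOT_LOCK))
			sched_yield();	/* stands in for cpu_relax() */
		if (!(slots[i] & SLOT_VALID))
			return i;	/* free: return it still locked */
		__atomic_and_fetch(&slots[i], ~SLOT_LOCK, __ATOMIC_RELEASE);
	}
	return -1;			/* all eight slots in use */
}

As in the patch, a slot found free on either pass is returned still locked, so the caller can fill it in and only then clear the lock bit; only after the second pass fails does the caller report the equivalent of H_PTEG_FULL.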
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h  |  25
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c       |  63
2 files changed, 59 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fa3dc79af702..300ec04a8381 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -43,6 +43,31 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 #define HPT_HASH_MASK	(HPT_NPTEG - 1)
 #endif
 
+/*
+ * We use a lock bit in HPTE dword 0 to synchronize updates and
+ * accesses to each HPTE, and another bit to indicate non-present
+ * HPTEs.
+ */
+#define HPTE_V_HVLOCK	0x40UL
+
+static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
+{
+	unsigned long tmp, old;
+
+	asm volatile("	ldarx	%0,0,%2\n"
+		     "	and.	%1,%0,%3\n"
+		     "	bne	2f\n"
+		     "	ori	%0,%0,%4\n"
+		     "	stdcx.	%0,0,%2\n"
+		     "	beq+	2f\n"
+		     "	li	%1,%3\n"
+		     "2:	isync"
+		     : "=&r" (tmp), "=&r" (old)
+		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+		     : "cc", "memory");
+	return old == 0;
+}
+
 static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 					     unsigned long pte_index)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 84dae821b230..a28a6030ec90 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -53,26 +53,6 @@ static void *real_vmalloc_addr(void *x)
 	return __va(addr);
 }
 
-#define HPTE_V_HVLOCK	0x40UL
-
-static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
-{
-	unsigned long tmp, old;
-
-	asm volatile("	ldarx	%0,0,%2\n"
-		     "	and.	%1,%0,%3\n"
-		     "	bne	2f\n"
-		     "	ori	%0,%0,%4\n"
-		     "	stdcx.	%0,0,%2\n"
-		     "	beq+	2f\n"
-		     "	li	%1,%3\n"
-		     "2:	isync"
-		     : "=&r" (tmp), "=&r" (old)
-		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
-		     : "cc", "memory");
-	return old == 0;
-}
-
 long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 		    long pte_index, unsigned long pteh, unsigned long ptel)
 {
@@ -126,24 +106,49 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	pteh &= ~0x60UL;
 	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
 	ptel |= pa;
+
 	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	if (likely((flags & H_EXACT) == 0)) {
 		pte_index &= ~7UL;
 		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
-		for (i = 0; ; ++i) {
-			if (i == 8)
-				return H_PTEG_FULL;
+		for (i = 0; i < 8; ++i) {
 			if ((*hpte & HPTE_V_VALID) == 0 &&
-			    lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
+			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
 				break;
 			hpte += 2;
 		}
+		if (i == 8) {
+			/*
+			 * Since try_lock_hpte doesn't retry (not even stdcx.
+			 * failures), it could be that there is a free slot
+			 * but we transiently failed to lock it.  Try again,
+			 * actually locking each slot and checking it.
+			 */
+			hpte -= 16;
+			for (i = 0; i < 8; ++i) {
+				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+					cpu_relax();
+				if ((*hpte & HPTE_V_VALID) == 0)
+					break;
+				*hpte &= ~HPTE_V_HVLOCK;
+				hpte += 2;
+			}
+			if (i == 8)
+				return H_PTEG_FULL;
+		}
 		pte_index += i;
 	} else {
 		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
-		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
-			return H_PTEG_FULL;
+		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
+			/* Lock the slot and check again */
+			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+				cpu_relax();
+			if (*hpte & HPTE_V_VALID) {
+				*hpte &= ~HPTE_V_HVLOCK;
+				return H_PTEG_FULL;
+			}
+		}
 	}
 
 	/* Save away the guest's idea of the second HPTE dword */
@@ -189,7 +194,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
-	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 		cpu_relax();
 	if ((hpte[0] & HPTE_V_VALID) == 0 ||
 	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
@@ -248,7 +253,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			break;
 		}
 		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
-		while (!lock_hpte(hp, HPTE_V_HVLOCK))
+		while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
 			cpu_relax();
 		found = 0;
 		if (hp[0] & HPTE_V_VALID) {
@@ -310,7 +315,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
-	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 		cpu_relax();
 	if ((hpte[0] & HPTE_V_VALID) == 0 ||
 	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {