author	Paul Mackerras <paulus@samba.org>	2011-12-12 07:27:39 -0500
committer	Avi Kivity <avi@redhat.com>	2012-03-05 07:52:35 -0500
commit	8936dda4c2ed070ecebd786baf35b08584accf4a (patch)
tree	7f75079f3814304050cbf880ecd7ddb9505f63a4	/arch/powerpc/kvm/book3s_hv_rm_mmu.c
parent	4e72dbe13528394a413889d73e5025dbdf6cab70 (diff)
KVM: PPC: Keep a record of HV guest view of hashed page table entries
This adds an array that parallels the guest hashed page table (HPT),
that is, it has one entry per HPTE, used to store the guest's view
of the second doubleword of the corresponding HPTE.  The first
doubleword in the HPTE is the same as the guest's idea of it, so we
don't need to store a copy, but the second doubleword in the HPTE has
the real page number rather than the guest's logical page number.
This allows us to remove the back_translate() and reverse_xlate()
functions.

This "reverse mapping" array is vmalloc'd, meaning that to access it
in real mode we have to walk the kernel's page tables explicitly.
That is done by the new real_vmalloc_addr() function.  (In fact this
returns an address in the linear mapping, so the result is usable
both in real mode and in virtual mode.)

There are also some minor cleanups here: moving the definitions of
HPT_ORDER etc. to a header file and defining HPT_NPTE for
HPT_NPTEG << 3.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
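[Editor's note: the header-side changes the message refers to are filtered out of the diffstat below, which is limited to book3s_hv_rm_mmu.c. The following is a minimal sketch of what those definitions plausibly look like; the exact header location and field layout are assumptions based on the names used in this diff, not part of the patch text shown here.]

/* Sketch (assumption): the HPT size constants moved out of
 * book3s_hv_rm_mmu.c into a shared header, plus the new HPT_NPTE
 * name for HPT_NPTEG << 3.
 */
#define HPT_ORDER	24				/* fixed 16MB HPT for now */
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_NPTE	(HPT_NPTEG << 3)		/* 8 HPTEs per HPTEG */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)

/* Sketch (assumption): one revmap entry per HPTE, holding the
 * guest's view of the second doubleword (logical rather than real
 * page number).
 */
struct revmap_entry {
	unsigned long guest_rpte;
};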
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rm_mmu.c')
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rm_mmu.c	87
1 file changed, 53 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index bacb0cfa3602..614849360a0a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -20,10 +20,19 @@
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 
-/* For now use fixed-size 16MB page table */
-#define HPT_ORDER	24
-#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
-#define HPT_HASH_MASK	(HPT_NPTEG - 1)
+/* Translate address of a vmalloc'd thing to a linear map address */
+static void *real_vmalloc_addr(void *x)
+{
+	unsigned long addr = (unsigned long) x;
+	pte_t *p;
+
+	p = find_linux_pte(swapper_pg_dir, addr);
+	if (!p || !pte_present(*p))
+		return NULL;
+	/* assume we don't have huge pages in vmalloc space... */
+	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
+	return __va(addr);
+}
 
 #define HPTE_V_HVLOCK	0x40UL
 
@@ -52,6 +61,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long i, lpn, pa;
 	unsigned long *hpte;
+	struct revmap_entry *rev;
+	unsigned long g_ptel = ptel;
 
 	/* only handle 4k, 64k and 16M pages for now */
 	porder = 12;
@@ -82,7 +93,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	pteh &= ~0x60UL;
 	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
 	ptel |= pa;
-	if (pte_index >= (HPT_NPTEG << 3))
+	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	if (likely((flags & H_EXACT) == 0)) {
 		pte_index &= ~7UL;
@@ -95,18 +106,22 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 				break;
 			hpte += 2;
 		}
+		pte_index += i;
 	} else {
-		i = 0;
 		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
 			return H_PTEG_FULL;
 	}
+
+	/* Save away the guest's idea of the second HPTE dword */
+	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	if (rev)
+		rev->guest_rpte = g_ptel;
 	hpte[1] = ptel;
 	eieio();
 	hpte[0] = pteh;
 	asm volatile("ptesync" : : : "memory");
-	atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
-	vcpu->arch.gpr[4] = pte_index + i;
+	vcpu->arch.gpr[4] = pte_index;
 	return H_SUCCESS;
 }
 
@@ -138,7 +153,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 	unsigned long *hpte;
 	unsigned long v, r, rb;
 
-	if (pte_index >= (HPT_NPTEG << 3))
+	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
@@ -193,7 +208,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 		if (req == 3)
 			break;
 		if (req != 1 || flags == 3 ||
-		    pte_index >= (HPT_NPTEG << 3)) {
+		    pte_index >= HPT_NPTE) {
 			/* parameter error */
 			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
 			ret = H_PARAMETER;
@@ -256,9 +271,10 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 {
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long *hpte;
-	unsigned long v, r, rb;
+	struct revmap_entry *rev;
+	unsigned long v, r, rb, mask, bits;
 
-	if (pte_index >= (HPT_NPTEG << 3))
+	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
@@ -271,11 +287,21 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (atomic_read(&kvm->online_vcpus) == 1)
 		flags |= H_LOCAL;
 	v = hpte[0];
-	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
-			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
-	r |= (flags << 55) & HPTE_R_PP0;
-	r |= (flags << 48) & HPTE_R_KEY_HI;
-	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+	bits = (flags << 55) & HPTE_R_PP0;
+	bits |= (flags << 48) & HPTE_R_KEY_HI;
+	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+
+	/* Update guest view of 2nd HPTE dword */
+	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
+		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
+	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	if (rev) {
+		r = (rev->guest_rpte & ~mask) | bits;
+		rev->guest_rpte = r;
+	}
+	r = (hpte[1] & ~mask) | bits;
+
+	/* Update HPTE */
 	rb = compute_tlbie_rb(v, r, pte_index);
 	hpte[0] = v & ~HPTE_V_VALID;
 	if (!(flags & H_LOCAL)) {
@@ -298,38 +324,31 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	return H_SUCCESS;
 }
 
-static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
-{
-	long int i;
-	unsigned long offset, rpn;
-
-	offset = realaddr & (kvm->arch.ram_psize - 1);
-	rpn = (realaddr - offset) >> PAGE_SHIFT;
-	for (i = 0; i < kvm->arch.ram_npages; ++i)
-		if (rpn == kvm->arch.ram_pginfo[i].pfn)
-			return (i << PAGE_SHIFT) + offset;
-	return HPTE_R_RPN;	/* all 1s in the RPN field */
-}
-
 long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 		   unsigned long pte_index)
 {
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long *hpte, r;
 	int i, n = 1;
+	struct revmap_entry *rev = NULL;
 
-	if (pte_index >= (HPT_NPTEG << 3))
+	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	if (flags & H_READ_4) {
 		pte_index &= ~3;
 		n = 4;
 	}
+	if (flags & H_R_XLATE)
+		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
 	for (i = 0; i < n; ++i, ++pte_index) {
 		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 		r = hpte[1];
-		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
-			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
-				(r & ~HPTE_R_RPN);
+		if (hpte[0] & HPTE_V_VALID) {
+			if (rev)
+				r = rev[i].guest_rpte;
+			else
+				r = hpte[1] | HPTE_R_RPN;
+		}
 		vcpu->arch.gpr[4 + i * 2] = hpte[0];
 		vcpu->arch.gpr[5 + i * 2] = r;
 	}
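[Editor's note: the array written through rev->guest_rpte above has to exist before these real-mode handlers run. Below is a minimal sketch of the allocation side, assuming it happens where the HPT itself is allocated; that code lives outside this file, so the exact function, error path, and allocator call here are illustrative, matching only the revmap/HPT_NPTE names used in this patch.]

/* Sketch (assumption): allocating the parallel guest-view array when
 * the HPT is created, one revmap_entry per HPTE.  A vmalloc-family
 * allocation is fine here because the handlers above translate each
 * entry's address with real_vmalloc_addr() before touching it with
 * the MMU off.
 */
kvm->arch.revmap = vzalloc(HPT_NPTE * sizeof(struct revmap_entry));
if (!kvm->arch.revmap) {
	/* hypothetical cleanup: release the HPT allocated just before */
	free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
	return -ENOMEM;
}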