author     Paul Mackerras <paulus@samba.org>          2012-11-13 13:31:32 -0500
committer  Alexander Graf <agraf@suse.de>             2012-12-05 19:33:52 -0500
commit     7ed661bf852cefa1ab57ad709a675bfb029d47ab (patch)
tree       a79534be515ac68645cb1c78c790c507c83874d2 /arch/powerpc
parent     0e673fb679027600cad45bd61a4cc9ebd2ed2bb1 (diff)
KVM: PPC: Book3S HV: Restructure HPT entry creation code

This restructures the code that creates HPT (hashed page table) entries
so that it can be called in situations where we don't have a struct vcpu
pointer, only a struct kvm pointer. It also fixes a bug where
kvmppc_map_vrma() would corrupt the guest R4 value.

Most of the work of kvmppc_virtmode_h_enter is now done by a new
function, kvmppc_virtmode_do_h_enter, which itself calls another new
function, kvmppc_do_h_enter, which contains most of the old
kvmppc_h_enter. The new kvmppc_do_h_enter takes explicit arguments for
the place to return the HPTE index, the Linux page tables to use, and
whether it is being called in real mode, thus removing the need for it
to have the vcpu as an argument.

Currently kvmppc_map_vrma creates the VRMA (virtual real mode area)
HPTEs by calling kvmppc_virtmode_h_enter, which is designed primarily to
handle H_ENTER hcalls from the guest that need to pin a page of memory.
Since H_ENTER returns the index of the created HPTE in R4,
kvmppc_virtmode_h_enter updates the guest R4, corrupting the guest R4 in
the case when it gets called from kvmppc_map_vrma on the first VCPU_RUN
ioctl. With this, kvmppc_map_vrma instead calls
kvmppc_virtmode_do_h_enter with the address of a dummy word as the place
to store the HPTE index, thus avoiding corrupting the guest R4.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
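As a rough, stand-alone illustration of the restructuring described above: a worker
function returns its result through an explicit pointer, the hcall-path wrapper passes
the address of guest R4, and the VRMA-setup path passes the address of a dummy local so
guest R4 is left alone. All names and types below (fake_vcpu, demo_do_h_enter,
demo_h_enter, demo_map_vrma) are invented stand-ins for illustration only; the actual
kernel changes are in the diff that follows.

/*
 * Self-contained sketch only: fake_vcpu, demo_do_h_enter, demo_h_enter and
 * demo_map_vrma are invented stand-ins, not the kernel's KVM code.
 */
#include <stdio.h>

struct fake_vcpu {
        unsigned long gpr[32];          /* guest GPRs; gpr[4] carries hcall results */
};

/*
 * Worker: takes an explicit place to return the "HPTE index" instead of
 * writing into a vcpu, so it needs no vcpu pointer at all.
 */
static long demo_do_h_enter(unsigned long pteh, unsigned long ptel,
                            unsigned long *idx_ret)
{
        *idx_ret = (pteh ^ ptel) & 0xffff;      /* pretend HPTE index */
        return 0;                               /* pretend H_SUCCESS */
}

/* Hcall path: the guest expects the index back in R4, so pass &gpr[4]. */
static long demo_h_enter(struct fake_vcpu *vcpu,
                         unsigned long pteh, unsigned long ptel)
{
        return demo_do_h_enter(pteh, ptel, &vcpu->gpr[4]);
}

/* VRMA-setup path: store the index in a dummy local so guest R4 is untouched. */
static long demo_map_vrma(unsigned long pteh, unsigned long ptel)
{
        unsigned long idx_ret;

        return demo_do_h_enter(pteh, ptel, &idx_ret);
}

int main(void)
{
        struct fake_vcpu vcpu = { .gpr[4] = 0x1234 };

        demo_map_vrma(0x1000, 0x2000);          /* leaves gpr[4] alone */
        printf("gpr[4] after map_vrma: %#lx\n", vcpu.gpr[4]);

        demo_h_enter(&vcpu, 0x1000, 0x2000);    /* updates gpr[4] */
        printf("gpr[4] after h_enter:  %#lx\n", vcpu.gpr[4]);
        return 0;
}
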
Diffstat (limited to 'arch/powerpc')
 -rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h  |  5
 -rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c    | 36
 -rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c    | 27
 3 files changed, 45 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 36fcf4190461..fea768f21cd7 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -157,8 +157,9 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
 extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
 extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel);
-extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-                       long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+                       long pte_index, unsigned long pteh, unsigned long ptel,
+                       pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map);
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 2a89a36e7263..6ee6516a0bee 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -41,6 +41,10 @@
 /* Power architecture requires HPT is at least 256kB */
 #define PPC_MIN_HPT_ORDER      18
 
+static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+                               long pte_index, unsigned long pteh,
+                               unsigned long ptel, unsigned long *pte_idx_ret);
+
 long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
        unsigned long hpt;
@@ -185,6 +189,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
        unsigned long addr, hash;
        unsigned long psize;
        unsigned long hp0, hp1;
+       unsigned long idx_ret;
        long ret;
        struct kvm *kvm = vcpu->kvm;
 
@@ -216,7 +221,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
                hash = (hash << 3) + 7;
                hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
                hp_r = hp1 | addr;
-               ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
+               ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
+                                                &idx_ret);
                if (ret != H_SUCCESS) {
                        pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
                               addr, ret);
@@ -354,15 +360,10 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
        return err;
 }
 
-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-                       long pte_index, unsigned long pteh, unsigned long ptel)
+long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+                               long pte_index, unsigned long pteh,
+                               unsigned long ptel, unsigned long *pte_idx_ret)
 {
-       struct kvm *kvm = vcpu->kvm;
        unsigned long psize, gpa, gfn;
        struct kvm_memory_slot *memslot;
        long ret;
@@ -390,8 +391,8 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
  do_insert:
        /* Protect linux PTE lookup from page table destruction */
        rcu_read_lock_sched();  /* this disables preemption too */
-       vcpu->arch.pgdir = current->mm->pgd;
-       ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
+       ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
+                               current->mm->pgd, false, pte_idx_ret);
        rcu_read_unlock_sched();
        if (ret == H_TOO_HARD) {
                /* this can't happen */
@@ -402,6 +403,19 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 
 }
 
+/*
+ * We come here on a H_ENTER call from the guest when we are not
+ * using mmu notifiers and we don't have the requested page pinned
+ * already.
+ */
+long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+                            long pte_index, unsigned long pteh,
+                            unsigned long ptel)
+{
+       return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
+                                         pteh, ptel, &vcpu->arch.gpr[4]);
+}
+
 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
                                                         gva_t eaddr)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 5e06e3153888..362dffe4db10 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -103,14 +103,14 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
        unlock_rmap(rmap);
 }
 
-static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
+static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
                              int writing, unsigned long *pte_sizep)
 {
        pte_t *ptep;
        unsigned long ps = *pte_sizep;
        unsigned int shift;
 
-       ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
+       ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
        if (!ptep)
                return __pte(0);
        if (shift)
@@ -130,10 +130,10 @@ static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
        hpte[0] = hpte_v;
 }
 
-long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-                   long pte_index, unsigned long pteh, unsigned long ptel)
+long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+                      long pte_index, unsigned long pteh, unsigned long ptel,
+                      pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
 {
-       struct kvm *kvm = vcpu->kvm;
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
        unsigned long *hpte;
@@ -147,7 +147,6 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
        unsigned int writing;
        unsigned long mmu_seq;
        unsigned long rcbits;
-       bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
 
        psize = hpte_page_size(pteh, ptel);
        if (!psize)
@@ -201,7 +200,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 
        /* Look up the Linux PTE for the backing page */
        pte_size = psize;
-       pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
+       pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
        if (pte_present(pte)) {
                if (writing && !pte_write(pte))
                        /* make the actual HPTE be read-only */
@@ -210,6 +209,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        pa = pte_pfn(pte) << PAGE_SHIFT;
                }
        }
+
        if (pte_size < psize)
                return H_PARAMETER;
        if (pa && pte_size > psize)
@@ -297,7 +297,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
                if (kvm->arch.using_mmu_notifiers &&
-                   mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+                   mmu_notifier_retry(kvm, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
@@ -318,10 +318,17 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");
 
-       vcpu->arch.gpr[4] = pte_index;
+       *pte_idx_ret = pte_index;
        return H_SUCCESS;
 }
-EXPORT_SYMBOL_GPL(kvmppc_h_enter);
+EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
+
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+                   long pte_index, unsigned long pteh, unsigned long ptel)
+{
+       return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
+                                vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
+}
 
 #define LOCK_TOKEN     (*(u32 *)(&get_paca()->lock_token))
 