about summary refs log tree commit diff stats
path: root/arch/powerpc/kvm
diff options
context:
space:
mode:
authorScott Wood <scottwood@freescale.com>2011-06-14 19:34:37 -0400
committerAvi Kivity <avi@redhat.com>2011-07-12 06:16:35 -0400
commit59c1f4e35c3db6c7ea5a04503a43bcbeb98977df (patch)
tree97e4a733a4e99e0208495eb04d158bfdf90b1181 /arch/powerpc/kvm
parent0ef309956cecbaf6d96c31371bf393c296886fa6 (diff)
KVM: PPC: e500: Eliminate shadow_pages[], and use pfns instead.
This is in line with what other architectures do, and will allow us to map things other than ordinary, unreserved kernel pages -- such as dedicated devices, or large contiguous reserved regions. Signed-off-by: Scott Wood <scottwood@freescale.com> Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--arch/powerpc/kvm/e500_tlb.c56
1 file changed, 19 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index f1b37e0de86c..0291c3cf5055 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -188,17 +188,16 @@ static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
188 int tlbsel, int esel) 188 int tlbsel, int esel)
189{ 189{
190 struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; 190 struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
191 struct page *page = vcpu_e500->shadow_pages[tlbsel][esel]; 191 unsigned long pfn;
192 192
193 if (page) { 193 pfn = stlbe->mas3 >> PAGE_SHIFT;
194 vcpu_e500->shadow_pages[tlbsel][esel] = NULL; 194 pfn |= stlbe->mas7 << (32 - PAGE_SHIFT);
195 195
196 if (get_tlb_v(stlbe)) { 196 if (get_tlb_v(stlbe)) {
197 if (tlbe_is_writable(stlbe)) 197 if (tlbe_is_writable(stlbe))
198 kvm_release_page_dirty(page); 198 kvm_release_pfn_dirty(pfn);
199 else 199 else
200 kvm_release_page_clean(page); 200 kvm_release_pfn_clean(pfn);
201 }
202 } 201 }
203} 202}
204 203
@@ -271,37 +270,36 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
271static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, 270static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
272 u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel) 271 u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
273{ 272{
274 struct page *new_page;
275 struct tlbe *stlbe; 273 struct tlbe *stlbe;
276 hpa_t hpaddr; 274 unsigned long pfn;
277 275
278 stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel]; 276 stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
279 277
280 /* Get reference to new page. */ 278 /*
281 new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn); 279 * Translate guest physical to true physical, acquiring
282 if (is_error_page(new_page)) { 280 * a page reference if it is normal, non-reserved memory.
283 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", 281 */
282 pfn = gfn_to_pfn(vcpu_e500->vcpu.kvm, gfn);
283 if (is_error_pfn(pfn)) {
284 printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
284 (long)gfn); 285 (long)gfn);
285 kvm_release_page_clean(new_page); 286 kvm_release_pfn_clean(pfn);
286 return; 287 return;
287 } 288 }
288 hpaddr = page_to_phys(new_page);
289 289
290 /* Drop reference to old page. */ 290 /* Drop reference to old page. */
291 kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); 291 kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
292 292
293 vcpu_e500->shadow_pages[tlbsel][esel] = new_page;
294
295 /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */ 293 /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
296 stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K) 294 stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
297 | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; 295 | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
298 stlbe->mas2 = (gvaddr & MAS2_EPN) 296 stlbe->mas2 = (gvaddr & MAS2_EPN)
299 | e500_shadow_mas2_attrib(gtlbe->mas2, 297 | e500_shadow_mas2_attrib(gtlbe->mas2,
300 vcpu_e500->vcpu.arch.shared->msr & MSR_PR); 298 vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
301 stlbe->mas3 = (hpaddr & MAS3_RPN) 299 stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
302 | e500_shadow_mas3_attrib(gtlbe->mas3, 300 | e500_shadow_mas3_attrib(gtlbe->mas3,
303 vcpu_e500->vcpu.arch.shared->msr & MSR_PR); 301 vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
304 stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; 302 stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
305 303
306 trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, 304 trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
307 stlbe->mas3, stlbe->mas7); 305 stlbe->mas3, stlbe->mas7);
@@ -712,16 +710,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
712 if (vcpu_e500->shadow_tlb[1] == NULL) 710 if (vcpu_e500->shadow_tlb[1] == NULL)
713 goto err_out_guest1; 711 goto err_out_guest1;
714 712
715 vcpu_e500->shadow_pages[0] = (struct page **)
716 kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
717 if (vcpu_e500->shadow_pages[0] == NULL)
718 goto err_out_shadow1;
719
720 vcpu_e500->shadow_pages[1] = (struct page **)
721 kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
722 if (vcpu_e500->shadow_pages[1] == NULL)
723 goto err_out_page0;
724
725 /* Init TLB configuration register */ 713 /* Init TLB configuration register */
726 vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL; 714 vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
727 vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0]; 715 vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
@@ -730,10 +718,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
730 718
731 return 0; 719 return 0;
732 720
733err_out_page0:
734 kfree(vcpu_e500->shadow_pages[0]);
735err_out_shadow1:
736 kfree(vcpu_e500->shadow_tlb[1]);
737err_out_guest1: 721err_out_guest1:
738 kfree(vcpu_e500->guest_tlb[1]); 722 kfree(vcpu_e500->guest_tlb[1]);
739err_out_shadow0: 723err_out_shadow0:
@@ -746,8 +730,6 @@ err_out:
746 730
747void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) 731void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
748{ 732{
749 kfree(vcpu_e500->shadow_pages[1]);
750 kfree(vcpu_e500->shadow_pages[0]);
751 kfree(vcpu_e500->shadow_tlb[1]); 733 kfree(vcpu_e500->shadow_tlb[1]);
752 kfree(vcpu_e500->guest_tlb[1]); 734 kfree(vcpu_e500->guest_tlb[1]);
753 kfree(vcpu_e500->shadow_tlb[0]); 735 kfree(vcpu_e500->shadow_tlb[0]);