author		Avi Kivity <avi@qumranet.com>	2007-12-30 05:29:05 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-01-30 11:01:21 -0500
commit		d7824fff896a1698a07a8046dc362f4500c302f7 (patch)
tree		249e23ec224bc621bea1ef24fa83f5a749d6b35b /arch/x86/kvm/paging_tmpl.h
parent		7ec54588210df29ea637e6054489bc942c0ef371 (diff)
KVM: MMU: Avoid calling gfn_to_page() in mmu_set_spte()
Since gfn_to_page() is a sleeping function, and we want to make the core mmu
spinlocked, we need to pass the page from the walker context (which can sleep)
to the shadow context (which cannot).

[marcelo: avoid recursive locking of mmap_sem]

Signed-off-by: Avi Kivity <avi@qumranet.com>
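The locking pattern behind the change, reduced to a sketch: do the work that
can sleep (gfn_to_page() faulting in the backing page) before taking the lock,
then pass the result into the non-sleeping shadow context. The spinlock and
install_spte() helper below are illustrative stand-ins for the planned
spinlocked MMU, not this commit's code, which still serializes on the
kvm->lock mutex:

    #include <linux/kvm_host.h>

    static DEFINE_SPINLOCK(mmu_lock);       /* hypothetical: the future mmu spinlock */

    static bool install_spte(struct kvm_vcpu *vcpu, gfn_t gfn,
                             struct page *page);    /* hypothetical helper */

    static int sketch_page_fault(struct kvm_vcpu *vcpu, gfn_t gfn)
    {
            struct page *page;

            /* Walker context, may sleep: fault the page in and take
             * a reference before any lock is held. */
            page = gfn_to_page(vcpu->kvm, gfn);

            /* Shadow context, may not sleep: the page arrives as a
             * parameter instead of being looked up under the lock. */
            spin_lock(&mmu_lock);
            if (!install_spte(vcpu, gfn, page)) {
                    spin_unlock(&mmu_lock);
                    kvm_release_page_clean(page);   /* drop the ref on failure */
                    return -EAGAIN;
            }
            spin_unlock(&mmu_lock);

            return 0;
    }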
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 136a65d72b0a..3d7846ba26e1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -245,6 +245,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 {
 	pt_element_t gpte;
 	unsigned pte_access;
+	struct page *npage;
 
 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
@@ -256,8 +257,14 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 		return;
 	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
 	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
+	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
+		return;
+	npage = vcpu->arch.update_pte.page;
+	if (!npage)
+		return;
+	get_page(npage);
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
-		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte));
+		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
 }
 
 /*
@@ -265,7 +272,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *walker,
-			 int user_fault, int write_fault, int *ptwrite)
+			 int user_fault, int write_fault, int *ptwrite,
+			 struct page *page)
 {
 	hpa_t shadow_addr;
 	int level;
@@ -321,8 +329,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			r = kvm_read_guest_atomic(vcpu->kvm,
 						  walker->pte_gpa[level - 2],
 						  &curr_pte, sizeof(curr_pte));
-			if (r || curr_pte != walker->ptes[level - 2])
+			if (r || curr_pte != walker->ptes[level - 2]) {
+				kvm_release_page_clean(page);
 				return NULL;
+			}
 		}
 		shadow_addr = __pa(shadow_page->spt);
 		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
@@ -333,7 +343,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
 		     user_fault, write_fault,
 		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-		     ptwrite, walker->gfn);
+		     ptwrite, walker->gfn, page);
 
 	return shadow_ent;
 }
@@ -362,6 +372,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	u64 *shadow_pte;
 	int write_pt = 0;
 	int r;
+	struct page *page;
 
 	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
 	kvm_mmu_audit(vcpu, "pre page fault");
@@ -388,9 +399,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		return 0;
 	}
 
+	page = gfn_to_page(vcpu->kvm, walker.gfn);
+
 	mutex_lock(&vcpu->kvm->lock);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-				  &write_pt);
+				  &write_pt, page);
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
 		 shadow_pte, *shadow_pte, write_pt);
 
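A subtlety the fetch() hunks introduce: once the reference is taken outside
the lock, every early-exit path inside the locked region must drop it. The
bail-out added above, shown with its reasoning spelled out as comments (the
same code as in the patch, annotated here for illustration):

    /* Under the lock, FNAME(fetch) rereads the guest pte. If the
     * guest changed it while we were sleeping in gfn_to_page(),
     * give up -- but first release the reference taken by the
     * caller, FNAME(page_fault), or the page would be leaked. */
    if (r || curr_pte != walker->ptes[level - 2]) {
            kvm_release_page_clean(page);
            return NULL;
    }

The update_pte hunk works the same way from the other side: it consumes a page
cached in vcpu->arch.update_pte by a sleepable caller (that caller lives
outside this file; the diffstat above is limited to paging_tmpl.h) and takes
its own reference with get_page() before handing it to mmu_set_spte().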