Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	22
1 files changed, 11 insertions, 11 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9308be2d5c02..e461f2393d85 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -253,7 +253,7 @@ err:
 	return 0;
 }
 
-static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			      u64 *spte, const void *pte)
 {
 	pt_element_t gpte;
@@ -264,7 +264,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
 		if (!is_present_gpte(gpte)) {
-			if (page->unsync)
+			if (sp->unsync)
 				new_spte = shadow_trap_nonpresent_pte;
 			else
 				new_spte = shadow_notrap_nonpresent_pte;
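The guard in this hunk, ~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK), is a compact way of asking whether either flag is clear: complementing gpte turns every 0 bit into a 1, so the AND is nonzero iff the guest pte is missing its present bit, its accessed bit, or both. A minimal standalone sketch of the same bit trick (illustrative only, not kernel code; the mask values mirror the x86 pte layout):

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  (1ULL << 0)	/* x86 pte bit 0 */
#define PT_ACCESSED_MASK (1ULL << 5)	/* x86 pte bit 5 */

int main(void)
{
	uint64_t gpte = PT_PRESENT_MASK;	/* present, never accessed */

	/* Nonzero because the accessed bit is clear in gpte. */
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
		printf("gpte %#llx takes the slow path\n",
		       (unsigned long long)gpte);
	return 0;
}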
@@ -273,7 +273,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 		return;
 	}
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
-	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
+	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
 		return;
 	pfn = vcpu->arch.update_pte.pfn;
@@ -286,7 +286,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	 * we call mmu_set_spte() with reset_host_protection = true beacuse that
 	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
 	 */
-	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
+	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
 		     gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
 		     gpte_to_gfn(gpte), pfn, true, true);
 }
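One detail behind the unsync test rewritten above: for a guest pte that is genuinely not present, KVM installs one of two flavours of not-present spte. Roughly, a synced page can use the notrap encoding, which lets the resulting page fault be reflected straight to the guest, while an unsync page may no longer match its guest page table, so its sptes must trap into KVM. A standalone sketch of that decision, with hypothetical stand-in values for shadow_trap_nonpresent_pte and shadow_notrap_nonpresent_pte (the real constants are set up elsewhere in the KVM MMU):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical values; illustrative only. */
#define TRAP_NONPRESENT   0ULL			/* fault must exit to KVM  */
#define NOTRAP_NONPRESENT (0xfULL << 49)	/* fault goes to the guest */

static uint64_t choose_nonpresent_spte(bool sp_unsync)
{
	/*
	 * An unsync shadow page may hold stale translations, so its
	 * not-present sptes have to trap; for a synced page the guest
	 * pte is known to be not present, and the fault can be
	 * delivered to the guest directly.
	 */
	return sp_unsync ? TRAP_NONPRESENT : NOTRAP_NONPRESENT;
}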
@@ -300,7 +300,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 int *ptwrite, pfn_t pfn)
 {
 	unsigned access = gw->pt_access;
-	struct kvm_mmu_page *shadow_page;
+	struct kvm_mmu_page *sp;
 	u64 spte, *sptep = NULL;
 	int direct;
 	gfn_t table_gfn;
@@ -341,9 +341,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			access &= ~ACC_WRITE_MASK;
 			/*
 			 * It is a large guest pages backed by small host pages,
-			 * So we set @direct(@shadow_page->role.direct)=1, and
-			 * set @table_gfn(@shadow_page->gfn)=the base page frame
-			 * for linear translations.
+			 * So we set @direct(@sp->role.direct)=1, and set
+			 * @table_gfn(@sp->gfn)=the base page frame for linear
+			 * translations.
 			 */
 			table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 			access &= gw->pte_access;
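The base-frame computation in this hunk, table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1), rounds the guest frame number down to the first small frame of the huge page, which the direct shadow page then maps linearly. A worked sketch of the arithmetic, assuming 4KiB base pages and 512 of them per 2MiB page (x86 level 2; the macro name below is a stand-in):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define PAGES_PER_HPAGE 512ULL	/* stand-in for KVM_PAGES_PER_HPAGE(2) */

int main(void)
{
	gfn_t gfn = 0x12345;	/* some frame inside a 2MiB guest page */
	gfn_t table_gfn = gfn & ~(PAGES_PER_HPAGE - 1);

	/* 0x12345 & ~0x1ff == 0x12200: the first 4KiB frame of the
	 * huge page, so spte index i translates table_gfn + i. */
	printf("table_gfn = %#llx\n", (unsigned long long)table_gfn);
	return 0;
}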
@@ -351,21 +351,21 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			direct = 0;
 			table_gfn = gw->table_gfn[level - 2];
 		}
-		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+		sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 					       direct, access, sptep);
 		if (!direct) {
 			r = kvm_read_guest_atomic(vcpu->kvm,
 						  gw->pte_gpa[level - 2],
 						  &curr_pte, sizeof(curr_pte));
 			if (r || curr_pte != gw->ptes[level - 2]) {
-				kvm_mmu_put_page(shadow_page, sptep);
+				kvm_mmu_put_page(sp, sptep);
 				kvm_release_pfn_clean(pfn);
 				sptep = NULL;
 				break;
 			}
 		}
 
-		spte = __pa(shadow_page->spt)
+		spte = __pa(sp->spt)
 			| PT_PRESENT_MASK | PT_ACCESSED_MASK
 			| PT_WRITABLE_MASK | PT_USER_MASK;
 		*sptep = spte;
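The spte assembled at the end of this hunk is a non-leaf entry: the physical address of the child shadow table OR'd with permission bits that are always set, since on x86 the effective permissions are the intersection across levels and KVM enforces the real checks in the leaf sptes. A compact sketch using the standard x86 pte bit positions (the kernel defines these masks elsewhere):

#include <stdint.h>

#define PT_PRESENT_MASK  (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << 1)
#define PT_USER_MASK     (1ULL << 2)
#define PT_ACCESSED_MASK (1ULL << 5)

static uint64_t make_nonleaf_spte(uint64_t child_table_pa)
{
	/* Maximally permissive on purpose: restricting access is the
	 * leaf spte's job, so intermediate levels never mask it. */
	return child_table_pa
		| PT_PRESENT_MASK | PT_ACCESSED_MASK
		| PT_WRITABLE_MASK | PT_USER_MASK;
}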