author	Joerg Roedel <joerg.roedel@amd.com>	2010-09-10 11:30:39 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:52:28 -0400
commit	c5a78f2b649ae75ae788e7622ca5a586af2cb35a (patch)
tree	969423628be1dd64996c10359edddf02ed4f7343 /arch
parent	957446afce22df9a42b9482fcd55985f4037fe66 (diff)
KVM: MMU: Make tdp_enabled a mmu-context parameter
This patch changes the tdp_enabled flag from a global setting to a per-mmu-context one and renames it to direct_map there. This is necessary for Nested SVM with emulation of Nested Paging, where we need an extra MMU context to shadow the Nested Nested Page Table.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
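For illustration only, a minimal sketch of the idea (the helper mmu_is_direct below is hypothetical and not part of the patch): callers stop consulting the global tdp_enabled flag and instead ask the MMU context they are operating on, so a nested context can answer differently from the host context.

/*
 * Sketch, assuming the struct field introduced by this patch.
 * With a global tdp_enabled, every MMU context on the vcpu got
 * the same answer; with mmu.direct_map, each context decides
 * for itself whether it maps guest memory directly.
 */
static bool mmu_is_direct(struct kvm_vcpu *vcpu)
{
	/* old: return tdp_enabled;  -- one answer for all contexts */
	return vcpu->arch.mmu.direct_map;  /* new: per-context answer */
}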
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	1
-rw-r--r--	arch/x86/kvm/mmu.c	22
2 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 789e9462668..80ef28bddcc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -249,6 +249,7 @@ struct kvm_mmu {
 	int root_level;
 	int shadow_root_level;
 	union kvm_mmu_page_role base_role;
+	bool direct_map;
 
 	u64 *pae_root;
 	u64 rsvd_bits_mask[2][4];
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b2136f921d7..5c28e979d73 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1448,7 +1448,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	if (role.direct)
 		role.cr4_pae = 0;
 	role.access = access;
-	if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+	if (!vcpu->arch.mmu.direct_map
+	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
@@ -1973,7 +1974,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= shadow_user_mask;
 	if (level > PT_PAGE_TABLE_LEVEL)
 		spte |= PT_PAGE_SIZE_MASK;
-	if (tdp_enabled)
+	if (vcpu->arch.mmu.direct_map)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
 
@@ -1983,8 +1984,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	spte |= (u64)pfn << PAGE_SHIFT;
 
 	if ((pte_access & ACC_WRITE_MASK)
-	    || (!tdp_enabled && write_fault && !is_write_protection(vcpu)
-		&& !user_fault)) {
+	    || (!vcpu->arch.mmu.direct_map && write_fault
+		&& !is_write_protection(vcpu) && !user_fault)) {
 
 		if (level > PT_PAGE_TABLE_LEVEL &&
 		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
@@ -1995,7 +1996,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 		spte |= PT_WRITABLE_MASK;
 
-		if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
+		if (!vcpu->arch.mmu.direct_map
+		    && !(pte_access & ACC_WRITE_MASK))
 			spte &= ~PT_USER_MASK;
 
 		/*
@@ -2371,7 +2373,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 		if (mmu_check_root(vcpu, root_gfn))
 			return 1;
-		if (tdp_enabled) {
+		if (vcpu->arch.mmu.direct_map) {
 			direct = 1;
 			root_gfn = 0;
 		}
@@ -2406,7 +2408,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			return 1;
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
-		if (tdp_enabled) {
+		if (vcpu->arch.mmu.direct_map) {
 			direct = 1;
 			root_gfn = i << 30;
 		}
@@ -2544,6 +2546,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 	context->root_level = 0;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
+	context->direct_map = true;
 	return 0;
 }
 
@@ -2663,6 +2666,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 	context->root_level = level;
 	context->shadow_root_level = level;
 	context->root_hpa = INVALID_PAGE;
+	context->direct_map = false;
 	return 0;
 }
 
@@ -2687,6 +2691,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 	context->root_level = PT32_ROOT_LEVEL;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->root_hpa = INVALID_PAGE;
+	context->direct_map = false;
 	return 0;
 }
 
@@ -2708,6 +2713,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->invlpg = nonpaging_invlpg;
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
+	context->direct_map = true;
 
 	if (!is_paging(vcpu)) {
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -3060,7 +3066,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	if (tdp_enabled)
+	if (vcpu->arch.mmu.direct_map)
 		return 0;
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);