author		Joerg Roedel <joerg.roedel@amd.com>	2008-02-07 07:47:44 -0500
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 04:53:20 -0400
commit		fb72d1674d860b0c9ef9b66b7f4f01fe5b3d2c00 (patch)
tree		d24d0f67fde02b87263ebfe8c3bde2d0ff3d67e4 /arch/x86/kvm/mmu.c
parent		cc4b6871e771e76dc1de06adb8aed261a1c66be8 (diff)
KVM: MMU: add TDP support to the KVM MMU
This patch contains the changes to the KVM MMU necessary to support the
Nested Paging feature of AMD Barcelona and Phenom processors.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
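
For orientation before the diff: with Nested Paging the hardware walks a
second, host-controlled set of page tables, so the fault address KVM
receives is already a guest-physical address and no guest page-table walk
is needed. The sketch below compresses the new tdp_page_fault() path from
the diff into its essential steps; it is illustrative only (locking,
page refcounting on success, and the memory-cache top-up are omitted), not
a drop-in replacement for the real function.

/* Compressed sketch of the TDP fault path added below -- not real code. */
static int tdp_fault_sketch(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
{
	/* gpa comes straight from hardware; no guest page-table walk */
	struct page *page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return 1;	/* not ordinary RAM: let the caller emulate */
	}

	/* install the gpa -> host-page mapping directly in the TDP tables */
	return __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			    gpa >> PAGE_SHIFT, page, TDP_ROOT_LEVEL);
}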
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	79
1 file changed, 76 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 33cd7c982dd3..f7541fe22cd8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1097,6 +1097,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	int i;
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
+	int metaphysical = 0;
 
 	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
@@ -1105,14 +1106,20 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
 		ASSERT(!VALID_PAGE(root));
+		if (tdp_enabled)
+			metaphysical = 1;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
+				      PT64_ROOT_LEVEL, metaphysical,
+				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.root_hpa = root;
 		return;
 	}
 #endif
+	metaphysical = !is_paging(vcpu);
+	if (tdp_enabled)
+		metaphysical = 1;
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -1126,7 +1133,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-				      PT32_ROOT_LEVEL, !is_paging(vcpu),
+				      PT32_ROOT_LEVEL, metaphysical,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
@@ -1160,6 +1167,36 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 			     error_code & PFERR_WRITE_MASK, gfn);
 }
 
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
+				u32 error_code)
+{
+	struct page *page;
+	int r;
+
+	ASSERT(vcpu);
+	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+	r = mmu_topup_memory_caches(vcpu);
+	if (r)
+		return r;
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	if (is_error_page(page)) {
+		kvm_release_page_clean(page);
+		up_read(&current->mm->mmap_sem);
+		return 1;
+	}
+	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
+	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
+			 gpa >> PAGE_SHIFT, page, TDP_ROOT_LEVEL);
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
+
+	return r;
+}
+
 static void nonpaging_free(struct kvm_vcpu *vcpu)
 {
 	mmu_free_roots(vcpu);
@@ -1253,7 +1290,35 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
 	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
 }
 
-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu *context = &vcpu->arch.mmu;
+
+	context->new_cr3 = nonpaging_new_cr3;
+	context->page_fault = tdp_page_fault;
+	context->free = nonpaging_free;
+	context->prefetch_page = nonpaging_prefetch_page;
+	context->shadow_root_level = TDP_ROOT_LEVEL;
+	context->root_hpa = INVALID_PAGE;
+
+	if (!is_paging(vcpu)) {
+		context->gva_to_gpa = nonpaging_gva_to_gpa;
+		context->root_level = 0;
+	} else if (is_long_mode(vcpu)) {
+		context->gva_to_gpa = paging64_gva_to_gpa;
+		context->root_level = PT64_ROOT_LEVEL;
+	} else if (is_pae(vcpu)) {
+		context->gva_to_gpa = paging64_gva_to_gpa;
+		context->root_level = PT32E_ROOT_LEVEL;
+	} else {
+		context->gva_to_gpa = paging32_gva_to_gpa;
+		context->root_level = PT32_ROOT_LEVEL;
+	}
+
+	return 0;
+}
+
+static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
 	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1268,6 +1333,14 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 		return paging32_init_context(vcpu);
 }
 
+static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+{
+	if (tdp_enabled)
+		return init_kvm_tdp_mmu(vcpu);
+	else
+		return init_kvm_softmmu(vcpu);
+}
+
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
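
A closing note on the mmu_alloc_roots() change above: with TDP the root
shadow page never mirrors a guest page table, so it is always marked
metaphysical; without TDP that is only the case while guest paging is
disabled. The two branches in the diff can be unified in a hypothetical
helper (a sketch, not code from this patch; in the 64-bit root branch the
!is_paging term is always false, since long mode implies paging is on):

/* Sketch of the root-allocation decision taken in mmu_alloc_roots(). */
static int root_is_metaphysical(struct kvm_vcpu *vcpu)
{
	return tdp_enabled || !is_paging(vcpu);
}

Note also that init_kvm_tdp_mmu() still sets root_level and gva_to_gpa
according to the guest's own paging mode: the hardware walks the nested
page table (fixed at TDP_ROOT_LEVEL via shadow_root_level), but KVM still
needs software gva-to-gpa translation, e.g. for instruction emulation.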