author		Joerg Roedel <joerg.roedel@amd.com>	2010-09-10 11:31:00 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:52:43 -0400
commit		81407ca553c0c852b8cd3f38f3ec362d307f829b (patch)
tree		d84b98769b9c14a2cb33d318fac7028bbb4e18eb /arch/x86/kvm/mmu.c
parent		651dd37a9ce6fdacdcd75da86619c62111efcbc2 (diff)
KVM: MMU: Allow long mode shadows for legacy page tables
Currently the KVM softmmu implementation cannot shadow a 32
bit legacy or PAE page table with a long mode page table.
This is a required feature for nested paging emulation
because the nested page table must always be in host format.
So this patch implements the missing pieces to allow long
mode shadow page tables for legacy guest page table types.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
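
The resulting root layout, as a minimal user-space sketch (fake_pa() and the
page-sized arrays are stand-ins for the kernel's __pa() and page allocator,
not KVM code): the existing four-entry pae_root keeps serving as a PDPT, and
a single PML4 page gains one entry pointing at it, so a 4-level hardware walk
over a 32 bit guest address reaches the same entries a 3-level PAE walk would.

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK (1ULL << 0)

/* Stand-in for the kernel's __pa(): here the "physical address" is just
 * the pointer value, which is enough to show the linkage. */
static uint64_t fake_pa(const void *p)
{
	return (uint64_t)(uintptr_t)p;
}

int main(void)
{
	uint64_t pae_root[4] = { 0 };  /* PDPT: four 1 GiB slots of guest VA */
	static uint64_t lm_root[512];  /* PML4: only entry 0 is ever used */

	/* Entry 0 covers virtual addresses below 512 GiB, i.e. every 32 bit
	 * guest address; the kernel additionally ORs in the accessed,
	 * writable and user bits via pm_mask. */
	lm_root[0] = fake_pa(pae_root) | PT_PRESENT_MASK;

	printf("root_hpa -> lm_root %#llx\n", (unsigned long long)fake_pa(lm_root));
	printf("lm_root[0] -> pae_root %#llx\n", (unsigned long long)lm_root[0]);
	return 0;
}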
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	60
1 file changed, 52 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9cd5a717ede5..dd76765310ce 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1504,6 +1504,12 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
 	iterator->addr = addr;
 	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
 	iterator->level = vcpu->arch.mmu.shadow_root_level;
+
+	if (iterator->level == PT64_ROOT_LEVEL &&
+	    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
+	    !vcpu->arch.mmu.direct_map)
+		--iterator->level;
+
 	if (iterator->level == PT32E_ROOT_LEVEL) {
 		iterator->shadow_addr
 			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
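
What the new check in shadow_walk_init() amounts to, as a standalone sketch
(walk_start_level() is a hypothetical helper; the level constant mirrors the
kernel's value): when a 4-level shadow backs a legacy 2- or 3-level guest and
the MMU is not in direct mode, the walk skips the synthetic PML4 level and
starts at the PDPT kept in pae_root.

#include <stdbool.h>
#include <stdio.h>

#define PT64_ROOT_LEVEL 4

/* walk_start_level() is a hypothetical helper mirroring the new check. */
static int walk_start_level(int shadow_root_level, int root_level,
			    bool direct_map)
{
	int level = shadow_root_level;

	/* A long mode shadow over a legacy guest has no guest PML4 to
	 * mirror: the walk starts at the PDPT kept in pae_root. */
	if (level == PT64_ROOT_LEVEL && root_level < PT64_ROOT_LEVEL &&
	    !direct_map)
		--level;
	return level;
}

int main(void)
{
	printf("%d\n", walk_start_level(4, 2, false)); /* 3: legacy 32 bit guest */
	printf("%d\n", walk_start_level(4, 3, false)); /* 3: PAE guest */
	printf("%d\n", walk_start_level(4, 4, false)); /* 4: long mode guest */
	printf("%d\n", walk_start_level(4, 2, true));  /* 4: direct map (TDP) */
	return 0;
}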
@@ -2314,7 +2320,9 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
+	    (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
+	     vcpu->arch.mmu.direct_map)) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
 		sp = page_header(root);
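
The widened condition in mmu_free_roots() encodes one question: does root_hpa
point at a single top-level shadow page, or at the four-entry pae_root array?
A sketch of that predicate (root_is_single_page() is a made-up name, not a
KVM function): with a 4-level shadow over a legacy guest the usable roots
still live in pae_root, so the function must take the per-entry path; the new
lm_root page itself is not a shadow page and is freed separately in
free_mmu_pages() (last hunk below).

#include <stdbool.h>
#include <stdio.h>

#define PT64_ROOT_LEVEL 4

/* root_is_single_page() is a made-up name for the widened condition. */
static bool root_is_single_page(int shadow_root_level, int root_level,
				bool direct_map)
{
	return shadow_root_level == PT64_ROOT_LEVEL &&
	       (root_level == PT64_ROOT_LEVEL || direct_map);
}

int main(void)
{
	/* 4-level shadow over a 4-level guest, or TDP: free one root page. */
	printf("%d\n", root_is_single_page(4, 4, false)); /* 1 */
	printf("%d\n", root_is_single_page(4, 2, true));  /* 1 */
	/* 4-level shadow over a legacy guest: the usable roots live in
	 * pae_root, so take the four-entry path instead. */
	printf("%d\n", root_is_single_page(4, 2, false)); /* 0 */
	return 0;
}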
@@ -2394,10 +2402,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
-	int i;
-	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
-	u64 pdptr;
+	u64 pdptr, pm_mask;
+	gfn_t root_gfn;
+	int i;
 
 	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
 
@@ -2426,8 +2434,13 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 
 	/*
 	 * We shadow a 32 bit page table. This may be a legacy 2-level
-	 * or a PAE 3-level page table.
+	 * or a PAE 3-level page table. In either case we need to be aware that
+	 * the shadow page table may be a PAE or a long mode page table.
 	 */
+	pm_mask = PT_PRESENT_MASK;
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
+		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
+
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
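
Why pm_mask exists: PAE PDPTEs carry little more than the present bit (the
access-control bits are reserved there), but once the same pae_root entries
sit below a long mode PML4 the hardware treats them as ordinary PDPTEs that
take part in permission checks, so they must also grant write and user access
and look accessed. A small sketch of the mask composition (bit positions per
the x86 page-table format; the PT_* names mirror the kernel's):

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << 1)
#define PT_USER_MASK     (1ULL << 2)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT64_ROOT_LEVEL  4

static uint64_t root_entry_mask(int shadow_root_level)
{
	uint64_t pm_mask = PT_PRESENT_MASK;

	/* Long mode PML4Es/PDPTEs take part in permission checks, so the
	 * synthetic entries must grant everything and look accessed. */
	if (shadow_root_level == PT64_ROOT_LEVEL)
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
	return pm_mask;
}

int main(void)
{
	printf("PAE shadow root entries:       %#llx\n",
	       (unsigned long long)root_entry_mask(3)); /* 0x1 */
	printf("long mode shadow root entries: %#llx\n",
	       (unsigned long long)root_entry_mask(4)); /* 0x27 */
	return 0;
}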
@@ -2451,9 +2464,35 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 
-		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
+		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 	}
-	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+
+	/*
+	 * If we shadow a 32 bit page table with a long mode page
+	 * table we enter this path.
+	 */
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		if (vcpu->arch.mmu.lm_root == NULL) {
+			/*
+			 * The additional page necessary for this is only
+			 * allocated on demand.
+			 */
+
+			u64 *lm_root;
+
+			lm_root = (void*)get_zeroed_page(GFP_KERNEL);
+			if (lm_root == NULL)
+				return 1;
+
+			lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
+
+			vcpu->arch.mmu.lm_root = lm_root;
+		}
+
+		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
+	}
+
 	return 0;
 }
 
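
The on-demand allocation above is a plain allocate-once pattern: the extra
page only exists for vcpus that actually shadow a legacy guest from a long
mode host, it is created at most once, and it lives until free_mmu_pages()
(see the last hunk). A user-space sketch of that pattern (struct mmu_state
and lazy_lm_root() are hypothetical names, and calloc() stands in for
get_zeroed_page(GFP_KERNEL)):

#include <stdint.h>
#include <stdlib.h>

struct mmu_state {
	uint64_t *lm_root;    /* stays NULL until first needed */
	uint64_t pae_root[4];
	uint64_t pm_mask;
};

static int lazy_lm_root(struct mmu_state *mmu)
{
	if (mmu->lm_root == NULL) {
		uint64_t *lm_root = calloc(512, sizeof(*lm_root));

		if (lm_root == NULL)
			return 1;  /* as in the patch: report failure */
		lm_root[0] = (uint64_t)(uintptr_t)mmu->pae_root | mmu->pm_mask;
		mmu->lm_root = lm_root;  /* reused on every later call */
	}
	return 0;
}

int main(void)
{
	struct mmu_state mmu = { .pm_mask = 0x27 /* P|W|U|A */ };

	/* The first call allocates the page, the second just reuses it;
	 * the single counterpart free mirrors free_mmu_pages(). */
	if (lazy_lm_root(&mmu) || lazy_lm_root(&mmu))
		return 1;
	free(mmu.lm_root);
	return 0;
}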
@@ -2470,9 +2509,12 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_mmu_page *sp;
 
+	if (vcpu->arch.mmu.direct_map)
+		return;
+
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 		sp = page_header(root);
 		mmu_sync_children(vcpu, sp);
@@ -3253,6 +3295,8 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	free_page((unsigned long)vcpu->arch.mmu.pae_root);
+	if (vcpu->arch.mmu.lm_root != NULL)
+		free_page((unsigned long)vcpu->arch.mmu.lm_root);
 }
 
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)