author     Joerg Roedel <joerg.roedel@amd.com>        2010-09-10 11:30:59 -0400
committer  Avi Kivity <avi@redhat.com>                2010-10-24 04:52:42 -0400
commit     651dd37a9ce6fdacdcd75da86619c62111efcbc2 (patch)
tree       62b8c506c076b4c66e1c144d6e831db551c61749 /arch/x86/kvm
parent     d41d1895eb856b5d1c82f3be106b7a3e75e4216b (diff)
KVM: MMU: Refactor mmu_alloc_roots function
This patch factors out the direct-mapping paths of the
mmu_alloc_roots function into a separate function. This
makes it a lot easier to avoid all the unnecessary checks
done in the shadow path, which may break when running direct.
In fact, this patch already fixes a problem when running PAE
guests on a PAE shadow page table.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/mmu.c   82
1 file changed, 60 insertions(+), 22 deletions(-)
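
For orientation before the diff: the patch reduces mmu_alloc_roots to a dispatch on the paging mode. The standalone C sketch below models that control flow only; the kvm_vcpu/mmu state is collapsed into a single direct_map flag and the two allocators are stubs (the mmu_state struct, the printf markers, and main are illustrative inventions, not kernel code), while the dispatch function itself mirrors the new mmu_alloc_roots verbatim.

    #include <stdio.h>

    /* Reduced stand-in for vcpu->arch.mmu; only the field the dispatch reads. */
    struct mmu_state {
            int direct_map; /* non-zero when hardware walks the guest tables (TDP) */
    };

    /* Stub: the real function builds roots not backed by any guest table. */
    static int mmu_alloc_direct_roots(struct mmu_state *mmu)
    {
            printf("direct path: no guest page table to check\n");
            return 0;
    }

    /* Stub: the real function validates guest table roots via mmu_check_root(). */
    static int mmu_alloc_shadow_roots(struct mmu_state *mmu)
    {
            printf("shadow path: root_gfn taken from the guest's CR3\n");
            return 0;
    }

    /* The refactored entry point: a pure dispatch on the paging mode. */
    static int mmu_alloc_roots(struct mmu_state *mmu)
    {
            if (mmu->direct_map)
                    return mmu_alloc_direct_roots(mmu);
            else
                    return mmu_alloc_shadow_roots(mmu);
    }

    int main(void)
    {
            struct mmu_state tdp = { .direct_map = 1 };
            struct mmu_state shadow = { .direct_map = 0 };

            mmu_alloc_roots(&tdp);          /* takes the direct path */
            mmu_alloc_roots(&shadow);       /* takes the shadow path */
            return 0;
    }

Compiled with any C compiler, the two calls print one line per path; in the kernel, this same split is what lets the direct path drop the guest-table checks entirely.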
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a25173a0d8b9..9cd5a717ede5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2357,42 +2357,77 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 	return ret;
 }
 
-static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
+static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *sp;
+	int i;
+
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+		spin_lock(&vcpu->kvm->mmu_lock);
+		kvm_mmu_free_some_pages(vcpu);
+		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
+				      1, ACC_ALL, NULL);
+		++sp->root_count;
+		spin_unlock(&vcpu->kvm->mmu_lock);
+		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
+	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
+		for (i = 0; i < 4; ++i) {
+			hpa_t root = vcpu->arch.mmu.pae_root[i];
+
+			ASSERT(!VALID_PAGE(root));
+			spin_lock(&vcpu->kvm->mmu_lock);
+			kvm_mmu_free_some_pages(vcpu);
+			sp = kvm_mmu_get_page(vcpu, i << 30, i << 30,
+					      PT32_ROOT_LEVEL, 1, ACC_ALL,
+					      NULL);
+			root = __pa(sp->spt);
+			++sp->root_count;
+			spin_unlock(&vcpu->kvm->mmu_lock);
+			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+			vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+		}
+	} else
+		BUG();
+
+	return 0;
+}
+
+static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
-	int direct = 0;
 	u64 pdptr;
 
 	root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
 
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+	if (mmu_check_root(vcpu, root_gfn))
+		return 1;
+
+	/*
+	 * Do we shadow a long mode page table? If so we need to
+	 * write-protect the guests page table root.
+	 */
+	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
 		ASSERT(!VALID_PAGE(root));
-		if (mmu_check_root(vcpu, root_gfn))
-			return 1;
-		if (vcpu->arch.mmu.direct_map) {
-			direct = 1;
-			root_gfn = 0;
-		}
+
 		spin_lock(&vcpu->kvm->mmu_lock);
 		kvm_mmu_free_some_pages(vcpu);
-		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				      PT64_ROOT_LEVEL, direct,
-				      ACC_ALL, NULL);
+		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
+				      0, ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu.root_hpa = root;
 		return 0;
 	}
-	direct = !is_paging(vcpu);
-
-	if (mmu_check_root(vcpu, root_gfn))
-		return 1;
 
+	/*
+	 * We shadow a 32 bit page table. This may be a legacy 2-level
+	 * or a PAE 3-level page table.
+	 */
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -2406,16 +2441,11 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = pdptr >> PAGE_SHIFT;
 			if (mmu_check_root(vcpu, root_gfn))
 				return 1;
-		} else if (vcpu->arch.mmu.root_level == 0)
-			root_gfn = 0;
-		if (vcpu->arch.mmu.direct_map) {
-			direct = 1;
-			root_gfn = i << 30;
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
 		kvm_mmu_free_some_pages(vcpu);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-				      PT32_ROOT_LEVEL, direct,
+				      PT32_ROOT_LEVEL, 0,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
@@ -2427,6 +2457,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.mmu.direct_map)
+		return mmu_alloc_direct_roots(vcpu);
+	else
+		return mmu_alloc_shadow_roots(vcpu);
+}
+
 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
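
A closing note on the i << 30 arithmetic that appears in both new functions: a PAE (3-level) page table has four roots, and each root covers 2^30 bytes = 1 GiB of address space, so a root's base address is simply its index shifted left by 30. A quick standalone check of that arithmetic (an illustrative program, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            /* Each of the four PAE roots (pae_root[0..3]) maps a 1 GiB slice. */
            for (int i = 0; i < 4; ++i) {
                    unsigned long long base = (unsigned long long)i << 30;
                    unsigned long long end  = base + (1ULL << 30) - 1;
                    printf("pae_root[%d]: 0x%08llx - 0x%08llx\n", i, base, end);
            }
            return 0;
    }

Running it prints the four 1 GiB slices, 0x00000000 through 0xffffffff, which is exactly the 4 GiB range the four pae_root entries tile.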