author     Avi Kivity <avi@redhat.com>    2010-05-04 05:58:32 -0400
committer  Avi Kivity <avi@redhat.com>    2010-05-19 04:36:35 -0400
commit     8facbbff071ff2b19268d3732e31badc60471e21
tree       1779b2bb158c8e1bf99560fb3fd16647e3e371f7 /arch/x86
parent     cafd66595d92591e4bd25c3904e004fc6f897e2d
KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
On svm, kvm_read_pdptr() may require reading guest memory, which can sleep.
Push the spinlock into mmu_alloc_roots(), and only take it after we've read
the pdptr.
Tested-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
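A condensed sketch of the locking order in kvm_mmu_load() that results from this patch; the call names are taken from the diff below, and the rest of the function body is omitted:

    spin_lock(&vcpu->kvm->mmu_lock);
    kvm_mmu_free_some_pages(vcpu);
    /* drop mmu_lock before mmu_alloc_roots(): reading the guest pdptrs
     * may touch guest memory and therefore sleep */
    spin_unlock(&vcpu->kvm->mmu_lock);

    r = mmu_alloc_roots(vcpu);  /* re-takes mmu_lock internally, but only
                                 * around kvm_mmu_get_page(), i.e. after
                                 * the pdptrs have been read */

    spin_lock(&vcpu->kvm->mmu_lock);
    mmu_sync_roots(vcpu);
    spin_unlock(&vcpu->kvm->mmu_lock);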
Diffstat (limited to 'arch/x86')
 -rw-r--r--   arch/x86/kvm/mmu.c | 7 +++++++
 1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 51eb6d6abd86..de996380ec26 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2065,11 +2065,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                         direct = 1;
                         root_gfn = 0;
                 }
+                spin_lock(&vcpu->kvm->mmu_lock);
                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                       PT64_ROOT_LEVEL, direct,
                                       ACC_ALL, NULL);
                 root = __pa(sp->spt);
                 ++sp->root_count;
+                spin_unlock(&vcpu->kvm->mmu_lock);
                 vcpu->arch.mmu.root_hpa = root;
                 return 0;
         }
@@ -2093,11 +2095,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                         direct = 1;
                         root_gfn = i << 30;
                 }
+                spin_lock(&vcpu->kvm->mmu_lock);
                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                       PT32_ROOT_LEVEL, direct,
                                       ACC_ALL, NULL);
                 root = __pa(sp->spt);
                 ++sp->root_count;
+                spin_unlock(&vcpu->kvm->mmu_lock);
+
                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
         }
         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
@@ -2466,7 +2471,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
                 goto out;
         spin_lock(&vcpu->kvm->mmu_lock);
         kvm_mmu_free_some_pages(vcpu);
+        spin_unlock(&vcpu->kvm->mmu_lock);
         r = mmu_alloc_roots(vcpu);
+        spin_lock(&vcpu->kvm->mmu_lock);
         mmu_sync_roots(vcpu);
         spin_unlock(&vcpu->kvm->mmu_lock);
         if (r)