| author | Avi Kivity <avi@qumranet.com> | 2007-09-14 13:26:06 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-09-14 16:59:55 -0400 |
| commit | 22d95b1282810f5af599ee292b3fc443aefbdad0 (patch) | |
| tree | dafe050aef12ea4bbac4ffb77070d8ec416a0838 /drivers/kvm | |
| parent | 44e3ff32ac229a10a30b7b840f092f5b32a5f72a (diff) | |
KVM: MMU: Fix rare oops on guest context switch
A guest context switch to an uncached cr3 can require allocation of
shadow pages, but we only recycle shadow pages in kvm_mmu_page_fault().
Move shadow page recycling to mmu_topup_memory_caches(), which is called
from both the page fault handler and from guest cr3 reload.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
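
The commit message describes a threshold-based recycling scheme: a cheap check against KVM_MIN_FREE_MMU_PAGES that, when it fires, zaps shadow pages until KVM_REFILL_PAGES are free again, and the fix is to run that check from mmu_topup_memory_caches() so the guest cr3-reload path is covered as well as the page-fault path. The sketch below is a self-contained user-space model of that logic, not the drivers/kvm code itself; the struct, the constant values and main() are assumptions chosen only to make the behaviour concrete.

```c
/*
 * User-space model of the shadow-page recycling this patch relies on.
 * Illustration only: the struct, the numeric constants and main() are
 * assumptions for the example, not the drivers/kvm code.
 */
#include <stdio.h>

#define KVM_MIN_FREE_MMU_PAGES 5	/* below this, recycling kicks in */
#define KVM_REFILL_PAGES       25	/* recycle until this many pages are free */

struct kvm_model {
	int n_free_mmu_pages;		/* models kvm->n_free_mmu_pages */
};

/* Slow path: recycle shadow pages until the refill level is reached
 * (stands in for __kvm_mmu_free_some_pages() / kvm_mmu_zap_page()). */
static void free_some_pages_slow(struct kvm_model *kvm)
{
	while (kvm->n_free_mmu_pages < KVM_REFILL_PAGES)
		kvm->n_free_mmu_pages++;
}

/* Cheap check (the new inline kvm_mmu_free_some_pages() wrapper).  After the
 * patch it runs in mmu_topup_memory_caches(), which both the page fault
 * handler and the guest cr3 reload path call. */
static void free_some_pages(struct kvm_model *kvm)
{
	if (kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)
		free_some_pages_slow(kvm);
}

int main(void)
{
	struct kvm_model kvm = { .n_free_mmu_pages = 2 };

	/* A guest cr3 reload to an uncached root now also triggers recycling,
	 * not only the page fault handler. */
	free_some_pages(&kvm);
	printf("free shadow pages after recycling: %d\n", kvm.n_free_mmu_pages);
	return 0;
}
```

In the real code the slow path frees actual struct kvm_mmu_page objects via kvm_mmu_zap_page(); the counter here only stands in for that bookkeeping.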
Diffstat (limited to 'drivers/kvm')

| -rw-r--r-- | drivers/kvm/kvm.h | 10 |
| -rw-r--r-- | drivers/kvm/mmu.c | 5 |

2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 3ac9cbce3369..336be86c6f5a 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -619,7 +619,7 @@ unsigned long segment_base(u16 selector);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *old, const u8 *new, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
-void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 
@@ -628,11 +628,15 @@ int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
 static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				     u32 error_code)
 {
-	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
-		kvm_mmu_free_some_pages(vcpu);
 	return vcpu->mmu.page_fault(vcpu, gva, error_code);
 }
 
+static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+		__kvm_mmu_free_some_pages(vcpu);
+}
+
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
 	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1a87ba9d5156..23965aa5ee78 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -273,12 +273,14 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	int r;
 
 	r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
+	kvm_mmu_free_some_pages(vcpu);
 	if (r < 0) {
 		spin_unlock(&vcpu->kvm->lock);
 		kvm_arch_ops->vcpu_put(vcpu);
 		r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
 		kvm_arch_ops->vcpu_load(vcpu);
 		spin_lock(&vcpu->kvm->lock);
+		kvm_mmu_free_some_pages(vcpu);
 	}
 	return r;
 }
@@ -1208,7 +1210,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
 }
 
-void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *page;
@@ -1218,7 +1220,6 @@ void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
 
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {