author	Avi Kivity <avi@qumranet.com>	2007-01-05 19:36:53 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2007-01-06 02:55:27 -0500
commit	714b93da1a6d97307dfafb9915517879d8a66c0d
tree	619f30567c9e13b79830301023bef58b98b8f433 /drivers/kvm/kvm.h
parent	f51234c2cd3ab8bed836e09686e27877e1b55f2a
[PATCH] KVM: MMU: Replace atomic allocations by preallocated objects
The mmu sometimes needs memory for reverse mapping and parent pte chains.
However, we can't allocate from within the mmu because of the atomic context.
So, move the allocations to a central place that can be executed before the
main mmu machinery, where we can bail out on failure before any damage is
done.
(Error handling is deferred for now, but the basic structure is there.)
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
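
As a rough sketch of the scheme the message describes (fill a fixed-size cache outside the atomic mmu paths, drain it with a simple pop inside them), the helpers below use the kvm_mmu_memory_cache layout added by this patch. The mmu.c side is not part of this diffstat, so the helper names and the GFP flags are illustrative assumptions rather than the code from the commit.

/*
 * Illustrative sketch only: the struct layout comes from this patch, but
 * mmu_topup_memory_cache()/mmu_memory_cache_alloc() are assumed names.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include "kvm.h"

/* Runs before the mmu machinery, where it is still safe to allocate and
 * where a failure can simply be returned to the caller. */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  size_t objsize, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kzalloc(objsize, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;	/* bail out before any damage is done */
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

/* Runs inside the atomic mmu code: never allocates, only pops an object
 * that was preallocated by the topup above. */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);
	return mc->objects[--mc->nobjs];
}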
Diffstat (limited to 'drivers/kvm/kvm.h')
-rw-r--r--	drivers/kvm/kvm.h	16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b24a86e1f434..91e0c75aca8f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -168,6 +168,17 @@ struct kvm_mmu {
 	u64 *pae_root;
 };
 
+#define KVM_NR_MEM_OBJS 20
+
+struct kvm_mmu_memory_cache {
+	int nobjs;
+	void *objects[KVM_NR_MEM_OBJS];
+};
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
 struct kvm_guest_debug {
 	int enabled;
 	unsigned long bp[4];
@@ -239,6 +250,9 @@ struct kvm_vcpu {
 	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
 	struct kvm_mmu mmu;
 
+	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
+	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+
 	gfn_t last_pt_write_gfn;
 	int last_pt_write_count;
 
@@ -381,7 +395,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
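
The two caches added to struct kvm_vcpu suggest one topup call per object type before the fault path runs. A minimal sketch of how they might be filled together, assuming hypothetical object types kvm_pte_chain and kvm_rmap_desc for the parent-pte-chain and rmap entries (neither the function nor the sizes appear in this diff):

/* Sketch: top up both per-vcpu caches in one place so the mmu proper can
 * take pte-chain and rmap-desc objects without allocating in atomic context.
 * The struct names and minimum counts are placeholders, not from this patch. */
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   sizeof(struct kvm_pte_chain), 4);
	if (r)
		return r;
	return mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc), 4);
}

A page-fault handler would call this first and return the error to its caller on -ENOMEM, matching the "bail out on failure before any damage is done" behaviour described in the commit message.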