author      Avi Kivity <avi@qumranet.com>    2007-04-15 09:31:09 -0400
committer   Avi Kivity <avi@qumranet.com>    2007-05-03 03:52:29 -0400
commit      b5a33a75720c03d58d8281a72b45ffd214f00ed7 (patch)
tree        7451b196f292eb96caf5173ad3c6459ab9efb1d7 /drivers/kvm/mmu.c
parent      417726a3fbecb2092f1054bbaee87bc442b05ef3 (diff)
KVM: Use slab caches to allocate mmu data structures
Better leak detection, statistics, memory use, speed -- goodness all
around.
Signed-off-by: Avi Kivity <avi@qumranet.com>
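For readers unfamiliar with the slab interface the patch switches to, the sketch below (illustrative names only, not part of the patch) shows the kmem_cache lifecycle it relies on: a dedicated cache per object type gives the slab allocator per-type accounting in /proc/slabinfo, so leaks and memory use can be attributed to kvm_pte_chain and kvm_rmap_desc objects instead of disappearing into the generic kmalloc size classes. The two trailing NULL arguments to kmem_cache_create() are the constructor/destructor pointers of the 2.6.21-era API that this diff itself passes.

/*
 * Illustrative sketch only -- not part of the patch. It shows the
 * kmem_cache lifecycle the patch applies to kvm_pte_chain and
 * kvm_rmap_desc: create once at module init, allocate/free against
 * the named cache, destroy at module exit.
 */
#include <linux/slab.h>
#include <linux/errno.h>

struct example_obj {                    /* hypothetical object type */
        unsigned long payload;
};

static struct kmem_cache *example_cache;

static int example_init(void)
{
        example_cache = kmem_cache_create("example_obj",
                                          sizeof(struct example_obj),
                                          0, 0, NULL, NULL);
        return example_cache ? 0 : -ENOMEM;
}

static struct example_obj *example_alloc(void)
{
        /* zeroed object from the dedicated slab */
        return kmem_cache_zalloc(example_cache, GFP_KERNEL);
}

static void example_free(struct example_obj *obj)
{
        kmem_cache_free(example_cache, obj);
}

static void example_exit(void)
{
        if (example_cache)
                kmem_cache_destroy(example_cache);
}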
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--  drivers/kvm/mmu.c | 39 +++++++++++++++++++++++++++++++++++----
1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 9ff74805c7d1..a368ea8297f3 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -159,6 +159,9 @@ struct kvm_rmap_desc {
         struct kvm_rmap_desc *more;
 };
 
+static struct kmem_cache *pte_chain_cache;
+static struct kmem_cache *rmap_desc_cache;
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
         return vcpu->cr0 & CR0_WP_MASK;
@@ -196,14 +199,14 @@ static int is_rmap_pte(u64 pte)
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-                                  size_t objsize, int min)
+                                  struct kmem_cache *base_cache, int min)
 {
         void *obj;
 
         if (cache->nobjs >= min)
                 return 0;
         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-                obj = kzalloc(objsize, GFP_NOWAIT);
+                obj = kmem_cache_zalloc(base_cache, GFP_NOWAIT);
                 if (!obj)
                         return -ENOMEM;
                 cache->objects[cache->nobjs++] = obj;
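The hunk above only changes where the pre-allocated objects come from; the point of the cache array itself is that it is filled here, where a GFP_NOWAIT failure can be reported cleanly, and consumed later where failing or sleeping is not an option. A minimal sketch of that consuming side, under the assumption that objects are simply popped off the objects[] array (the helper name is illustrative; KVM's own consumer is not shown in this diff):

/*
 * Sketch only -- the consumer is not part of this hunk. After
 * mmu_topup_memory_cache() has filled the array, the shadow-MMU
 * fault path can take a pre-allocated object without calling into
 * the allocator at all.
 */
static void *memory_cache_pop(struct kvm_mmu_memory_cache *mc)
{
        BUG_ON(!mc->nobjs);             /* topup must have succeeded first */
        return mc->objects[--mc->nobjs];
}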
@@ -222,11 +225,11 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
         int r;
 
         r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-                                   sizeof(struct kvm_pte_chain), 4);
+                                   pte_chain_cache, 4);
         if (r)
                 goto out;
         r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-                                   sizeof(struct kvm_rmap_desc), 1);
+                                   rmap_desc_cache, 1);
 out:
         return r;
 }
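The minimums passed here (4 pte chains, 1 rmap descriptor) bound how many objects a single fault can consume before the caches are refilled. The matching release path is not part of this diff; purely as a hedged sketch, with objects now coming from named slabs, draining unused pre-allocated entries would hand them back to the cache they were taken from:

/*
 * Hypothetical sketch, not taken from the patch: drain a per-vcpu
 * memory cache back into the kmem_cache its objects came from.
 */
static void memory_cache_drain(struct kvm_mmu_memory_cache *mc,
                               struct kmem_cache *base_cache)
{
        while (mc->nobjs)
                kmem_cache_free(base_cache, mc->objects[--mc->nobjs]);
}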
@@ -1333,6 +1336,34 @@ void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
         init_kvm_mmu(vcpu);
 }
 
+void kvm_mmu_module_exit(void)
+{
+        if (pte_chain_cache)
+                kmem_cache_destroy(pte_chain_cache);
+        if (rmap_desc_cache)
+                kmem_cache_destroy(rmap_desc_cache);
+}
+
+int kvm_mmu_module_init(void)
+{
+        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
+                                            sizeof(struct kvm_pte_chain),
+                                            0, 0, NULL, NULL);
+        if (!pte_chain_cache)
+                goto nomem;
+        rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
+                                            sizeof(struct kvm_rmap_desc),
+                                            0, 0, NULL, NULL);
+        if (!rmap_desc_cache)
+                goto nomem;
+
+        return 0;
+
+nomem:
+        kvm_mmu_module_exit();
+        return -ENOMEM;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
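The new kvm_mmu_module_init()/kvm_mmu_module_exit() pair has to be called from the module's load and unload paths, which live outside drivers/kvm/mmu.c and are therefore not visible in this diff. A minimal sketch of how a caller might wire them up (the surrounding function names are illustrative, not the actual KVM entry points):

/*
 * Illustrative only -- the real caller is in KVM's main module code,
 * outside the scope of this diff. The point is the ordering: create
 * the slab caches before any vcpu can fault, and destroy them only
 * after everything that might still hold objects is gone.
 */
#include <linux/module.h>
#include <linux/init.h>

static int __init example_module_init(void)
{
        int r;

        r = kvm_mmu_module_init();      /* creates the kvm_pte_chain/kvm_rmap_desc slabs */
        if (r)
                return r;
        /* ... register the rest of the module ... */
        return 0;
}

static void __exit example_module_exit(void)
{
        /* ... tear down VMs and vcpus first ... */
        kvm_mmu_module_exit();          /* destroys the slab caches */
}

module_init(example_module_init);
module_exit(example_module_exit);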