author:    Avi Kivity <avi@qumranet.com>  2007-05-30 05:34:53 -0400
committer: Avi Kivity <avi@qumranet.com>  2007-07-16 05:05:43 -0400
commit:    d3d25b048b9c7e5c1c20918157a71df734f71766
tree:      3e1f2cf029a1c23497067dfdfeffe69838fe6f0f /drivers/kvm/mmu.c
parent:    8d7282036f82244c5a1146a1a7edf03c50d278d9
KVM: MMU: Use slab caches for shadow pages and their headers
Use slab caches instead of a simple custom list.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--  drivers/kvm/mmu.c  64
1 file changed, 39 insertions(+), 25 deletions(-)
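The patch retires the per-vcpu free_pages list in favor of two slab caches plus small per-vcpu object caches that are topped up ahead of time, so that kvm_mmu_alloc_page() can take preallocated objects at a point where failure is not an option. The helpers mmu_topup_memory_cache(), mmu_memory_cache_alloc() and mmu_memory_cache_free() are called in the diff but not defined in it; the following standalone C sketch (hypothetical userspace names, calloc() standing in for the slab allocator) only illustrates the top-up pattern their call sites imply, not the kernel implementation:

#include <assert.h>
#include <stdlib.h>

#define NR_MEM_OBJS 20          /* stand-in for the kernel cache's array bound */

/*
 * Stand-in for struct kvm_mmu_memory_cache as its call sites suggest:
 * a small stack of preallocated objects of one size.
 */
struct memory_cache {
        int nobjs;
        void *objects[NR_MEM_OBJS];
        size_t objsize;
};

/*
 * Fill the cache to at least `min` objects while allocation may still
 * sleep or fail gracefully; mirrors the role of mmu_topup_memory_cache().
 */
static int cache_topup(struct memory_cache *mc, int min)
{
        while (mc->nobjs < min) {
                void *obj = calloc(1, mc->objsize); /* kmem_cache_zalloc() stand-in */
                if (!obj)
                        return -1;
                mc->objects[mc->nobjs++] = obj;
        }
        return 0;
}

/*
 * Pop a preallocated object. This cannot fail as long as the cache was
 * topped up beforehand, which is why init_kvm_mmu() now tops up first;
 * mirrors mmu_memory_cache_alloc().
 */
static void *cache_alloc(struct memory_cache *mc)
{
        assert(mc->nobjs > 0);
        return mc->objects[--mc->nobjs];
}

/* Push an object back for reuse; mirrors mmu_memory_cache_free(). */
static void cache_free(struct memory_cache *mc, void *obj)
{
        assert(mc->nobjs < NR_MEM_OBJS);
        mc->objects[mc->nobjs++] = obj;
}

int main(void)
{
        struct memory_cache mc = { .objsize = 64 };

        if (cache_topup(&mc, 4))        /* the diff tops up with min = 4 */
                return 1;

        void *obj = cache_alloc(&mc);   /* later, where failure is not an option */
        cache_free(&mc, obj);
        return 0;
}
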
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index c85c6649280e..46491b4cd859 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -165,6 +165,8 @@ struct kvm_rmap_desc {
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *mmu_page_cache;
+static struct kmem_cache *mmu_page_header_cache;
 
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
@@ -235,6 +237,14 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
                                   rmap_desc_cache, 1, gfp_flags);
+       if (r)
+               goto out;
+       r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
+                                  mmu_page_cache, 4, gfp_flags);
+       if (r)
+               goto out;
+       r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+                                  mmu_page_header_cache, 4, gfp_flags);
 out:
        return r;
 }
@@ -258,6 +268,8 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
        mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+       mmu_free_memory_cache(&vcpu->mmu_page_cache);
+       mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
 }
 
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -458,7 +470,9 @@ static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *page_head)
 {
        ASSERT(is_empty_shadow_page(page_head->spt));
-       list_move(&page_head->link, &vcpu->free_pages);
+       list_del(&page_head->link);
+       mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
+       mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
        ++vcpu->kvm->n_free_mmu_pages;
 }
 
@@ -472,11 +486,14 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 {
        struct kvm_mmu_page *page;
 
-       if (list_empty(&vcpu->free_pages))
+       if (!vcpu->kvm->n_free_mmu_pages)
                return NULL;
 
-       page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
-       list_move(&page->link, &vcpu->kvm->active_mmu_pages);
+       page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+                                     sizeof *page);
+       page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+       set_page_private(virt_to_page(page->spt), (unsigned long)page);
+       list_add(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->spt));
        page->slot_bitmap = 0;
        page->multimapped = 0;
@@ -1083,6 +1100,7 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 
+       mmu_topup_memory_caches(vcpu);
        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
@@ -1256,13 +1274,6 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu, page);
        }
-       while (!list_empty(&vcpu->free_pages)) {
-               page = list_entry(vcpu->free_pages.next,
-                                 struct kvm_mmu_page, link);
-               list_del(&page->link);
-               free_page((unsigned long)page->spt);
-               page->spt = NULL;
-       }
        free_page((unsigned long)vcpu->mmu.pae_root);
 }
 
@@ -1273,18 +1284,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
        ASSERT(vcpu);
 
-       for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
-               struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
-
-               INIT_LIST_HEAD(&page_header->link);
-               if ((page = alloc_page(GFP_KERNEL)) == NULL)
-                       goto error_1;
-               set_page_private(page, (unsigned long)page_header);
-               page_header->spt = page_address(page);
-               memset(page_header->spt, 0, PAGE_SIZE);
-               list_add(&page_header->link, &vcpu->free_pages);
-               ++vcpu->kvm->n_free_mmu_pages;
-       }
+       vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
 
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
@@ -1309,7 +1309,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
-       ASSERT(list_empty(&vcpu->free_pages));
 
        return alloc_mmu_pages(vcpu);
 }
@@ -1318,7 +1317,6 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
-       ASSERT(!list_empty(&vcpu->free_pages));
 
        return init_kvm_mmu(vcpu);
 }
@@ -1377,6 +1375,10 @@ void kvm_mmu_module_exit(void)
                kmem_cache_destroy(pte_chain_cache);
        if (rmap_desc_cache)
                kmem_cache_destroy(rmap_desc_cache);
+       if (mmu_page_cache)
+               kmem_cache_destroy(mmu_page_cache);
+       if (mmu_page_header_cache)
+               kmem_cache_destroy(mmu_page_header_cache);
 }
 
 int kvm_mmu_module_init(void)
@@ -1392,6 +1394,18 @@ int kvm_mmu_module_init(void)
        if (!rmap_desc_cache)
                goto nomem;
 
+       mmu_page_cache = kmem_cache_create("kvm_mmu_page",
+                                          PAGE_SIZE,
+                                          PAGE_SIZE, 0, NULL, NULL);
+       if (!mmu_page_cache)
+               goto nomem;
+
+       mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
+                                                 sizeof(struct kvm_mmu_page),
+                                                 0, 0, NULL, NULL);
+       if (!mmu_page_header_cache)
+               goto nomem;
+
        return 0;
 
 nomem:
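A note on the cache geometry chosen in kvm_mmu_module_init(): the "kvm_mmu_page" cache is created both PAGE_SIZE-sized and PAGE_SIZE-aligned so each shadow table occupies exactly one page, which is what allows kvm_mmu_alloc_page() to stash a back-pointer to the header via set_page_private(virt_to_page(page->spt), ...); the header cache needs no special alignment. Any later holder of a shadow pte address can then recover the owning header, roughly as in this sketch (not part of this diff; spte_to_page_header() is a hypothetical reconstruction from the set_page_private() call above):

/*
 * Sketch only: recover the kvm_mmu_page header that owns a shadow pte,
 * relying on the PAGE_SIZE alignment of the "kvm_mmu_page" slab and on
 * the back-pointer installed by kvm_mmu_alloc_page() above.
 */
static struct kvm_mmu_page *spte_to_page_header(u64 *sptep)
{
        struct page *pg = virt_to_page(sptep);

        return (struct kvm_mmu_page *)page_private(pg);
}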