author	Avi Kivity <avi@qumranet.com>	2007-01-05 19:36:53 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2007-01-06 02:55:27 -0500
commit	714b93da1a6d97307dfafb9915517879d8a66c0d (patch)
tree	619f30567c9e13b79830301023bef58b98b8f433 /drivers/kvm
parent	f51234c2cd3ab8bed836e09686e27877e1b55f2a (diff)
[PATCH] KVM: MMU: Replace atomic allocations by preallocated objects
The mmu sometimes needs memory for reverse mapping and parent pte chains. However, we can't allocate from within the mmu because of the atomic context. So, move the allocations to a central place that can be executed before the main mmu machinery, where we can bail out on failure before any damage is done. (Error handling is deferred for now, but the basic structure is there.)

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/kvm.h	16
-rw-r--r--	drivers/kvm/kvm_main.c	18
-rw-r--r--	drivers/kvm/mmu.c	151
-rw-r--r--	drivers/kvm/paging_tmpl.h	5
4 files changed, 153 insertions(+), 37 deletions(-)
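For readers unfamiliar with the technique the commit message describes: a small fixed-size cache of objects is topped up before entering the MMU's atomic paths, where allocation may still fail safely, and the atomic code then draws objects from the cache without ever having to handle allocation failure. The following is a minimal user-space sketch of that pattern only, written in plain C; the names (obj_cache, cache_topup, cache_alloc) are illustrative and are not the kernel's.

/*
 * Sketch of the preallocate-then-consume pattern, assuming a fixed-size
 * per-context cache.  cache_topup() runs where failure can be reported;
 * cache_alloc() runs on the "atomic" path and cannot fail.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CACHE_OBJS 20

struct obj_cache {
	int nobjs;
	void *objects[NR_CACHE_OBJS];
};

/* Called before the critical path; may fail, caller can bail out cleanly. */
static int cache_topup(struct obj_cache *mc, size_t objsize, int min)
{
	while (mc->nobjs < min) {
		void *obj = calloc(1, objsize);

		if (!obj)
			return -1;	/* no damage done yet */
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

/* Called from the critical path; never fails because topup ran first. */
static void *cache_alloc(struct obj_cache *mc, size_t size)
{
	void *p = mc->objects[--mc->nobjs];

	memset(p, 0, size);
	return p;
}

static void cache_free_all(struct obj_cache *mc)
{
	while (mc->nobjs)
		free(mc->objects[--mc->nobjs]);
}

int main(void)
{
	struct obj_cache cache = { 0 };
	struct { long a, b; } *item;

	if (cache_topup(&cache, sizeof(*item), 4) < 0)
		return 1;	/* error handled outside the hot path */

	item = cache_alloc(&cache, sizeof(*item));	/* cannot fail here */
	printf("got object %p, %d left in cache\n", (void *)item, cache.nobjs);

	free(item);
	cache_free_all(&cache);
	return 0;
}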
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b24a86e1f434..91e0c75aca8f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -168,6 +168,17 @@ struct kvm_mmu {
 	u64 *pae_root;
 };
 
+#define KVM_NR_MEM_OBJS 20
+
+struct kvm_mmu_memory_cache {
+	int nobjs;
+	void *objects[KVM_NR_MEM_OBJS];
+};
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
 struct kvm_guest_debug {
 	int enabled;
 	unsigned long bp[4];
@@ -239,6 +250,9 @@ struct kvm_vcpu {
 	struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
 	struct kvm_mmu mmu;
 
+	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
+	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+
 	gfn_t last_pt_write_gfn;
 	int last_pt_write_count;
 
@@ -381,7 +395,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 2e6bc5659953..6623fecff040 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -702,6 +702,13 @@ out:
 	return r;
 }
 
+static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+{
+	spin_lock(&vcpu->kvm->lock);
+	kvm_mmu_slot_remove_write_access(vcpu, slot);
+	spin_unlock(&vcpu->kvm->lock);
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -711,6 +718,7 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	int n;
+	int cleared;
 	unsigned long any = 0;
 
 	spin_lock(&kvm->lock);
@@ -741,15 +749,17 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
 
 
 	if (any) {
-		spin_lock(&kvm->lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->lock);
-		memset(memslot->dirty_bitmap, 0, n);
+		cleared = 0;
 		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 			struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
 
 			if (!vcpu)
 				continue;
+			if (!cleared) {
+				do_remove_write_access(vcpu, log->slot);
+				memset(memslot->dirty_bitmap, 0, n);
+				cleared = 1;
+			}
 			kvm_arch_ops->tlb_flush(vcpu);
 			vcpu_put(vcpu);
 		}
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 0bd2a19709ce..e96362aa7947 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -166,6 +166,84 @@ static int is_rmap_pte(u64 pte)
 		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
 }
 
+static void mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+				   size_t objsize, int min)
+{
+	void *obj;
+
+	if (cache->nobjs >= min)
+		return;
+	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+		obj = kzalloc(objsize, GFP_NOWAIT);
+		if (!obj)
+			BUG();
+		cache->objects[cache->nobjs++] = obj;
+	}
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs)
+		kfree(mc->objects[--mc->nobjs]);
+}
+
+static void mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+{
+	mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+			       sizeof(struct kvm_pte_chain), 4);
+	mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+			       sizeof(struct kvm_rmap_desc), 1);
+}
+
+static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
+	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
+				    size_t size)
+{
+	void *p;
+
+	BUG_ON(!mc->nobjs);
+	p = mc->objects[--mc->nobjs];
+	memset(p, 0, size);
+	return p;
+}
+
+static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
+{
+	if (mc->nobjs < KVM_NR_MEM_OBJS)
+		mc->objects[mc->nobjs++] = obj;
+	else
+		kfree(obj);
+}
+
+static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
+{
+	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+				      sizeof(struct kvm_pte_chain));
+}
+
+static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
+			       struct kvm_pte_chain *pc)
+{
+	mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
+}
+
+static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
+{
+	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+				      sizeof(struct kvm_rmap_desc));
+}
+
+static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
+			       struct kvm_rmap_desc *rd)
+{
+	mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
+}
+
 /*
  * Reverse mapping data structures:
  *
@@ -175,7 +253,7 @@ static int is_rmap_pte(u64 pte)
  * If page->private bit zero is one, (then page->private & ~1) points
  * to a struct kvm_rmap_desc containing more mappings.
  */
-static void rmap_add(struct kvm *kvm, u64 *spte)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
 {
 	struct page *page;
 	struct kvm_rmap_desc *desc;
@@ -189,9 +267,7 @@ static void rmap_add(struct kvm *kvm, u64 *spte)
 		page->private = (unsigned long)spte;
 	} else if (!(page->private & 1)) {
 		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
-		desc = kzalloc(sizeof *desc, GFP_NOWAIT);
-		if (!desc)
-			BUG(); /* FIXME: return error */
+		desc = mmu_alloc_rmap_desc(vcpu);
 		desc->shadow_ptes[0] = (u64 *)page->private;
 		desc->shadow_ptes[1] = spte;
 		page->private = (unsigned long)desc | 1;
@@ -201,9 +277,7 @@ static void rmap_add(struct kvm *kvm, u64 *spte)
 		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
 			desc = desc->more;
 		if (desc->shadow_ptes[RMAP_EXT-1]) {
-			desc->more = kzalloc(sizeof *desc->more, GFP_NOWAIT);
-			if (!desc->more)
-				BUG(); /* FIXME: return error */
+			desc->more = mmu_alloc_rmap_desc(vcpu);
 			desc = desc->more;
 		}
 		for (i = 0; desc->shadow_ptes[i]; ++i)
@@ -212,7 +286,8 @@ static void rmap_add(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void rmap_desc_remove_entry(struct page *page,
+static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
+				   struct page *page,
 				   struct kvm_rmap_desc *desc,
 				   int i,
 				   struct kvm_rmap_desc *prev_desc)
@@ -232,10 +307,10 @@ static void rmap_desc_remove_entry(struct page *page,
 		prev_desc->more = desc->more;
 	else
 		page->private = (unsigned long)desc->more | 1;
-	kfree(desc);
+	mmu_free_rmap_desc(vcpu, desc);
 }
 
-static void rmap_remove(struct kvm *kvm, u64 *spte)
+static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
 {
 	struct page *page;
 	struct kvm_rmap_desc *desc;
@@ -263,7 +338,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	while (desc) {
 		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
 			if (desc->shadow_ptes[i] == spte) {
-				rmap_desc_remove_entry(page, desc, i,
+				rmap_desc_remove_entry(vcpu, page,
+						       desc, i,
 						       prev_desc);
 				return;
 			}
@@ -274,8 +350,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void rmap_write_protect(struct kvm *kvm, u64 gfn)
+static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct page *page;
 	struct kvm_memory_slot *slot;
 	struct kvm_rmap_desc *desc;
@@ -298,7 +375,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		rmap_remove(kvm, spte);
+		rmap_remove(vcpu, spte);
 		*spte &= ~(u64)PT_WRITABLE_MASK;
 	}
 }
@@ -354,7 +431,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	return page;
 }
 
-static void mmu_page_add_parent_pte(struct kvm_mmu_page *page, u64 *parent_pte)
+static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
+				    struct kvm_mmu_page *page, u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
 	struct hlist_node *node;
@@ -370,8 +448,7 @@ static void mmu_page_add_parent_pte(struct kvm_mmu_page *page, u64 *parent_pte)
 		return;
 	}
 	page->multimapped = 1;
-	pte_chain = kzalloc(sizeof(struct kvm_pte_chain), GFP_NOWAIT);
-	BUG_ON(!pte_chain);
+	pte_chain = mmu_alloc_pte_chain(vcpu);
 	INIT_HLIST_HEAD(&page->parent_ptes);
 	hlist_add_head(&pte_chain->link, &page->parent_ptes);
 	pte_chain->parent_ptes[0] = old;
@@ -385,13 +462,14 @@ static void mmu_page_add_parent_pte(struct kvm_mmu_page *page, u64 *parent_pte)
 			return;
 		}
 	}
-	pte_chain = kzalloc(sizeof(struct kvm_pte_chain), GFP_NOWAIT);
+	pte_chain = mmu_alloc_pte_chain(vcpu);
 	BUG_ON(!pte_chain);
 	hlist_add_head(&pte_chain->link, &page->parent_ptes);
 	pte_chain->parent_ptes[0] = parent_pte;
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
+				       struct kvm_mmu_page *page,
 				       u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
@@ -418,7 +496,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 		pte_chain->parent_ptes[i] = NULL;
 		if (i == 0) {
 			hlist_del(&pte_chain->link);
-			kfree(pte_chain);
+			mmu_free_pte_chain(vcpu, pte_chain);
 			if (hlist_empty(&page->parent_ptes)) {
 				page->multimapped = 0;
 				page->parent_pte = NULL;
@@ -478,7 +556,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	bucket = &vcpu->kvm->mmu_page_hash[index];
 	hlist_for_each_entry(page, node, bucket, hash_link)
 		if (page->gfn == gfn && page->role.word == role.word) {
-			mmu_page_add_parent_pte(page, parent_pte);
+			mmu_page_add_parent_pte(vcpu, page, parent_pte);
 			pgprintk("%s: found\n", __FUNCTION__);
 			return page;
 		}
@@ -490,7 +568,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	page->role = role;
 	hlist_add_head(&page->hash_link, bucket);
 	if (!metaphysical)
-		rmap_write_protect(vcpu->kvm, gfn);
+		rmap_write_protect(vcpu, gfn);
 	return page;
 }
 
@@ -506,7 +584,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 			if (pt[i] & PT_PRESENT_MASK)
-				rmap_remove(vcpu->kvm, &pt[i]);
+				rmap_remove(vcpu, &pt[i]);
 			pt[i] = 0;
 		}
 		return;
@@ -519,7 +597,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 		if (!(ent & PT_PRESENT_MASK))
 			continue;
 		ent &= PT64_BASE_ADDR_MASK;
-		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
+		mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
 	}
 }
 
@@ -527,7 +605,7 @@ static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
 			     struct kvm_mmu_page *page,
 			     u64 *parent_pte)
 {
-	mmu_page_remove_parent_pte(page, parent_pte);
+	mmu_page_remove_parent_pte(vcpu, page, parent_pte);
 }
 
 static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
@@ -644,7 +722,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 			page_header_update_slot(vcpu->kvm, table, v);
 			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
-			rmap_add(vcpu->kvm, &table[index]);
+			rmap_add(vcpu, &table[index]);
 			return 0;
 		}
 
@@ -747,6 +825,8 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	gpa_t addr = gva;
 	hpa_t paddr;
 
+	mmu_topup_memory_caches(vcpu);
+
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
@@ -845,7 +925,7 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
 	mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
 
 	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
-	rmap_add(vcpu->kvm, shadow_pte);
+	rmap_add(vcpu, shadow_pte);
 }
 
 static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -966,8 +1046,15 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
 {
+	int r;
+
 	destroy_kvm_mmu(vcpu);
-	return init_kvm_mmu(vcpu);
+	r = init_kvm_mmu(vcpu);
+	if (r < 0)
+		goto out;
+	mmu_topup_memory_caches(vcpu);
+out:
+	return r;
 }
 
 void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
@@ -1030,10 +1117,10 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 		pte = *spte;
 		if (is_present_pte(pte)) {
 			if (level == PT_PAGE_TABLE_LEVEL)
-				rmap_remove(vcpu->kvm, spte);
+				rmap_remove(vcpu, spte);
 			else {
 				child = page_header(pte & PT64_BASE_ADDR_MASK);
-				mmu_page_remove_parent_pte(child, spte);
+				mmu_page_remove_parent_pte(vcpu, child, spte);
 			}
 		}
 		*spte = 0;
@@ -1145,10 +1232,12 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 
 	destroy_kvm_mmu(vcpu);
 	free_mmu_pages(vcpu);
+	mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_page *page;
 
 	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
@@ -1162,7 +1251,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (pt[i] & PT_WRITABLE_MASK) {
-				rmap_remove(kvm, &pt[i]);
+				rmap_remove(vcpu, &pt[i]);
 				pt[i] &= ~PT_WRITABLE_MASK;
 			}
 	}
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 6acb16ea5ce2..4e6670ff1847 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -323,7 +323,7 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
 	mark_page_dirty(vcpu->kvm, gfn);
 	*shadow_ent |= PT_WRITABLE_MASK;
 	*guest_ent |= PT_DIRTY_MASK;
-	rmap_add(vcpu->kvm, shadow_ent);
+	rmap_add(vcpu, shadow_ent);
 
 	return 1;
 }
@@ -353,6 +353,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	int write_pt = 0;
 
 	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
+
+	mmu_topup_memory_caches(vcpu);
+
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */