author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2011-05-15 11:26:20 -0400
committer  Avi Kivity <avi@redhat.com>                    2011-07-12 04:45:06 -0400
commit     53c07b18787d564a105e1aa678795d67eeb27447 (patch)
tree       2ad7226301479debf5d5b921bcc48d48b53f2778 /arch/x86/kvm/mmu.c
parent     1249b96e72533ffdb2fa25b5d7471918b065ccc8 (diff)
KVM: MMU: abstract the operation of rmap
Abstract the rmap operations into a generic spte list (pte_list), so that
the same machinery can be reused for the reverse mapping of parent ptes in
a later patch.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
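
For illustration, the core of this patch is a tagged-pointer encoding: an
empty list is 0, a one-entry list stores the spte pointer directly (bit 0
clear), and a longer list sets bit 0 and points to a chain of pte_list_desc
blocks. The standalone C sketch below demonstrates only that encoding; the
names mirror the patch, but the calloc-based allocation and the main()
harness are stand-ins for KVM's per-vcpu memory caches, not code from this
commit.

/* pte_list_sketch.c -- illustrative encoding only, not KVM code */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PTE_LIST_EXT 4

struct pte_list_desc {
	uint64_t *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

/* Add an spte to a list encoded as: 0 = empty, bit 0 clear = single
 * spte pointer, bit 0 set = pointer to a pte_list_desc chain. */
static void pte_list_add(unsigned long *pte_list, uint64_t *spte)
{
	struct pte_list_desc *desc;
	int i;

	if (!*pte_list) {				/* 0 -> 1 */
		*pte_list = (unsigned long)spte;
		return;
	}
	if (!(*pte_list & 1)) {				/* 1 -> many */
		desc = calloc(1, sizeof(*desc));
		desc->sptes[0] = (uint64_t *)*pte_list;
		desc->sptes[1] = spte;
		*pte_list = (unsigned long)desc | 1;
		return;
	}
	/* many -> many: walk to the first desc with a free slot */
	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	while (desc->sptes[PTE_LIST_EXT - 1] && desc->more)
		desc = desc->more;
	if (desc->sptes[PTE_LIST_EXT - 1]) {
		desc->more = calloc(1, sizeof(*desc));
		desc = desc->more;
	}
	for (i = 0; desc->sptes[i]; ++i)
		;
	desc->sptes[i] = spte;
}

int main(void)
{
	unsigned long pte_list = 0;
	uint64_t sptes[6];
	int i;

	for (i = 0; i < 6; i++)
		pte_list_add(&pte_list, &sptes[i]);
	printf("encoding: %s\n", (pte_list & 1) ? "desc chain" : "single spte");
	return 0;
}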
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  260
1 file changed, 139 insertions(+), 121 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b4ae7afa6b3b..a6811cbdbf0d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -148,7 +148,7 @@ module_param(oos_shadow, bool, 0644);
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
 			| PT64_NX_MASK)
 
-#define RMAP_EXT 4
+#define PTE_LIST_EXT 4
 
 #define ACC_EXEC_MASK    1
 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
@@ -164,9 +164,9 @@ module_param(oos_shadow, bool, 0644);
 
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
-struct kvm_rmap_desc {
-	u64 *sptes[RMAP_EXT];
-	struct kvm_rmap_desc *more;
+struct pte_list_desc {
+	u64 *sptes[PTE_LIST_EXT];
+	struct pte_list_desc *more;
 };
 
 struct kvm_shadow_walk_iterator {
@@ -185,7 +185,7 @@ struct kvm_shadow_walk_iterator {
 typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
 
 static struct kmem_cache *pte_chain_cache;
-static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *pte_list_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
 static struct percpu_counter kvm_total_used_mmu_pages;
 
@@ -401,8 +401,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 				   pte_chain_cache, 4);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
-				   rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
+	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+				   pte_list_desc_cache, 4 + PTE_PREFETCH_NUM);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
@@ -416,8 +416,10 @@ out:
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
-	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
+				pte_chain_cache);
+	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+				pte_list_desc_cache);
 	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
 	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
 				mmu_page_header_cache);
@@ -444,15 +446,15 @@ static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 	kmem_cache_free(pte_chain_cache, pc);
 }
 
-static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
+static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
-	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
-				      sizeof(struct kvm_rmap_desc));
+	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache,
+				      sizeof(struct pte_list_desc));
 }
 
-static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
+static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
 {
-	kmem_cache_free(rmap_desc_cache, rd);
+	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
 }
 
 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
@@ -590,67 +592,42 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 }
 
 /*
- * Take gfn and return the reverse mapping to it.
- */
-
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
-{
-	struct kvm_memory_slot *slot;
-	struct kvm_lpage_info *linfo;
-
-	slot = gfn_to_memslot(kvm, gfn);
-	if (likely(level == PT_PAGE_TABLE_LEVEL))
-		return &slot->rmap[gfn - slot->base_gfn];
-
-	linfo = lpage_info_slot(gfn, slot, level);
-
-	return &linfo->rmap_pde;
-}
-
-/*
- * Reverse mapping data structures:
+ * Pte mapping structures:
  *
- * If rmapp bit zero is zero, then rmapp point to the shadw page table entry
- * that points to page_address(page).
+ * If pte_list bit zero is zero, then pte_list point to the spte.
  *
- * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
- * containing more mappings.
+ * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
+ * pte_list_desc containing more mappings.
  *
- * Returns the number of rmap entries before the spte was added or zero if
+ * Returns the number of pte entries before the spte was added or zero if
  * the spte was not added.
  *
  */
-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
+			unsigned long *pte_list)
 {
-	struct kvm_mmu_page *sp;
-	struct kvm_rmap_desc *desc;
-	unsigned long *rmapp;
+	struct pte_list_desc *desc;
 	int i, count = 0;
 
-	if (!is_rmap_spte(*spte))
-		return count;
-	sp = page_header(__pa(spte));
-	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
-	if (!*rmapp) {
-		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
-		*rmapp = (unsigned long)spte;
-	} else if (!(*rmapp & 1)) {
-		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
-		desc = mmu_alloc_rmap_desc(vcpu);
-		desc->sptes[0] = (u64 *)*rmapp;
+	if (!*pte_list) {
+		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
+		*pte_list = (unsigned long)spte;
+	} else if (!(*pte_list & 1)) {
+		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
+		desc = mmu_alloc_pte_list_desc(vcpu);
+		desc->sptes[0] = (u64 *)*pte_list;
 		desc->sptes[1] = spte;
-		*rmapp = (unsigned long)desc | 1;
+		*pte_list = (unsigned long)desc | 1;
 		++count;
 	} else {
-		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
-		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-		while (desc->sptes[RMAP_EXT-1] && desc->more) {
+		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
+		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
 			desc = desc->more;
-			count += RMAP_EXT;
+			count += PTE_LIST_EXT;
 		}
-		if (desc->sptes[RMAP_EXT-1]) {
-			desc->more = mmu_alloc_rmap_desc(vcpu);
+		if (desc->sptes[PTE_LIST_EXT-1]) {
+			desc->more = mmu_alloc_pte_list_desc(vcpu);
 			desc = desc->more;
 		}
 		for (i = 0; desc->sptes[i]; ++i)
@@ -660,59 +637,78 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	return count;
 }
 
-static void rmap_desc_remove_entry(unsigned long *rmapp,
-				   struct kvm_rmap_desc *desc,
-				   int i,
-				   struct kvm_rmap_desc *prev_desc)
+static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
+{
+	struct pte_list_desc *desc;
+	u64 *prev_spte;
+	int i;
+
+	if (!*pte_list)
+		return NULL;
+	else if (!(*pte_list & 1)) {
+		if (!spte)
+			return (u64 *)*pte_list;
+		return NULL;
+	}
+	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+	prev_spte = NULL;
+	while (desc) {
+		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
+			if (prev_spte == spte)
+				return desc->sptes[i];
+			prev_spte = desc->sptes[i];
+		}
+		desc = desc->more;
+	}
+	return NULL;
+}
+
+static void
+pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
+			   int i, struct pte_list_desc *prev_desc)
 {
 	int j;
 
-	for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
+	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
 		;
 	desc->sptes[i] = desc->sptes[j];
 	desc->sptes[j] = NULL;
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		*rmapp = (unsigned long)desc->sptes[0];
+		*pte_list = (unsigned long)desc->sptes[0];
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
 		else
-			*rmapp = (unsigned long)desc->more | 1;
-	mmu_free_rmap_desc(desc);
+			*pte_list = (unsigned long)desc->more | 1;
+	mmu_free_pte_list_desc(desc);
 }
 
-static void rmap_remove(struct kvm *kvm, u64 *spte)
+static void pte_list_remove(u64 *spte, unsigned long *pte_list)
 {
-	struct kvm_rmap_desc *desc;
-	struct kvm_rmap_desc *prev_desc;
-	struct kvm_mmu_page *sp;
-	gfn_t gfn;
-	unsigned long *rmapp;
+	struct pte_list_desc *desc;
+	struct pte_list_desc *prev_desc;
 	int i;
 
-	sp = page_header(__pa(spte));
-	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
-	if (!*rmapp) {
-		printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
+	if (!*pte_list) {
+		printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
 		BUG();
-	} else if (!(*rmapp & 1)) {
-		rmap_printk("rmap_remove: %p 1->0\n", spte);
-		if ((u64 *)*rmapp != spte) {
-			printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte);
+	} else if (!(*pte_list & 1)) {
+		rmap_printk("pte_list_remove: %p 1->0\n", spte);
+		if ((u64 *)*pte_list != spte) {
+			printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
 			BUG();
 		}
-		*rmapp = 0;
+		*pte_list = 0;
 	} else {
-		rmap_printk("rmap_remove: %p many->many\n", spte);
-		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
+		rmap_printk("pte_list_remove: %p many->many\n", spte);
+		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
 		prev_desc = NULL;
 		while (desc) {
-			for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
+			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
 				if (desc->sptes[i] == spte) {
-					rmap_desc_remove_entry(rmapp,
+					pte_list_desc_remove_entry(pte_list,
 							       desc, i,
 							       prev_desc);
 					return;
@@ -720,11 +716,59 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 			prev_desc = desc;
 			desc = desc->more;
 		}
-		pr_err("rmap_remove: %p many->many\n", spte);
+		pr_err("pte_list_remove: %p many->many\n", spte);
 		BUG();
 	}
 }
 
+/*
+ * Take gfn and return the reverse mapping to it.
+ */
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+{
+	struct kvm_memory_slot *slot;
+	struct kvm_lpage_info *linfo;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	if (likely(level == PT_PAGE_TABLE_LEVEL))
+		return &slot->rmap[gfn - slot->base_gfn];
+
+	linfo = lpage_info_slot(gfn, slot, level);
+
+	return &linfo->rmap_pde;
+}
+
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+{
+	struct kvm_mmu_page *sp;
+	unsigned long *rmapp;
+
+	if (!is_rmap_spte(*spte))
+		return 0;
+
+	sp = page_header(__pa(spte));
+	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	return pte_list_add(vcpu, spte, rmapp);
+}
+
+static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
+{
+	return pte_list_next(rmapp, spte);
+}
+
+static void rmap_remove(struct kvm *kvm, u64 *spte)
+{
+	struct kvm_mmu_page *sp;
+	gfn_t gfn;
+	unsigned long *rmapp;
+
+	sp = page_header(__pa(spte));
+	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
+	pte_list_remove(spte, rmapp);
+}
+
 static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
@@ -752,32 +796,6 @@ static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 	rmap_remove(kvm, sptep);
 }
 
-static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
-{
-	struct kvm_rmap_desc *desc;
-	u64 *prev_spte;
-	int i;
-
-	if (!*rmapp)
-		return NULL;
-	else if (!(*rmapp & 1)) {
-		if (!spte)
-			return (u64 *)*rmapp;
-		return NULL;
-	}
-	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-	prev_spte = NULL;
-	while (desc) {
-		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
-			if (prev_spte == spte)
-				return desc->sptes[i];
-			prev_spte = desc->sptes[i];
-		}
-		desc = desc->more;
-	}
-	return NULL;
-}
-
 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	unsigned long *rmapp;
@@ -3601,8 +3619,8 @@ static void mmu_destroy_caches(void)
 {
 	if (pte_chain_cache)
 		kmem_cache_destroy(pte_chain_cache);
-	if (rmap_desc_cache)
-		kmem_cache_destroy(rmap_desc_cache);
+	if (pte_list_desc_cache)
+		kmem_cache_destroy(pte_list_desc_cache);
 	if (mmu_page_header_cache)
 		kmem_cache_destroy(mmu_page_header_cache);
 }
@@ -3614,10 +3632,10 @@ int kvm_mmu_module_init(void)
 				  0, 0, NULL);
 	if (!pte_chain_cache)
 		goto nomem;
-	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
-					    sizeof(struct kvm_rmap_desc),
+	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
+					    sizeof(struct pte_list_desc),
 					    0, 0, NULL);
-	if (!rmap_desc_cache)
+	if (!pte_list_desc_cache)
 		goto nomem;
 
 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
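
The iteration protocol is worth spelling out: pte_list_next() (wrapped by
rmap_next() in the diff above) returns the first spte when passed NULL and
the successor of the previously returned spte otherwise. Continuing the
standalone sketch after the commit message, an illustrative walker in plain
C follows; it mirrors the patch's pte_list_next() logic, but count_sptes()
is a hypothetical helper, not code from this commit.

/* Continuing the sketch above: return the entry after 'spte',
 * or the first entry when spte == NULL. */
static uint64_t *pte_list_next(unsigned long *pte_list, uint64_t *spte)
{
	struct pte_list_desc *desc;
	uint64_t *prev = NULL;
	int i;

	if (!*pte_list)
		return NULL;
	if (!(*pte_list & 1))
		return spte ? NULL : (uint64_t *)*pte_list;
	for (desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	     desc; desc = desc->more)
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
			if (prev == spte)
				return desc->sptes[i];
			prev = desc->sptes[i];
		}
	return NULL;
}

/* Typical walk: start with NULL, feed the previous result back in. */
static int count_sptes(unsigned long *pte_list)
{
	uint64_t *spte;
	int n = 0;

	for (spte = pte_list_next(pte_list, NULL); spte;
	     spte = pte_list_next(pte_list, spte))
		++n;
	return n;
}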