Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--    arch/x86/kvm/mmu.c    192
1 file changed, 163 insertions, 29 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ee3f53098f0c..3da2508eb22a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -66,7 +66,8 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 #endif
 
 #if defined(MMU_DEBUG) || defined(AUDIT)
-static int dbg = 1;
+static int dbg = 0;
+module_param(dbg, bool, 0644);
 #endif
 
 #ifndef MMU_DEBUG
@@ -640,6 +641,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
                         rmap_remove(kvm, spte);
                         --kvm->stat.lpages;
                         set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+                        spte = NULL;
                         write_protected = 1;
                 }
                 spte = rmap_next(kvm, rmapp, spte);
@@ -651,6 +653,88 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
         account_shadowed(kvm, gfn);
 }
 
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+        u64 *spte;
+        int need_tlb_flush = 0;
+
+        while ((spte = rmap_next(kvm, rmapp, NULL))) {
+                BUG_ON(!(*spte & PT_PRESENT_MASK));
+                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
+                rmap_remove(kvm, spte);
+                set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+                need_tlb_flush = 1;
+        }
+        return need_tlb_flush;
+}
+
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+                          int (*handler)(struct kvm *kvm, unsigned long *rmapp))
+{
+        int i;
+        int retval = 0;
+
+        /*
+         * If mmap_sem isn't taken, we can look the memslots with only
+         * the mmu_lock by skipping over the slots with userspace_addr == 0.
+         */
+        for (i = 0; i < kvm->nmemslots; i++) {
+                struct kvm_memory_slot *memslot = &kvm->memslots[i];
+                unsigned long start = memslot->userspace_addr;
+                unsigned long end;
+
+                /* mmu_lock protects userspace_addr */
+                if (!start)
+                        continue;
+
+                end = start + (memslot->npages << PAGE_SHIFT);
+                if (hva >= start && hva < end) {
+                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+                        retval |= handler(kvm, &memslot->rmap[gfn_offset]);
+                        retval |= handler(kvm,
+                                          &memslot->lpage_info[
+                                          gfn_offset /
+                                          KVM_PAGES_PER_HPAGE].rmap_pde);
+                }
+        }
+
+        return retval;
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+        return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+        u64 *spte;
+        int young = 0;
+
+        /* always return old for EPT */
+        if (!shadow_accessed_mask)
+                return 0;
+
+        spte = rmap_next(kvm, rmapp, NULL);
+        while (spte) {
+                int _young;
+                u64 _spte = *spte;
+                BUG_ON(!(_spte & PT_PRESENT_MASK));
+                _young = _spte & PT_ACCESSED_MASK;
+                if (_young) {
+                        young = 1;
+                        clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+                }
+                spte = rmap_next(kvm, rmapp, spte);
+        }
+        return young;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+}
+
 #ifdef MMU_DEBUG
 static int is_empty_shadow_page(u64 *spt)
 {
@@ -775,6 +859,15 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
         BUG();
 }
 
+static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
+                                    struct kvm_mmu_page *sp)
+{
+        int i;
+
+        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+                sp->spt[i] = shadow_trap_nonpresent_pte;
+}
+
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
         unsigned index;
@@ -840,7 +933,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         hlist_add_head(&sp->hash_link, bucket);
         if (!metaphysical)
                 rmap_write_protect(vcpu->kvm, gfn);
-        vcpu->arch.mmu.prefetch_page(vcpu, sp);
+        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
+                vcpu->arch.mmu.prefetch_page(vcpu, sp);
+        else
+                nonpaging_prefetch_page(vcpu, sp);
         return sp;
 }
 
@@ -916,14 +1012,17 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
         }
         kvm_mmu_page_unlink_children(kvm, sp);
         if (!sp->root_count) {
-                if (!sp->role.metaphysical)
+                if (!sp->role.metaphysical && !sp->role.invalid)
                         unaccount_shadowed(kvm, sp->gfn);
                 hlist_del(&sp->hash_link);
                 kvm_mmu_free_page(kvm, sp);
         } else {
+                int invalid = sp->role.invalid;
                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
                 sp->role.invalid = 1;
                 kvm_reload_remote_mmus(kvm);
+                if (!sp->role.metaphysical && !invalid)
+                        unaccount_shadowed(kvm, sp->gfn);
         }
         kvm_mmu_reset_last_pte_updated(kvm);
 }
@@ -1082,10 +1181,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                 struct kvm_mmu_page *shadow;
 
                 spte |= PT_WRITABLE_MASK;
-                if (user_fault) {
-                        mmu_unshadow(vcpu->kvm, gfn);
-                        goto unshadowed;
-                }
 
                 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
                 if (shadow ||
@@ -1102,13 +1197,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                 }
         }
 
-unshadowed:
-
         if (pte_access & ACC_WRITE_MASK)
                 mark_page_dirty(vcpu->kvm, gfn);
 
         pgprintk("%s: setting spte %llx\n", __func__, spte);
-        pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+        pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
                  (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
                  (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
         set_shadow_pte(shadow_pte, spte);
@@ -1127,8 +1220,10 @@ unshadowed:
                 else
                         kvm_release_pfn_clean(pfn);
         }
-        if (!ptwrite || !*ptwrite)
+        if (speculative) {
                 vcpu->arch.last_pte_updated = shadow_pte;
+                vcpu->arch.last_pte_gfn = gfn;
+        }
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -1176,9 +1271,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                                 return -ENOMEM;
                         }
 
-                        table[index] = __pa(new_table->spt)
-                                | PT_PRESENT_MASK | PT_WRITABLE_MASK
-                                | shadow_user_mask | shadow_x_mask;
+                        set_shadow_pte(&table[index],
+                                       __pa(new_table->spt)
+                                       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+                                       | shadow_user_mask | shadow_x_mask);
                 }
                 table_addr = table[index] & PT64_BASE_ADDR_MASK;
         }
@@ -1189,6 +1285,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
         int r;
         int largepage = 0;
         pfn_t pfn;
+        unsigned long mmu_seq;
 
         down_read(&current->mm->mmap_sem);
         if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
@@ -1196,6 +1293,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
                 largepage = 1;
         }
 
+        mmu_seq = vcpu->kvm->mmu_notifier_seq;
+        /* implicit mb(), we'll read before PT lock is unlocked */
         pfn = gfn_to_pfn(vcpu->kvm, gfn);
         up_read(&current->mm->mmap_sem);
 
@@ -1206,6 +1305,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
         }
 
         spin_lock(&vcpu->kvm->mmu_lock);
+        if (mmu_notifier_retry(vcpu, mmu_seq))
+                goto out_unlock;
         kvm_mmu_free_some_pages(vcpu);
         r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
                          PT32E_ROOT_LEVEL);
@@ -1213,18 +1314,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 
         return r;
-}
-
-
-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
-                                    struct kvm_mmu_page *sp)
-{
-        int i;
 
-        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-                sp->spt[i] = shadow_trap_nonpresent_pte;
+out_unlock:
+        spin_unlock(&vcpu->kvm->mmu_lock);
+        kvm_release_pfn_clean(pfn);
+        return 0;
 }
 
+
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
         int i;
@@ -1340,6 +1437,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
         int r;
         int largepage = 0;
         gfn_t gfn = gpa >> PAGE_SHIFT;
+        unsigned long mmu_seq;
 
         ASSERT(vcpu);
         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1353,6 +1451,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                 largepage = 1;
         }
+        mmu_seq = vcpu->kvm->mmu_notifier_seq;
+        /* implicit mb(), we'll read before PT lock is unlocked */
         pfn = gfn_to_pfn(vcpu->kvm, gfn);
         up_read(&current->mm->mmap_sem);
         if (is_error_pfn(pfn)) {
@@ -1360,12 +1460,19 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
                 return 1;
         }
         spin_lock(&vcpu->kvm->mmu_lock);
+        if (mmu_notifier_retry(vcpu, mmu_seq))
+                goto out_unlock;
         kvm_mmu_free_some_pages(vcpu);
         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
                          largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
         spin_unlock(&vcpu->kvm->mmu_lock);
 
         return r;
+
+out_unlock:
+        spin_unlock(&vcpu->kvm->mmu_lock);
+        kvm_release_pfn_clean(pfn);
+        return 0;
 }
 
 static void nonpaging_free(struct kvm_vcpu *vcpu)
@@ -1580,11 +1687,13 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                   u64 *spte,
                                   const void *new)
 {
-        if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
-            && !vcpu->arch.update_pte.largepage) {
-                ++vcpu->kvm->stat.mmu_pde_zapped;
-                return;
-        }
+        if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+                if (!vcpu->arch.update_pte.largepage ||
+                    sp->role.glevels == PT32_ROOT_LEVEL) {
+                        ++vcpu->kvm->stat.mmu_pde_zapped;
+                        return;
+                }
+        }
 
         ++vcpu->kvm->stat.mmu_pte_updated;
         if (sp->role.glevels == PT32_ROOT_LEVEL)
@@ -1663,6 +1772,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                 vcpu->arch.update_pte.largepage = 1;
         }
+        vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
+        /* implicit mb(), we'll read before PT lock is unlocked */
         pfn = gfn_to_pfn(vcpu->kvm, gfn);
         up_read(&current->mm->mmap_sem);
 
@@ -1674,6 +1785,18 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         vcpu->arch.update_pte.pfn = pfn;
 }
 
+static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+        u64 *spte = vcpu->arch.last_pte_updated;
+
+        if (spte
+            && vcpu->arch.last_pte_gfn == gfn
+            && shadow_accessed_mask
+            && !(*spte & shadow_accessed_mask)
+            && is_shadow_present_pte(*spte))
+                set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        const u8 *new, int bytes)
 {
@@ -1697,6 +1820,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
         spin_lock(&vcpu->kvm->mmu_lock);
+        kvm_mmu_access_page(vcpu, gfn);
         kvm_mmu_free_some_pages(vcpu);
         ++vcpu->kvm->stat.mmu_pte_write;
         kvm_mmu_audit(vcpu, "pre pte write");
@@ -1794,6 +1918,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
         spin_unlock(&vcpu->kvm->mmu_lock);
         return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
@@ -1850,6 +1975,12 @@ void kvm_enable_tdp(void)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
 
+void kvm_disable_tdp(void)
+{
+        tdp_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_tdp);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
         struct kvm_mmu_page *sp;
@@ -1951,7 +2082,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
         kvm_flush_remote_tlbs(kvm);
 }
 
-void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
 {
         struct kvm_mmu_page *page;
 
@@ -1971,6 +2102,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
         list_for_each_entry(kvm, &vm_list, vm_list) {
                 int npages;
 
+                if (!down_read_trylock(&kvm->slots_lock))
+                        continue;
                 spin_lock(&kvm->mmu_lock);
                 npages = kvm->arch.n_alloc_mmu_pages -
                          kvm->arch.n_free_mmu_pages;
@@ -1983,6 +2116,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
                 nr_to_scan--;
 
                 spin_unlock(&kvm->mmu_lock);
+                up_read(&kvm->slots_lock);
         }
         if (kvm_freed)
                 list_move_tail(&kvm_freed->vm_list, &vm_list);