path: root/arch/x86/kvm/mmu.c
author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2010-06-04 09:55:29 -0400
committer Avi Kivity <avi@redhat.com>  2010-08-01 03:39:27 -0400
commit    d98ba053656c033180781007241f2c9d54606d56 (patch)
tree      e62b4c3be2762640c6a3753f0d9fa6ef0fd76f9f /arch/x86/kvm/mmu.c
parent    103ad25a86a6ec5418b3dca6a0d2bf2ba01a8318 (diff)
KVM: MMU: gather remote tlb flush which occurs during page zapped
Use kvm_mmu_prepare_zap_page() and kvm_mmu_commit_zap_page() instead of
kvm_mmu_zap_page(), so that the remote TLB flushes needed while zapping
pages can be gathered and issued as a single IPI.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
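The idea behind the patch: instead of every call site zapping a shadow page and flushing remote TLBs immediately (one IPI per zapped page), call sites now queue pages on a local invalid_list via kvm_mmu_prepare_zap_page() and issue one kvm_mmu_commit_zap_page() at the end, so a whole batch of zaps costs a single remote TLB flush. The sketch below is a hypothetical, self-contained userspace model of that prepare/commit batching; the names mmu_page, prepare_zap_page() and commit_zap_page() are illustrative stand-ins, not the KVM implementation.

/*
 * Hypothetical model of the prepare/commit zap pattern. Not KVM code:
 * struct mmu_page, prepare_zap_page() and commit_zap_page() stand in for
 * kvm_mmu_page, kvm_mmu_prepare_zap_page() and kvm_mmu_commit_zap_page().
 */
#include <stdio.h>

struct mmu_page {
	int id;
	struct mmu_page *next;		/* link on the local invalid list */
};

struct mmu {
	int remote_tlb_flushes;		/* counts the expensive IPI broadcasts */
};

/* "prepare": unlink the page and queue it locally; no TLB flush yet */
static void prepare_zap_page(struct mmu_page *sp, struct mmu_page **invalid_list)
{
	sp->next = *invalid_list;
	*invalid_list = sp;
}

/* "commit": one remote TLB flush covers every queued page, then free them */
static void commit_zap_page(struct mmu *mmu, struct mmu_page **invalid_list)
{
	if (!*invalid_list)
		return;
	mmu->remote_tlb_flushes++;	/* single IPI for the whole batch */
	while (*invalid_list) {
		struct mmu_page *sp = *invalid_list;
		*invalid_list = sp->next;
		/* the real code frees the shadow page here */
	}
}

int main(void)
{
	struct mmu mmu = { 0 };
	struct mmu_page pages[3] = { { 1, NULL }, { 2, NULL }, { 3, NULL } };
	struct mmu_page *invalid_list = NULL;
	int i;

	for (i = 0; i < 3; i++)
		prepare_zap_page(&pages[i], &invalid_list);
	commit_zap_page(&mmu, &invalid_list);

	/* one flush for three zapped pages, versus three with per-page zapping */
	printf("remote TLB flushes: %d\n", mmu.remote_tlb_flushes);
	return 0;
}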
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  84
1 file changed, 53 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1aad8e713f78..44548e346976 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1200,7 +1200,6 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	--kvm->stat.mmu_unsync;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 				    struct list_head *invalid_list);
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
@@ -1218,10 +1217,10 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 		(sp)->role.invalid) {} else
 
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			   bool clear_unsync)
+			   struct list_head *invalid_list, bool clear_unsync)
 {
 	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
-		kvm_mmu_zap_page(vcpu->kvm, sp);
+		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
 	}
 
@@ -1232,7 +1231,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	}
 
 	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
-		kvm_mmu_zap_page(vcpu->kvm, sp);
+		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
 	}
 
@@ -1244,17 +1243,22 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp);
 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
 				   struct kvm_mmu_page *sp)
 {
+	LIST_HEAD(invalid_list);
 	int ret;
 
-	ret = __kvm_sync_page(vcpu, sp, false);
+	ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
 	if (!ret)
 		mmu_convert_notrap(sp);
+	else
+		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
 	return ret;
 }
 
-static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			 struct list_head *invalid_list)
 {
-	return __kvm_sync_page(vcpu, sp, true);
+	return __kvm_sync_page(vcpu, sp, invalid_list, true);
 }
 
 /* @gfn should be write-protected at the call site */
@@ -1262,6 +1266,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
 	struct hlist_node *node, *n;
+	LIST_HEAD(invalid_list);
 	bool flush = false;
 
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
@@ -1271,13 +1276,14 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
 		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
 			(vcpu->arch.mmu.sync_page(vcpu, s))) {
-			kvm_mmu_zap_page(vcpu->kvm, s);
+			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
 			continue;
 		}
 		kvm_unlink_unsync_page(vcpu->kvm, s);
 		flush = true;
 	}
 
+	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 	if (flush)
 		kvm_mmu_flush_tlb(vcpu);
 }
@@ -1348,6 +1354,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 	struct kvm_mmu_page *sp;
 	struct mmu_page_path parents;
 	struct kvm_mmu_pages pages;
+	LIST_HEAD(invalid_list);
 
 	kvm_mmu_pages_init(parent, &parents, &pages);
 	while (mmu_unsync_walk(parent, &pages)) {
@@ -1360,9 +1367,10 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 			kvm_flush_remote_tlbs(vcpu->kvm);
 
 		for_each_sp(pages, sp, parents, i) {
-			kvm_sync_page(vcpu, sp);
+			kvm_sync_page(vcpu, sp, &invalid_list);
 			mmu_pages_clear_parents(&parents);
 		}
+		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 		cond_resched_lock(&vcpu->kvm->mmu_lock);
 		kvm_mmu_pages_init(parent, &parents, &pages);
 	}
@@ -1606,16 +1614,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	LIST_HEAD(invalid_list);
-	int ret;
-
-	ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-	return ret;
-}
-
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
@@ -1623,6 +1621,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 {
 	int used_pages;
+	LIST_HEAD(invalid_list);
 
 	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
 	used_pages = max(0, used_pages);
@@ -1640,8 +1639,10 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
-			used_pages -= kvm_mmu_zap_page(kvm, page);
+			used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
+							       &invalid_list);
 		}
+		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		kvm_nr_mmu_pages = used_pages;
 		kvm->arch.n_free_mmu_pages = 0;
 	}
@@ -1656,6 +1657,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *n;
+	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
@@ -1665,9 +1667,10 @@ restart:
 		pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
-		if (kvm_mmu_zap_page(kvm, sp))
+		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
 			goto restart;
 	}
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 	return r;
 }
 
@@ -1675,14 +1678,16 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *nn;
+	LIST_HEAD(invalid_list);
 
 restart:
 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, nn) {
 		pgprintk("%s: zap %lx %x\n",
 			 __func__, gfn, sp->role.word);
-		if (kvm_mmu_zap_page(kvm, sp))
+		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
 			goto restart;
 	}
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
@@ -2123,6 +2128,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
 	struct kvm_mmu_page *sp;
+	LIST_HEAD(invalid_list);
 
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
@@ -2132,8 +2138,10 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
 		sp = page_header(root);
 		--sp->root_count;
-		if (!sp->root_count && sp->role.invalid)
-			kvm_mmu_zap_page(vcpu->kvm, sp);
+		if (!sp->root_count && sp->role.invalid) {
+			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
+			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+		}
 		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		return;
@@ -2146,10 +2154,12 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 			sp = page_header(root);
 			--sp->root_count;
 			if (!sp->root_count && sp->role.invalid)
-				kvm_mmu_zap_page(vcpu->kvm, sp);
+				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+							 &invalid_list);
 		}
 		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
 	}
+	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
@@ -2715,6 +2725,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *n;
+	LIST_HEAD(invalid_list);
 	u64 entry, gentry;
 	u64 *spte;
 	unsigned offset = offset_in_page(gpa);
@@ -2801,7 +2812,8 @@ restart:
 		 */
 		pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 			 gpa, bytes, sp->role.word);
-		if (kvm_mmu_zap_page(vcpu->kvm, sp))
+		if (kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+					     &invalid_list))
 			goto restart;
 		++vcpu->kvm->stat.mmu_flooded;
 		continue;
@@ -2836,6 +2848,7 @@ restart:
 			++spte;
 		}
 	}
+	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 	kvm_mmu_audit(vcpu, "post pte write");
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
@@ -2864,6 +2877,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	int free_pages;
+	LIST_HEAD(invalid_list);
 
 	free_pages = vcpu->kvm->arch.n_free_mmu_pages;
 	while (free_pages < KVM_REFILL_PAGES &&
@@ -2872,9 +2886,11 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
-		free_pages += kvm_mmu_zap_page(vcpu->kvm, sp);
+		free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+						       &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
+	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
@@ -3009,25 +3025,28 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
+	LIST_HEAD(invalid_list);
 
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
-		if (kvm_mmu_zap_page(kvm, sp))
+		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
 			goto restart;
 
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 	spin_unlock(&kvm->mmu_lock);
 
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
+static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
+						struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *page;
 
 	page = container_of(kvm->arch.active_mmu_pages.prev,
 			    struct kvm_mmu_page, link);
-	return kvm_mmu_zap_page(kvm, page);
+	return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
 }
 
 static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
@@ -3040,6 +3059,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int npages, idx, freed_pages;
+		LIST_HEAD(invalid_list);
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
@@ -3047,12 +3067,14 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 			 kvm->arch.n_free_mmu_pages;
 		cache_count += npages;
 		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
-			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm);
+			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
+							  &invalid_list);
 			cache_count -= freed_pages;
 			kvm_freed = kvm;
 		}
 		nr_to_scan--;
 
+		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		spin_unlock(&kvm->mmu_lock);
 		srcu_read_unlock(&kvm->srcu, idx);
 	}