-rw-r--r--	arch/x86/kvm/mmu.c	52
-rw-r--r--	arch/x86/kvm/mmutrace.h	2
2 files changed, 44 insertions, 10 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 881ad918455c..9b849a70742d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -916,6 +916,7 @@ static int is_empty_shadow_page(u64 *spt)
 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	ASSERT(is_empty_shadow_page(sp->spt));
+	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
 	__free_page(virt_to_page(sp->spt));
 	if (!sp->role.direct)
@@ -1200,6 +1201,10 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 }
 
 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+				    struct list_head *invalid_list);
+static void kvm_mmu_commit_zap_page(struct kvm *kvm,
+				    struct list_head *invalid_list);
 
 #define for_each_gfn_sp(kvm, sp, gfn, pos, n)				\
 	hlist_for_each_entry_safe(sp, pos, n,				\
@@ -1530,7 +1535,8 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 }
 
 static int mmu_zap_unsync_children(struct kvm *kvm,
-				   struct kvm_mmu_page *parent)
+				   struct kvm_mmu_page *parent,
+				   struct list_head *invalid_list)
 {
 	int i, zapped = 0;
 	struct mmu_page_path parents;
@@ -1544,7 +1550,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 		struct kvm_mmu_page *sp;
 
 		for_each_sp(pages, sp, parents, i) {
-			kvm_mmu_zap_page(kvm, sp);
+			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 			mmu_pages_clear_parents(&parents);
 			zapped++;
 		}
@@ -1554,16 +1560,16 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 	return zapped;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+				    struct list_head *invalid_list)
 {
 	int ret;
 
-	trace_kvm_mmu_zap_page(sp);
+	trace_kvm_mmu_prepare_zap_page(sp);
 	++kvm->stat.mmu_shadow_zapped;
-	ret = mmu_zap_unsync_children(kvm, sp);
+	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
 	kvm_mmu_page_unlink_children(kvm, sp);
 	kvm_mmu_unlink_parents(kvm, sp);
-	kvm_flush_remote_tlbs(kvm);
 	if (!sp->role.invalid && !sp->role.direct)
 		unaccount_shadowed(kvm, sp->gfn);
 	if (sp->unsync)
@@ -1571,17 +1577,45 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->root_count) {
 		/* Count self */
 		ret++;
-		hlist_del(&sp->hash_link);
-		kvm_mmu_free_page(kvm, sp);
+		list_move(&sp->link, invalid_list);
 	} else {
-		sp->role.invalid = 1;
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 		kvm_reload_remote_mmus(kvm);
 	}
+
+	sp->role.invalid = 1;
 	kvm_mmu_reset_last_pte_updated(kvm);
 	return ret;
 }
 
+static void kvm_mmu_commit_zap_page(struct kvm *kvm,
+				    struct list_head *invalid_list)
+{
+	struct kvm_mmu_page *sp;
+
+	if (list_empty(invalid_list))
+		return;
+
+	kvm_flush_remote_tlbs(kvm);
+
+	do {
+		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
+		WARN_ON(!sp->role.invalid || sp->root_count);
+		kvm_mmu_free_page(kvm, sp);
+	} while (!list_empty(invalid_list));
+
+}
+
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	LIST_HEAD(invalid_list);
+	int ret;
+
+	ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	return ret;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 42f07b1bfbc9..3aab0f0930ef 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -190,7 +190,7 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
 	TP_ARGS(sp)
 );
 
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_zap_page,
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
 	TP_PROTO(struct kvm_mmu_page *sp),
 
 	TP_ARGS(sp)
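
The net effect of the patch is to split shadow-page zapping into two phases: kvm_mmu_prepare_zap_page() unlinks a page and queues it on a caller-supplied invalid_list, and kvm_mmu_commit_zap_page() issues a single kvm_flush_remote_tlbs() before freeing everything on that list. The retained kvm_mmu_zap_page() wrapper in the diff shows the minimal prepare-then-commit usage. As an illustration only, a batching caller could look roughly like the sketch below; the function zap_gfn_pages() and its use of for_each_gfn_sp() are hypothetical and not part of this patch.

	/*
	 * Hypothetical sketch: zap every shadow page for a gfn with one
	 * remote TLB flush, using the prepare/commit pair from this patch.
	 * Assumes mmu_lock is held, as for kvm_mmu_zap_page().
	 */
	static int zap_gfn_pages(struct kvm *kvm, gfn_t gfn)
	{
		struct kvm_mmu_page *sp;
		struct hlist_node *pos, *n;
		LIST_HEAD(invalid_list);
		int zapped = 0;

		/* Queue each matching page; nothing is flushed or freed yet. */
		for_each_gfn_sp(kvm, sp, gfn, pos, n)
			zapped += kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);

		/* One TLB flush, then free all pages queued above. */
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		return zapped;
	}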