path: root/arch/x86/kvm/mmu.c
author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2010-06-04 09:53:54 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 03:39:27 -0400
commit	7775834a233478ec855b97e30727248f12eafe76 (patch)
tree	06902a6515f0c65b46d1aae37c3d362ca07258d5 /arch/x86/kvm/mmu.c
parent	7ae680eb2d5f0cb10ca0e6d1ff5ecb145befe8e4 (diff)
KVM: MMU: split the operations of kvm_mmu_zap_page()
Use kvm_mmu_prepare_zap_page() and kvm_mmu_commit_zap_page() to split the
kvm_mmu_zap_page() function, so that we can:

- traverse the hlist safely
- easily gather the remote TLB flushes that occur while pages are zapped

These features are used in later patches.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
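The split lets a caller queue several shadow pages on a local invalid_list
while holding mmu_lock, then pay for a single kvm_flush_remote_tlbs() when the
batch is committed. A minimal caller sketch (hypothetical helper, not part of
this patch; the real call sites are converted in later patches):

	/*
	 * Hypothetical batch-zap helper: prepare every page first, then
	 * commit once, so the remote TLB flush and the frees happen once
	 * per batch instead of once per page.
	 */
	static void zap_pages_example(struct kvm *kvm,
				      struct kvm_mmu_page **pages, int nr)
	{
		LIST_HEAD(invalid_list);
		int i;

		for (i = 0; i < nr; i++)
			kvm_mmu_prepare_zap_page(kvm, pages[i], &invalid_list);

		/* one kvm_flush_remote_tlbs() for the whole batch */
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
	}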
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	52
1 file changed, 43 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 881ad918455..9b849a70742 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -916,6 +916,7 @@ static int is_empty_shadow_page(u64 *spt)
 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	ASSERT(is_empty_shadow_page(sp->spt));
+	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
 	__free_page(virt_to_page(sp->spt));
 	if (!sp->role.direct)
@@ -1200,6 +1201,10 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 }
 
 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+				    struct list_head *invalid_list);
+static void kvm_mmu_commit_zap_page(struct kvm *kvm,
+				    struct list_head *invalid_list);
 
 #define for_each_gfn_sp(kvm, sp, gfn, pos, n)				\
   hlist_for_each_entry_safe(sp, pos, n,				\
@@ -1530,7 +1535,8 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 }
 
 static int mmu_zap_unsync_children(struct kvm *kvm,
-				   struct kvm_mmu_page *parent)
+				   struct kvm_mmu_page *parent,
+				   struct list_head *invalid_list)
 {
 	int i, zapped = 0;
 	struct mmu_page_path parents;
@@ -1544,7 +1550,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 		struct kvm_mmu_page *sp;
 
 		for_each_sp(pages, sp, parents, i) {
-			kvm_mmu_zap_page(kvm, sp);
+			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 			mmu_pages_clear_parents(&parents);
 			zapped++;
 		}
@@ -1554,16 +1560,16 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 	return zapped;
 }
 
-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+				    struct list_head *invalid_list)
 {
 	int ret;
 
-	trace_kvm_mmu_zap_page(sp);
+	trace_kvm_mmu_prepare_zap_page(sp);
 	++kvm->stat.mmu_shadow_zapped;
-	ret = mmu_zap_unsync_children(kvm, sp);
+	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
 	kvm_mmu_page_unlink_children(kvm, sp);
 	kvm_mmu_unlink_parents(kvm, sp);
-	kvm_flush_remote_tlbs(kvm);
 	if (!sp->role.invalid && !sp->role.direct)
 		unaccount_shadowed(kvm, sp->gfn);
 	if (sp->unsync)
@@ -1571,17 +1577,45 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->root_count) {
 		/* Count self */
 		ret++;
-		hlist_del(&sp->hash_link);
-		kvm_mmu_free_page(kvm, sp);
+		list_move(&sp->link, invalid_list);
 	} else {
-		sp->role.invalid = 1;
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
 		kvm_reload_remote_mmus(kvm);
 	}
+
+	sp->role.invalid = 1;
 	kvm_mmu_reset_last_pte_updated(kvm);
 	return ret;
 }
 
+static void kvm_mmu_commit_zap_page(struct kvm *kvm,
+				    struct list_head *invalid_list)
+{
+	struct kvm_mmu_page *sp;
+
+	if (list_empty(invalid_list))
+		return;
+
+	kvm_flush_remote_tlbs(kvm);
+
+	do {
+		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
+		WARN_ON(!sp->role.invalid || sp->root_count);
+		kvm_mmu_free_page(kvm, sp);
+	} while (!list_empty(invalid_list));
+
+}
+
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	LIST_HEAD(invalid_list);
+	int ret;
+
+	ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	return ret;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if kvm_nr_mmu_pages is too small, you will get dead lock