author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2010-06-04 09:56:11 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 03:39:28 -0400
commit	f41d335a02d5132c14ec0459d3b2790eeb16fb11 (patch)
tree	cd513bfecaf1eafa38cedbe1729e60c6c44c008b /arch
parent	d98ba053656c033180781007241f2c9d54606d56 (diff)
KVM: MMU: traverse sp hlist safely
Now that kvm_mmu_prepare_zap_page() only queues zapped shadow pages on invalid_list, and kvm_mmu_commit_zap_page() frees them after the walk has finished, we can safely traverse the sp hlist with the plain (non-_safe) iterators and without restart labels.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	51
1 file changed, 23 insertions(+), 28 deletions(-)
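The pattern this relies on: during the hash-list walk, kvm_mmu_prepare_zap_page() only marks a page invalid and collects it on invalid_list; kvm_mmu_commit_zap_page() unlinks and frees the collected pages once the walk is over, so the list being traversed is never modified under the iterator. The sketch below is not taken from the kernel; it is a minimal, self-contained userspace C illustration of that prepare/commit split using made-up names (struct shadow, prepare_zap(), commit_zap()).

```c
/*
 * Hypothetical sketch of the prepare/commit zap pattern.  None of these
 * names are the real KVM MMU data structures or functions.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct shadow {
	int gfn;
	bool invalid;               /* plays the role of sp->role.invalid  */
	struct shadow *hash_next;   /* the list the lookup loops walk      */
	struct shadow *zap_next;    /* the per-call "invalid_list"         */
};

/*
 * Phase 1: plain traversal.  Matching entries are only marked and chained
 * onto a private zap list; nothing is unlinked from the hash list, so the
 * walk never invalidates its own cursor and needs no _safe iterator or
 * restart label.
 */
static struct shadow *prepare_zap(struct shadow *hash_head, int gfn)
{
	struct shadow *zap = NULL;

	for (struct shadow *s = hash_head; s; s = s->hash_next) {
		if (s->gfn != gfn || s->invalid)
			continue;
		s->invalid = true;
		s->zap_next = zap;
		zap = s;
	}
	return zap;
}

/*
 * Phase 2: once the walk has finished, unlink and free everything that was
 * collected.  This is the only step that mutates the hash list.
 */
static void commit_zap(struct shadow **hash_head, struct shadow *zap)
{
	while (zap) {
		struct shadow *victim = zap;
		struct shadow **pp;

		zap = zap->zap_next;
		for (pp = hash_head; *pp; pp = &(*pp)->hash_next) {
			if (*pp == victim) {
				*pp = victim->hash_next;
				break;
			}
		}
		free(victim);
	}
}

int main(void)
{
	struct shadow *head = NULL;

	for (int i = 0; i < 6; i++) {
		struct shadow *s = calloc(1, sizeof(*s));
		s->gfn = i % 2;                 /* gfns 0 and 1, three each */
		s->hash_next = head;
		head = s;
	}

	commit_zap(&head, prepare_zap(head, 1));   /* zap every gfn-1 entry */
	for (struct shadow *s = head; s; s = s->hash_next)
		printf("kept gfn %d\n", s->gfn);

	commit_zap(&head, prepare_zap(head, 0));   /* free what is left */
	return 0;
}
```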
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 44548e346976..3b75689eda95 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1205,13 +1205,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos, n)				\
-  hlist_for_each_entry_safe(sp, pos, n,					\
+#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
+  hlist_for_each_entry(sp, pos,						\
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)	\
 	if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n)		\
-  hlist_for_each_entry_safe(sp, pos, n,					\
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
+  hlist_for_each_entry(sp, pos,						\
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)	\
 	if ((sp)->gfn != (gfn) || (sp)->role.direct ||			\
 		(sp)->role.invalid) {} else
@@ -1265,11 +1265,11 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node, *n;
+	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
 		if (!s->unsync)
 			continue;
 
@@ -1387,7 +1387,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *node;
 	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1401,7 +1401,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn, node, tmp) {
+	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -1656,19 +1656,18 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node, *n;
+	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
 	r = 0;
-restart:
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, n) {
+
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
 		pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
-		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-			goto restart;
+		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 	}
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 	return r;
@@ -1677,15 +1676,13 @@ restart:
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node, *nn;
+	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 
-restart:
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, nn) {
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
 		pgprintk("%s: zap %lx %x\n",
 			 __func__, gfn, sp->role.word);
-		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-			goto restart;
+		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 	}
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
@@ -1830,9 +1827,9 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node, *n;
+	struct hlist_node *node;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -1844,10 +1841,10 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node, *n;
+	struct hlist_node *node;
 	bool need_unsync = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
 		if (s->role.level != PT_PAGE_TABLE_LEVEL)
 			return 1;
 
@@ -2724,7 +2721,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node, *n;
+	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry;
 	u64 *spte;
@@ -2794,8 +2791,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		}
 	}
 
-restart:
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node, n) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
 		pte_size = sp->role.cr4_pae ? 8 : 4;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
 		misaligned |= bytes < 4;
@@ -2812,9 +2808,8 @@ restart:
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, sp->role.word);
-			if (kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-						     &invalid_list))
-				goto restart;
+			kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+						 &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
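A note on the dropped locals: in this era of the kernel, hlist_for_each_entry_safe() takes an extra struct hlist_node lookahead cursor (the *n, *nn, and *tmp variables removed above) precisely so that the entry under the cursor may be freed during the walk. Once actual freeing is deferred to kvm_mmu_commit_zap_page() after the loop, the plain hlist_for_each_entry() with a single node cursor suffices, which is why each caller loses one local variable along with its restart label.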