author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>    2010-06-04 09:53:07 -0400
committer  Avi Kivity <avi@redhat.com>                      2010-08-01 03:39:27 -0400
commit     7ae680eb2d5f0cb10ca0e6d1ff5ecb145befe8e4 (patch)
tree       ace3e87b6bccc4e61b32b2b38bb300fb34f9bd2e /arch/x86
parent     03116aa57e75b1bbe8b5e04f3cd21cdb6588c4ba (diff)
KVM: MMU: introduce some macros to clean up hlist traversing

Introduce for_each_gfn_sp() and for_each_gfn_indirect_valid_sp() to clean up
hlist traversing.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
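The trailing "if (...) {} else" in these macros is the usual trick for building a filtering iterator on top of an existing one: the caller's loop body becomes the "else" branch, so it only runs for entries that pass the filter, while the macro still behaves like a single statement after the loop header (braces, break and continue work as expected). Below is a minimal standalone sketch of that idiom; the struct, list macro and names are simplified stand-ins chosen for illustration, not the kernel's hlist or kvm_mmu_page types.

    /*
     * Minimal sketch of the filtering-iterator idiom used by
     * for_each_gfn_sp() / for_each_gfn_indirect_valid_sp().
     * The types and names below are illustrative only.
     */
    #include <stdio.h>

    struct page {
            unsigned long gfn;
            int direct;
            int invalid;
            struct page *next;
    };

    /* Plain iteration over a singly linked list. */
    #define for_each_page(pos, head) \
            for ((pos) = (head); (pos) != NULL; (pos) = (pos)->next)

    /*
     * Filtered iteration: the trailing "if (skip) {} else" makes the
     * caller-supplied body the "else" branch, so it only runs for
     * matching entries, and a later "else" in caller code cannot bind
     * to the macro's "if".
     */
    #define for_each_valid_page_for_gfn(pos, head, want_gfn)           \
            for_each_page(pos, head)                                    \
                    if ((pos)->gfn != (want_gfn) ||                     \
                        (pos)->direct || (pos)->invalid) {} else

    int main(void)
    {
            struct page c = { .gfn = 5, .direct = 0, .invalid = 1, .next = NULL };
            struct page b = { .gfn = 5, .direct = 0, .invalid = 0, .next = &c };
            struct page a = { .gfn = 3, .direct = 0, .invalid = 0, .next = &b };
            struct page *p;

            /* Only 'b' matches: gfn == 5, not direct, not invalid. */
            for_each_valid_page_for_gfn(p, &a, 5)
                    printf("matched page with gfn %lu\n", p->gfn);

            return 0;
    }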
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/mmu.c  |  122
1 file changed, 47 insertions(+), 75 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3ac51153bc47..881ad918455c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1201,6 +1201,17 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 
 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 
+#define for_each_gfn_sp(kvm, sp, gfn, pos, n)				\
+  hlist_for_each_entry_safe(sp, pos, n,					\
+   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)	\
+	if ((sp)->gfn != (gfn)) {} else
+
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos, n)		\
+  hlist_for_each_entry_safe(sp, pos, n,					\
+   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)	\
+		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
+			(sp)->role.invalid) {} else
+
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			   bool clear_unsync)
 {
@@ -1244,16 +1255,12 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 /* @gfn should be write-protected at the call site */
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	struct hlist_head *bucket;
 	struct kvm_mmu_page *s;
 	struct hlist_node *node, *n;
-	unsigned index;
 	bool flush = false;
 
-	index = kvm_page_table_hashfn(gfn);
-	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
-	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
-		if (s->gfn != gfn || !s->unsync || s->role.invalid)
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+		if (!s->unsync)
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -1365,9 +1372,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     u64 *parent_pte)
 {
 	union kvm_mmu_page_role role;
-	unsigned index;
 	unsigned quadrant;
-	struct hlist_head *bucket;
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *tmp;
 	bool need_sync = false;
@@ -1383,36 +1388,34 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	index = kvm_page_table_hashfn(gfn);
-	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
-	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
-		if (sp->gfn == gfn) {
-			if (!need_sync && sp->unsync)
-				need_sync = true;
+	for_each_gfn_sp(vcpu->kvm, sp, gfn, node, tmp) {
+		if (!need_sync && sp->unsync)
+			need_sync = true;
 
-			if (sp->role.word != role.word)
-				continue;
+		if (sp->role.word != role.word)
+			continue;
 
-			if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
-				break;
+		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
+			break;
 
-			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-			if (sp->unsync_children) {
-				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
-				kvm_mmu_mark_parents_unsync(sp);
-			} else if (sp->unsync)
-				kvm_mmu_mark_parents_unsync(sp);
+		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
+		if (sp->unsync_children) {
+			set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+			kvm_mmu_mark_parents_unsync(sp);
+		} else if (sp->unsync)
+			kvm_mmu_mark_parents_unsync(sp);
 
-			trace_kvm_mmu_get_page(sp, false);
-			return sp;
-		}
+		trace_kvm_mmu_get_page(sp, false);
+		return sp;
+	}
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
 	if (!sp)
 		return sp;
 	sp->gfn = gfn;
 	sp->role = role;
-	hlist_add_head(&sp->hash_link, bucket);
+	hlist_add_head(&sp->hash_link,
+		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
 	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
@@ -1617,46 +1620,34 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
-	unsigned index;
-	struct hlist_head *bucket;
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *n;
 	int r;
 
 	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
 	r = 0;
-	index = kvm_page_table_hashfn(gfn);
-	bucket = &kvm->arch.mmu_page_hash[index];
 restart:
-	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.direct && !sp->role.invalid) {
-			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
-				 sp->role.word);
-			r = 1;
-			if (kvm_mmu_zap_page(kvm, sp))
-				goto restart;
-		}
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, n) {
+		pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
+			 sp->role.word);
+		r = 1;
+		if (kvm_mmu_zap_page(kvm, sp))
+			goto restart;
+	}
 	return r;
 }
 
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
-	unsigned index;
-	struct hlist_head *bucket;
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *nn;
 
-	index = kvm_page_table_hashfn(gfn);
-	bucket = &kvm->arch.mmu_page_hash[index];
 restart:
-	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
-		if (sp->gfn == gfn && !sp->role.direct
-		    && !sp->role.invalid) {
-			pgprintk("%s: zap %lx %x\n",
-				 __func__, gfn, sp->role.word);
-			if (kvm_mmu_zap_page(kvm, sp))
-				goto restart;
-		}
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node, nn) {
+		pgprintk("%s: zap %lx %x\n",
+			 __func__, gfn, sp->role.word);
+		if (kvm_mmu_zap_page(kvm, sp))
+			goto restart;
 	}
 }
 
@@ -1799,17 +1790,11 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	struct hlist_head *bucket;
 	struct kvm_mmu_page *s;
 	struct hlist_node *node, *n;
-	unsigned index;
-
-	index = kvm_page_table_hashfn(gfn);
-	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 
-	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
-		if (s->gfn != gfn || s->role.direct || s->unsync ||
-		    s->role.invalid)
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
+		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
 		__kvm_unsync_page(vcpu, s);
@@ -1819,18 +1804,11 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
-	unsigned index;
-	struct hlist_head *bucket;
 	struct kvm_mmu_page *s;
 	struct hlist_node *node, *n;
 	bool need_unsync = false;
 
-	index = kvm_page_table_hashfn(gfn);
-	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
-	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
-		if (s->gfn != gfn || s->role.direct || s->role.invalid)
-			continue;
-
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node, n) {
 		if (s->role.level != PT_PAGE_TABLE_LEVEL)
 			return 1;
 
@@ -2703,8 +2681,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *n;
-	struct hlist_head *bucket;
-	unsigned index;
 	u64 entry, gentry;
 	u64 *spte;
 	unsigned offset = offset_in_page(gpa);
@@ -2772,13 +2748,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			vcpu->arch.last_pte_updated = NULL;
 		}
 	}
-	index = kvm_page_table_hashfn(gfn);
-	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 
 restart:
-	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
-			continue;
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node, n) {
 		pte_size = sp->role.cr4_pae ? 8 : 4;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
 		misaligned |= bytes < 4;