author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>   2010-04-16 04:35:54 -0400
committer  Avi Kivity <avi@redhat.com>                     2010-05-17 05:17:56 -0400
commit     3246af0ece6c61689847417977733f0b12dc4b6f (patch)
tree       34ce82a3f2bac9dc6073b5f89bf6358cb7d9f2f1 /arch/x86/kvm/mmu.c
parent     acb5451789f21ad51215897bb8f9306a05e8acd4 (diff)
KVM: MMU: cleanup for hlist walk restart
Quote from Avi:
|Just change the assignment to a 'goto restart;' please,
|I don't like playing with list_for_each internals.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
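
The change itself is mechanical, but the rationale is easy to miss: hlist_for_each_entry_safe() only guards against removal of the *current* entry by caching its successor, while kvm_mmu_zap_page() can drop other pages from the same bucket as a side effect (its nonzero return signals this), leaving that cached successor dangling. The old code papered over this by poking the iterator's cursor back to the bucket head; the patch restarts the walk instead. Below is a minimal userspace sketch of that pattern, assuming illustrative stand-ins (struct mmu_page, zap_page(), unprotect_gfn()) rather than the real KVM structures; the open-coded loop plays the role of hlist_for_each_entry_safe().

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct kvm_mmu_page on a hash bucket. */
struct mmu_page {
	unsigned long gfn;
	struct mmu_page *next;
};

static struct mmu_page *bucket;	/* one hash bucket, singly linked */

/*
 * Stand-in for kvm_mmu_zap_page(): frees @victim and returns nonzero when
 * it (hypothetically) also dropped other pages, so any "next" pointer the
 * caller cached before the call may now be stale.
 */
static int zap_page(struct mmu_page *victim)
{
	int dropped_more = victim->gfn & 1;	/* pretend: odd gfns drop extras */
	struct mmu_page **pp;

	for (pp = &bucket; *pp; pp = &(*pp)->next)
		if (*pp == victim) {
			*pp = victim->next;
			free(victim);
			break;
		}
	return dropped_more;
}

/* Mirrors the shape of kvm_mmu_unprotect_page() after the patch. */
static void unprotect_gfn(unsigned long gfn)
{
	struct mmu_page *sp, *n;

restart:
	/* open-coded "safe" walk: n caches sp->next before the body runs */
	for (sp = bucket; sp; sp = n) {
		n = sp->next;
		if (sp->gfn != gfn)
			continue;
		/*
		 * If zapping removed more than this one entry, the cached n
		 * may point at freed memory.  Rather than reaching into the
		 * iterator (old code: n = bucket->first), restart the walk.
		 */
		if (zap_page(sp))
			goto restart;
	}
}

int main(void)
{
	unsigned long gfns[] = { 3, 7, 3, 5 };
	struct mmu_page *p;
	int i;

	for (i = 0; i < 4; i++) {
		p = malloc(sizeof(*p));
		p->gfn = gfns[i];
		p->next = bucket;
		bucket = p;
	}

	unprotect_gfn(3);

	for (p = bucket; p; p = p->next)
		printf("remaining gfn %lu\n", p->gfn);
	return 0;
}

The goto keeps the loop body oblivious to how the list macro caches its cursor, which is exactly Avi's objection to the old form; termination is safe because the walk only restarts after a matching entry has been removed.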
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 -rw-r--r--  arch/x86/kvm/mmu.c | 15
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 45f3aa5213c5..7a17db1cdcd6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1565,13 +1565,14 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	r = 0;
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
+restart:
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.direct) {
 			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 				 sp->role.word);
 			r = 1;
 			if (kvm_mmu_zap_page(kvm, sp))
-				n = bucket->first;
+				goto restart;
 		}
 	return r;
 }
@@ -1585,13 +1586,14 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
+restart:
 	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
 		if (sp->gfn == gfn && !sp->role.direct
 		    && !sp->role.invalid) {
 			pgprintk("%s: zap %lx %x\n",
 				 __func__, gfn, sp->role.word);
 			if (kvm_mmu_zap_page(kvm, sp))
-				nn = bucket->first;
+				goto restart;
 		}
 	}
 }
@@ -2671,6 +2673,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	}
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+
+restart:
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
 		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
 			continue;
@@ -2691,7 +2695,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, sp->role.word);
 			if (kvm_mmu_zap_page(vcpu->kvm, sp))
-				n = bucket->first;
+				goto restart;
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
@@ -2900,10 +2904,11 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	struct kvm_mmu_page *sp, *node;
 
 	spin_lock(&kvm->mmu_lock);
+restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
 		if (kvm_mmu_zap_page(kvm, sp))
-			node = container_of(kvm->arch.active_mmu_pages.next,
-					    struct kvm_mmu_page, link);
+			goto restart;
+
 	spin_unlock(&kvm->mmu_lock);
 
 	kvm_flush_remote_tlbs(kvm);