diff options
author | Alexander Graf <agraf@suse.de> | 2010-08-02 15:24:48 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-10-24 04:52:06 -0400 |
commit | e7c1d14e3bf40b87e6a3f68964b36dbb2c875c0f (patch) | |
tree | 2dd5d3e6f3ac88f532b790cc4215d349c8b95790 /arch/powerpc/kvm | |
parent | 2e602847d9c2d6b487bda62bbbe550db40ca912f (diff) |
KVM: PPC: Make invalidation code more reliable
There is a race condition in the pte invalidation code path where we cannot
be sure whether a pte has already been invalidated. So let's move the spin
lock around to get rid of the race.
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r-- | arch/powerpc/kvm/book3s_mmu_hpte.c | 14 |
1 file changed, 8 insertions, 6 deletions
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index bd6a7676d0c8..79751d8dd131 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c | |||
@@ -92,10 +92,6 @@ static void free_pte_rcu(struct rcu_head *head) | |||
92 | 92 | ||
93 | static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | 93 | static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) |
94 | { | 94 | { |
95 | /* pte already invalidated? */ | ||
96 | if (hlist_unhashed(&pte->list_pte)) | ||
97 | return; | ||
98 | |||
99 | trace_kvm_book3s_mmu_invalidate(pte); | 95 | trace_kvm_book3s_mmu_invalidate(pte); |
100 | 96 | ||
101 | /* Different for 32 and 64 bit */ | 97 | /* Different for 32 and 64 bit */ |
@@ -103,18 +99,24 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | |||
103 | 99 | ||
104 | spin_lock(&vcpu->arch.mmu_lock); | 100 | spin_lock(&vcpu->arch.mmu_lock); |
105 | 101 | ||
102 | /* pte already invalidated in between? */ | ||
103 | if (hlist_unhashed(&pte->list_pte)) { | ||
104 | spin_unlock(&vcpu->arch.mmu_lock); | ||
105 | return; | ||
106 | } | ||
107 | |||
106 | hlist_del_init_rcu(&pte->list_pte); | 108 | hlist_del_init_rcu(&pte->list_pte); |
107 | hlist_del_init_rcu(&pte->list_pte_long); | 109 | hlist_del_init_rcu(&pte->list_pte_long); |
108 | hlist_del_init_rcu(&pte->list_vpte); | 110 | hlist_del_init_rcu(&pte->list_vpte); |
109 | hlist_del_init_rcu(&pte->list_vpte_long); | 111 | hlist_del_init_rcu(&pte->list_vpte_long); |
110 | 112 | ||
111 | spin_unlock(&vcpu->arch.mmu_lock); | ||
112 | |||
113 | if (pte->pte.may_write) | 113 | if (pte->pte.may_write) |
114 | kvm_release_pfn_dirty(pte->pfn); | 114 | kvm_release_pfn_dirty(pte->pfn); |
115 | else | 115 | else |
116 | kvm_release_pfn_clean(pte->pfn); | 116 | kvm_release_pfn_clean(pte->pfn); |
117 | 117 | ||
118 | spin_unlock(&vcpu->arch.mmu_lock); | ||
119 | |||
118 | vcpu->arch.hpte_cache_count--; | 120 | vcpu->arch.hpte_cache_count--; |
119 | call_rcu(&pte->rcu_head, free_pte_rcu); | 121 | call_rcu(&pte->rcu_head, free_pte_rcu); |
120 | } | 122 | } |