author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2010-11-01 05:03:44 -0400
committer  Avi Kivity <avi@redhat.com>                    2011-01-12 04:29:03 -0500
commit     64f638c7c44fa87e65f51eaf0f8302b9cba2d696 (patch)
tree       8aff59c0397f9231f5889d21b682a7a76e7dc66d /virt/kvm
parent     15096ffceabb9693306982127348890886384aaa (diff)
KVM: fix a race when waking up all pv guests
In kvm_async_pf_wakeup_all(), a dummy apf is added to vcpu->async_pf.done
without holding vcpu->async_pf.lock, which can corrupt the list if async page
faults are being completed concurrently (see the consumer-side sketch below).

Also use list_empty_careful() instead of list_empty() for the unlocked check
of the done list.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Acked-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
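
For illustration, a minimal, hypothetical sketch of a consumer of
vcpu->async_pf.done follows. It is modeled on the completion path in
virt/kvm/async_pf.c, but the helper name and exact structure here are
assumptions, not the kernel's code. The point is that the consumer unlinks
entries from the done list only while holding vcpu->async_pf.lock, so a
producer that appends with a plain, unlocked list_add_tail() (as
kvm_async_pf_wakeup_all() did before this patch) races with the list_del()
and can corrupt the list.

/*
 * Hypothetical sketch of the consumer side (modeled on the completion
 * path in virt/kvm/async_pf.c); names and structure are illustrative.
 */
static void drain_async_pf_done(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done,
					struct kvm_async_pf, link);
		/* Unlinking here races with an unlocked list_add_tail(). */
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		if (work->page)
			put_page(work->page);
		kmem_cache_free(async_pf_cache, work);
	}
}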
Diffstat (limited to 'virt/kvm')
 virt/kvm/async_pf.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 100c66ee0220..74268b4c2ee1 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -196,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 {
 	struct kvm_async_pf *work;
 
-	if (!list_empty(&vcpu->async_pf.done))
+	if (!list_empty_careful(&vcpu->async_pf.done))
 		return 0;
 
 	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
@@ -207,7 +207,10 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	get_page(bad_page);
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
+	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&work->link, &vcpu->async_pf.done);
+	spin_unlock(&vcpu->async_pf.lock);
+
 	vcpu->async_pf.queued++;
 	return 0;
 }
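
For reference, list_empty_careful() in include/linux/list.h looks roughly
like the snippet below: it tests both head->next and head->prev, so it does
not report "empty" while another CPU is part-way through modifying the only
entry, which makes it the safer choice for the unlocked fast-path check in
kvm_async_pf_wakeup_all(). (It is still only reliable when concurrent
modification is limited, hence the lock around the list_add_tail() above.)

/*
 * Roughly as in include/linux/list.h: an emptiness test that tolerates
 * a concurrent list_del_init() on another CPU.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;

	return (next == head) && (next == head->prev);
}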