diff options
| -rw-r--r-- | virt/kvm/async_pf.c | 13 |
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8035cc1eb955..efeceb0a222d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
| @@ -91,6 +91,7 @@ static void async_pf_execute(struct work_struct *work) | |||
| 91 | 91 | ||
| 92 | spin_lock(&vcpu->async_pf.lock); | 92 | spin_lock(&vcpu->async_pf.lock); |
| 93 | list_add_tail(&apf->link, &vcpu->async_pf.done); | 93 | list_add_tail(&apf->link, &vcpu->async_pf.done); |
| 94 | apf->vcpu = NULL; | ||
| 94 | spin_unlock(&vcpu->async_pf.lock); | 95 | spin_unlock(&vcpu->async_pf.lock); |
| 95 | 96 | ||
| 96 | /* | 97 | /* |
| @@ -113,6 +114,8 @@ static void async_pf_execute(struct work_struct *work) | |||
| 113 | 114 | ||
| 114 | void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) | 115 | void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) |
| 115 | { | 116 | { |
| 117 | spin_lock(&vcpu->async_pf.lock); | ||
| 118 | |||
| 116 | /* cancel outstanding work queue item */ | 119 | /* cancel outstanding work queue item */ |
| 117 | while (!list_empty(&vcpu->async_pf.queue)) { | 120 | while (!list_empty(&vcpu->async_pf.queue)) { |
| 118 | struct kvm_async_pf *work = | 121 | struct kvm_async_pf *work = |
| @@ -120,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) | |||
| 120 | typeof(*work), queue); | 123 | typeof(*work), queue); |
| 121 | list_del(&work->queue); | 124 | list_del(&work->queue); |
| 122 | 125 | ||
| 126 | /* | ||
| 127 | * We know it's present in vcpu->async_pf.done, do | ||
| 128 | * nothing here. | ||
| 129 | */ | ||
| 130 | if (!work->vcpu) | ||
| 131 | continue; | ||
| 132 | |||
| 133 | spin_unlock(&vcpu->async_pf.lock); | ||
| 123 | #ifdef CONFIG_KVM_ASYNC_PF_SYNC | 134 | #ifdef CONFIG_KVM_ASYNC_PF_SYNC |
| 124 | flush_work(&work->work); | 135 | flush_work(&work->work); |
| 125 | #else | 136 | #else |
| @@ -129,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) | |||
| 129 | kmem_cache_free(async_pf_cache, work); | 140 | kmem_cache_free(async_pf_cache, work); |
| 130 | } | 141 | } |
| 131 | #endif | 142 | #endif |
| 143 | spin_lock(&vcpu->async_pf.lock); | ||
| 132 | } | 144 | } |
| 133 | 145 | ||
| 134 | spin_lock(&vcpu->async_pf.lock); | ||
| 135 | while (!list_empty(&vcpu->async_pf.done)) { | 146 | while (!list_empty(&vcpu->async_pf.done)) { |
| 136 | struct kvm_async_pf *work = | 147 | struct kvm_async_pf *work = |
| 137 | list_first_entry(&vcpu->async_pf.done, | 148 | list_first_entry(&vcpu->async_pf.done, |
