author:    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2011-01-12 02:41:22 -0500
committer: Marcelo Tosatti <mtosatti@redhat.com>          2011-03-17 12:08:26 -0400
commit:    3cba41307a2b1344ab8c1b9f55202d1e9d7bf81b
tree:      87191d63557cc722906ceaec98a1f8f148cf1fcc
parent:    6b7e2d0991489559a1df4500d77f7b76c4607ed0
KVM: make make_all_cpus_request() lockless
Now that 'vcpu->mode' tells us exactly whether an IPI needs to be sent to other CPUs, checking the request bit first is needless, and we can drop the spinlock along with its collateral code.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
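The ordering this depends on can be modeled in plain C11 atomics. The following is a minimal userspace sketch, not kernel code: the names are hypothetical, and the seq_cst atomics stand in for the kernel's set_bit()/smp_mb() pairing.

#include <stdatomic.h>
#include <stdbool.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

struct vcpu_model {
	_Atomic unsigned long requests;	/* one bit per pending request */
	_Atomic int mode;		/* is the vCPU running guest code? */
};

/* Requester side, as in make_all_cpus_request() after this patch:
 * set the request bit first, then read ->mode to decide on the IPI. */
static bool request_needs_ipi(struct vcpu_model *v, int req)
{
	atomic_fetch_or(&v->requests, 1UL << req);
	return atomic_load(&v->mode) == IN_GUEST_MODE;
}

/* vCPU side on guest entry: publish the mode first, then look for
 * requests.  With both sides sequentially consistent, either the
 * requester sees IN_GUEST_MODE and sends the IPI, or this load sees
 * the request bit -- a request can never be lost, so neither the lock
 * nor a test_and_set_bit() check adds anything. */
static bool vcpu_may_enter_guest(struct vcpu_model *v)
{
	atomic_store(&v->mode, IN_GUEST_MODE);
	if (atomic_load(&v->requests) != 0) {
		atomic_store(&v->mode, OUTSIDE_GUEST_MODE);
		return false;	/* service requests before entering */
	}
	return true;
}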
-rw-r--r--  include/linux/kvm_host.h  6
-rw-r--r--  virt/kvm/kvm_main.c       9
2 files changed, 3 insertions, 12 deletions
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b99eacd988ab..c8dee22b1945 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -224,7 +224,6 @@ struct kvm_memslots {
 
 struct kvm {
 	spinlock_t mmu_lock;
-	raw_spinlock_t requests_lock;
 	struct mutex slots_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	struct kvm_memslots *memslots;
@@ -731,11 +730,6 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 	set_bit(req, &vcpu->requests);
 }
 
-static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
-{
-	return test_and_set_bit(req, &vcpu->requests);
-}
-
 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 {
 	if (test_bit(req, &vcpu->requests)) {
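kvm_check_request(), the surviving helper, is the consumer half of the protocol: the vCPU thread tests a request bit and clears it before acting. In the same hypothetical userspace model as the sketch above, it amounts to:

/* Consumer half, mirroring kvm_check_request(): test-and-clear one
 * request bit (illustrative model; the kernel uses test_bit()/clear_bit()). */
static bool check_request(struct vcpu_model *v, int req)
{
	if (atomic_load(&v->requests) & (1UL << req)) {
		atomic_fetch_and(&v->requests, ~(1UL << req));
		return true;
	}
	return false;
}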
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 19209f849cf7..4856a7dcbd7f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -165,11 +165,9 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-	raw_spin_lock(&kvm->requests_lock);
-	me = smp_processor_id();
+	me = get_cpu();
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (kvm_make_check_request(req, vcpu))
-			continue;
+		kvm_make_request(req, vcpu);
 		cpu = vcpu->cpu;
 
 		/* Set ->requests bit before we read ->mode */
@@ -185,7 +183,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 		smp_call_function_many(cpus, ack_flush, NULL, 1);
 	else
 		called = false;
-	raw_spin_unlock(&kvm->requests_lock);
+	put_cpu();
 	free_cpumask_var(cpus);
 	return called;
 }
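Note that get_cpu()/put_cpu() do not replace the lock's mutual exclusion; they only disable preemption so the caller stays on the CPU whose id it just read while the cpumask is built and the IPIs are sent. In kernel terms (a semantic sketch, not a standalone snippet):

	me = get_cpu();		/* preempt_disable() + smp_processor_id() */
	/* ... set request bits, fill cpumask, smp_call_function_many() ... */
	put_cpu();		/* preempt_enable() */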
@@ -468,7 +466,6 @@ static struct kvm *kvm_create_vm(void)
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	spin_lock_init(&kvm->mmu_lock);
-	raw_spin_lock_init(&kvm->requests_lock);
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);
 	mutex_init(&kvm->irq_lock);