about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author: Christian Borntraeger <borntraeger@de.ibm.com> 2013-06-12 07:54:55 -0400
committer: Paolo Bonzini <pbonzini@redhat.com> 2013-06-17 11:09:17 -0400
commitd0321a24bf10e2299a997c4747b924f79f70a232 (patch)
treee10307eb964122b951ea0ca56d756092143605bd /arch
parentb110feaf4d0bbc31802589ea6b956389afdabcee (diff)
KVM: s390: Use common waitqueue
Lets use the common waitqueue for kvm cpus on s390. By itself it is just a cleanup, but it should also improve the accuracy of diag 0x44 which is implemented via kvm_vcpu_on_spin. kvm_vcpu_on_spin has an explicit check for waiting on the waitqueue to optimize the yielding. Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com> Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 2
-rw-r--r-- arch/s390/kvm/interrupt.c | 18
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 2
-rw-r--r-- arch/s390/kvm/sigp.c | 16
4 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 43207dd45fab..d3ffd7eded3c 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -228,7 +228,7 @@ struct kvm_s390_local_interrupt {
228 atomic_t active; 228 atomic_t active;
229 struct kvm_s390_float_interrupt *float_int; 229 struct kvm_s390_float_interrupt *float_int;
230 int timer_due; /* event indicator for waitqueue below */ 230 int timer_due; /* event indicator for waitqueue below */
231 wait_queue_head_t wq; 231 wait_queue_head_t *wq;
232 atomic_t *cpuflags; 232 atomic_t *cpuflags;
233 unsigned int action_bits; 233 unsigned int action_bits;
234}; 234};
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5c948177529e..7f35cb33e510 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -438,7 +438,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
438no_timer: 438no_timer:
439 spin_lock(&vcpu->arch.local_int.float_int->lock); 439 spin_lock(&vcpu->arch.local_int.float_int->lock);
440 spin_lock_bh(&vcpu->arch.local_int.lock); 440 spin_lock_bh(&vcpu->arch.local_int.lock);
441 add_wait_queue(&vcpu->arch.local_int.wq, &wait); 441 add_wait_queue(&vcpu->wq, &wait);
442 while (list_empty(&vcpu->arch.local_int.list) && 442 while (list_empty(&vcpu->arch.local_int.list) &&
443 list_empty(&vcpu->arch.local_int.float_int->list) && 443 list_empty(&vcpu->arch.local_int.float_int->list) &&
444 (!vcpu->arch.local_int.timer_due) && 444 (!vcpu->arch.local_int.timer_due) &&
@@ -452,7 +452,7 @@ no_timer:
452 } 452 }
453 __unset_cpu_idle(vcpu); 453 __unset_cpu_idle(vcpu);
454 __set_current_state(TASK_RUNNING); 454 __set_current_state(TASK_RUNNING);
455 remove_wait_queue(&vcpu->arch.local_int.wq, &wait); 455 remove_wait_queue(&vcpu->wq, &wait);
456 spin_unlock_bh(&vcpu->arch.local_int.lock); 456 spin_unlock_bh(&vcpu->arch.local_int.lock);
457 spin_unlock(&vcpu->arch.local_int.float_int->lock); 457 spin_unlock(&vcpu->arch.local_int.float_int->lock);
458 hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); 458 hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
@@ -465,8 +465,8 @@ void kvm_s390_tasklet(unsigned long parm)
465 465
466 spin_lock(&vcpu->arch.local_int.lock); 466 spin_lock(&vcpu->arch.local_int.lock);
467 vcpu->arch.local_int.timer_due = 1; 467 vcpu->arch.local_int.timer_due = 1;
468 if (waitqueue_active(&vcpu->arch.local_int.wq)) 468 if (waitqueue_active(&vcpu->wq))
469 wake_up_interruptible(&vcpu->arch.local_int.wq); 469 wake_up_interruptible(&vcpu->wq);
470 spin_unlock(&vcpu->arch.local_int.lock); 470 spin_unlock(&vcpu->arch.local_int.lock);
471} 471}
472 472
@@ -613,7 +613,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
613 spin_lock_bh(&li->lock); 613 spin_lock_bh(&li->lock);
614 list_add(&inti->list, &li->list); 614 list_add(&inti->list, &li->list);
615 atomic_set(&li->active, 1); 615 atomic_set(&li->active, 1);
616 BUG_ON(waitqueue_active(&li->wq)); 616 BUG_ON(waitqueue_active(li->wq));
617 spin_unlock_bh(&li->lock); 617 spin_unlock_bh(&li->lock);
618 return 0; 618 return 0;
619} 619}
@@ -746,8 +746,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
746 li = fi->local_int[sigcpu]; 746 li = fi->local_int[sigcpu];
747 spin_lock_bh(&li->lock); 747 spin_lock_bh(&li->lock);
748 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 748 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
749 if (waitqueue_active(&li->wq)) 749 if (waitqueue_active(li->wq))
750 wake_up_interruptible(&li->wq); 750 wake_up_interruptible(li->wq);
751 spin_unlock_bh(&li->lock); 751 spin_unlock_bh(&li->lock);
752 spin_unlock(&fi->lock); 752 spin_unlock(&fi->lock);
753 mutex_unlock(&kvm->lock); 753 mutex_unlock(&kvm->lock);
@@ -832,8 +832,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
832 if (inti->type == KVM_S390_SIGP_STOP) 832 if (inti->type == KVM_S390_SIGP_STOP)
833 li->action_bits |= ACTION_STOP_ON_STOP; 833 li->action_bits |= ACTION_STOP_ON_STOP;
834 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 834 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
835 if (waitqueue_active(&li->wq)) 835 if (waitqueue_active(&vcpu->wq))
836 wake_up_interruptible(&vcpu->arch.local_int.wq); 836 wake_up_interruptible(&vcpu->wq);
837 spin_unlock_bh(&li->lock); 837 spin_unlock_bh(&li->lock);
838 mutex_unlock(&vcpu->kvm->lock); 838 mutex_unlock(&vcpu->kvm->lock);
839 return 0; 839 return 0;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index a3183651ff45..ba694d2ba51e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -438,7 +438,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
438 vcpu->arch.local_int.float_int = &kvm->arch.float_int; 438 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
439 spin_lock(&kvm->arch.float_int.lock); 439 spin_lock(&kvm->arch.float_int.lock);
440 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int; 440 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
441 init_waitqueue_head(&vcpu->arch.local_int.wq); 441 vcpu->arch.local_int.wq = &vcpu->wq;
442 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; 442 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
443 spin_unlock(&kvm->arch.float_int.lock); 443 spin_unlock(&kvm->arch.float_int.lock);
444 444
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 1c48ab2845e0..033c864f1ae8 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -79,8 +79,8 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
79 list_add_tail(&inti->list, &li->list); 79 list_add_tail(&inti->list, &li->list);
80 atomic_set(&li->active, 1); 80 atomic_set(&li->active, 1);
81 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 81 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
82 if (waitqueue_active(&li->wq)) 82 if (waitqueue_active(li->wq))
83 wake_up_interruptible(&li->wq); 83 wake_up_interruptible(li->wq);
84 spin_unlock_bh(&li->lock); 84 spin_unlock_bh(&li->lock);
85 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 85 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
86 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); 86 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
@@ -117,8 +117,8 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
117 list_add_tail(&inti->list, &li->list); 117 list_add_tail(&inti->list, &li->list);
118 atomic_set(&li->active, 1); 118 atomic_set(&li->active, 1);
119 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 119 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
120 if (waitqueue_active(&li->wq)) 120 if (waitqueue_active(li->wq))
121 wake_up_interruptible(&li->wq); 121 wake_up_interruptible(li->wq);
122 spin_unlock_bh(&li->lock); 122 spin_unlock_bh(&li->lock);
123 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 123 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
124 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); 124 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
@@ -145,8 +145,8 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
145 atomic_set(&li->active, 1); 145 atomic_set(&li->active, 1);
146 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); 146 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
147 li->action_bits |= action; 147 li->action_bits |= action;
148 if (waitqueue_active(&li->wq)) 148 if (waitqueue_active(li->wq))
149 wake_up_interruptible(&li->wq); 149 wake_up_interruptible(li->wq);
150out: 150out:
151 spin_unlock_bh(&li->lock); 151 spin_unlock_bh(&li->lock);
152 152
@@ -250,8 +250,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
250 250
251 list_add_tail(&inti->list, &li->list); 251 list_add_tail(&inti->list, &li->list);
252 atomic_set(&li->active, 1); 252 atomic_set(&li->active, 1);
253 if (waitqueue_active(&li->wq)) 253 if (waitqueue_active(li->wq))
254 wake_up_interruptible(&li->wq); 254 wake_up_interruptible(li->wq);
255 rc = SIGP_CC_ORDER_CODE_ACCEPTED; 255 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
256 256
257 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); 257 VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);