author		Arnd Bergmann <arnd@arndb.de>	2009-06-12 03:53:47 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2009-06-12 05:32:58 -0400
commit		5b02ee3d219f9e01b6e9146e25613822cfc2e5ce (patch)
tree		7ce9126738c3cf4b37d67170d0e4b34818c057a9 /arch/s390
parent		26a28fa4fea5b8c65713aa50c124f76a88c7924d (diff)
parent		8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
asm-generic: merge branch 'master' of torvalds/linux-2.6
Fixes a merge conflict against the x86 tree caused by a fix to atomic.h, which I renamed to atomic_long.h.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/kvm_host.h	5
-rw-r--r--	arch/s390/kvm/intercept.c		28
-rw-r--r--	arch/s390/kvm/interrupt.c		59
-rw-r--r--	arch/s390/kvm/kvm-s390.c		63
-rw-r--r--	arch/s390/kvm/kvm-s390.h		4
-rw-r--r--	arch/s390/kvm/priv.c			4
-rw-r--r--	arch/s390/kvm/sigp.c			16
7 files changed, 126 insertions, 53 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 54ea39f96ec..a27d0d5a6f8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -13,6 +13,8 @@
 
 #ifndef ASM_KVM_HOST_H
 #define ASM_KVM_HOST_H
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <asm/debug.h>
 #include <asm/cpuid.h>
@@ -210,7 +212,8 @@ struct kvm_vcpu_arch {
        s390_fp_regs guest_fpregs;
        unsigned int guest_acrs[NUM_ACRS];
        struct kvm_s390_local_interrupt local_int;
-       struct timer_list ckc_timer;
+       struct hrtimer ckc_timer;
+       struct tasklet_struct tasklet;
        union  {
                cpuid_t cpu_id;
                u64 stidp_data;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 9d19803111b..98997ccba50 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -154,17 +154,25 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 static int handle_validity(struct kvm_vcpu *vcpu)
 {
        int viwhy = vcpu->arch.sie_block->ipb >> 16;
+       int rc;
+
        vcpu->stat.exit_validity++;
-       if (viwhy == 0x37) {
-               fault_in_pages_writeable((char __user *)
-                       vcpu->kvm->arch.guest_origin +
-                       vcpu->arch.sie_block->prefix,
-                       PAGE_SIZE);
-               return 0;
-       }
-       VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
-                  viwhy);
-       return -ENOTSUPP;
+       if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
+               <= vcpu->kvm->arch.guest_memsize - 2*PAGE_SIZE)){
+               rc = fault_in_pages_writeable((char __user *)
+                        vcpu->kvm->arch.guest_origin +
+                        vcpu->arch.sie_block->prefix,
+                        2*PAGE_SIZE);
+               if (rc)
+                       /* user will receive sigsegv, exit to user */
+                       rc = -ENOTSUPP;
+       } else
+               rc = -ENOTSUPP;
+
+       if (rc)
+               VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
+                          viwhy);
+       return rc;
 }
 
 static int handle_instruction(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 0189356fe20..f04f5301b1b 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -12,6 +12,8 @@
 
 #include <asm/lowcore.h>
 #include <asm/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <linux/signal.h>
 #include "kvm-s390.h"
@@ -299,13 +301,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        }
 
        if ((!rc) && atomic_read(&fi->active)) {
-               spin_lock_bh(&fi->lock);
+               spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
-               spin_unlock_bh(&fi->lock);
+               spin_unlock(&fi->lock);
        }
 
        if ((!rc) && (vcpu->arch.sie_block->ckc <
@@ -318,6 +320,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        return rc;
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+       /* do real check here */
+       return 1;
+}
+
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
        return 0;
@@ -355,14 +363,12 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
-
-       vcpu->arch.ckc_timer.expires = jiffies + sltime;
-
-       add_timer(&vcpu->arch.ckc_timer);
-       VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
+       sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
+
+       hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+       VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
-       spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+       spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
@@ -371,33 +377,46 @@ no_timer:
                !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
-               spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+               spin_unlock(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
-               spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+               spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
-       spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
-       del_timer(&vcpu->arch.ckc_timer);
+       spin_unlock(&vcpu->arch.local_int.float_int->lock);
+       hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
 }
 
-void kvm_s390_idle_wakeup(unsigned long data)
+void kvm_s390_tasklet(unsigned long parm)
 {
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
 
-       spin_lock_bh(&vcpu->arch.local_int.lock);
+       spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
-       spin_unlock_bh(&vcpu->arch.local_int.lock);
+       spin_unlock(&vcpu->arch.local_int.lock);
 }
 
+/*
+ * low level hrtimer wake routine. Because this runs in hardirq context
+ * we schedule a tasklet to do the real work.
+ */
+enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+       tasklet_schedule(&vcpu->arch.tasklet);
+
+       return HRTIMER_NORESTART;
+}
 
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
@@ -436,7 +455,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
-                       spin_lock_bh(&fi->lock);
+                       spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
@@ -447,7 +466,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
-                       spin_unlock_bh(&fi->lock);
+                       spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
@@ -512,7 +531,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
@@ -529,7 +548,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
 }
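
Note on the interrupt.c hunks above: the clock-comparator wakeup is now driven by an hrtimer instead of a jiffies timer. The old sltime expression divided the TOD-clock delta by 0xf4240000/HZ (4096 * 10^6 TOD units per second, i.e. TOD units per jiffy); the new one converts the delta, whose unit is 1/4096 microsecond, straight to nanoseconds: ns = delta * 1000 / 4096 = (delta * 125) >> 9, which is then passed to ktime_set(). Because the hrtimer callback fires in hardirq context, it only schedules a tasklet, and the tasklet does the actual wakeup. A minimal sketch of that pattern follows; the names (my_vcpu, my_wakeup, my_tasklet_fn, my_arm_timer) are hypothetical and not part of this patch:

/*
 * Sketch of the hrtimer -> tasklet deferral pattern used above.
 * Hypothetical names; illustrates the technique, not the patch itself.
 */
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/types.h>

struct my_vcpu {
	struct hrtimer timer;		/* callback runs in hardirq context */
	struct tasklet_struct tasklet;	/* does the real wakeup work */
};

static void my_tasklet_fn(unsigned long parm)
{
	struct my_vcpu *v = (struct my_vcpu *) parm;

	/* softirq context: safe place to take locks and wake up waiters */
	(void) v;
}

static enum hrtimer_restart my_wakeup(struct hrtimer *timer)
{
	struct my_vcpu *v = container_of(timer, struct my_vcpu, timer);

	tasklet_schedule(&v->tasklet);	/* defer real work out of hardirq */
	return HRTIMER_NORESTART;
}

static void my_arm_timer(struct my_vcpu *v, u64 tod_delta)
{
	/* TOD units are 1/4096 microsecond: ns = delta * 1000/4096 */
	u64 ns = (tod_delta * 125) >> 9;

	hrtimer_init(&v->timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
	v->timer.function = my_wakeup;
	tasklet_init(&v->tasklet, my_tasklet_fn, (unsigned long) v);
	hrtimer_start(&v->timer, ktime_set(0, ns), HRTIMER_MODE_REL);
}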
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f4d56e9939c..10bccd1f8ae 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/fs.h>
+#include <linux/hrtimer.h>
 #include <linux/init.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
@@ -195,6 +196,10 @@ out_nokvm:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+       if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
+               (__u64) vcpu->arch.sie_block)
+               vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+       smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
@@ -283,8 +288,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
        vcpu->arch.sie_block->ecb   = 2;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
-       setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
-                   (unsigned long) vcpu);
+       hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+       tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
+                    (unsigned long) vcpu);
+       vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
@@ -307,19 +314,21 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
-       BUG_ON(kvm->arch.sca->cpu[id].sda);
+       if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+       else
+               BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 
        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-       spin_lock_bh(&kvm->arch.float_int.lock);
+       spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-       spin_unlock_bh(&kvm->arch.float_int.lock);
+       spin_unlock(&kvm->arch.float_int.lock);
 
        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
@@ -478,6 +487,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        vcpu_load(vcpu);
 
+       /* verify, that memory has been registered */
+       if (!vcpu->kvm->arch.guest_memsize) {
+               vcpu_put(vcpu);
+               return -EINVAL;
+       }
+
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -657,6 +672,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot old,
                                int user_alloc)
 {
+       int i;
+
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
@@ -664,7 +681,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */
 
-       if (mem->slot)
+       if (mem->slot || kvm->arch.guest_memsize)
                return -EINVAL;
 
        if (mem->guest_phys_addr)
@@ -676,15 +693,39 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;
 
+       if (!user_alloc)
+               return -EINVAL;
+
+       /* lock all vcpus */
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (!kvm->vcpus[i])
+                       continue;
+               if (!mutex_trylock(&kvm->vcpus[i]->mutex))
+                       goto fail_out;
+       }
+
        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;
 
-       /* FIXME: we do want to interrupt running CPUs and update their memory
-          configuration now to avoid race conditions. But hey, changing the
-          memory layout while virtual CPUs are running is usually bad
-          programming practice. */
+       /* update sie control blocks, and unlock all vcpus */
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (kvm->vcpus[i]) {
+                       kvm->vcpus[i]->arch.sie_block->gmsor =
+                               kvm->arch.guest_origin;
+                       kvm->vcpus[i]->arch.sie_block->gmslm =
+                               kvm->arch.guest_memsize +
+                               kvm->arch.guest_origin +
+                               VIRTIODESCSPACE - 1ul;
+                       mutex_unlock(&kvm->vcpus[i]->mutex);
+               }
+       }
 
        return 0;
+
+fail_out:
+       for (; i >= 0; i--)
+               mutex_unlock(&kvm->vcpus[i]->mutex);
+       return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
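
Note on kvm_arch_set_memory_region() above: the slot update now quiesces every existing vcpu by taking its mutex with mutex_trylock(), rewrites gmsor/gmslm in each SIE control block, and unlocks again; if any trylock fails it backs out and returns -EINVAL. A minimal sketch of that lock-all-or-bail pattern follows; the names (my_kvm, my_vcpu, MY_MAX_VCPUS, apply_update) are hypothetical and not part of this patch:

/*
 * Sketch of the "trylock every vcpu, update, roll back on failure" pattern.
 * Hypothetical names; illustrates the technique, not the patch itself.
 */
#include <linux/mutex.h>

#define MY_MAX_VCPUS 64

struct my_vcpu {
	struct mutex mutex;
	/* ... per-vcpu state protected by the mutex ... */
};

struct my_kvm {
	struct my_vcpu *vcpus[MY_MAX_VCPUS];
};

static int update_all_vcpus(struct my_kvm *kvm,
			    void (*apply_update)(struct my_vcpu *))
{
	int i;

	/* phase 1: take every existing vcpu mutex, or give up immediately */
	for (i = 0; i < MY_MAX_VCPUS; ++i) {
		if (!kvm->vcpus[i])
			continue;
		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
			goto fail_out;
	}

	/* phase 2: all vcpus are quiesced; update and unlock each one */
	for (i = 0; i < MY_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			apply_update(kvm->vcpus[i]);
			mutex_unlock(&kvm->vcpus[i]->mutex);
		}
	}
	return 0;

fail_out:
	/* unlock only what was actually locked before slot i failed */
	while (--i >= 0)
		if (kvm->vcpus[i])
			mutex_unlock(&kvm->vcpus[i]->mutex);
	return -EINVAL;
}

The rollback in the sketch skips empty slots and the slot whose trylock failed; it illustrates the pattern rather than reproducing the hunk above verbatim.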
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 00bbe69b78d..748fee87232 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -14,6 +14,7 @@
 #ifndef ARCH_S390_KVM_S390_H
 #define ARCH_S390_KVM_S390_H
 
+#include <linux/hrtimer.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
@@ -41,7 +42,8 @@ static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
 }
 
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
-void kvm_s390_idle_wakeup(unsigned long data);
+enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
+void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 4b88834b8dd..93ecd06e1a7 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -204,11 +204,11 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
        int cpus = 0;
        int n;
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
 
        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2) == -ENOSYS)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f27dbedf086..36678835034 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -52,7 +52,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = 3; /* not operational */
        else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
@@ -64,7 +64,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                *reg |= SIGP_STAT_STOPPED;
                rc = 1; /* status stored */
        }
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
 
        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
        return rc;
@@ -86,7 +86,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 
        inti->type = KVM_S390_INT_EMERGENCY;
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
@@ -102,7 +102,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
        spin_unlock_bh(&li->lock);
        rc = 0; /* order accepted */
 unlock:
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
        return rc;
 }
@@ -123,7 +123,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 
        inti->type = KVM_S390_SIGP_STOP;
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
@@ -142,7 +142,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
        spin_unlock_bh(&li->lock);
        rc = 0; /* order accepted */
 unlock:
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
        return rc;
 }
@@ -188,7 +188,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
        if (!inti)
                return 2; /* busy */
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
 
        if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
@@ -220,7 +220,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 out_li:
        spin_unlock_bh(&li->lock);
 out_fi:
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        return rc;
 }
 