author    Linus Torvalds <torvalds@linux-foundation.org>  2010-08-04 13:43:01 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-08-04 13:43:01 -0400
commit    5e83f6fbdb020b70c0e413312801424d13c58d68 (patch)
tree      ca270178fa891813dbc47751c331fed975d3766c /arch/s390
parent    fe445c6e2cb62a566e1a89f8798de11459975710 (diff)
parent    3444d7da1839b851eefedd372978d8a982316c36 (diff)
Merge branch 'kvm-updates/2.6.36' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.36' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (198 commits)
KVM: VMX: Fix host GDT.LIMIT corruption
KVM: MMU: using __xchg_spte more smarter
KVM: MMU: cleanup spte set and accssed/dirty tracking
KVM: MMU: don't atomicly set spte if it's not present
KVM: MMU: fix page dirty tracking lost while sync page
KVM: MMU: fix broken page accessed tracking with ept enabled
KVM: MMU: add missing reserved bits check in speculative path
KVM: MMU: fix mmu notifier invalidate handler for huge spte
KVM: x86 emulator: fix xchg instruction emulation
KVM: x86: Call mask notifiers from pic
KVM: x86: never re-execute instruction with enabled tdp
KVM: Document KVM_GET_SUPPORTED_CPUID2 ioctl
KVM: x86: emulator: inc/dec can have lock prefix
KVM: MMU: Eliminate redundant temporaries in FNAME(fetch)
KVM: MMU: Validate all gptes during fetch, not just those used for new pages
KVM: MMU: Simplify spte fetch() function
KVM: MMU: Add gpte_valid() helper
KVM: MMU: Add validate_direct_spte() helper
KVM: MMU: Add drop_large_spte() helper
KVM: MMU: Use __set_spte to link shadow pages
...
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kvm_host.h |  5
-rw-r--r--  arch/s390/kvm/intercept.c        |  2
-rw-r--r--  arch/s390/kvm/kvm-s390.c         | 64
-rw-r--r--  arch/s390/kvm/kvm-s390.h         |  2
4 files changed, 24 insertions, 49 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 27605b62b980..cef7dbf69dfc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -26,7 +26,7 @@
 
 struct sca_entry {
         atomic_t scn;
-        __u64 reserved;
+        __u32 reserved;
         __u64 sda;
         __u64 reserved2[2];
 } __attribute__((packed));
@@ -41,7 +41,8 @@ struct sca_block {
 } __attribute__((packed));
 
 #define KVM_NR_PAGE_SIZES 2
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
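A side note on the kvm_host.h hunk above: KVM_HPAGE_SHIFT() keeps its old value, but it is now built from the new KVM_HPAGE_GFN_SHIFT() helper, which gives common KVM code a plain gfn shift to work with instead of a divide. A small stand-alone sketch of how the macros compose (illustrative only; the PAGE_SHIFT value of 12 is the s390 base-page shift assumed here, not part of the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumption: 4 KiB base pages as on s390 */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Macros as they read after the patch. */
    #define KVM_NR_PAGE_SIZES 2
    #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
    #define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
    #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
    #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

    int main(void)
    {
            /* Level 1 is the 4 KiB base page; level 2 is the 1 MiB huge page. */
            printf("level 2: gfn shift %d, size %lu KiB, %lu base pages\n",
                   KVM_HPAGE_GFN_SHIFT(2), KVM_HPAGE_SIZE(2) >> 10,
                   KVM_PAGES_PER_HPAGE(2));
            return 0;
    }

Built with any C compiler this prints a gfn shift of 8, a 1024 KiB page size and 256 base pages per huge page for the second (and, with KVM_NR_PAGE_SIZES == 2, last) level, i.e. the same values the old single-macro form produced.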
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 3ddc30895e31..f7b6df45d8be 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -135,7 +135,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
         spin_lock_bh(&vcpu->arch.local_int.lock);
         if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
                 vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
-                rc = __kvm_s390_vcpu_store_status(vcpu,
+                rc = kvm_s390_vcpu_store_status(vcpu,
                                 KVM_S390_STORE_STATUS_NOADDR);
                 if (rc >= 0)
                         rc = -EOPNOTSUPP;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ae3705816878..4fe68650535c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -207,6 +207,7 @@ out_nokvm:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
         VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
         if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                 (__u64) vcpu->arch.sie_block)
                 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
@@ -296,7 +297,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
         set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
-        vcpu->arch.sie_block->ecb = 2;
+        vcpu->arch.sie_block->ecb = 6;
         vcpu->arch.sie_block->eca = 0xC1002001U;
         vcpu->arch.sie_block->fac = (int) (long) facilities;
         hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
@@ -329,6 +330,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
         kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
         vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
         vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
+        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
 
         spin_lock_init(&vcpu->arch.local_int.lock);
         INIT_LIST_HEAD(&vcpu->arch.local_int.list);
@@ -363,63 +365,49 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 {
-        vcpu_load(vcpu);
         kvm_s390_vcpu_initial_reset(vcpu);
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-        vcpu_load(vcpu);
         memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-        vcpu_load(vcpu);
         memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
-        vcpu_load(vcpu);
         memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
-        vcpu_load(vcpu);
         memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-        vcpu_load(vcpu);
         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
         vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-        vcpu_load(vcpu);
         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
-        vcpu_put(vcpu);
         return 0;
 }
 
@@ -427,14 +415,12 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
         int rc = 0;
 
-        vcpu_load(vcpu);
         if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                 rc = -EBUSY;
         else {
                 vcpu->run->psw_mask = psw.mask;
                 vcpu->run->psw_addr = psw.addr;
         }
-        vcpu_put(vcpu);
         return rc;
 }
 
@@ -498,8 +484,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         int rc;
         sigset_t sigsaved;
 
-        vcpu_load(vcpu);
-
 rerun_vcpu:
         if (vcpu->requests)
                 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
@@ -568,8 +552,6 @@ rerun_vcpu:
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
-        vcpu_put(vcpu);
-
         vcpu->stat.exit_userspace++;
         return rc;
 }
@@ -589,7 +571,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
  */
-int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 {
         const unsigned char archmode = 1;
         int prefix;
@@ -651,45 +633,42 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         return 0;
 }
 
-static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-        int rc;
-
-        vcpu_load(vcpu);
-        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
-        vcpu_put(vcpu);
-        return rc;
-}
-
 long kvm_arch_vcpu_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
 {
         struct kvm_vcpu *vcpu = filp->private_data;
         void __user *argp = (void __user *)arg;
+        long r;
 
         switch (ioctl) {
         case KVM_S390_INTERRUPT: {
                 struct kvm_s390_interrupt s390int;
 
+                r = -EFAULT;
                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
-                        return -EFAULT;
-                return kvm_s390_inject_vcpu(vcpu, &s390int);
+                        break;
+                r = kvm_s390_inject_vcpu(vcpu, &s390int);
+                break;
         }
         case KVM_S390_STORE_STATUS:
-                return kvm_s390_vcpu_store_status(vcpu, arg);
+                r = kvm_s390_vcpu_store_status(vcpu, arg);
+                break;
         case KVM_S390_SET_INITIAL_PSW: {
                 psw_t psw;
 
+                r = -EFAULT;
                 if (copy_from_user(&psw, argp, sizeof(psw)))
-                        return -EFAULT;
-                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
+                        break;
+                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
+                break;
         }
         case KVM_S390_INITIAL_RESET:
-                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+                break;
         default:
-                ;
+                r = -EINVAL;
         }
-        return -EINVAL;
+        return r;
 }
 
 /* Section: memory related */
@@ -744,11 +723,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-        return gfn;
-}
-
 static int __init kvm_s390_init(void)
 {
         int ret;
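One detail worth spelling out from the kvm-s390.c hunks above: the new set_bit(63 - id, ...) and clear_bit(63 - vcpu_id, ...) calls appear to maintain a per-VCPU bit in the SCA's mcn word, one bit per sca_entry slot. Linux's bitops number bits from the least significant end, so the "63 - id" term flips the numbering and VCPU 0 lands in the most significant bit, which seems to match the MSB-first bit numbering used for the SCA. A user-space model of that mapping (set_bit64()/clear_bit64() are illustrative stand-ins, not the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's set_bit()/clear_bit(): bit 0 is the LSB. */
    static void set_bit64(int nr, uint64_t *word)   { *word |=  (uint64_t)1 << nr; }
    static void clear_bit64(int nr, uint64_t *word) { *word &= ~((uint64_t)1 << nr); }

    int main(void)
    {
            uint64_t mcn = 0;

            /* As in kvm_arch_vcpu_create(): mark VCPU ids 0 and 5 as present. */
            set_bit64(63 - 0, &mcn);
            set_bit64(63 - 5, &mcn);
            printf("mcn = %016llx\n", (unsigned long long)mcn);  /* 8400000000000000 */

            /* As in kvm_arch_vcpu_destroy(): VCPU 5 goes away again. */
            clear_bit64(63 - 5, &mcn);
            printf("mcn = %016llx\n", (unsigned long long)mcn);  /* 8000000000000000 */
            return 0;
    }

The same 63 - id conversion shows up in both the create and destroy paths above, so the bit is set for exactly the lifetime of the SCA entry it describes.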
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index cfa9d1777457..a7b7586626db 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -92,7 +92,7 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
                                 unsigned long addr);
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);