aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaolo Bonzini <pbonzini@redhat.com>2014-05-30 06:57:10 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2014-05-30 06:57:10 -0400
commit146b2cfe0c4a82857d15719a4707e6bd13fccafb (patch)
tree97a106fa30dc1800070119befbacdc3ce5b446db
parent356d4c2040539a6c7ff75723b9503bb8ddc9cb07 (diff)
parent5a5e65361f01b44caa51ba202e6720d458829fc5 (diff)
Merge tag 'kvm-s390-20140530' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next
1. Several minor fixes and cleanups for KVM
2. Fix flag check for gdb support
3. Remove unnecessary vcpu start
4. Remove code duplication for sigp interrupts
5. Better DAT handling for the TPROT instruction
6. Correct addressing exception for standby memory
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/kvm/gaccess.c57
-rw-r--r--arch/s390/kvm/gaccess.h5
-rw-r--r--arch/s390/kvm/interrupt.c1
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/priv.c56
-rw-r--r--arch/s390/kvm/sigp.c56
7 files changed, 116 insertions, 66 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a27f5007062a..4181d7baabba 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -110,6 +110,7 @@ struct kvm_s390_sie_block {
110#define ICTL_ISKE 0x00004000 110#define ICTL_ISKE 0x00004000
111#define ICTL_SSKE 0x00002000 111#define ICTL_SSKE 0x00002000
112#define ICTL_RRBE 0x00001000 112#define ICTL_RRBE 0x00001000
113#define ICTL_TPROT 0x00000200
113 __u32 ictl; /* 0x0048 */ 114 __u32 ictl; /* 0x0048 */
114 __u32 eca; /* 0x004c */ 115 __u32 eca; /* 0x004c */
115#define ICPT_INST 0x04 116#define ICPT_INST 0x04
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index db608c3f9303..4653ac6e182b 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -292,7 +292,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
292 wake_up(&vcpu->kvm->arch.ipte_wq); 292 wake_up(&vcpu->kvm->arch.ipte_wq);
293} 293}
294 294
295static void ipte_lock(struct kvm_vcpu *vcpu) 295void ipte_lock(struct kvm_vcpu *vcpu)
296{ 296{
297 if (vcpu->arch.sie_block->eca & 1) 297 if (vcpu->arch.sie_block->eca & 1)
298 ipte_lock_siif(vcpu); 298 ipte_lock_siif(vcpu);
@@ -300,7 +300,7 @@ static void ipte_lock(struct kvm_vcpu *vcpu)
300 ipte_lock_simple(vcpu); 300 ipte_lock_simple(vcpu);
301} 301}
302 302
303static void ipte_unlock(struct kvm_vcpu *vcpu) 303void ipte_unlock(struct kvm_vcpu *vcpu)
304{ 304{
305 if (vcpu->arch.sie_block->eca & 1) 305 if (vcpu->arch.sie_block->eca & 1)
306 ipte_unlock_siif(vcpu); 306 ipte_unlock_siif(vcpu);
@@ -645,6 +645,59 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
645} 645}
646 646
647/** 647/**
648 * guest_translate_address - translate guest logical into guest absolute address
649 *
650 * Parameter semantics are the same as the ones from guest_translate.
651 * The memory contents at the guest address are not changed.
652 *
653 * Note: The IPTE lock is not taken during this function, so the caller
654 * has to take care of this.
655 */
656int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
657 unsigned long *gpa, int write)
658{
659 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
660 psw_t *psw = &vcpu->arch.sie_block->gpsw;
661 struct trans_exc_code_bits *tec;
662 union asce asce;
663 int rc;
664
665 /* Access register mode is not supported yet. */
666 if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
667 return -EOPNOTSUPP;
668
669 gva = kvm_s390_logical_to_effective(vcpu, gva);
670 memset(pgm, 0, sizeof(*pgm));
671 tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
672 tec->as = psw_bits(*psw).as;
673 tec->fsi = write ? FSI_STORE : FSI_FETCH;
674 tec->addr = gva >> PAGE_SHIFT;
675 if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
676 if (write) {
677 rc = pgm->code = PGM_PROTECTION;
678 return rc;
679 }
680 }
681
682 asce.val = get_vcpu_asce(vcpu);
683 if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
684 rc = guest_translate(vcpu, gva, gpa, write);
685 if (rc > 0) {
686 if (rc == PGM_PROTECTION)
687 tec->b61 = 1;
688 pgm->code = rc;
689 }
690 } else {
691 rc = 0;
692 *gpa = kvm_s390_real_to_abs(vcpu, gva);
693 if (kvm_is_error_gpa(vcpu->kvm, *gpa))
694 rc = pgm->code = PGM_ADDRESSING;
695 }
696
697 return rc;
698}
699
700/**
648 * kvm_s390_check_low_addr_protection - check for low-address protection 701 * kvm_s390_check_low_addr_protection - check for low-address protection
649 * @ga: Guest address 702 * @ga: Guest address
650 * 703 *
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index a07ee08ac478..0149cf15058a 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -155,6 +155,9 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
155 return kvm_read_guest(vcpu->kvm, gpa, data, len); 155 return kvm_read_guest(vcpu->kvm, gpa, data, len);
156} 156}
157 157
158int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
159 unsigned long *gpa, int write);
160
158int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 161int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
159 unsigned long len, int write); 162 unsigned long len, int write);
160 163
@@ -324,6 +327,8 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
324 return access_guest_real(vcpu, gra, data, len, 0); 327 return access_guest_real(vcpu, gra, data, len, 0);
325} 328}
326 329
330void ipte_lock(struct kvm_vcpu *vcpu);
331void ipte_unlock(struct kvm_vcpu *vcpu);
327int ipte_lock_held(struct kvm_vcpu *vcpu); 332int ipte_lock_held(struct kvm_vcpu *vcpu);
328int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga); 333int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
329 334
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index bf0d9bc15bcd..90c8de22a2a0 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -442,7 +442,6 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
442 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), 442 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
443 &vcpu->arch.sie_block->gpsw, 443 &vcpu->arch.sie_block->gpsw,
444 sizeof(psw_t)); 444 sizeof(psw_t));
445 kvm_s390_vcpu_start(vcpu);
446 break; 445 break;
447 case KVM_S390_PROGRAM_INT: 446 case KVM_S390_PROGRAM_INT:
448 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", 447 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e519860c6031..43e191b25789 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -637,7 +637,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
637 if (sclp_has_siif()) 637 if (sclp_has_siif())
638 vcpu->arch.sie_block->eca |= 1; 638 vcpu->arch.sie_block->eca |= 1;
639 vcpu->arch.sie_block->fac = (int) (long) vfacilities; 639 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
640 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; 640 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
641 ICTL_TPROT;
642
641 if (kvm_s390_cmma_enabled(vcpu->kvm)) { 643 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
642 rc = kvm_s390_vcpu_setup_cmma(vcpu); 644 rc = kvm_s390_vcpu_setup_cmma(vcpu);
643 if (rc) 645 if (rc)
@@ -950,7 +952,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
950 vcpu->guest_debug = 0; 952 vcpu->guest_debug = 0;
951 kvm_s390_clear_bp_data(vcpu); 953 kvm_s390_clear_bp_data(vcpu);
952 954
953 if (vcpu->guest_debug & ~VALID_GUESTDBG_FLAGS) 955 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
954 return -EINVAL; 956 return -EINVAL;
955 957
956 if (dbg->control & KVM_GUESTDBG_ENABLE) { 958 if (dbg->control & KVM_GUESTDBG_ENABLE) {
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 6296159ac883..f89c1cd67751 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -930,8 +930,9 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
930static int handle_tprot(struct kvm_vcpu *vcpu) 930static int handle_tprot(struct kvm_vcpu *vcpu)
931{ 931{
932 u64 address1, address2; 932 u64 address1, address2;
933 struct vm_area_struct *vma; 933 unsigned long hva, gpa;
934 unsigned long user_address; 934 int ret = 0, cc = 0;
935 bool writable;
935 936
936 vcpu->stat.instruction_tprot++; 937 vcpu->stat.instruction_tprot++;
937 938
@@ -942,32 +943,41 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
942 943
943 /* we only handle the Linux memory detection case: 944 /* we only handle the Linux memory detection case:
944 * access key == 0 945 * access key == 0
945 * guest DAT == off
946 * everything else goes to userspace. */ 946 * everything else goes to userspace. */
947 if (address2 & 0xf0) 947 if (address2 & 0xf0)
948 return -EOPNOTSUPP; 948 return -EOPNOTSUPP;
949 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 949 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
950 return -EOPNOTSUPP; 950 ipte_lock(vcpu);
951 951 ret = guest_translate_address(vcpu, address1, &gpa, 1);
952 down_read(&current->mm->mmap_sem); 952 if (ret == PGM_PROTECTION) {
953 user_address = __gmap_translate(address1, vcpu->arch.gmap); 953 /* Write protected? Try again with read-only... */
954 if (IS_ERR_VALUE(user_address)) 954 cc = 1;
955 goto out_inject; 955 ret = guest_translate_address(vcpu, address1, &gpa, 0);
956 vma = find_vma(current->mm, user_address); 956 }
957 if (!vma) 957 if (ret) {
958 goto out_inject; 958 if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
959 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); 959 ret = kvm_s390_inject_program_int(vcpu, ret);
960 if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ)) 960 } else if (ret > 0) {
961 vcpu->arch.sie_block->gpsw.mask |= (1ul << 44); 961 /* Translation not available */
962 if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ)) 962 kvm_s390_set_psw_cc(vcpu, 3);
963 vcpu->arch.sie_block->gpsw.mask |= (2ul << 44); 963 ret = 0;
964 964 }
965 up_read(&current->mm->mmap_sem); 965 goto out_unlock;
966 return 0; 966 }
967 967
968out_inject: 968 hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
969 up_read(&current->mm->mmap_sem); 969 if (kvm_is_error_hva(hva)) {
970 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 970 ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
971 } else {
972 if (!writable)
973 cc = 1; /* Write not permitted ==> read-only */
974 kvm_s390_set_psw_cc(vcpu, cc);
975 /* Note: CC2 only occurs for storage keys (not supported yet) */
976 }
977out_unlock:
978 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
979 ipte_unlock(vcpu);
980 return ret;
971} 981}
972 982
973int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) 983int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index d0341d2e54b1..43079a48cc98 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -54,33 +54,23 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
54 54
55static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) 55static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
56{ 56{
57 struct kvm_s390_local_interrupt *li; 57 struct kvm_s390_interrupt s390int = {
58 struct kvm_s390_interrupt_info *inti; 58 .type = KVM_S390_INT_EMERGENCY,
59 .parm = vcpu->vcpu_id,
60 };
59 struct kvm_vcpu *dst_vcpu = NULL; 61 struct kvm_vcpu *dst_vcpu = NULL;
62 int rc = 0;
60 63
61 if (cpu_addr < KVM_MAX_VCPUS) 64 if (cpu_addr < KVM_MAX_VCPUS)
62 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); 65 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
63 if (!dst_vcpu) 66 if (!dst_vcpu)
64 return SIGP_CC_NOT_OPERATIONAL; 67 return SIGP_CC_NOT_OPERATIONAL;
65 68
66 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 69 rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
67 if (!inti) 70 if (!rc)
68 return -ENOMEM; 71 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
69
70 inti->type = KVM_S390_INT_EMERGENCY;
71 inti->emerg.code = vcpu->vcpu_id;
72 72
73 li = &dst_vcpu->arch.local_int; 73 return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
74 spin_lock_bh(&li->lock);
75 list_add_tail(&inti->list, &li->list);
76 atomic_set(&li->active, 1);
77 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
78 if (waitqueue_active(li->wq))
79 wake_up_interruptible(li->wq);
80 spin_unlock_bh(&li->lock);
81 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
82
83 return SIGP_CC_ORDER_CODE_ACCEPTED;
84} 74}
85 75
86static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr, 76static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
@@ -116,33 +106,23 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
116 106
117static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) 107static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
118{ 108{
119 struct kvm_s390_local_interrupt *li; 109 struct kvm_s390_interrupt s390int = {
120 struct kvm_s390_interrupt_info *inti; 110 .type = KVM_S390_INT_EXTERNAL_CALL,
111 .parm = vcpu->vcpu_id,
112 };
121 struct kvm_vcpu *dst_vcpu = NULL; 113 struct kvm_vcpu *dst_vcpu = NULL;
114 int rc;
122 115
123 if (cpu_addr < KVM_MAX_VCPUS) 116 if (cpu_addr < KVM_MAX_VCPUS)
124 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); 117 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
125 if (!dst_vcpu) 118 if (!dst_vcpu)
126 return SIGP_CC_NOT_OPERATIONAL; 119 return SIGP_CC_NOT_OPERATIONAL;
127 120
128 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 121 rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
129 if (!inti) 122 if (!rc)
130 return -ENOMEM; 123 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
131
132 inti->type = KVM_S390_INT_EXTERNAL_CALL;
133 inti->extcall.code = vcpu->vcpu_id;
134
135 li = &dst_vcpu->arch.local_int;
136 spin_lock_bh(&li->lock);
137 list_add_tail(&inti->list, &li->list);
138 atomic_set(&li->active, 1);
139 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
140 if (waitqueue_active(li->wq))
141 wake_up_interruptible(li->wq);
142 spin_unlock_bh(&li->lock);
143 VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
144 124
145 return SIGP_CC_ORDER_CODE_ACCEPTED; 125 return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
146} 126}
147 127
148static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) 128static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)