author		Alexander Yarygin <yarygin@linux.vnet.ibm.com>	2015-01-19 05:24:51 -0500
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2015-03-17 11:25:04 -0400
commit		8ae04b8f500b9f46652c63431bf658223d875597 (patch)
tree		c1d7e4c3b8ab4cdf61aabc79c6ba351e49ad510d
parent		dd9e5b7bdba3250c075a212ff632d31edfa91ae7 (diff)
KVM: s390: Guest's memory access functions get access registers
In access register mode, the write_guest(), read_guest() and other
functions will invoke the access register translation, which requires
an ar, designated by one of the instruction fields.

Signed-off-by: Alexander Yarygin <yarygin@linux.vnet.ibm.com>
Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Acked-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--	arch/s390/kvm/diag.c		4
-rw-r--r--	arch/s390/kvm/gaccess.c		4
-rw-r--r--	arch/s390/kvm/gaccess.h		14
-rw-r--r--	arch/s390/kvm/intercept.c	4
-rw-r--r--	arch/s390/kvm/kvm-s390.c	2
-rw-r--r--	arch/s390/kvm/kvm-s390.h	25
-rw-r--r--	arch/s390/kvm/priv.c		72
-rw-r--r--	arch/s390/kvm/sigp.c		4
8 files changed, 81 insertions, 48 deletions
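For illustration, a minimal sketch (not part of the patch): in the s390
RS/S/SSE instruction formats, the base field that the decode helpers
already extract from the ipb doubles as the access-register number
whenever the guest PSW selects access-register mode, which is why the
helpers below gain an ar_t out-parameter. The vcpu structure and helper
name are simplified stand-ins for the kernel's kvm_vcpu and
kvm_s390_get_base_disp_s(), and the literal ipb value is made up.

/* Hedged sketch, compilable with any C99 compiler; not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t ar_t;

struct vcpu {
	uint32_t ipb;		/* instruction parameter block */
	uint64_t gprs[16];	/* guest general purpose registers */
};

/* Mirrors kvm_s390_get_base_disp_s(): the top nibble of the ipb names
 * the base register and, in access-register mode, the access register
 * too.  Callers that do not need it pass ar == NULL. */
static uint64_t get_base_disp_s(struct vcpu *vcpu, ar_t *ar)
{
	uint32_t base2 = vcpu->ipb >> 28;
	uint32_t disp2 = (vcpu->ipb & 0x0fff0000) >> 16;

	if (ar)
		*ar = base2;
	return (base2 ? vcpu->gprs[base2] : 0) + disp2;
}

int main(void)
{
	struct vcpu v = { .ipb = 0x50080000 };	/* base reg 5, disp 8; made up */
	ar_t ar;
	uint64_t ga = get_base_disp_s(&v, &ar);

	/* A real handler would now call read_guest(vcpu, ga, ar, buf, len)
	 * and let gaccess.c perform the access-register translation. */
	printf("guest address %#llx via access register %u\n",
	       (unsigned long long)ga, (unsigned)ar);
	return 0;
}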
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 9254afff250c..89140ddb998c 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 
 	if (vcpu->run->s.regs.gprs[rx] & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
+	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 {
-	int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
+	int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index c230904429cc..494131eda8c1 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -578,7 +578,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 	return 0;
 }
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 		 unsigned long len, int write)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -652,7 +652,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * Note: The IPTE lock is not taken during this function, so the caller
  * has to take care of this.
  */
-int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
+int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 			    unsigned long *gpa, int write)
 {
 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 20de77ed8eba..7c2866bfa63f 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -156,9 +156,9 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 }
 
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-			    unsigned long *gpa, int write);
+			    ar_t ar, unsigned long *gpa, int write);
 
-int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 		 unsigned long len, int write);
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -168,6 +168,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * write_guest - copy data from kernel space to guest space
  * @vcpu: virtual cpu
  * @ga: guest address
+ * @ar: access register
  * @data: source address in kernel space
  * @len: number of bytes to copy
  *
@@ -210,16 +211,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * if data has been changed in guest space in case of an exception.
  */
 static inline __must_check
-int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 		unsigned long len)
 {
-	return access_guest(vcpu, ga, data, len, 1);
+	return access_guest(vcpu, ga, ar, data, len, 1);
 }
 
 /**
  * read_guest - copy data from guest space to kernel space
  * @vcpu: virtual cpu
  * @ga: guest address
+ * @ar: access register
  * @data: destination address in kernel space
  * @len: number of bytes to copy
  *
@@ -229,10 +231,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
  * data will be copied from guest space to kernel space.
  */
 static inline __must_check
-int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
+int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 	       unsigned long len)
 {
-	return access_guest(vcpu, ga, data, len, 0);
+	return access_guest(vcpu, ga, ar, data, len, 0);
 }
 
 /**
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 08ae10a3b406..9e3779e3e496 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -320,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
 	/* Make sure that the source is paged-in */
 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
-				     &srcaddr, 0);
+				     reg2, &srcaddr, 0);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -329,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
 	/* Make sure that the destination is paged-in */
 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
-				     &dstaddr, 1);
+				     reg1, &dstaddr, 1);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4075acb7c517..610e90afadf2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1776,7 +1776,7 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 	 * to look up the current opcode to get the length of the instruction
 	 * to be able to forward the PSW.
 	 */
-	rc = read_guest(vcpu, psw->addr, &opcode, 1);
+	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 83f32a147d72..5d54191e573e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
-static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
+typedef u8 __bitwise ar_t;
+
+static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 
+	if (ar)
+		*ar = base2;
+
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
 }
 
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
-					      u64 *address1, u64 *address2)
+					      u64 *address1, u64 *address2,
+					      ar_t *ar_b1, ar_t *ar_b2)
 {
 	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
 	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 
 	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
 	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+
+	if (ar_b1)
+		*ar_b1 = base1;
+	if (ar_b2)
+		*ar_b2 = base2;
 }
 
 static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
@@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
 	*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
 }
 
-static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
 	if (disp2 & 0x80000)
 		disp2+=0xfff00000;
 
+	if (ar)
+		*ar = base2;
+
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
 }
 
-static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
+static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
 {
 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
 
+	if (ar)
+		*ar = base2;
+
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
 }
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 5f2642576797..f4fe02e84326 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu *cpup;
 	s64 hostclk, val;
 	int i, rc;
+	ar_t ar;
 	u64 op2;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	op2 = kvm_s390_get_base_disp_s(vcpu);
+	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (op2 & 7)	/* Operand must be on a doubleword boundary */
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	rc = read_guest(vcpu, op2, &val, sizeof(val));
+	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
+	ar_t ar;
 
 	vcpu->stat.instruction_spx++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	operand2 = kvm_s390_get_base_disp_s(vcpu);
+	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 
 	/* must be word boundary */
 	if (operand2 & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	/* get the value */
-	rc = read_guest(vcpu, operand2, &address, sizeof(address));
+	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	u64 operand2;
 	u32 address;
 	int rc;
+	ar_t ar;
 
 	vcpu->stat.instruction_stpx++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	operand2 = kvm_s390_get_base_disp_s(vcpu);
+	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 
 	/* must be word boundary */
 	if (operand2 & 3)
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
 	address = kvm_s390_get_prefix(vcpu);
 
 	/* get the value */
-	rc = write_guest(vcpu, operand2, &address, sizeof(address));
+	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	u16 vcpu_id = vcpu->vcpu_id;
 	u64 ga;
 	int rc;
+	ar_t ar;
 
 	vcpu->stat.instruction_stap++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	ga = kvm_s390_get_base_disp_s(vcpu);
+	ga = kvm_s390_get_base_disp_s(vcpu, &ar);
 
 	if (ga & 1)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
+	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
@@ -231,8 +235,9 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	u32 tpi_data[3];
 	int rc;
 	u64 addr;
+	ar_t ar;
 
-	addr = kvm_s390_get_base_disp_s(vcpu);
+	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
@@ -251,7 +256,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	 * provided area.
 	 */
 	len = sizeof(tpi_data) - 4;
-	rc = write_guest(vcpu, addr, &tpi_data, len);
+	rc = write_guest(vcpu, addr, ar, &tpi_data, len);
 	if (rc) {
 		rc = kvm_s390_inject_prog_cond(vcpu, rc);
 		goto reinject_interrupt;
@@ -395,15 +400,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
 	psw_compat_t new_psw;
 	u64 addr;
 	int rc;
+	ar_t ar;
 
 	if (gpsw->mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	addr = kvm_s390_get_base_disp_s(vcpu);
+	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	if (!(new_psw.mask & PSW32_MASK_BASE))
@@ -421,14 +427,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	psw_t new_psw;
 	u64 addr;
 	int rc;
+	ar_t ar;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	addr = kvm_s390_get_base_disp_s(vcpu);
+	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (addr & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
+	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	vcpu->arch.sie_block->gpsw = new_psw;
@@ -442,18 +449,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
 	u64 stidp_data = vcpu->arch.stidp_data;
 	u64 operand2;
 	int rc;
+	ar_t ar;
 
 	vcpu->stat.instruction_stidp++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	operand2 = kvm_s390_get_base_disp_s(vcpu);
+	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 
 	if (operand2 & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
+	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
@@ -496,6 +504,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 	unsigned long mem = 0;
 	u64 operand2;
 	int rc = 0;
+	ar_t ar;
 
 	vcpu->stat.instruction_stsi++;
 	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -518,7 +527,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	operand2 = kvm_s390_get_base_disp_s(vcpu);
+	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 
 	if (operand2 & 0xfff)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -542,7 +551,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
+	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
 	if (rc) {
 		rc = kvm_s390_inject_prog_cond(vcpu, rc);
 		goto out;
@@ -786,13 +795,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
+	ar_t ar;
 
 	vcpu->stat.instruction_lctl++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	ga = kvm_s390_get_base_disp_rs(vcpu);
+	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
 
 	if (ga & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -801,7 +811,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
 
 	nr_regs = ((reg3 - reg1) & 0xf) + 1;
-	rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	reg = reg1;
@@ -824,13 +834,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u32 ctl_array[16];
 	u64 ga;
+	ar_t ar;
 
 	vcpu->stat.instruction_stctl++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	ga = kvm_s390_get_base_disp_rs(vcpu);
+	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
 
 	if (ga & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -846,7 +857,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
 			break;
 		reg = (reg + 1) % 16;
 	} while (1);
-	rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
+	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
 	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
 }
 
@@ -857,13 +868,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
+	ar_t ar;
 
 	vcpu->stat.instruction_lctlg++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	ga = kvm_s390_get_base_disp_rsy(vcpu);
+	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
 
 	if (ga & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -872,7 +884,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
 
 	nr_regs = ((reg3 - reg1) & 0xf) + 1;
-	rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 	reg = reg1;
@@ -894,13 +906,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 	int reg, rc, nr_regs;
 	u64 ctl_array[16];
 	u64 ga;
+	ar_t ar;
 
 	vcpu->stat.instruction_stctg++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	ga = kvm_s390_get_base_disp_rsy(vcpu);
+	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
 
 	if (ga & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -916,7 +929,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
 			break;
 		reg = (reg + 1) % 16;
 	} while (1);
-	rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
+	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
 	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
 }
 
@@ -941,13 +954,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	unsigned long hva, gpa;
 	int ret = 0, cc = 0;
 	bool writable;
+	ar_t ar;
 
 	vcpu->stat.instruction_tprot++;
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
+	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);
 
 	/* we only handle the Linux memory detection case:
 	 * access key == 0
@@ -956,11 +970,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
 		ipte_lock(vcpu);
-	ret = guest_translate_address(vcpu, address1, &gpa, 1);
+	ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
 	if (ret == PGM_PROTECTION) {
 		/* Write protected? Try again with read-only... */
 		cc = 1;
-		ret = guest_translate_address(vcpu, address1, &gpa, 0);
+		ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
 	}
 	if (ret) {
 		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 755a7330d361..72e58bd2bee7 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -434,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-	order_code = kvm_s390_get_base_disp_rs(vcpu);
+	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
 	if (handle_sigp_order_in_user_space(vcpu, order_code))
 		return -EOPNOTSUPP;
 
@@ -476,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
 	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
 	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
 	struct kvm_vcpu *dest_vcpu;
-	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
+	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
 
 	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
 
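
Closing note (illustration, not part of the patch): three caller
conventions are visible above. Operand handlers thread the decoded ar
into read_guest()/write_guest(); the instruction-fetch path in
kvm-s390.c passes a literal 0, which is safe because access register 0
designates the primary address space regardless of its contents; and
decode-only callers such as the diag and sigp handlers pass NULL for
the out-parameter since they never touch guest memory. A compilable
sketch of the three patterns, with read_guest_() as a stand-in for the
kernel function:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t ar_t;
struct vcpu { uint32_t ipb; uint64_t gprs[16]; };

static uint64_t get_base_disp_rs(struct vcpu *vcpu, ar_t *ar)
{
	uint32_t base2 = vcpu->ipb >> 28;
	uint32_t disp2 = (vcpu->ipb & 0x0fff0000) >> 16;

	if (ar)
		*ar = base2;	/* base field doubles as the AR number */
	return (base2 ? vcpu->gprs[base2] : 0) + disp2;
}

/* Stub with the post-patch read_guest() signature; not the kernel API. */
static int read_guest_(struct vcpu *vcpu, uint64_t ga, ar_t ar,
		       void *data, unsigned long len)
{
	(void)vcpu; (void)data;
	printf("read %lu bytes at %#llx via AR %u\n",
	       len, (unsigned long long)ga, (unsigned)ar);
	return 0;
}

int main(void)
{
	struct vcpu v = { .ipb = 0x30040000 };	/* base reg 3, disp 4; made up */
	uint64_t buf;
	ar_t ar;

	/* 1. Operand access: thread the decoded ar through. */
	read_guest_(&v, get_base_disp_rs(&v, &ar), ar, &buf, sizeof(buf));

	/* 2. Instruction fetch: AR 0 always names the primary space. */
	read_guest_(&v, 0x1000, 0, &buf, 2);

	/* 3. Decode only (diag, sigp): no memory access, pass NULL. */
	(void)get_base_disp_rs(&v, NULL);
	return 0;
}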