author     Carsten Otte <cotte@de.ibm.com>              2011-07-24 04:48:22 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2011-07-24 04:48:21 -0400
commit     092670cd90eb88c33661de21f1b4ee08d2597171 (patch)
tree       2960d3ef459eccd0c2a7d0e2a77fad9bdb2437f2 /arch/s390
parent     598841ca9919d008b520114d8a4378c4ce4e40a1 (diff)
[S390] Use gmap translation for accessing guest memory
This patch removes the kvm-s390-internal assumption of a linear mapping
of the guest address space to user space. Previously, guest memory was
translated to user addresses using a fixed offset (gmsor). The new code
uses gmap_fault to resolve guest addresses instead.
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
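
For orientation, the substance of the change condenses to the following fragment (an illustrative sketch distilled from the hunks below, not part of the patch; the prefix-page handling and error paths are elided):

        /* before: guest real address -> user address via a fixed offset,
         * bounded by the gmsor/gmslm fields of the SIE control block */
        uaddr = vcpu->arch.sie_block->gmsor + guestaddr;

        /* after: guest real address resolved through the guest map (gmap);
         * gmap_fault() faults the mapping in or returns an error value */
        uaddr = gmap_fault(guestaddr, vcpu->arch.gmap);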
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/kvm_host.h |   4
-rw-r--r--  arch/s390/kvm/gaccess.h          | 243
-rw-r--r--  arch/s390/kvm/intercept.c        |  24
-rw-r--r--  arch/s390/kvm/kvm-s390.c         |   4
-rw-r--r--  arch/s390/kvm/kvm-s390.h         |  23
-rw-r--r--  arch/s390/kvm/sigp.c             |   6
6 files changed, 194 insertions, 110 deletions
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8264b0b0f1ce..e5d082c4f3aa 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -93,9 +93,7 @@ struct kvm_s390_sie_block {
         __u32   scaol;                  /* 0x0064 */
         __u8    reserved68[4];          /* 0x0068 */
         __u32   todpr;                  /* 0x006c */
-        __u8    reserved70[16];         /* 0x0070 */
-        __u64   gmsor;                  /* 0x0080 */
-        __u64   gmslm;                  /* 0x0088 */
+        __u8    reserved70[32];         /* 0x0070 */
         psw_t   gpsw;                   /* 0x0090 */
         __u64   gg14;                   /* 0x00a0 */
         __u64   gg15;                   /* 0x00a8 */
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 03c716a0f01f..c86f6ae43f76 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,5 +1,5 @@
 /*
- * gaccess.h - access guest memory
+ * access.h - access guest memory
  *
  * Copyright IBM Corp. 2008,2009
  *
@@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                                unsigned long guestaddr)
 {
         unsigned long prefix  = vcpu->arch.sie_block->prefix;
-        unsigned long origin  = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if (guestaddr < 2 * PAGE_SIZE)
                 guestaddr += prefix;
         else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                 guestaddr -= prefix;
 
-        if (guestaddr > memsize)
-                return (void __user __force *) ERR_PTR(-EFAULT);
-
-        guestaddr += origin;
-
-        return (void __user *) guestaddr;
+        return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
 }
 
 static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
@@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 
 static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
                                        unsigned long guestdest,
-                                       const void *from, unsigned long n)
+                                       void *from, unsigned long n)
 {
         int rc;
         unsigned long i;
-        const u8 *data = from;
+        u8 *data = from;
 
         for (i = 0; i < n; i++) {
                 rc = put_guest_u8(vcpu, guestdest++, *(data++));
@@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
         return 0;
 }
 
+static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
+                                       unsigned long guestdest,
+                                       void *from, unsigned long n)
+{
+        int r;
+        void __user *uptr;
+        unsigned long size;
+
+        if (guestdest + n < guestdest)
+                return -EFAULT;
+
+        /* simple case: all within one segment table entry? */
+        if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
+                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
+
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
+
+                r = copy_to_user(uptr, from, n);
+
+                if (r)
+                        r = -EFAULT;
+
+                goto out;
+        }
+
+        /* copy first segment */
+        uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+        if (IS_ERR((void __force *) uptr))
+                return PTR_ERR((void __force *) uptr);
+
+        size = PMD_SIZE - (guestdest & ~PMD_MASK);
+
+        r = copy_to_user(uptr, from, size);
+
+        if (r) {
+                r = -EFAULT;
+                goto out;
+        }
+        from += size;
+        n -= size;
+        guestdest += size;
+
+        /* copy full segments */
+        while (n >= PMD_SIZE) {
+                uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
+
+                r = copy_to_user(uptr, from, PMD_SIZE);
+
+                if (r) {
+                        r = -EFAULT;
+                        goto out;
+                }
+                from += PMD_SIZE;
+                n -= PMD_SIZE;
+                guestdest += PMD_SIZE;
+        }
+
+        /* copy the tail segment */
+        if (n) {
+                uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
+
+                r = copy_to_user(uptr, from, n);
+
+                if (r)
+                        r = -EFAULT;
+        }
+out:
+        return r;
+}
+
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+                                         unsigned long guestdest,
+                                         void *from, unsigned long n)
+{
+        return __copy_to_guest_fast(vcpu, guestdest, from, n);
+}
+
 static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
-                        const void *from, unsigned long n)
+                        void *from, unsigned long n)
 {
         unsigned long prefix  = vcpu->arch.sie_block->prefix;
-        unsigned long origin  = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
         else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                 guestdest -= prefix;
 
-        if (guestdest + n > memsize)
-                return -EFAULT;
-
-        if (guestdest + n < guestdest)
-                return -EFAULT;
-
-        guestdest += origin;
-
-        return copy_to_user((void __user *) guestdest, from, n);
+        return __copy_to_guest_fast(vcpu, guestdest, from, n);
 slowpath:
         return __copy_to_guest_slow(vcpu, guestdest, from, n);
 }
@@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
         return 0;
 }
 
-static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-                                  unsigned long guestsrc, unsigned long n)
+static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
+                                         unsigned long guestsrc,
+                                         unsigned long n)
 {
-        unsigned long prefix  = vcpu->arch.sie_block->prefix;
-        unsigned long origin  = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+        int r;
+        void __user *uptr;
+        unsigned long size;
 
-        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
-                goto slowpath;
+        if (guestsrc + n < guestsrc)
+                return -EFAULT;
 
-        if ((guestsrc < prefix) && (guestsrc + n > prefix))
-                goto slowpath;
+        /* simple case: all within one segment table entry? */
+        if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
+                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
 
-        if ((guestsrc < prefix + 2 * PAGE_SIZE)
-            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
-                goto slowpath;
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
 
-        if (guestsrc < 2 * PAGE_SIZE)
-                guestsrc += prefix;
-        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
-                guestsrc -= prefix;
+                r = copy_from_user(to, uptr, n);
 
-        if (guestsrc + n > memsize)
-                return -EFAULT;
+                if (r)
+                        r = -EFAULT;
 
-        if (guestsrc + n < guestsrc)
-                return -EFAULT;
+                goto out;
+        }
 
-        guestsrc += origin;
+        /* copy first segment */
+        uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
 
-        return copy_from_user(to, (void __user *) guestsrc, n);
-slowpath:
-        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
-}
+        if (IS_ERR((void __force *) uptr))
+                return PTR_ERR((void __force *) uptr);
 
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
-                                         unsigned long guestdest,
-                                         const void *from, unsigned long n)
-{
-        unsigned long origin  = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+        size = PMD_SIZE - (guestsrc & ~PMD_MASK);
 
-        if (guestdest + n > memsize)
-                return -EFAULT;
+        r = copy_from_user(to, uptr, size);
 
-        if (guestdest + n < guestdest)
-                return -EFAULT;
+        if (r) {
+                r = -EFAULT;
+                goto out;
+        }
+        to += size;
+        n -= size;
+        guestsrc += size;
+
+        /* copy full segments */
+        while (n >= PMD_SIZE) {
+                uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
+
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
+
+                r = copy_from_user(to, uptr, PMD_SIZE);
+
+                if (r) {
+                        r = -EFAULT;
+                        goto out;
+                }
+                to += PMD_SIZE;
+                n -= PMD_SIZE;
+                guestsrc += PMD_SIZE;
+        }
+
+        /* copy the tail segment */
+        if (n) {
+                uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
 
-        guestdest += origin;
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
 
-        return copy_to_user((void __user *) guestdest, from, n);
+                r = copy_from_user(to, uptr, n);
+
+                if (r)
+                        r = -EFAULT;
+        }
+out:
+        return r;
 }
 
 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                            unsigned long guestsrc,
                                            unsigned long n)
 {
-        unsigned long origin  = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+}
 
-        if (guestsrc + n > memsize)
-                return -EFAULT;
+static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
+                                  unsigned long guestsrc, unsigned long n)
+{
+        unsigned long prefix  = vcpu->arch.sie_block->prefix;
 
-        if (guestsrc + n < guestsrc)
-                return -EFAULT;
+        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
+                goto slowpath;
 
-        guestsrc += origin;
+        if ((guestsrc < prefix) && (guestsrc + n > prefix))
+                goto slowpath;
+
+        if ((guestsrc < prefix + 2 * PAGE_SIZE)
+            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
+                goto slowpath;
+
+        if (guestsrc < 2 * PAGE_SIZE)
+                guestsrc += prefix;
+        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
+                guestsrc -= prefix;
 
-        return copy_from_user(to, (void __user *) guestsrc, n);
+        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+slowpath:
+        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }
 #endif
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 654fc1fa37e7..c7c51898984e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -165,26 +165,30 @@ static int handle_validity(struct kvm_vcpu *vcpu)
         int rc;
 
         vcpu->stat.exit_validity++;
-        if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
-                <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
-                rc = fault_in_pages_writeable((char __user *)
-                         vcpu->arch.sie_block->gmsor +
-                         vcpu->arch.sie_block->prefix,
-                         2*PAGE_SIZE);
+        if (viwhy == 0x37) {
+                vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
+                                    vcpu->arch.gmap);
+                if (IS_ERR_VALUE(vmaddr)) {
+                        rc = -EOPNOTSUPP;
+                        goto out;
+                }
+                rc = fault_in_pages_writeable((char __user *) vmaddr,
+                                              PAGE_SIZE);
                 if (rc) {
                         /* user will receive sigsegv, exit to user */
                         rc = -EOPNOTSUPP;
                         goto out;
                 }
-                vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
-                                vcpu->arch.gmap);
+                vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
+                                    vcpu->arch.gmap);
                 if (IS_ERR_VALUE(vmaddr)) {
                         rc = -EOPNOTSUPP;
                         goto out;
                 }
-                vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
-                                vcpu->arch.gmap);
-                if (IS_ERR_VALUE(vmaddr)) {
+                rc = fault_in_pages_writeable((char __user *) vmaddr,
+                                              PAGE_SIZE);
+                if (rc) {
+                        /* user will receive sigsegv, exit to user */
                         rc = -EOPNOTSUPP;
                         goto out;
                 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3ebb4ba83d9d..5a99f342fd0b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -549,7 +549,7 @@ rerun_vcpu:
         return rc;
 }
 
-static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
+static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                        unsigned long n, int prefix)
 {
         if (prefix)
@@ -566,7 +566,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
  */
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 {
-        const unsigned char archmode = 1;
+        unsigned char archmode = 1;
         int prefix;
 
         if (addr == KVM_S390_STORE_STATUS_NOADDR) {
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 65e2201a555b..63e5190776e9 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -58,31 +58,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
 int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
 
-static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
-{
-        return vcpu->arch.sie_block->gmslm
-                - vcpu->arch.sie_block->gmsor
-                - VIRTIODESCSPACE + 1ul;
-}
-
 static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
 {
-        int idx;
-        struct kvm_memory_slot *mem;
-        struct kvm_memslots *memslots;
-
-        idx = srcu_read_lock(&vcpu->kvm->srcu);
-        memslots = kvm_memslots(vcpu->kvm);
-
-        mem = &memslots->memslots[0];
-
-        vcpu->arch.sie_block->gmsor = mem->userspace_addr;
-        vcpu->arch.sie_block->gmslm =
-                mem->userspace_addr +
-                (mem->npages << PAGE_SHIFT) +
-                VIRTIODESCSPACE - 1ul;
-
-        srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 /* implemented in priv.c */
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 702276f5e2fa..d6a50c1fb2e6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -189,10 +189,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
         /* make sure that the new value is valid memory */
         address = address & 0x7fffe000u;
-        if ((copy_from_user(&tmp, (void __user *)
-                (address + vcpu->arch.sie_block->gmsor) , 1)) ||
-           (copy_from_user(&tmp, (void __user *)(address +
-                vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
+        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
+            copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
                 *reg |= SIGP_STAT_INVALID_PARAMETER;
                 return 1; /* invalid parameter */
         }