Diffstat (limited to 'arch/s390/kvm/gaccess.h')
-rw-r--r--  arch/s390/kvm/gaccess.h | 243
1 file changed, 175 insertions(+), 68 deletions(-)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 03c716a0f01..c86f6ae43f7 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,5 +1,5 @@
 /*
- * gaccess.h - access guest memory
+ * access.h - access guest memory
  *
  * Copyright IBM Corp. 2008,2009
  *
@@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                 unsigned long guestaddr)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if (guestaddr < 2 * PAGE_SIZE)
                 guestaddr += prefix;
         else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                 guestaddr -= prefix;
 
-        if (guestaddr > memsize)
-                return (void __user __force *) ERR_PTR(-EFAULT);
-
-        guestaddr += origin;
-
-        return (void __user *) guestaddr;
+        return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
 }
 
 static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
@@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
 
 static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
                                        unsigned long guestdest,
-                                       const void *from, unsigned long n)
+                                       void *from, unsigned long n)
 {
         int rc;
         unsigned long i;
-        const u8 *data = from;
+        u8 *data = from;
 
         for (i = 0; i < n; i++) {
                 rc = put_guest_u8(vcpu, guestdest++, *(data++));
@@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
         return 0;
 }
 
+static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
+                                       unsigned long guestdest,
+                                       void *from, unsigned long n)
+{
+        int r;
+        void __user *uptr;
+        unsigned long size;
+
+        if (guestdest + n < guestdest)
+                return -EFAULT;
+
+        /* simple case: all within one segment table entry? */
+        if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
+                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
+
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
+
+                r = copy_to_user(uptr, from, n);
+
+                if (r)
+                        r = -EFAULT;
+
+                goto out;
+        }
+
+        /* copy first segment */
+        uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+        if (IS_ERR((void __force *) uptr))
+                return PTR_ERR((void __force *) uptr);
+
+        size = PMD_SIZE - (guestdest & ~PMD_MASK);
+
+        r = copy_to_user(uptr, from, size);
+
+        if (r) {
+                r = -EFAULT;
+                goto out;
+        }
+        from += size;
+        n -= size;
+        guestdest += size;
+
+        /* copy full segments */
+        while (n >= PMD_SIZE) {
+                uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
+
+                r = copy_to_user(uptr, from, PMD_SIZE);
+
+                if (r) {
+                        r = -EFAULT;
+                        goto out;
+                }
+                from += PMD_SIZE;
+                n -= PMD_SIZE;
+                guestdest += PMD_SIZE;
+        }
+
+        /* copy the tail segment */
+        if (n) {
+                uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
+
+                r = copy_to_user(uptr, from, n);
+
+                if (r)
+                        r = -EFAULT;
+        }
+out:
+        return r;
+}
+
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+                                         unsigned long guestdest,
+                                         void *from, unsigned long n)
+{
+        return __copy_to_guest_fast(vcpu, guestdest, from, n);
+}
+
 static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
-                                const void *from, unsigned long n)
+                                void *from, unsigned long n)
 {
         unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
 
         if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                 goto slowpath;
@@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
         else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                 guestdest -= prefix;
 
-        if (guestdest + n > memsize)
-                return -EFAULT;
-
-        if (guestdest + n < guestdest)
-                return -EFAULT;
-
-        guestdest += origin;
-
-        return copy_to_user((void __user *) guestdest, from, n);
+        return __copy_to_guest_fast(vcpu, guestdest, from, n);
 slowpath:
         return __copy_to_guest_slow(vcpu, guestdest, from, n);
 }
@@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
         return 0;
 }
 
-static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
-                                  unsigned long guestsrc, unsigned long n)
+static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
+                                         unsigned long guestsrc,
+                                         unsigned long n)
 {
-        unsigned long prefix = vcpu->arch.sie_block->prefix;
-        unsigned long origin = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+        int r;
+        void __user *uptr;
+        unsigned long size;
 
-        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
-                goto slowpath;
+        if (guestsrc + n < guestsrc)
+                return -EFAULT;
 
-        if ((guestsrc < prefix) && (guestsrc + n > prefix))
-                goto slowpath;
+        /* simple case: all within one segment table entry? */
+        if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
+                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
 
-        if ((guestsrc < prefix + 2 * PAGE_SIZE)
-            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
-                goto slowpath;
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
 
-        if (guestsrc < 2 * PAGE_SIZE)
-                guestsrc += prefix;
-        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
-                guestsrc -= prefix;
+                r = copy_from_user(to, uptr, n);
 
-        if (guestsrc + n > memsize)
-                return -EFAULT;
+                if (r)
+                        r = -EFAULT;
 
-        if (guestsrc + n < guestsrc)
-                return -EFAULT;
+                goto out;
+        }
 
-        guestsrc += origin;
+        /* copy first segment */
+        uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
 
-        return copy_from_user(to, (void __user *) guestsrc, n);
-slowpath:
-        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
-}
+        if (IS_ERR((void __force *) uptr))
+                return PTR_ERR((void __force *) uptr);
 
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
-                                         unsigned long guestdest,
-                                         const void *from, unsigned long n)
-{
-        unsigned long origin = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+        size = PMD_SIZE - (guestsrc & ~PMD_MASK);
 
-        if (guestdest + n > memsize)
-                return -EFAULT;
+        r = copy_from_user(to, uptr, size);
 
-        if (guestdest + n < guestdest)
-                return -EFAULT;
+        if (r) {
+                r = -EFAULT;
+                goto out;
+        }
+        to += size;
+        n -= size;
+        guestsrc += size;
+
+        /* copy full segments */
+        while (n >= PMD_SIZE) {
+                uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
+
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
+
+                r = copy_from_user(to, uptr, PMD_SIZE);
+
+                if (r) {
+                        r = -EFAULT;
+                        goto out;
+                }
+                to += PMD_SIZE;
+                n -= PMD_SIZE;
+                guestsrc += PMD_SIZE;
+        }
+
+        /* copy the tail segment */
+        if (n) {
+                uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
 
-        guestdest += origin;
+                if (IS_ERR((void __force *) uptr))
+                        return PTR_ERR((void __force *) uptr);
 
-        return copy_to_user((void __user *) guestdest, from, n);
+                r = copy_from_user(to, uptr, n);
+
+                if (r)
+                        r = -EFAULT;
+        }
+out:
+        return r;
 }
 
 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                            unsigned long guestsrc,
                                            unsigned long n)
 {
-        unsigned long origin = vcpu->arch.sie_block->gmsor;
-        unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+}
 
-        if (guestsrc + n > memsize)
-                return -EFAULT;
+static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
+                                  unsigned long guestsrc, unsigned long n)
+{
+        unsigned long prefix = vcpu->arch.sie_block->prefix;
 
-        if (guestsrc + n < guestsrc)
-                return -EFAULT;
+        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
+                goto slowpath;
 
-        guestsrc += origin;
+        if ((guestsrc < prefix) && (guestsrc + n > prefix))
+                goto slowpath;
+
+        if ((guestsrc < prefix + 2 * PAGE_SIZE)
+            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
+                goto slowpath;
+
+        if (guestsrc < 2 * PAGE_SIZE)
+                guestsrc += prefix;
+        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
+                guestsrc -= prefix;
 
-        return copy_from_user(to, (void __user *) guestsrc, n);
+        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+slowpath:
+        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
 }
 #endif
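
A note on the prefix handling this patch keeps: on s390, each CPU's lowcore (the first two 4 KB pages of real storage) is relocated to a per-CPU prefix area, so guest real addresses below 2 * PAGE_SIZE are redirected into the prefix region, and addresses inside the prefix region map back to absolute pages 0 and 1. The sketch below restates just that rewrite rule outside the kernel; apply_prefix is a hypothetical helper name and the values in main() are made up for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Illustrative restatement of the rewrite done by __guestaddr_to_user,
 * copy_to_guest and copy_from_guest before translation via gmap_fault. */
static unsigned long apply_prefix(unsigned long gaddr, unsigned long prefix)
{
        if (gaddr < 2 * PAGE_SIZE)                /* lowcore -> prefix area */
                gaddr += prefix;
        else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
                gaddr -= prefix;                  /* prefix area -> absolute 0/1 */
        return gaddr;
}

int main(void)
{
        unsigned long prefix = 0x20000;           /* example prefix value */

        printf("%#lx\n", apply_prefix(0x1000, prefix));  /* -> 0x21000 */
        printf("%#lx\n", apply_prefix(0x20000, prefix)); /* -> 0x0 */
        printf("%#lx\n", apply_prefix(0x50000, prefix)); /* unchanged */
        return 0;
}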
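
The new __copy_to_guest_fast/__copy_from_guest_fast helpers cannot hand the whole range to copy_to_user/copy_from_user in one call, because gmap_fault translates one segment (PMD_SIZE, a 1 MB segment on s390) at a time and adjacent guest segments need not be adjacent in the host address space. They therefore split the range into a partial head, whole segments, and a tail. The chunking arithmetic can be checked in isolation; a minimal sketch follows, with SEG_SIZE/SEG_MASK standing in for PMD_SIZE/PMD_MASK and printf standing in for the per-chunk gmap_fault plus copy.

#include <stdio.h>

#define SEG_SIZE (1UL << 20)              /* stand-in for PMD_SIZE */
#define SEG_MASK (~(SEG_SIZE - 1))        /* stand-in for PMD_MASK */

/* Print the per-segment chunks that __copy_to_guest_fast would copy. */
static void show_chunks(unsigned long gaddr, unsigned long n)
{
        unsigned long size;

        /* simple case: all within one segment table entry? */
        if ((gaddr & SEG_MASK) == ((gaddr + n) & SEG_MASK)) {
                printf("single chunk: %#lx (+%#lx)\n", gaddr, n);
                return;
        }
        /* first, partial segment up to the next segment boundary */
        size = SEG_SIZE - (gaddr & ~SEG_MASK);
        printf("head: %#lx (+%#lx)\n", gaddr, size);
        gaddr += size;
        n -= size;
        /* whole segments */
        while (n >= SEG_SIZE) {
                printf("full: %#lx (+%#lx)\n", gaddr, SEG_SIZE);
                gaddr += SEG_SIZE;
                n -= SEG_SIZE;
        }
        /* remaining tail */
        if (n)
                printf("tail: %#lx (+%#lx)\n", gaddr, n);
}

int main(void)
{
        /* crosses three segment boundaries: head + 2 full segments + tail */
        show_chunks(0xff000, 0x250000);
        return 0;
}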
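
Finally, copy_to_guest and copy_from_guest still route some ranges through the byte-wise *_slow helpers: any range that straddles the end of the two lowcore pages or either edge of the prefix area cannot be handled with a single contiguous rewrite, because the prefix mapping is discontinuous exactly at those boundaries. A hedged restatement of that dispatch predicate (needs_slowpath is a hypothetical name, not a kernel function):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* True when a guest range straddles a prefixing boundary and must be
 * copied byte-wise, mirroring the goto slowpath checks above. */
static bool needs_slowpath(unsigned long gaddr, unsigned long n,
                           unsigned long prefix)
{
        if (gaddr < 2 * PAGE_SIZE && gaddr + n > 2 * PAGE_SIZE)
                return true;
        if (gaddr < prefix && gaddr + n > prefix)
                return true;
        if (gaddr < prefix + 2 * PAGE_SIZE &&
            gaddr + n > prefix + 2 * PAGE_SIZE)
                return true;
        return false;
}

int main(void)
{
        unsigned long prefix = 0x20000;

        /* crosses the end of the lowcore pages: slow path */
        printf("%d\n", needs_slowpath(0x1f00, 0x400, prefix));  /* 1 */
        /* entirely inside one region: fast path */
        printf("%d\n", needs_slowpath(0x50000, 0x1000, prefix)); /* 0 */
        return 0;
}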