author		Andres Lagar-Cavilla <andres@lagarcavilla.org>	2013-01-14 22:35:40 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-01-15 16:00:52 -0500
commit		99beae6cb8f4dd5dab81a370b79c3b1085848d89
tree		fa508d4649e0046b318221254fb2c496ca51e818
parent		7bcc1ec07748cae3552dc9b46701c117926c8923
xen/privcmd: Fix mmap batch ioctl.
1. If any individual mapping error happens, the V1 case will mark *all*
operations as failed. Fixed.
2. The err_array was allocated with kcalloc, resulting in potentially O(n) page
allocations. Refactor code to not use this array.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
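
[Editorial illustration, not part of the original patch text.] After this change, V1 callers still find per-slot failures encoded in the top nibble of each mfn slot they passed in, while V2 callers read plain -errno values from the separate err array, which the kernel now pre-zeroes and only touches for slots that actually failed. The following is a minimal, hypothetical user-space sketch of decoding both forms; the helper names, the stdio reporting, the fabricated sample data in main(), and the hard-coded flag values (taken from the usual xen privcmd.h definitions) are assumptions for the example — only the flag names and the V1/V2 error channels come from the patch itself.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t xen_pfn_t;		/* stand-in for the real Xen type */

/* Flag values as commonly defined in xen privcmd.h; treat as illustrative. */
#define PRIVCMD_MMAPBATCH_MFN_ERROR	0xf0000000U
#define PRIVCMD_MMAPBATCH_PAGED_ERROR	0x80000000U

/* V1: after the ioctl, errors live in the top nibble of each mfn slot. */
static unsigned int count_errors_v1(const xen_pfn_t *arr, unsigned int num)
{
	unsigned int i, bad = 0;

	for (i = 0; i < num; i++) {
		if (arr[i] & PRIVCMD_MMAPBATCH_MFN_ERROR) {
			bad++;
			if ((arr[i] & PRIVCMD_MMAPBATCH_MFN_ERROR) ==
			    PRIVCMD_MMAPBATCH_PAGED_ERROR)
				printf("slot %u: frame is paged out\n", i);
		}
	}
	return bad;
}

/* V2: errors come back as plain -errno values in the separate err[] array. */
static unsigned int count_errors_v2(const int *err, unsigned int num)
{
	unsigned int i, bad = 0;

	for (i = 0; i < num; i++)
		if (err[i])
			bad++;
	return bad;
}

int main(void)
{
	/* Fabricated sample: under V1, slot 1 came back paged out;
	 * under V2, slot 2 failed with -EINVAL. */
	xen_pfn_t v1_arr[3] = { 0x1000, 0x2000 | PRIVCMD_MMAPBATCH_PAGED_ERROR, 0x3000 };
	int v2_err[3] = { 0, 0, -22 };

	printf("v1 errors: %u\n", count_errors_v1(v1_arr, 3));
	printf("v2 errors: %u\n", count_errors_v2(v2_err, 3));
	return 0;
}

This split is also what lets the kernel drop the kcalloc()'d err_array: V1 errors are folded into the mfn slots the second pass already walks, and V2 errors are written sparsely with __put_user() into the user-supplied array after a single clear_user(), so no O(n) kernel-side buffer is needed.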
 drivers/xen/privcmd.c | 83
 1 file changed, 47 insertions(+), 36 deletions(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 421375a9196a..ca2b00e9d558 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -258,11 +258,12 @@ struct mmap_batch_state {
 	 * -ENOENT if at least 1 -ENOENT has happened.
 	 */
 	int global_error;
-	/* An array for individual errors */
-	int *err;
+	int version;
 
 	/* User-space mfn array to store errors in the second pass for V1. */
 	xen_pfn_t __user *user_mfn;
+	/* User-space int array to store errors in the second pass for V2. */
+	int __user *user_err;
 };
 
 /* auto translated dom0 note: if domU being created is PV, then mfn is
@@ -285,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state)
 					 &cur_page);
 
 	/* Store error code for second pass. */
-	*(st->err++) = ret;
+	if (st->version == 1) {
+		if (ret < 0) {
+			/*
+			 * V1 encodes the error codes in the 32bit top nibble of the
+			 * mfn (with its known limitations vis-a-vis 64 bit callers).
+			 */
+			*mfnp |= (ret == -ENOENT) ?
+						PRIVCMD_MMAPBATCH_PAGED_ERROR :
+						PRIVCMD_MMAPBATCH_MFN_ERROR;
+		}
+	} else { /* st->version == 2 */
+		*((int *) mfnp) = ret;
+	}
 
 	/* And see if it affects the global_error. */
 	if (ret < 0) {
@@ -302,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state)
 	return 0;
 }
 
-static int mmap_return_errors_v1(void *data, void *state)
+static int mmap_return_errors(void *data, void *state)
 {
-	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
-	int err = *(st->err++);
 
-	/*
-	 * V1 encodes the error codes in the 32bit top nibble of the
-	 * mfn (with its known limitations vis-a-vis 64 bit callers).
-	 */
-	*mfnp |= (err == -ENOENT) ?
-				PRIVCMD_MMAPBATCH_PAGED_ERROR :
-				PRIVCMD_MMAPBATCH_MFN_ERROR;
-	return __put_user(*mfnp, st->user_mfn++);
+	if (st->version == 1) {
+		xen_pfn_t mfnp = *((xen_pfn_t *) data);
+		if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
+			return __put_user(mfnp, st->user_mfn++);
+		else
+			st->user_mfn++;
+	} else { /* st->version == 2 */
+		int err = *((int *) data);
+		if (err)
+			return __put_user(err, st->user_err++);
+		else
+			st->user_err++;
+	}
+
+	return 0;
 }
 
 /* Allocate pfns that are then mapped with gmfns from foreign domid. Update
@@ -354,7 +372,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 	struct vm_area_struct *vma;
 	unsigned long nr_pages;
 	LIST_HEAD(pagelist);
-	int *err_array = NULL;
 	struct mmap_batch_state state;
 
 	switch (version) {
@@ -390,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 		goto out;
 	}
 
-	err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
-	if (err_array == NULL) {
-		ret = -ENOMEM;
-		goto out;
+	if (version == 2) {
+		/* Zero error array now to only copy back actual errors. */
+		if (clear_user(m.err, sizeof(int) * m.num)) {
+			ret = -EFAULT;
+			goto out;
+		}
 	}
 
 	down_write(&mm->mmap_sem);
@@ -421,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 	state.va = m.addr;
 	state.index = 0;
 	state.global_error = 0;
-	state.err = err_array;
+	state.version = version;
 
 	/* mmap_batch_fn guarantees ret == 0 */
 	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
@@ -429,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 	up_write(&mm->mmap_sem);
 
-	if (version == 1) {
-		if (state.global_error) {
-			/* Write back errors in second pass. */
-			state.user_mfn = (xen_pfn_t *)m.arr;
-			state.err = err_array;
-			ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-					     &pagelist, mmap_return_errors_v1, &state);
-		} else
-			ret = 0;
-
-	} else if (version == 2) {
-		ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
-		if (ret)
-			ret = -EFAULT;
-	}
+	if (state.global_error) {
+		/* Write back errors in second pass. */
+		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.user_err = m.err;
+		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+				     &pagelist, mmap_return_errors, &state);
+	} else
+		ret = 0;
 
 	/* If we have not had any EFAULT-like global errors then set the global
 	 * error to -ENOENT if necessary. */
@@ -451,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 		ret = -ENOENT;
 
 out:
-	kfree(err_array);
 	free_page_list(&pagelist);
 
 	return ret;