diff options
author | Andres Lagar-Cavilla <andreslc@gridcentric.ca> | 2012-08-31 09:59:30 -0400 |
---|---|---|
committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2012-09-05 16:36:26 -0400 |
commit | ceb90fa0a8008059ecbbf9114cb89dc71a730bb6 (patch) | |
tree | 0f0507ed14176b7e17508b551cd22fef722dde0a /drivers/xen/privcmd.c | |
parent | 69870a847856a1ba81f655a8633fce5f5b614730 (diff) |
xen/privcmd: add PRIVCMD_MMAPBATCH_V2 ioctl
PRIVCMD_MMAPBATCH_V2 extends PRIVCMD_MMAPBATCH with an additional
field for reporting the error code for every frame that could not be
mapped. libxc prefers PRIVCMD_MMAPBATCH_V2 over PRIVCMD_MMAPBATCH.
Also expand PRIVCMD_MMAPBATCH to return appropriate error-encoding top nibble
in the mfn array.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/xen/privcmd.c')
-rw-r--r-- | drivers/xen/privcmd.c | 130 |
1 file changed, 100 insertions, 30 deletions
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 85226cbeca24..92a285b1b96a 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
@@ -76,7 +76,7 @@ static void free_page_list(struct list_head *pages) | |||
76 | */ | 76 | */ |
77 | static int gather_array(struct list_head *pagelist, | 77 | static int gather_array(struct list_head *pagelist, |
78 | unsigned nelem, size_t size, | 78 | unsigned nelem, size_t size, |
79 | void __user *data) | 79 | const void __user *data) |
80 | { | 80 | { |
81 | unsigned pageidx; | 81 | unsigned pageidx; |
82 | void *pagedata; | 82 | void *pagedata; |
@@ -246,61 +246,117 @@ struct mmap_batch_state { | |||
246 | domid_t domain; | 246 | domid_t domain; |
247 | unsigned long va; | 247 | unsigned long va; |
248 | struct vm_area_struct *vma; | 248 | struct vm_area_struct *vma; |
249 | int err; | 249 | /* A tristate: |
250 | 250 | * 0 for no errors | |
251 | xen_pfn_t __user *user; | 251 | * 1 if at least one error has happened (and no |
252 | * -ENOENT errors have happened) | ||
253 | * -ENOENT if at least 1 -ENOENT has happened. | ||
254 | */ | ||
255 | int global_error; | ||
256 | /* An array for individual errors */ | ||
257 | int *err; | ||
258 | |||
259 | /* User-space mfn array to store errors in the second pass for V1. */ | ||
260 | xen_pfn_t __user *user_mfn; | ||
252 | }; | 261 | }; |
253 | 262 | ||
254 | static int mmap_batch_fn(void *data, void *state) | 263 | static int mmap_batch_fn(void *data, void *state) |
255 | { | 264 | { |
256 | xen_pfn_t *mfnp = data; | 265 | xen_pfn_t *mfnp = data; |
257 | struct mmap_batch_state *st = state; | 266 | struct mmap_batch_state *st = state; |
267 | int ret; | ||
258 | 268 | ||
259 | if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, | 269 | ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, |
260 | st->vma->vm_page_prot, st->domain) < 0) { | 270 | st->vma->vm_page_prot, st->domain); |
261 | *mfnp |= 0xf0000000U; | 271 | |
262 | st->err++; | 272 | /* Store error code for second pass. */ |
273 | *(st->err++) = ret; | ||
274 | |||
275 | /* And see if it affects the global_error. */ | ||
276 | if (ret < 0) { | ||
277 | if (ret == -ENOENT) | ||
278 | st->global_error = -ENOENT; | ||
279 | else { | ||
280 | /* Record that at least one error has happened. */ | ||
281 | if (st->global_error == 0) | ||
282 | st->global_error = 1; | ||
283 | } | ||
263 | } | 284 | } |
264 | st->va += PAGE_SIZE; | 285 | st->va += PAGE_SIZE; |
265 | 286 | ||
266 | return 0; | 287 | return 0; |
267 | } | 288 | } |
268 | 289 | ||
269 | static int mmap_return_errors(void *data, void *state) | 290 | static int mmap_return_errors_v1(void *data, void *state) |
270 | { | 291 | { |
271 | xen_pfn_t *mfnp = data; | 292 | xen_pfn_t *mfnp = data; |
272 | struct mmap_batch_state *st = state; | 293 | struct mmap_batch_state *st = state; |
273 | 294 | int err = *(st->err++); | |
274 | return put_user(*mfnp, st->user++); | 295 | |
296 | /* | ||
297 | * V1 encodes the error codes in the 32bit top nibble of the | ||
298 | * mfn (with its known limitations vis-a-vis 64 bit callers). | ||
299 | */ | ||
300 | *mfnp |= (err == -ENOENT) ? | ||
301 | PRIVCMD_MMAPBATCH_PAGED_ERROR : | ||
302 | PRIVCMD_MMAPBATCH_MFN_ERROR; | ||
303 | return __put_user(*mfnp, st->user_mfn++); | ||
275 | } | 304 | } |
276 | 305 | ||
277 | static struct vm_operations_struct privcmd_vm_ops; | 306 | static struct vm_operations_struct privcmd_vm_ops; |
278 | 307 | ||
279 | static long privcmd_ioctl_mmap_batch(void __user *udata) | 308 | static long privcmd_ioctl_mmap_batch(void __user *udata, int version) |
280 | { | 309 | { |
281 | int ret; | 310 | int ret; |
282 | struct privcmd_mmapbatch m; | 311 | struct privcmd_mmapbatch_v2 m; |
283 | struct mm_struct *mm = current->mm; | 312 | struct mm_struct *mm = current->mm; |
284 | struct vm_area_struct *vma; | 313 | struct vm_area_struct *vma; |
285 | unsigned long nr_pages; | 314 | unsigned long nr_pages; |
286 | LIST_HEAD(pagelist); | 315 | LIST_HEAD(pagelist); |
316 | int *err_array = NULL; | ||
287 | struct mmap_batch_state state; | 317 | struct mmap_batch_state state; |
288 | 318 | ||
289 | if (!xen_initial_domain()) | 319 | if (!xen_initial_domain()) |
290 | return -EPERM; | 320 | return -EPERM; |
291 | 321 | ||
292 | if (copy_from_user(&m, udata, sizeof(m))) | 322 | switch (version) { |
293 | return -EFAULT; | 323 | case 1: |
324 | if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) | ||
325 | return -EFAULT; | ||
326 | /* Returns per-frame error in m.arr. */ | ||
327 | m.err = NULL; | ||
328 | if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr))) | ||
329 | return -EFAULT; | ||
330 | break; | ||
331 | case 2: | ||
332 | if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2))) | ||
333 | return -EFAULT; | ||
334 | /* Returns per-frame error code in m.err. */ | ||
335 | if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err)))) | ||
336 | return -EFAULT; | ||
337 | break; | ||
338 | default: | ||
339 | return -EINVAL; | ||
340 | } | ||
294 | 341 | ||
295 | nr_pages = m.num; | 342 | nr_pages = m.num; |
296 | if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) | 343 | if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) |
297 | return -EINVAL; | 344 | return -EINVAL; |
298 | 345 | ||
299 | ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), | 346 | ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr); |
300 | m.arr); | 347 | |
348 | if (ret) | ||
349 | goto out; | ||
350 | if (list_empty(&pagelist)) { | ||
351 | ret = -EINVAL; | ||
352 | goto out; | ||
353 | } | ||
301 | 354 | ||
302 | if (ret || list_empty(&pagelist)) | 355 | err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL); |
356 | if (err_array == NULL) { | ||
357 | ret = -ENOMEM; | ||
303 | goto out; | 358 | goto out; |
359 | } | ||
304 | 360 | ||
305 | down_write(&mm->mmap_sem); | 361 | down_write(&mm->mmap_sem); |
306 | 362 | ||
@@ -315,24 +371,34 @@ static long privcmd_ioctl_mmap_batch(void __user *udata) | |||
315 | goto out; | 371 | goto out; |
316 | } | 372 | } |
317 | 373 | ||
318 | state.domain = m.dom; | 374 | state.domain = m.dom; |
319 | state.vma = vma; | 375 | state.vma = vma; |
320 | state.va = m.addr; | 376 | state.va = m.addr; |
321 | state.err = 0; | 377 | state.global_error = 0; |
378 | state.err = err_array; | ||
322 | 379 | ||
323 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), | 380 | /* mmap_batch_fn guarantees ret == 0 */ |
324 | &pagelist, mmap_batch_fn, &state); | 381 | BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), |
382 | &pagelist, mmap_batch_fn, &state)); | ||
325 | 383 | ||
326 | up_write(&mm->mmap_sem); | 384 | up_write(&mm->mmap_sem); |
327 | 385 | ||
328 | if (state.err > 0) { | 386 | if (state.global_error && (version == 1)) { |
329 | state.user = m.arr; | 387 | /* Write back errors in second pass. */ |
388 | state.user_mfn = (xen_pfn_t *)m.arr; | ||
389 | state.err = err_array; | ||
330 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), | 390 | ret = traverse_pages(m.num, sizeof(xen_pfn_t), |
331 | &pagelist, | 391 | &pagelist, mmap_return_errors_v1, &state); |
332 | mmap_return_errors, &state); | 392 | } else |
333 | } | 393 | ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); |
394 | |||
395 | /* If we have not had any EFAULT-like global errors then set the global | ||
396 | * error to -ENOENT if necessary. */ | ||
397 | if ((ret == 0) && (state.global_error == -ENOENT)) | ||
398 | ret = -ENOENT; | ||
334 | 399 | ||
335 | out: | 400 | out: |
401 | kfree(err_array); | ||
336 | free_page_list(&pagelist); | 402 | free_page_list(&pagelist); |
337 | 403 | ||
338 | return ret; | 404 | return ret; |
@@ -354,7 +420,11 @@ static long privcmd_ioctl(struct file *file, | |||
354 | break; | 420 | break; |
355 | 421 | ||
356 | case IOCTL_PRIVCMD_MMAPBATCH: | 422 | case IOCTL_PRIVCMD_MMAPBATCH: |
357 | ret = privcmd_ioctl_mmap_batch(udata); | 423 | ret = privcmd_ioctl_mmap_batch(udata, 1); |
424 | break; | ||
425 | |||
426 | case IOCTL_PRIVCMD_MMAPBATCH_V2: | ||
427 | ret = privcmd_ioctl_mmap_batch(udata, 2); | ||
358 | break; | 428 | break; |
359 | 429 | ||
360 | default: | 430 | default: |