author     Andres Lagar-Cavilla <andres@lagarcavilla.org>    2013-08-23 13:10:06 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2013-08-30 08:44:53 -0400
commit     a5deabe0e62203350369020687c3fc3b7445a0d0
tree       c336f20e2618949cd250aae388060baaef4182df /drivers/xen/privcmd.c
parent     669b0ae961e87c824233475e987b2d39996d4849
Xen: Fix retry calls into PRIVCMD_MMAPBATCH*.
When a foreign mapper attempts to map guest frames that are paged out,
the mapper receives an ENOENT response and will have to try again while
a helper process pages the target frame back in.

Gating checks on PRIVCMD_MMAPBATCH* ioctl args were preventing retries
of mapping calls. Permit subsequent calls to update a sub-range of the
VMA, iff nothing is yet mapped in that range.

Since it is now valid to call PRIVCMD_MMAPBATCH* multiple times, only
set vma->vm_private_data if the parameters are valid and (if necessary)
the pages for the auto_translated_physmap case have been allocated.
This prevents subsequent calls from incorrectly entering the 'retry'
path when there are no pages allocated, etc.

Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
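For context, the caller-side retry loop that this change enables looks roughly
like the sketch below. It is illustrative only and not part of the patch:
map_foreign_with_retry() is a hypothetical helper; it assumes the
PRIVCMD_MMAPBATCH_V2 layout from the xen/privcmd.h uapi header, 4 KiB pages,
and that addr lies inside a VMA obtained by mmap()ing the privcmd device.
Real tools normally go through libxc/libxenforeignmemory rather than issuing
the ioctl directly.

/*
 * Illustrative userspace sketch (not part of this patch): map foreign
 * frames with PRIVCMD_MMAPBATCH_V2 and, when the ioctl reports ENOENT,
 * retry only the pages whose err[] slot is -ENOENT, i.e. frames that
 * were paged out when the previous call ran.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/privcmd.h>	/* struct privcmd_mmapbatch_v2, ioctl number */

static int map_foreign_with_retry(int privcmd_fd, void *addr, domid_t dom,
				  xen_pfn_t *gfns, int *err, unsigned int num)
{
	struct privcmd_mmapbatch_v2 batch = {
		.num  = num,
		.dom  = dom,
		.addr = (uintptr_t)addr,	/* must lie in a privcmd VMA */
		.arr  = gfns,
		.err  = err,
	};
	int rc = ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch);

	while (rc < 0 && errno == ENOENT) {
		unsigned int i;

		rc = 0;
		for (i = 0; i < num; i++) {
			if (err[i] != -ENOENT)
				continue;
			/*
			 * Sub-range retry: with this fix the kernel accepts
			 * it as long as nothing is mapped there yet.
			 */
			batch.num  = 1;
			batch.addr = (uintptr_t)addr + (uint64_t)i * 4096;
			batch.arr  = &gfns[i];
			batch.err  = &err[i];
			if (ioctl(privcmd_fd, IOCTL_PRIVCMD_MMAPBATCH_V2,
				  &batch) < 0) {
				if (errno != ENOENT)
					return -1;	/* hard failure */
				rc = -1;	/* frame still paged out */
			}
		}
		if (rc < 0)
			usleep(1000);	/* let the paging helper catch up */
	}
	return rc;
}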
Diffstat (limited to 'drivers/xen/privcmd.c')
-rw-r--r--   drivers/xen/privcmd.c   83
1 file changed, 63 insertions, 20 deletions
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index f8e5dd701ecb..8e74590fa1bb 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -43,9 +43,10 @@ MODULE_LICENSE("GPL");
 
 #define PRIV_VMA_LOCKED ((void *)1)
 
-#ifndef HAVE_ARCH_PRIVCMD_MMAP
-static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
-#endif
+static int privcmd_vma_range_is_mapped(
+	       struct vm_area_struct *vma,
+	       unsigned long addr,
+	       unsigned long nr_pages);
 
 static long privcmd_ioctl_hypercall(void __user *udata)
 {
@@ -225,9 +226,9 @@ static long privcmd_ioctl_mmap(void __user *udata)
 		vma = find_vma(mm, msg->va);
 		rc = -EINVAL;
 
-		if (!vma || (msg->va != vma->vm_start) ||
-		    !privcmd_enforce_singleshot_mapping(vma))
+		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
 			goto out_up;
+		vma->vm_private_data = PRIV_VMA_LOCKED;
 	}
 
 	state.va = vma->vm_start;
@@ -358,7 +359,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
 		kfree(pages);
 		return -ENOMEM;
 	}
-	BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
+	BUG_ON(vma->vm_private_data != NULL);
 	vma->vm_private_data = pages;
 
 	return 0;
@@ -421,19 +422,43 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 	vma = find_vma(mm, m.addr);
 	if (!vma ||
-	    vma->vm_ops != &privcmd_vm_ops ||
-	    (m.addr != vma->vm_start) ||
-	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
-	    !privcmd_enforce_singleshot_mapping(vma)) {
-		up_write(&mm->mmap_sem);
+	    vma->vm_ops != &privcmd_vm_ops) {
 		ret = -EINVAL;
-		goto out;
+		goto out_unlock;
 	}
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		ret = alloc_empty_pages(vma, m.num);
-		if (ret < 0) {
-			up_write(&mm->mmap_sem);
-			goto out;
+
+	/*
+	 * Caller must either:
+	 *
+	 * Map the whole VMA range, which will also allocate all the
+	 * pages required for the auto_translated_physmap case.
+	 *
+	 * Or
+	 *
+	 * Map unmapped holes left from a previous map attempt (e.g.,
+	 * because those foreign frames were previously paged out).
+	 */
+	if (vma->vm_private_data == NULL) {
+		if (m.addr != vma->vm_start ||
+		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (xen_feature(XENFEAT_auto_translated_physmap)) {
+			ret = alloc_empty_pages(vma, m.num);
+			if (ret < 0)
+				goto out_unlock;
+		} else
+			vma->vm_private_data = PRIV_VMA_LOCKED;
+	} else {
+		if (m.addr < vma->vm_start ||
+		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
+			ret = -EINVAL;
+			goto out_unlock;
 		}
 	}
 
@@ -466,8 +491,11 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 out:
 	free_page_list(&pagelist);
-
 	return ret;
+
+out_unlock:
+	up_write(&mm->mmap_sem);
+	goto out;
 }
 
 static long privcmd_ioctl(struct file *file,
@@ -540,9 +568,24 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
+/*
+ * For MMAPBATCH*. This allows asserting the singleshot mapping
+ * on a per pfn/pte basis. Mapping calls that fail with ENOENT
+ * can be then retried until success.
+ */
+static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
+	                unsigned long addr, void *data)
+{
+	return pte_none(*pte) ? 0 : -EBUSY;
+}
+
+static int privcmd_vma_range_is_mapped(
+	           struct vm_area_struct *vma,
+	           unsigned long addr,
+	           unsigned long nr_pages)
 {
-	return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
+	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
+				   is_mapped_fn, NULL) != 0;
 }
 
 const struct file_operations xen_privcmd_fops = {
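A note on the helper introduced in the last hunk: apply_to_page_range() walks
the page tables for the given range (allocating intermediate page tables if
they are missing) and calls the supplied function on every PTE; the first
nonzero return value aborts the walk and is propagated to the caller. That is
why returning -EBUSY from is_mapped_fn() makes privcmd_vma_range_is_mapped()
report the range as at least partly mapped. The following minimal sketch shows
the same pattern with hypothetical names; count_mapped_fn() and
count_mapped_ptes() are illustrative, not part of the patch.

/*
 * Sketch of the apply_to_page_range() pattern used by
 * privcmd_vma_range_is_mapped(): count how many PTEs in a range are
 * already populated.
 */
#include <linux/mm.h>

static int count_mapped_fn(pte_t *pte, struct page *pmd_page,
			   unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (!pte_none(*pte))
		(*count)++;
	return 0;	/* a nonzero return here would abort the walk */
}

static unsigned long count_mapped_ptes(struct mm_struct *mm,
				       unsigned long addr, unsigned long len)
{
	unsigned long count = 0;

	/* Errors from the walk itself (e.g. -ENOMEM) are ignored here. */
	apply_to_page_range(mm, addr, len, count_mapped_fn, &count);
	return count;
}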