author		Julien Grall <julien.grall@citrix.com>		2015-05-05 11:54:12 -0400
committer	David Vrabel <david.vrabel@citrix.com>		2015-10-23 09:20:42 -0400
commit		5995a68a6272e4e8f4fe4de82cdc877e650fe8be
tree		2699b77a0e00a61dfb0a1d3eb65b4645a67274a8 /drivers/xen/privcmd.c
parent		d0089e8a0e4c9723d85b01713671358e3d6960df
xen/privcmd: Add support for Linux 64KB page granularity
The hypercall interface (as well as the toolstack) always uses 4KB page granularity. When the toolstack asks to map a series of guest PFNs in a batch, it expects the pages to be mapped contiguously in its virtual memory. When Linux uses 64KB page granularity, the privcmd driver therefore has to map multiple Xen PFNs into a single Linux page. Note that this solution works for any Linux page granularity that is a multiple of 4KB.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
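To make the granularity arithmetic concrete, here is a minimal standalone sketch (not part of the patch; PAGE_SHIFT of 16 assumes a 64KB arm64 build, while XEN_PAGE_SHIFT, XEN_PFN_PER_PAGE and DIV_ROUND_UP mirror the kernel's definitions):

#include <stdio.h>

#define XEN_PAGE_SHIFT   12                           /* Xen's fixed 4KB granularity */
#define XEN_PAGE_SIZE    (1UL << XEN_PAGE_SHIFT)
#define PAGE_SHIFT       16                           /* 64KB Linux page granularity */
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)  /* 16 Xen PFNs per Linux page */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long m_num = 100;	/* guest PFNs requested by the toolstack */

	/* 100 Xen PFNs need ceil(100 / 16) = 7 Linux pages. */
	printf("%lu Xen PFNs -> %lu Linux pages\n",
	       m_num, DIV_ROUND_UP(m_num, XEN_PFN_PER_PAGE));
	return 0;
}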
Diffstat (limited to 'drivers/xen/privcmd.c')
 drivers/xen/privcmd.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index b199ad3d4587..df2e6f783318 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -446,7 +446,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 		return -EINVAL;
 	}
 
-	nr_pages = m.num;
+	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
 	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
 		return -EINVAL;
 
@@ -494,7 +494,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 		goto out_unlock;
 	}
 	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		ret = alloc_empty_pages(vma, m.num);
+		ret = alloc_empty_pages(vma, nr_pages);
 		if (ret < 0)
 			goto out_unlock;
 	} else
@@ -518,6 +518,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 	state.global_error = 0;
 	state.version = version;
 
+	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
 	/* mmap_batch_fn guarantees ret == 0 */
 	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
 				    &pagelist, mmap_batch_fn, &state));
@@ -582,12 +583,13 @@ static void privcmd_close(struct vm_area_struct *vma)
 {
 	struct page **pages = vma->vm_private_data;
 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
 	int rc;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else
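The BUILD_BUG_ON added above encodes the assumption that traverse_pages_block() feeds mmap_batch_fn one pagelist page of xen_pfn_t entries at a time, so every batch must cover a whole number of Linux pages; likewise privcmd_close() must now unmap in 4KB GFNs (numgfns) while still ballooning in Linux pages (numpgs). A standalone check of the arithmetic, under the same assumed 64KB constants as the sketch above:

#include <assert.h>
#include <stdint.h>

typedef uint64_t xen_pfn_t;		/* 8 bytes, as on 64-bit builds */

#define XEN_PAGE_SHIFT   12
#define PAGE_SHIFT       16		/* assumed 64KB Linux pages */
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define XEN_PFN_PER_PAGE (1UL << (PAGE_SHIFT - XEN_PAGE_SHIFT))

int main(void)
{
	/* xen_pfn_t entries handled per traverse_pages_block() step. */
	unsigned long per_block = PAGE_SIZE / sizeof(xen_pfn_t);	/* 8192 */

	/* 8192 % 16 == 0: no Linux page is ever split across two
	 * mmap_batch_fn calls, which is what the BUILD_BUG_ON asserts
	 * at compile time. */
	assert(per_block % XEN_PFN_PER_PAGE == 0);

	/* A 128KB VMA: 2 Linux pages to free, 32 Xen GFNs to unmap. */
	unsigned long len = 2 * PAGE_SIZE;
	assert((len >> PAGE_SHIFT) == 2);	/* numpgs  */
	assert((len >> XEN_PAGE_SHIFT) == 32);	/* numgfns */
	return 0;
}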