author	Daniel De Graaf <dgdegra@tycho.nsa.gov>	2011-02-03 12:19:02 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-02-14 14:15:55 -0500
commit	aab8f11a6b4641fcb8c139420f2eae879b5d1698 (patch)
tree	11142851a24c5c26e83199980926558788335c49 /drivers/xen/gntdev.c
parent	68b025c813c2eb41ff25628e3d4952d5185eb1a4 (diff)
xen-gntdev: Support mapping in HVM domains
HVM does not allow direct PTE modification, so instead we request that Xen
change its internal p2m mappings on the allocated pages and map the memory
into userspace normally.

Note: The HVM path for map and unmap is slightly different: HVM keeps the
pages mapped until the area is deleted, while the PV case (use_ptemod being
true) must unmap them when userspace unmaps the range. In the normal use
case, this makes no difference to users since unmap time is deletion time.

[v2: Expanded commit descr.]

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
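For context, the userspace flow is identical on both paths: grant references
are handed to the device with IOCTL_GNTDEV_MAP_GRANT_REF and the returned
index is used as the mmap offset. Below is a minimal consumer sketch, not
part of this patch; the /dev/xen/gntdev path and the argv-based domid/ref
parameters are assumptions for illustration, and it presumes the kernel's
<xen/gntdev.h> ioctl header is visible to userspace.

/* Minimal userspace sketch (not from this patch): map one grant ref
 * through gntdev and access the shared page.  Device path, argv
 * handling, and the 4096-byte page size are assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/gntdev.h>

int main(int argc, char **argv)
{
	struct ioctl_gntdev_map_grant_ref map = { .count = 1 };
	void *addr;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <domid> <gntref>\n", argv[0]);
		return 1;
	}

	fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* IN: count and refs[]; OUT: index, the offset to pass to mmap. */
	map.refs[0].domid = atoi(argv[1]);
	map.refs[0].ref = atoi(argv[2]);
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map) < 0) {
		perror("ioctl");
		return 1;
	}

	/* On PV, gntdev rewrites the PTEs via find_grant_ptes(); on HVM
	 * (after this patch) the already-mapped pages are inserted with
	 * vm_insert_page().  Userspace sees the same mapping either way. */
	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.index);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("first word of shared page: 0x%x\n", *(unsigned int *)addr);
	munmap(addr, 4096);
	close(fd);
	return 0;
}

With count > 1 the refs[] array is variable-length, so the structure must be
allocated with room for all entries; the single-reference case above fits the
struct as declared in the header.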
Diffstat (limited to 'drivers/xen/gntdev.c')
-rw-r--r--	drivers/xen/gntdev.c	117
1 file changed, 83 insertions(+), 34 deletions(-)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 256162b56691..bcaf797216d1 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -32,6 +32,7 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/highmem.h>
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -52,6 +53,8 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 
 static atomic_t pages_mapped = ATOMIC_INIT(0);
 
+static int use_ptemod;
+
 struct gntdev_priv {
 	struct list_head maps;
 	/* lock protects maps from concurrent changes */
@@ -74,6 +77,8 @@ struct grant_map {
 	struct page **pages;
 };
 
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
+
 /* ------------------------------------------------------------------ */
 
 static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -179,11 +184,34 @@ static void gntdev_put_map(struct grant_map *map)
 
 	atomic_sub(map->count, &pages_mapped);
 
-	if (map->pages)
+	if (map->pages) {
+		if (!use_ptemod)
+			unmap_grant_pages(map, 0, map->count);
+
 		for (i = 0; i < map->count; i++) {
-			if (map->pages[i])
+			uint32_t check, *tmp;
+			if (!map->pages[i])
+				continue;
+			/* XXX When unmapping in an HVM domain, Xen will
+			 * sometimes end up mapping the GFN to an invalid MFN.
+			 * In this case, writes will be discarded and reads will
+			 * return all 0xFF bytes.  Leak these unusable GFNs
+			 * until Xen supports fixing their p2m mapping.
+			 *
+			 * Confirmed present in Xen 4.1-RC3 with HVM source
+			 */
+			tmp = kmap(map->pages[i]);
+			*tmp = 0xdeaddead;
+			mb();
+			check = *tmp;
+			kunmap(map->pages[i]);
+			if (check == 0xdeaddead)
 				__free_page(map->pages[i]);
+			else
+				pr_debug("Discard page %d=%ld\n", i,
+					page_to_pfn(map->pages[i]));
 		}
+	}
 	kfree(map->pages);
 	kfree(map->grants);
 	kfree(map->map_ops);
@@ -198,17 +226,16 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
 {
 	struct grant_map *map = data;
 	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
 	u64 pte_maddr;
 
 	BUG_ON(pgnr >= map->count);
 	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 
-	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
-			  GNTMAP_contains_pte | map->flags,
+	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 			  map->grants[pgnr].ref,
 			  map->grants[pgnr].domid);
-	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
-			    GNTMAP_contains_pte | map->flags,
+	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
 			    0 /* handle */);
 	return 0;
 }
@@ -216,6 +243,19 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
 static int map_grant_pages(struct grant_map *map)
 {
 	int i, err = 0;
+	phys_addr_t addr;
+
+	if (!use_ptemod) {
+		for (i = 0; i < map->count; i++) {
+			addr = (phys_addr_t)
+				pfn_to_kaddr(page_to_pfn(map->pages[i]));
+			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
+				map->grants[i].ref,
+				map->grants[i].domid);
+			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
+				map->flags, 0 /* handle */);
+		}
+	}
 
 	pr_debug("map %d+%d\n", map->index, map->count);
 	err = gnttab_map_refs(map->map_ops, map->pages, map->count);
@@ -260,17 +300,8 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 	gntdev_put_map(map);
 }
 
-static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
-			vmf->virtual_address, vmf->pgoff);
-	vmf->flags = VM_FAULT_ERROR;
-	return 0;
-}
-
 static struct vm_operations_struct gntdev_vmops = {
 	.close = gntdev_vma_close,
-	.fault = gntdev_vma_fault,
 };
 
 /* ------------------------------------------------------------------ */
@@ -355,14 +386,16 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 	INIT_LIST_HEAD(&priv->maps);
 	spin_lock_init(&priv->lock);
 
-	priv->mm = get_task_mm(current);
-	if (!priv->mm) {
-		kfree(priv);
-		return -ENOMEM;
+	if (use_ptemod) {
+		priv->mm = get_task_mm(current);
+		if (!priv->mm) {
+			kfree(priv);
+			return -ENOMEM;
+		}
+		priv->mn.ops = &gntdev_mmu_ops;
+		ret = mmu_notifier_register(&priv->mn, priv->mm);
+		mmput(priv->mm);
 	}
-	priv->mn.ops = &gntdev_mmu_ops;
-	ret = mmu_notifier_register(&priv->mn, priv->mm);
-	mmput(priv->mm);
 
 	if (ret) {
 		kfree(priv);
@@ -390,7 +423,8 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 	}
 	spin_unlock(&priv->lock);
 
-	mmu_notifier_unregister(&priv->mn, priv->mm);
+	if (use_ptemod)
+		mmu_notifier_unregister(&priv->mn, priv->mm);
 	kfree(priv);
 	return 0;
 }
@@ -515,7 +549,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	int index = vma->vm_pgoff;
 	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 	struct grant_map *map;
-	int err = -EINVAL;
+	int i, err = -EINVAL;
 
 	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
@@ -527,9 +561,9 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	map = gntdev_find_map_index(priv, index, count);
 	if (!map)
 		goto unlock_out;
-	if (map->vma)
+	if (use_ptemod && map->vma)
 		goto unlock_out;
-	if (priv->mm != vma->vm_mm) {
+	if (use_ptemod && priv->mm != vma->vm_mm) {
 		printk(KERN_WARNING "Huh? Other mm?\n");
 		goto unlock_out;
 	}
@@ -541,20 +575,24 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
 
 	vma->vm_private_data = map;
-	map->vma = vma;
 
-	map->flags = GNTMAP_host_map | GNTMAP_application_map;
+	if (use_ptemod)
+		map->vma = vma;
+
+	map->flags = GNTMAP_host_map;
 	if (!(vma->vm_flags & VM_WRITE))
 		map->flags |= GNTMAP_readonly;
 
 	spin_unlock(&priv->lock);
 
-	err = apply_to_page_range(vma->vm_mm, vma->vm_start,
-				  vma->vm_end - vma->vm_start,
-				  find_grant_ptes, map);
-	if (err) {
-		printk(KERN_WARNING "find_grant_ptes() failure.\n");
-		return err;
+	if (use_ptemod) {
+		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+					  vma->vm_end - vma->vm_start,
+					  find_grant_ptes, map);
+		if (err) {
+			printk(KERN_WARNING "find_grant_ptes() failure.\n");
+			return err;
+		}
 	}
 
 	err = map_grant_pages(map);
@@ -565,6 +603,15 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 
 	map->is_mapped = 1;
 
+	if (!use_ptemod) {
+		for (i = 0; i < count; i++) {
+			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
+				map->pages[i]);
+			if (err)
+				return err;
+		}
+	}
+
 	return 0;
 
 unlock_out:
@@ -595,6 +642,8 @@ static int __init gntdev_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	use_ptemod = xen_pv_domain();
+
 	err = misc_register(&gntdev_miscdev);
 	if (err != 0) {
 		printk(KERN_ERR "Could not register gntdev device\n");