author     Daniel De Graaf <dgdegra@tycho.nsa.gov>          2013-01-02 17:57:11 -0500
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2013-01-15 16:01:06 -0500
commit     2512f298cb9886e06938e761c9e924c8448d9ab8 (patch)
tree       959938beb9ef789e72217844d159c95604817ac5
parent     99beae6cb8f4dd5dab81a370b79c3b1085848d89 (diff)
xen/gntdev: fix unsafe vma access
In gntdev_ioctl_get_offset_for_vaddr, we need to hold mmap_sem while
calling find_vma() to avoid potentially having the result freed out
from under us. Similarly, the MMU notifier functions need to
synchronize with gntdev_vma_close to avoid map->vma being freed
during their iteration.

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
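The rule behind the first half of the fix is that a vm_area_struct returned by
find_vma() is only guaranteed to stay allocated while mmap_sem is held for
reading; anything needed from the vma has to be copied out before the lock is
dropped. A minimal sketch of that pattern, separate from the patch itself
(example_vaddr_to_pgoff is a hypothetical helper, not a gntdev function; the
mmap_sem/down_read API shown is the one current at the time of this commit):

#include <linux/mm.h>

/* Hypothetical helper illustrating the locking rule from the commit
 * message: hold mmap_sem for reading across find_vma() and copy out
 * whatever is needed before dropping the lock, because the vma may be
 * unmapped and freed as soon as mmap_sem is released.
 */
static int example_vaddr_to_pgoff(struct mm_struct *mm, unsigned long vaddr,
				  unsigned long *pgoff)
{
	struct vm_area_struct *vma;
	int rv = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, vaddr);
	if (vma && vma->vm_start <= vaddr) {
		*pgoff = vma->vm_pgoff +
			 ((vaddr - vma->vm_start) >> PAGE_SHIFT);
		rv = 0;		/* value copied out while the vma is pinned */
	}
	up_read(&mm->mmap_sem);	/* vma must not be dereferenced after this */
	return rv;
}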
 drivers/xen/gntdev.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2e22df2f7a3f..cca62cc8549b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -378,7 +378,20 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 	struct grant_map *map = vma->vm_private_data;
 
 	pr_debug("gntdev_vma_close %p\n", vma);
-	map->vma = NULL;
+	if (use_ptemod) {
+		struct file *file = vma->vm_file;
+		struct gntdev_priv *priv = file->private_data;
+		/* It is possible that an mmu notifier could be running
+		 * concurrently, so take priv->lock to ensure that the vma won't
+		 * vanish during the unmap_grant_pages call, since we will
+		 * spin here until that completes. Such a concurrent call will
+		 * not do any unmapping, since that has been done prior to
+		 * closing the vma, but it may still iterate the unmap_ops list.
+		 */
+		spin_lock(&priv->lock);
+		map->vma = NULL;
+		spin_unlock(&priv->lock);
+	}
 	vma->vm_private_data = NULL;
 	gntdev_put_map(map);
 }
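For context on the comment above: the gntdev mmu-notifier callbacks walk
priv->maps under priv->lock, so clearing map->vma under the same spinlock
guarantees a concurrent walker sees either a still-valid vma or NULL, never a
freed one. A rough, illustrative sketch of that walker side, assuming the
struct gntdev_priv and grant_map layouts from drivers/xen/gntdev.c
(example_notifier_walk is not a real function in that file):

/* Illustrative only: shows the lock pairing assumed by the new comment.
 * priv->lock serializes this walk against gntdev_vma_close(), which now
 * clears map->vma under the same spinlock.
 */
static void example_notifier_walk(struct gntdev_priv *priv,
				  unsigned long start, unsigned long end)
{
	struct grant_map *map;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)		/* gntdev_vma_close() already ran */
			continue;
		if (map->vma->vm_start >= end || map->vma->vm_end <= start)
			continue;
		/* ... iterate map->unmap_ops for the overlapping range ... */
	}
	spin_unlock(&priv->lock);
}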
@@ -579,25 +592,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 	struct ioctl_gntdev_get_offset_for_vaddr op;
 	struct vm_area_struct *vma;
 	struct grant_map *map;
+	int rv = -EINVAL;
 
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 
+	down_read(&current->mm->mmap_sem);
 	vma = find_vma(current->mm, op.vaddr);
 	if (!vma || vma->vm_ops != &gntdev_vmops)
-		return -EINVAL;
+		goto out_unlock;
 
 	map = vma->vm_private_data;
 	if (!map)
-		return -EINVAL;
+		goto out_unlock;
 
 	op.offset = map->index << PAGE_SHIFT;
 	op.count = map->count;
+	rv = 0;
 
-	if (copy_to_user(u, &op, sizeof(op)) != 0)
+ out_unlock:
+	up_read(&current->mm->mmap_sem);
+
+	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 		return -EFAULT;
-	return 0;
+	return rv;
 }
 
 static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)