| | | |
|---|---|---|
| author | Daniel De Graaf <dgdegra@tycho.nsa.gov> | 2013-01-02 17:57:12 -0500 |
| committer | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2013-01-15 16:01:30 -0500 |
| commit | 16a1d0225e22e4e273e6b60a21db95decde666c2 | |
| tree | 4a640870d7121933f8a94d0fbe7e5fe7014e1bf2 | |
| parent | 2512f298cb9886e06938e761c9e924c8448d9ab8 | |
xen/gntdev: correctly unmap unlinked maps in mmu notifier
If gntdev_ioctl_unmap_grant_ref is called on a range before unmapping
it, the entry is removed from priv->maps and the later call to
mn_invl_range_start won't find it to do the unmapping. Fix this by
creating another list of freeable maps that the mmu notifier can search
and use to unmap grants.
Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
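To make the race concrete, here is a minimal userspace sketch of the ordering described above. It is illustrative only, not part of the patch: the domid and grant-ref values are placeholders, error handling is elided, and it assumes the kernel uapi header is installed as `<xen/gntdev.h>`.

```c
/*
 * Illustrative reproducer sketch (placeholder values, error handling
 * elided).  Before this patch, the munmap() below raced past the mmu
 * notifier without unmapping the grants, because the preceding unmap
 * ioctl had already removed the range from priv->maps.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>

int main(void)
{
	int fd = open("/dev/xen/gntdev", O_RDWR);

	/* Map one grant from a remote domain (domid/ref are placeholders). */
	struct ioctl_gntdev_map_grant_ref map = { .count = 1 };
	map.refs[0].domid = 1;
	map.refs[0].ref = 42;
	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map);

	/* map.index is the byte offset to hand to mmap(). */
	void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, map.index);

	/* Unlink the range from priv->maps *before* tearing down the VMA... */
	struct ioctl_gntdev_unmap_grant_ref unmap = {
		.index = map.index,
		.count = 1,
	};
	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);

	/* ...so mn_invl_range_start() used to find nothing to unmap here. */
	munmap(addr, 4096);
	close(fd);
	return 0;
}
```

With the old code, the unmap ioctl removed the range from priv->maps, so the munmap-triggered notifier found nothing and the grant pages stayed mapped; the freeable_maps list introduced below gives the notifier a second place to look.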
| -rw-r--r-- | drivers/xen/gntdev.c | 92 |
1 file changed, 63 insertions(+), 29 deletions(-)
```diff
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index cca62cc8549b..9be3e5e46d1f 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -56,10 +56,15 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 static atomic_t pages_mapped = ATOMIC_INIT(0);
 
 static int use_ptemod;
+#define populate_freeable_maps use_ptemod
 
 struct gntdev_priv {
+	/* maps with visible offsets in the file descriptor */
 	struct list_head maps;
-	/* lock protects maps from concurrent changes */
+	/* maps that are not visible; will be freed on munmap.
+	 * Only populated if populate_freeable_maps == 1 */
+	struct list_head freeable_maps;
+	/* lock protects maps and freeable_maps */
 	spinlock_t lock;
 	struct mm_struct *mm;
 	struct mmu_notifier mn;
@@ -193,7 +198,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
 	return NULL;
 }
 
-static void gntdev_put_map(struct grant_map *map)
+static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 {
 	if (!map)
 		return;
@@ -208,6 +213,12 @@ static void gntdev_put_map(struct grant_map *map)
 		evtchn_put(map->notify.event);
 	}
 
+	if (populate_freeable_maps && priv) {
+		spin_lock(&priv->lock);
+		list_del(&map->next);
+		spin_unlock(&priv->lock);
+	}
+
 	if (map->pages && !use_ptemod)
 		unmap_grant_pages(map, 0, map->count);
 	gntdev_free_map(map);
@@ -376,11 +387,11 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
 static void gntdev_vma_close(struct vm_area_struct *vma)
 {
 	struct grant_map *map = vma->vm_private_data;
+	struct file *file = vma->vm_file;
+	struct gntdev_priv *priv = file->private_data;
 
 	pr_debug("gntdev_vma_close %p\n", vma);
 	if (use_ptemod) {
-		struct file *file = vma->vm_file;
-		struct gntdev_priv *priv = file->private_data;
 		/* It is possible that an mmu notifier could be running
 		 * concurrently, so take priv->lock to ensure that the vma won't
 		 * vanishing during the unmap_grant_pages call, since we will
@@ -393,7 +404,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 		spin_unlock(&priv->lock);
 	}
 	vma->vm_private_data = NULL;
-	gntdev_put_map(map);
+	gntdev_put_map(priv, map);
 }
 
 static struct vm_operations_struct gntdev_vmops = {
@@ -403,33 +414,43 @@ static struct vm_operations_struct gntdev_vmops = {
 
 /* ------------------------------------------------------------------ */
 
+static void unmap_if_in_range(struct grant_map *map,
+			      unsigned long start, unsigned long end)
+{
+	unsigned long mstart, mend;
+	int err;
+
+	if (!map->vma)
+		return;
+	if (map->vma->vm_start >= end)
+		return;
+	if (map->vma->vm_end <= start)
+		return;
+	mstart = max(start, map->vma->vm_start);
+	mend = min(end, map->vma->vm_end);
+	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
+			map->index, map->count,
+			map->vma->vm_start, map->vma->vm_end,
+			start, end, mstart, mend);
+	err = unmap_grant_pages(map,
+				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
+				(mend - mstart) >> PAGE_SHIFT);
+	WARN_ON(err);
+}
+
 static void mn_invl_range_start(struct mmu_notifier *mn,
 				struct mm_struct *mm,
 				unsigned long start, unsigned long end)
 {
 	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 	struct grant_map *map;
-	unsigned long mstart, mend;
-	int err;
 
 	spin_lock(&priv->lock);
 	list_for_each_entry(map, &priv->maps, next) {
-		if (!map->vma)
-			continue;
-		if (map->vma->vm_start >= end)
-			continue;
-		if (map->vma->vm_end <= start)
-			continue;
-		mstart = max(start, map->vma->vm_start);
-		mend = min(end, map->vma->vm_end);
-		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
-				map->index, map->count,
-				map->vma->vm_start, map->vma->vm_end,
-				start, end, mstart, mend);
-		err = unmap_grant_pages(map,
-					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
-					(mend - mstart) >> PAGE_SHIFT);
-		WARN_ON(err);
+		unmap_if_in_range(map, start, end);
+	}
+	list_for_each_entry(map, &priv->freeable_maps, next) {
+		unmap_if_in_range(map, start, end);
 	}
 	spin_unlock(&priv->lock);
 }
@@ -458,6 +479,15 @@ static void mn_release(struct mmu_notifier *mn,
 		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 		WARN_ON(err);
 	}
+	list_for_each_entry(map, &priv->freeable_maps, next) {
+		if (!map->vma)
+			continue;
+		pr_debug("map %d+%d (%lx %lx)\n",
+				map->index, map->count,
+				map->vma->vm_start, map->vma->vm_end);
+		err = unmap_grant_pages(map, /* offset */ 0, map->count);
+		WARN_ON(err);
+	}
 	spin_unlock(&priv->lock);
 }
 
@@ -479,6 +509,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&priv->maps);
+	INIT_LIST_HEAD(&priv->freeable_maps);
 	spin_lock_init(&priv->lock);
 
 	if (use_ptemod) {
@@ -513,8 +544,9 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 	while (!list_empty(&priv->maps)) {
 		map = list_entry(priv->maps.next, struct grant_map, next);
 		list_del(&map->next);
-		gntdev_put_map(map);
+		gntdev_put_map(NULL /* already removed */, map);
 	}
+	WARN_ON(!list_empty(&priv->freeable_maps));
 
 	if (use_ptemod)
 		mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -542,14 +574,14 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 
 	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
 		pr_debug("can't map: over limit\n");
-		gntdev_put_map(map);
+		gntdev_put_map(NULL, map);
 		return err;
 	}
 
 	if (copy_from_user(map->grants, &u->refs,
 			   sizeof(map->grants[0]) * op.count) != 0) {
-		gntdev_put_map(map);
-		return err;
+		gntdev_put_map(NULL, map);
+		return -EFAULT;
 	}
 
 	spin_lock(&priv->lock);
@@ -578,11 +610,13 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 	if (map) {
 		list_del(&map->next);
+		if (populate_freeable_maps)
+			list_add_tail(&map->next, &priv->freeable_maps);
 		err = 0;
 	}
 	spin_unlock(&priv->lock);
 	if (map)
-		gntdev_put_map(map);
+		gntdev_put_map(priv, map);
 	return err;
 }
 
@@ -797,7 +831,7 @@ out_unlock_put:
 out_put_map:
 	if (use_ptemod)
 		map->vma = NULL;
-	gntdev_put_map(map);
+	gntdev_put_map(priv, map);
 	return err;
 }
```
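For context on how the two callbacks touched above are invoked: mn_invl_range_start() and mn_release() are mmu notifier hooks, called by the core mm when part of the address space is invalidated (e.g. on munmap) and when the whole mm is torn down. A rough sketch of the wiring, reconstructed from the structures visible in this diff rather than quoted from gntdev.c (the real ops table may name more callbacks):

```c
/* Reconstructed sketch, not quoted from gntdev.c. */
static struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_range_start = mn_invl_range_start,
};

/* In gntdev_open(), when use_ptemod is set: */
priv->mn.ops = &gntdev_mmu_ops;
ret = mmu_notifier_register(&priv->mn, priv->mm);
/* Paired with the mmu_notifier_unregister(&priv->mn, priv->mm)
 * call visible in gntdev_release() above. */
```

Because both lists are walked under priv->lock, a map parked on freeable_maps stays reachable to these callbacks until gntdev_put_map() finally unlinks and frees it.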
