author		David Vrabel <david.vrabel@citrix.com>	2015-01-09 13:06:12 -0500
committer	David Vrabel <david.vrabel@citrix.com>	2015-01-28 09:03:14 -0500
commit		1401c00e59ea021c575f74612fe2dbba36d6a4ee (patch)
tree		060b2f6467849669fdd64b0a64c250c1a7d5e82e
parent		3f9f1c67572f5e5e6dc84216d48d1480f3c4fcf6 (diff)
xen/gntdev: convert priv->lock to a mutex

Unmapping may require sleeping and we unmap while holding priv->lock, so
convert it to a mutex.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
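The change applies the standard spinlock-to-mutex conversion: a mutex may be held across operations that can sleep, whereas sleeping with a spinlock held is a bug, since spinlocks disable preemption. Below is a minimal sketch of the before/after pattern; the example_priv structure and example_* functions are hypothetical stand-ins for the real gntdev_priv and its lock users, not code from this patch:

```c
#include <linux/list.h>
#include <linux/mutex.h>

/* Hypothetical stand-in for gntdev_priv: a list of mappings whose
 * teardown path may sleep while the lock is held. */
struct example_priv {
	struct list_head maps;
	struct mutex lock;		/* was: spinlock_t lock; */
};

static void example_init(struct example_priv *priv)
{
	INIT_LIST_HEAD(&priv->maps);
	mutex_init(&priv->lock);	/* was: spin_lock_init(&priv->lock); */
}

static void example_teardown(struct example_priv *priv)
{
	mutex_lock(&priv->lock);	/* was: spin_lock(&priv->lock); */
	/*
	 * The critical section may now block, e.g. while waiting for
	 * an unmap operation to complete.  Under a spinlock this would
	 * be "scheduling while atomic".
	 */
	mutex_unlock(&priv->lock);	/* was: spin_unlock(&priv->lock); */
}
```

The conversion is only valid because every path that takes priv->lock (the ioctls, mmap, and the mmu notifier callbacks) runs in a context where sleeping is permitted, which is the premise of this patch.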
-rw-r--r--	drivers/xen/gntdev.c	40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 8cc3f069a10f..3c2534433b30 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -67,7 +67,7 @@ struct gntdev_priv {
 	 * Only populated if populate_freeable_maps == 1 */
 	struct list_head freeable_maps;
 	/* lock protects maps and freeable_maps */
-	spinlock_t lock;
+	struct mutex lock;
 	struct mm_struct *mm;
 	struct mmu_notifier mn;
 };
@@ -221,9 +221,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 	}
 
 	if (populate_freeable_maps && priv) {
-		spin_lock(&priv->lock);
+		mutex_lock(&priv->lock);
 		list_del(&map->next);
-		spin_unlock(&priv->lock);
+		mutex_unlock(&priv->lock);
 	}
 
 	if (map->pages && !use_ptemod)
@@ -395,9 +395,9 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 		 * not do any unmapping, since that has been done prior to
 		 * closing the vma, but it may still iterate the unmap_ops list.
 		 */
-		spin_lock(&priv->lock);
+		mutex_lock(&priv->lock);
 		map->vma = NULL;
-		spin_unlock(&priv->lock);
+		mutex_unlock(&priv->lock);
 	}
 	vma->vm_private_data = NULL;
 	gntdev_put_map(priv, map);
@@ -441,14 +441,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
 	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 	struct grant_map *map;
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	list_for_each_entry(map, &priv->maps, next) {
 		unmap_if_in_range(map, start, end);
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
 		unmap_if_in_range(map, start, end);
 	}
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 }
 
 static void mn_invl_page(struct mmu_notifier *mn,
@@ -465,7 +465,7 @@ static void mn_release(struct mmu_notifier *mn,
 	struct grant_map *map;
 	int err;
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	list_for_each_entry(map, &priv->maps, next) {
 		if (!map->vma)
 			continue;
@@ -484,7 +484,7 @@ static void mn_release(struct mmu_notifier *mn,
 		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 		WARN_ON(err);
 	}
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 }
 
 static struct mmu_notifier_ops gntdev_mmu_ops = {
@@ -506,7 +506,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 
 	INIT_LIST_HEAD(&priv->maps);
 	INIT_LIST_HEAD(&priv->freeable_maps);
-	spin_lock_init(&priv->lock);
+	mutex_init(&priv->lock);
 
 	if (use_ptemod) {
 		priv->mm = get_task_mm(current);
@@ -580,10 +580,10 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 		return -EFAULT;
 	}
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	gntdev_add_map(priv, map);
 	op.index = map->index << PAGE_SHIFT;
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 
 	if (copy_to_user(u, &op, sizeof(op)) != 0)
 		return -EFAULT;
@@ -602,7 +602,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 		return -EFAULT;
 	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 	if (map) {
 		list_del(&map->next);
@@ -610,7 +610,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 		list_add_tail(&map->next, &priv->freeable_maps);
 		err = 0;
 	}
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 	if (map)
 		gntdev_put_map(priv, map);
 	return err;
@@ -678,7 +678,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 	out_flags = op.action;
 	out_event = op.event_channel_port;
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 
 	list_for_each_entry(map, &priv->maps, next) {
 		uint64_t begin = map->index << PAGE_SHIFT;
@@ -706,7 +706,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 	rc = 0;
 
 unlock_out:
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 
 	/* Drop the reference to the event channel we did not save in the map */
 	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
@@ -756,7 +756,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
 			index, count, vma->vm_start, vma->vm_pgoff);
 
-	spin_lock(&priv->lock);
+	mutex_lock(&priv->lock);
 	map = gntdev_find_map_index(priv, index, count);
 	if (!map)
 		goto unlock_out;
@@ -791,7 +791,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		map->flags |= GNTMAP_readonly;
 	}
 
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 
 	if (use_ptemod) {
 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -819,11 +819,11 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	return 0;
 
 unlock_out:
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 	return err;
 
 out_unlock_put:
-	spin_unlock(&priv->lock);
+	mutex_unlock(&priv->lock);
 out_put_map:
 	if (use_ptemod)
 		map->vma = NULL;
