about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Hans Verkuil <hverkuil@xs4all.nl>	2014-08-07 02:47:14 -0400
committer	Mauro Carvalho Chehab <m.chehab@samsung.com>	2014-08-21 16:25:31 -0400
commit	f035eb4e976ef5a059e30bc91cfd310ff030a7d3 (patch)
tree	e43abca91dd15d28c24c919110a60014185dcb82
parent	23d3090f8b44ab42162e99e8584445bc25b8922f (diff)
[media] videobuf2: fix lockdep warning
The following lockdep warning has been there ever since commit a517cca6b24fc54ac209e44118ec8962051662e3 one year ago: [ 403.117947] ====================================================== [ 403.117949] [ INFO: possible circular locking dependency detected ] [ 403.117953] 3.16.0-rc6-test-media #961 Not tainted [ 403.117954] ------------------------------------------------------- [ 403.117956] v4l2-ctl/15377 is trying to acquire lock: [ 403.117959] (&dev->mutex#3){+.+.+.}, at: [<ffffffffa005a6c3>] vb2_fop_mmap+0x33/0x90 [videobuf2_core] [ 403.117974] [ 403.117974] but task is already holding lock: [ 403.117976] (&mm->mmap_sem){++++++}, at: [<ffffffff8118291f>] vm_mmap_pgoff+0x6f/0xc0 [ 403.117987] [ 403.117987] which lock already depends on the new lock. [ 403.117987] [ 403.117990] [ 403.117990] the existing dependency chain (in reverse order) is: [ 403.117992] [ 403.117992] -> #1 (&mm->mmap_sem){++++++}: [ 403.117997] [<ffffffff810d733c>] validate_chain.isra.39+0x5fc/0x9a0 [ 403.118006] [<ffffffff810d8bc3>] __lock_acquire+0x4d3/0xd30 [ 403.118010] [<ffffffff810d9da7>] lock_acquire+0xa7/0x160 [ 403.118014] [<ffffffff8118c9ec>] might_fault+0x7c/0xb0 [ 403.118018] [<ffffffffa0028a25>] video_usercopy+0x425/0x610 [videodev] [ 403.118028] [<ffffffffa0028c25>] video_ioctl2+0x15/0x20 [videodev] [ 403.118034] [<ffffffffa0022764>] v4l2_ioctl+0x184/0x1a0 [videodev] [ 403.118040] [<ffffffff811d77d0>] do_vfs_ioctl+0x2f0/0x4f0 [ 403.118307] [<ffffffff811d7a51>] SyS_ioctl+0x81/0xa0 [ 403.118311] [<ffffffff8199dc69>] system_call_fastpath+0x16/0x1b [ 403.118319] [ 403.118319] -> #0 (&dev->mutex#3){+.+.+.}: [ 403.118324] [<ffffffff810d6a96>] check_prevs_add+0x746/0x9f0 [ 403.118329] [<ffffffff810d733c>] validate_chain.isra.39+0x5fc/0x9a0 [ 403.118333] [<ffffffff810d8bc3>] __lock_acquire+0x4d3/0xd30 [ 403.118336] [<ffffffff810d9da7>] lock_acquire+0xa7/0x160 [ 403.118340] [<ffffffff81999664>] mutex_lock_interruptible_nested+0x64/0x640 [ 403.118344] [<ffffffffa005a6c3>] 
vb2_fop_mmap+0x33/0x90 [videobuf2_core] [ 403.118349] [<ffffffffa0022122>] v4l2_mmap+0x62/0xa0 [videodev] [ 403.118354] [<ffffffff81197270>] mmap_region+0x3d0/0x5d0 [ 403.118359] [<ffffffff8119778d>] do_mmap_pgoff+0x31d/0x400 [ 403.118363] [<ffffffff81182940>] vm_mmap_pgoff+0x90/0xc0 [ 403.118366] [<ffffffff81195cef>] SyS_mmap_pgoff+0x1df/0x2a0 [ 403.118369] [<ffffffff810085c2>] SyS_mmap+0x22/0x30 [ 403.118376] [<ffffffff8199dc69>] system_call_fastpath+0x16/0x1b [ 403.118381] [ 403.118381] other info that might help us debug this: [ 403.118381] [ 403.118383] Possible unsafe locking scenario: [ 403.118383] [ 403.118385] CPU0 CPU1 [ 403.118387] ---- ---- [ 403.118388] lock(&mm->mmap_sem); [ 403.118391] lock(&dev->mutex#3); [ 403.118394] lock(&mm->mmap_sem); [ 403.118397] lock(&dev->mutex#3); [ 403.118400] [ 403.118400] *** DEADLOCK *** [ 403.118400] [ 403.118403] 1 lock held by v4l2-ctl/15377: [ 403.118405] #0: (&mm->mmap_sem){++++++}, at: [<ffffffff8118291f>] vm_mmap_pgoff+0x6f/0xc0 [ 403.118411] [ 403.118411] stack backtrace: [ 403.118415] CPU: 0 PID: 15377 Comm: v4l2-ctl Not tainted 3.16.0-rc6-test-media #961 [ 403.118418] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/31/2013 [ 403.118420] ffffffff82a6c9d0 ffff8800af37fb00 ffffffff819916a2 ffffffff82a6c9d0 [ 403.118425] ffff8800af37fb40 ffffffff810d5715 ffff8802308e4200 0000000000000000 [ 403.118429] ffff8802308e4a48 ffff8802308e4a48 ffff8802308e4200 0000000000000001 [ 403.118433] Call Trace: [ 403.118441] [<ffffffff819916a2>] dump_stack+0x4e/0x7a [ 403.118445] [<ffffffff810d5715>] print_circular_bug+0x1d5/0x2a0 [ 403.118449] [<ffffffff810d6a96>] check_prevs_add+0x746/0x9f0 [ 403.118455] [<ffffffff8119c172>] ? find_vmap_area+0x42/0x70 [ 403.118459] [<ffffffff810d733c>] validate_chain.isra.39+0x5fc/0x9a0 [ 403.118463] [<ffffffff810d8bc3>] __lock_acquire+0x4d3/0xd30 [ 403.118468] [<ffffffff810d9da7>] lock_acquire+0xa7/0x160 [ 403.118472] [<ffffffffa005a6c3>] ? 
vb2_fop_mmap+0x33/0x90 [videobuf2_core] [ 403.118476] [<ffffffffa005a6c3>] ? vb2_fop_mmap+0x33/0x90 [videobuf2_core] [ 403.118480] [<ffffffff81999664>] mutex_lock_interruptible_nested+0x64/0x640 [ 403.118484] [<ffffffffa005a6c3>] ? vb2_fop_mmap+0x33/0x90 [videobuf2_core] [ 403.118488] [<ffffffffa005a6c3>] ? vb2_fop_mmap+0x33/0x90 [videobuf2_core] [ 403.118493] [<ffffffff810d8055>] ? mark_held_locks+0x75/0xa0 [ 403.118497] [<ffffffffa005a6c3>] vb2_fop_mmap+0x33/0x90 [videobuf2_core] [ 403.118502] [<ffffffffa0022122>] v4l2_mmap+0x62/0xa0 [videodev] [ 403.118506] [<ffffffff81197270>] mmap_region+0x3d0/0x5d0 [ 403.118510] [<ffffffff8119778d>] do_mmap_pgoff+0x31d/0x400 [ 403.118513] [<ffffffff81182940>] vm_mmap_pgoff+0x90/0xc0 [ 403.118517] [<ffffffff81195cef>] SyS_mmap_pgoff+0x1df/0x2a0 [ 403.118521] [<ffffffff810085c2>] SyS_mmap+0x22/0x30 [ 403.118525] [<ffffffff8199dc69>] system_call_fastpath+0x16/0x1b The reason is that vb2_fop_mmap and vb2_fop_get_unmapped_area take the core lock while they are called with the mmap_sem semaphore held. But elsewhere in the code the core lock is taken first but calls to copy_to/from_user() can take the mmap_sem semaphore as well, potentially causing a classical A-B/B-A deadlock. However, the mmap/get_unmapped_area calls really shouldn't take the core lock at all. So what would happen if they don't take the core lock anymore? There are two situations that need to be taken into account: calling mmap while new buffers are being added and calling mmap while buffers are being deleted. The first case works almost fine without a lock: in all cases mmap relies on correctly filled-in q->num_buffers/q->num_planes values and those are only updated by reqbufs and create_buffers *after* any new buffers have been initialized completely. Except in one case: if an error occurred while allocating the buffers it will increase num_buffers and rely on __vb2_queue_free to decrease it again. 
So there is a short period where the buffer information may be wrong. The second case definitely does pose a problem: buffers may be in the process of being deleted, without the internal structure being updated. In order to fix this a new mutex is added to vb2_queue that is taken when buffers are allocated or deleted, and in vb2_mmap. That way vb2_mmap won't get stale buffer data. Note that this is a problem only for MEMORY_MMAP, so even though __qbuf_userptr and __qbuf_dmabuf also mess around with buffers (mem_priv in particular), this doesn't clash with vb2_mmap or vb2_get_unmapped_area since those are MMAP specific. As an additional bonus the hack in __buf_prepare, the USERPTR case, can be removed as well since mmap() no longer takes the core lock. All in all a much cleaner solution. Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com> Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com> Acked-by: Marek Szyprowski <m.szyprowski@samsung.com> Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
-rw-r--r--	drivers/media/v4l2-core/videobuf2-core.c	56
-rw-r--r--	include/media/videobuf2-core.h	2
2 files changed, 21 insertions, 37 deletions
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index c359006074a8..eb86913349fc 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -882,7 +882,9 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
882 * We already have buffers allocated, so first check if they 882 * We already have buffers allocated, so first check if they
883 * are not in use and can be freed. 883 * are not in use and can be freed.
884 */ 884 */
885 mutex_lock(&q->mmap_lock);
885 if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) { 886 if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
887 mutex_unlock(&q->mmap_lock);
886 dprintk(1, "memory in use, cannot free\n"); 888 dprintk(1, "memory in use, cannot free\n");
887 return -EBUSY; 889 return -EBUSY;
888 } 890 }
@@ -894,6 +896,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
894 */ 896 */
895 __vb2_queue_cancel(q); 897 __vb2_queue_cancel(q);
896 ret = __vb2_queue_free(q, q->num_buffers); 898 ret = __vb2_queue_free(q, q->num_buffers);
899 mutex_unlock(&q->mmap_lock);
897 if (ret) 900 if (ret)
898 return ret; 901 return ret;
899 902
@@ -955,6 +958,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
955 */ 958 */
956 } 959 }
957 960
961 mutex_lock(&q->mmap_lock);
958 q->num_buffers = allocated_buffers; 962 q->num_buffers = allocated_buffers;
959 963
960 if (ret < 0) { 964 if (ret < 0) {
@@ -963,8 +967,10 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
963 * from q->num_buffers. 967 * from q->num_buffers.
964 */ 968 */
965 __vb2_queue_free(q, allocated_buffers); 969 __vb2_queue_free(q, allocated_buffers);
970 mutex_unlock(&q->mmap_lock);
966 return ret; 971 return ret;
967 } 972 }
973 mutex_unlock(&q->mmap_lock);
968 974
969 /* 975 /*
970 * Return the number of successfully allocated buffers 976 * Return the number of successfully allocated buffers
@@ -1061,6 +1067,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
1061 */ 1067 */
1062 } 1068 }
1063 1069
1070 mutex_lock(&q->mmap_lock);
1064 q->num_buffers += allocated_buffers; 1071 q->num_buffers += allocated_buffers;
1065 1072
1066 if (ret < 0) { 1073 if (ret < 0) {
@@ -1069,8 +1076,10 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
1069 * from q->num_buffers. 1076 * from q->num_buffers.
1070 */ 1077 */
1071 __vb2_queue_free(q, allocated_buffers); 1078 __vb2_queue_free(q, allocated_buffers);
1079 mutex_unlock(&q->mmap_lock);
1072 return -ENOMEM; 1080 return -ENOMEM;
1073 } 1081 }
1082 mutex_unlock(&q->mmap_lock);
1074 1083
1075 /* 1084 /*
1076 * Return the number of successfully allocated buffers 1085 * Return the number of successfully allocated buffers
@@ -1582,7 +1591,6 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
1582static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b) 1591static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1583{ 1592{
1584 struct vb2_queue *q = vb->vb2_queue; 1593 struct vb2_queue *q = vb->vb2_queue;
1585 struct rw_semaphore *mmap_sem;
1586 int ret; 1594 int ret;
1587 1595
1588 ret = __verify_length(vb, b); 1596 ret = __verify_length(vb, b);
@@ -1619,26 +1627,7 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1619 ret = __qbuf_mmap(vb, b); 1627 ret = __qbuf_mmap(vb, b);
1620 break; 1628 break;
1621 case V4L2_MEMORY_USERPTR: 1629 case V4L2_MEMORY_USERPTR:
1622 /*
1623 * In case of user pointer buffers vb2 allocators need to get
1624 * direct access to userspace pages. This requires getting
1625 * the mmap semaphore for read access in the current process
1626 * structure. The same semaphore is taken before calling mmap
1627 * operation, while both qbuf/prepare_buf and mmap are called
1628 * by the driver or v4l2 core with the driver's lock held.
1629 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
1630 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
1631 * the videobuf2 core releases the driver's lock, takes
1632 * mmap_sem and then takes the driver's lock again.
1633 */
1634 mmap_sem = &current->mm->mmap_sem;
1635 call_void_qop(q, wait_prepare, q);
1636 down_read(mmap_sem);
1637 call_void_qop(q, wait_finish, q);
1638
1639 ret = __qbuf_userptr(vb, b); 1630 ret = __qbuf_userptr(vb, b);
1640
1641 up_read(mmap_sem);
1642 break; 1631 break;
1643 case V4L2_MEMORY_DMABUF: 1632 case V4L2_MEMORY_DMABUF:
1644 ret = __qbuf_dmabuf(vb, b); 1633 ret = __qbuf_dmabuf(vb, b);
@@ -2485,7 +2474,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2485 return -EINVAL; 2474 return -EINVAL;
2486 } 2475 }
2487 2476
2477 mutex_lock(&q->mmap_lock);
2488 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); 2478 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
2479 mutex_unlock(&q->mmap_lock);
2489 if (ret) 2480 if (ret)
2490 return ret; 2481 return ret;
2491 2482
@@ -2504,6 +2495,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2504 unsigned long off = pgoff << PAGE_SHIFT; 2495 unsigned long off = pgoff << PAGE_SHIFT;
2505 struct vb2_buffer *vb; 2496 struct vb2_buffer *vb;
2506 unsigned int buffer, plane; 2497 unsigned int buffer, plane;
2498 void *vaddr;
2507 int ret; 2499 int ret;
2508 2500
2509 if (q->memory != V4L2_MEMORY_MMAP) { 2501 if (q->memory != V4L2_MEMORY_MMAP) {
@@ -2520,7 +2512,8 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2520 2512
2521 vb = q->bufs[buffer]; 2513 vb = q->bufs[buffer];
2522 2514
2523 return (unsigned long)vb2_plane_vaddr(vb, plane); 2515 vaddr = vb2_plane_vaddr(vb, plane);
2516 return vaddr ? (unsigned long)vaddr : -EINVAL;
2524} 2517}
2525EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); 2518EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2526#endif 2519#endif
@@ -2660,6 +2653,7 @@ int vb2_queue_init(struct vb2_queue *q)
2660 INIT_LIST_HEAD(&q->queued_list); 2653 INIT_LIST_HEAD(&q->queued_list);
2661 INIT_LIST_HEAD(&q->done_list); 2654 INIT_LIST_HEAD(&q->done_list);
2662 spin_lock_init(&q->done_lock); 2655 spin_lock_init(&q->done_lock);
2656 mutex_init(&q->mmap_lock);
2663 init_waitqueue_head(&q->done_wq); 2657 init_waitqueue_head(&q->done_wq);
2664 2658
2665 if (q->buf_struct_size == 0) 2659 if (q->buf_struct_size == 0)
@@ -2681,7 +2675,9 @@ void vb2_queue_release(struct vb2_queue *q)
2681{ 2675{
2682 __vb2_cleanup_fileio(q); 2676 __vb2_cleanup_fileio(q);
2683 __vb2_queue_cancel(q); 2677 __vb2_queue_cancel(q);
2678 mutex_lock(&q->mmap_lock);
2684 __vb2_queue_free(q, q->num_buffers); 2679 __vb2_queue_free(q, q->num_buffers);
2680 mutex_unlock(&q->mmap_lock);
2685} 2681}
2686EXPORT_SYMBOL_GPL(vb2_queue_release); 2682EXPORT_SYMBOL_GPL(vb2_queue_release);
2687 2683
@@ -3346,15 +3342,8 @@ EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
3346int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma) 3342int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
3347{ 3343{
3348 struct video_device *vdev = video_devdata(file); 3344 struct video_device *vdev = video_devdata(file);
3349 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3350 int err;
3351 3345
3352 if (lock && mutex_lock_interruptible(lock)) 3346 return vb2_mmap(vdev->queue, vma);
3353 return -ERESTARTSYS;
3354 err = vb2_mmap(vdev->queue, vma);
3355 if (lock)
3356 mutex_unlock(lock);
3357 return err;
3358} 3347}
3359EXPORT_SYMBOL_GPL(vb2_fop_mmap); 3348EXPORT_SYMBOL_GPL(vb2_fop_mmap);
3360 3349
@@ -3473,15 +3462,8 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
3473 unsigned long len, unsigned long pgoff, unsigned long flags) 3462 unsigned long len, unsigned long pgoff, unsigned long flags)
3474{ 3463{
3475 struct video_device *vdev = video_devdata(file); 3464 struct video_device *vdev = video_devdata(file);
3476 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3477 int ret;
3478 3465
3479 if (lock && mutex_lock_interruptible(lock)) 3466 return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3480 return -ERESTARTSYS;
3481 ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3482 if (lock)
3483 mutex_unlock(lock);
3484 return ret;
3485} 3467}
3486EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area); 3468EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
3487#endif 3469#endif
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index fc910a622451..5a10d8d695b4 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -366,6 +366,7 @@ struct v4l2_fh;
366 * cannot be started unless at least this number of buffers 366 * cannot be started unless at least this number of buffers
367 * have been queued into the driver. 367 * have been queued into the driver.
368 * 368 *
369 * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
369 * @memory: current memory type used 370 * @memory: current memory type used
370 * @bufs: videobuf buffer structures 371 * @bufs: videobuf buffer structures
371 * @num_buffers: number of allocated/used buffers 372 * @num_buffers: number of allocated/used buffers
@@ -399,6 +400,7 @@ struct vb2_queue {
399 u32 min_buffers_needed; 400 u32 min_buffers_needed;
400 401
401/* private: internal use only */ 402/* private: internal use only */
403 struct mutex mmap_lock;
402 enum v4l2_memory memory; 404 enum v4l2_memory memory;
403 struct vb2_buffer *bufs[VIDEO_MAX_FRAME]; 405 struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
404 unsigned int num_buffers; 406 unsigned int num_buffers;