author	Thomas Pugliese <thomas.pugliese@gmail.com>	2014-02-28 15:31:57 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-02-28 19:13:09 -0500
commit	5da43afc2b73795e82c4bc3e53a4a177a02637d0 (patch)
tree	049941c751b72206569d778bf468336599f83cd8 /drivers/usb/wusbcore
parent	acfadcea2adaa52048c6b3c8a3c75105a5540707 (diff)
usb: wusbcore: prevent urb dequeue and giveback race
This patch takes a reference to the wa_xfer object in wa_urb_dequeue to prevent the urb giveback code from completing the xfer and freeing it while wa_urb_dequeue is executing.

It also checks for done at the start to avoid a double completion scenario. Adding the check for done in urb_dequeue means that any other place where a submitted transfer segment is marked as done must complete the transfer if it is done. __wa_xfer_delayed_run was not checking this case so that check was added as well.

Signed-off-by: Thomas Pugliese <thomas.pugliese@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
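For context, the refcount-guarded dequeue pattern the patch relies on can be sketched in isolation: pin the transfer with a reference before touching it, bail out early if it has already completed, and drop the reference on every exit path. The following is a minimal, self-contained sketch using hypothetical stand-in types and helpers (struct xfer, xfer_get, xfer_put), not the actual wusbcore structures or API; in the real driver the reference is taken under wa->xfer_list_lock and the done check happens under xfer->lock, as the hunks below show.

/*
 * Minimal sketch of the refcount-guarded dequeue pattern.
 * All names here are simplified stand-ins, not the wusbcore API.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct xfer {
	int refcount;	/* protected by a lock in real code */
	bool done;	/* set by the completion/giveback path */
};

static void xfer_get(struct xfer *x)
{
	x->refcount++;
}

static void xfer_put(struct xfer *x)
{
	if (--x->refcount == 0)
		free(x);	/* last reference frees the transfer */
}

/* Returns 0 on success, -ENOENT if the transfer already completed. */
static int dequeue(struct xfer *x)
{
	int result = 0;

	xfer_get(x);		/* pin the xfer so giveback cannot free it */
	if (x->done) {		/* avoid completing the same xfer twice */
		result = -ENOENT;
		goto out;
	}
	/* ... abort outstanding segments and complete the transfer ... */
	x->done = true;
out:
	xfer_put(x);		/* balance the reference on every exit path */
	return result;
}

int main(void)
{
	struct xfer *x = calloc(1, sizeof(*x));

	if (!x)
		return 1;
	x->refcount = 1;	/* reference held by the submitted transfer */
	printf("first dequeue:  %d\n", dequeue(x));
	printf("second dequeue: %d\n", dequeue(x));	/* already done -> -ENOENT */
	xfer_put(x);		/* drop the original reference */
	return 0;
}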
Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--	drivers/usb/wusbcore/wa-xfer.c | 37 +++++++++++++++++++++++++++----------
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index cb061915f051..5e5343e69915 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1471,6 +1471,8 @@ static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
 			xfer, wa_xfer_id(xfer), seg->index,
 			atomic_read(&rpipe->segs_available), result);
 		if (unlikely(result < 0)) {
+			int done;
+
 			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 			spin_lock_irqsave(&xfer->lock, flags);
 			__wa_xfer_abort(xfer);
@@ -1479,7 +1481,10 @@ static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
 			 * the RPIPE seg_list. Mark it done.
 			 */
 			xfer->segs_done++;
+			done = __wa_xfer_is_done(xfer);
 			spin_unlock_irqrestore(&xfer->lock, flags);
+			if (done)
+				wa_xfer_completion(xfer);
 			spin_lock_irqsave(&rpipe->seg_lock, flags);
 		}
 		wa_xfer_put(xfer);
@@ -1915,20 +1920,20 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
 	/* check if it is safe to unlink. */
 	spin_lock_irqsave(&wa->xfer_list_lock, flags);
 	result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
+	if ((result == 0) && urb->hcpriv) {
+		/*
+		 * Get a xfer ref to prevent a race with wa_xfer_giveback
+		 * cleaning up the xfer while we are working with it.
+		 */
+		wa_xfer_get(urb->hcpriv);
+	}
 	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 	if (result)
 		return result;
 
 	xfer = urb->hcpriv;
-	if (xfer == NULL) {
-		/*
-		 * Nothing setup yet enqueue will see urb->status !=
-		 * -EINPROGRESS (by hcd layer) and bail out with
-		 * error, no need to do completion
-		 */
-		BUG_ON(urb->status == -EINPROGRESS);
-		goto out;
-	}
+	if (xfer == NULL)
+		return -ENOENT;
 	spin_lock_irqsave(&xfer->lock, flags);
 	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
 	rpipe = xfer->ep->hcpriv;
@@ -1939,6 +1944,16 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
 		result = -ENOENT;
 		goto out_unlock;
 	}
+	/*
+	 * Check for done to avoid racing with wa_xfer_giveback and completing
+	 * twice.
+	 */
+	if (__wa_xfer_is_done(xfer)) {
+		pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
+			xfer, wa_xfer_id(xfer));
+		result = -ENOENT;
+		goto out_unlock;
+	}
 	/* Check the delayed list -> if there, release and complete */
 	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
 	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -2007,11 +2022,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
 	wa_xfer_completion(xfer);
 	if (rpipe_ready)
 		wa_xfer_delayed_run(rpipe);
+	wa_xfer_put(xfer);
 	return result;
 
 out_unlock:
 	spin_unlock_irqrestore(&xfer->lock, flags);
-out:
+	wa_xfer_put(xfer);
 	return result;
 
 dequeue_delayed:
@@ -2020,6 +2036,7 @@ dequeue_delayed:
 	xfer->result = urb->status;
 	spin_unlock_irqrestore(&xfer->lock, flags);
 	wa_xfer_giveback(xfer);
+	wa_xfer_put(xfer);
 	usb_put_urb(urb);	/* we got a ref in enqueue() */
 	return 0;
 }