author | David Brownell <dbrownell@users.sourceforge.net> | 2009-03-26 20:36:57 -0400
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2009-04-17 13:50:25 -0400
commit | 74bb35083d889c696a0f54be76ffe85a66dcbdc1 (patch)
tree | 9cb99faa41416c1af594a76345469fea57a9fd9e /drivers/usb
parent | e13c594f3a1fc2c78e7a20d1a07974f71e4b448f (diff)
USB: musb_host, minor enqueue locking fix (v2)
Someone noted that the enqueue path used an unlocked access
for usb_host_endpoint->hcpriv ... fix that, by being safe
and always accessing it under spinlock protection.
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
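
The pattern behind the fix is simple: a field that writers update under a lock (here usb_host_endpoint->hcpriv, protected by musb->lock) must also be read while holding that same lock, otherwise the reader can pick up a pointer that another context is about to clear or free. Below is a minimal, hypothetical userspace sketch of the before/after access pattern using a POSIX spinlock; the names (struct endpoint, dev_lock, peek_unlocked, peek_locked) are illustrative only and are not part of the musb driver.

    /* Hypothetical illustration only -- not driver code.
     * Build with: cc -O2 -pthread sketch.c -o sketch
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for usb_host_endpoint: hcpriv may be set or cleared by
     * other threads (e.g. an endpoint-disable path) under dev_lock */
    struct endpoint {
            void *hcpriv;
    };

    static pthread_spinlock_t dev_lock;
    static struct endpoint ep;

    /* UNSAFE: snapshots ep.hcpriv before taking the lock, so the value
     * used afterwards may already be stale or freed */
    static void *peek_unlocked(void)
    {
            void *qh = ep.hcpriv;   /* racy read */

            pthread_spin_lock(&dev_lock);
            /* ... code that assumes qh is still valid ... */
            pthread_spin_unlock(&dev_lock);
            return qh;
    }

    /* SAFE: reads hcpriv only while holding the same lock the writers
     * use -- the same idea as reading hep->hcpriv under musb->lock */
    static void *peek_locked(void)
    {
            void *qh;

            pthread_spin_lock(&dev_lock);
            qh = ep.hcpriv;
            pthread_spin_unlock(&dev_lock);
            return qh;
    }

    int main(void)
    {
            pthread_spin_init(&dev_lock, PTHREAD_PROCESS_PRIVATE);
            ep.hcpriv = malloc(16);

            printf("unlocked snapshot: %p\n", peek_unlocked());
            printf("locked snapshot:   %p\n", peek_locked());

            free(ep.hcpriv);
            pthread_spin_destroy(&dev_lock);
            return 0;
    }

The patch below applies the same idea inside musb_urb_enqueue(): instead of dereferencing hep->hcpriv at declaration time with no lock held, qh is assigned after usb_hcd_link_urb_to_ep() succeeds, while musb->lock is held.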
Diffstat (limited to 'drivers/usb')
-rw-r--r-- | drivers/usb/musb/musb_host.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 499c431a6d62..ff095956ca96 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1841,7 +1841,7 @@ static int musb_urb_enqueue(
         unsigned long flags;
         struct musb *musb = hcd_to_musb(hcd);
         struct usb_host_endpoint *hep = urb->ep;
-        struct musb_qh *qh = hep->hcpriv;
+        struct musb_qh *qh;
         struct usb_endpoint_descriptor *epd = &hep->desc;
         int ret;
         unsigned type_reg;
@@ -1853,22 +1853,21 @@ static int musb_urb_enqueue(
 
         spin_lock_irqsave(&musb->lock, flags);
         ret = usb_hcd_link_urb_to_ep(hcd, urb);
+        qh = ret ? NULL : hep->hcpriv;
+        if (qh)
+                urb->hcpriv = qh;
         spin_unlock_irqrestore(&musb->lock, flags);
-        if (ret)
-                return ret;
 
         /* DMA mapping was already done, if needed, and this urb is on
-         * hep->urb_list ... so there's little to do unless hep wasn't
-         * yet scheduled onto a live qh.
+         * hep->urb_list now ... so we're done, unless hep wasn't yet
+         * scheduled onto a live qh.
          *
          * REVISIT best to keep hep->hcpriv valid until the endpoint gets
          * disabled, testing for empty qh->ring and avoiding qh setup costs
          * except for the first urb queued after a config change.
          */
-        if (qh) {
-                urb->hcpriv = qh;
-                return 0;
-        }
+        if (qh || ret)
+                return ret;
 
         /* Allocate and initialize qh, minimizing the work done each time
          * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.