path: root/drivers/usb/gadget/ether.c
author	David Brownell <david-b@pacbell.net>	2006-08-21 18:26:38 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2006-08-31 21:04:30 -0400
commit	789851cf0005b946557340c9bbfc7728906cdbfc (patch)
tree	f10696d1397dead43f70509432266f7afef9aeb9 /drivers/usb/gadget/ether.c
parent	ea186651d5a7b822c855de5de505c5c19812bf0b (diff)
usb gadget: g_ether spinlock recursion fix
The new spinlock debug code turned up a spinlock recursion bug in the
Ethernet gadget driver on a disconnect path; it would show up with any UDC
driver where the cancellation of active requests was synchronous, rather
than e.g. delayed until a controller's completion IRQ.  That recursion is
fixed here by creating and using a new spinlock to protect the relevant
lists.

Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
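The fix relies on one pattern throughout: drop the request-list lock around any call that may re-enter it (such as freeing a request on a UDC that cancels and completes requests synchronously). As a rough, self-contained userspace sketch of that idea only -- this is not the driver; fake_free_request() and the pthread spinlock stand-in are invented for illustration -- the following program drains a list while releasing the lock around a callback that re-takes it:

/* Sketch only: models "unlock around a re-entrant callback" with pthreads.
 * Build with: cc -o sketch sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_spinlock_t req_lock;
static struct node *req_list;

/* Stands in for usb_ep_free_request() on a UDC that cancels synchronously:
 * it runs completion-style code that takes req_lock again. */
static void fake_free_request(struct node *req)
{
	pthread_spin_lock(&req_lock);	/* would self-deadlock if the caller still held it */
	/* completion-style bookkeeping would happen here */
	pthread_spin_unlock(&req_lock);
	free(req);
}

int main(void)
{
	pthread_spin_init(&req_lock, PTHREAD_PROCESS_PRIVATE);

	/* build a small request list */
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = req_list;
		req_list = n;
	}

	/* the fixed pattern: unlock around the re-entrant call */
	pthread_spin_lock(&req_lock);
	while (req_list) {
		struct node *req = req_list;
		req_list = req->next;

		pthread_spin_unlock(&req_lock);
		fake_free_request(req);		/* safe: lock not held here */
		pthread_spin_lock(&req_lock);
	}
	pthread_spin_unlock(&req_lock);

	puts("list drained without self-deadlock");
	return 0;
}

Holding the lock across fake_free_request() would deadlock on a non-recursive spinlock, which is the same recursion the spinlock debug code flagged in eth_reset_config() below.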
Diffstat (limited to 'drivers/usb/gadget/ether.c')
-rw-r--r--	drivers/usb/gadget/ether.c	45
1 file changed, 31 insertions(+), 14 deletions(-)
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 4fe1bec1c255..30299c620d97 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -117,6 +117,8 @@ struct eth_dev {
 	struct usb_ep		*in_ep, *out_ep, *status_ep;
 	const struct usb_endpoint_descriptor
 				*in, *out, *status;
+
+	spinlock_t		req_lock;
 	struct list_head	tx_reqs, rx_reqs;
 
 	struct net_device	*net;
@@ -1066,21 +1068,31 @@ static void eth_reset_config (struct eth_dev *dev)
 	 */
 	if (dev->in) {
 		usb_ep_disable (dev->in_ep);
+		spin_lock(&dev->req_lock);
 		while (likely (!list_empty (&dev->tx_reqs))) {
 			req = container_of (dev->tx_reqs.next,
 						struct usb_request, list);
 			list_del (&req->list);
+
+			spin_unlock(&dev->req_lock);
 			usb_ep_free_request (dev->in_ep, req);
+			spin_lock(&dev->req_lock);
 		}
+		spin_unlock(&dev->req_lock);
 	}
 	if (dev->out) {
 		usb_ep_disable (dev->out_ep);
+		spin_lock(&dev->req_lock);
 		while (likely (!list_empty (&dev->rx_reqs))) {
 			req = container_of (dev->rx_reqs.next,
 						struct usb_request, list);
 			list_del (&req->list);
+
+			spin_unlock(&dev->req_lock);
 			usb_ep_free_request (dev->out_ep, req);
+			spin_lock(&dev->req_lock);
 		}
+		spin_unlock(&dev->req_lock);
 	}
 
 	if (dev->status) {
@@ -1659,9 +1671,9 @@ enomem:
 	if (retval) {
 		DEBUG (dev, "rx submit --> %d\n", retval);
 		dev_kfree_skb_any (skb);
-		spin_lock (&dev->lock);
+		spin_lock(&dev->req_lock);
 		list_add (&req->list, &dev->rx_reqs);
-		spin_unlock (&dev->lock);
+		spin_unlock(&dev->req_lock);
 	}
 	return retval;
 }
@@ -1730,8 +1742,9 @@ quiesce:
 		dev_kfree_skb_any (skb);
 		if (!netif_running (dev->net)) {
 clean:
-			/* nobody reading rx_reqs, so no dev->lock */
+			spin_lock(&dev->req_lock);
 			list_add (&req->list, &dev->rx_reqs);
+			spin_unlock(&dev->req_lock);
 			req = NULL;
 		}
 		if (req)
@@ -1782,15 +1795,18 @@ static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
 {
 	int	status;
 
+	spin_lock(&dev->req_lock);
 	status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags);
 	if (status < 0)
 		goto fail;
 	status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags);
 	if (status < 0)
 		goto fail;
-	return 0;
+	goto done;
 fail:
 	DEBUG (dev, "can't alloc requests\n");
+done:
+	spin_unlock(&dev->req_lock);
 	return status;
 }
 
@@ -1800,21 +1816,21 @@ static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
 	unsigned long		flags;
 
 	/* fill unused rxq slots with some skb */
-	spin_lock_irqsave (&dev->lock, flags);
+	spin_lock_irqsave(&dev->req_lock, flags);
 	while (!list_empty (&dev->rx_reqs)) {
 		req = container_of (dev->rx_reqs.next,
 				struct usb_request, list);
 		list_del_init (&req->list);
-		spin_unlock_irqrestore (&dev->lock, flags);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
 
 		if (rx_submit (dev, req, gfp_flags) < 0) {
 			defer_kevent (dev, WORK_RX_MEMORY);
 			return;
 		}
 
-		spin_lock_irqsave (&dev->lock, flags);
+		spin_lock_irqsave(&dev->req_lock, flags);
 	}
-	spin_unlock_irqrestore (&dev->lock, flags);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
 static void eth_work (void *_dev)
@@ -1848,9 +1864,9 @@ static void tx_complete (struct usb_ep *ep, struct usb_request *req)
 	}
 	dev->stats.tx_packets++;
 
-	spin_lock (&dev->lock);
+	spin_lock(&dev->req_lock);
 	list_add (&req->list, &dev->tx_reqs);
-	spin_unlock (&dev->lock);
+	spin_unlock(&dev->req_lock);
 	dev_kfree_skb_any (skb);
 
 	atomic_dec (&dev->tx_qlen);
@@ -1896,12 +1912,12 @@ static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
 		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
 	}
 
-	spin_lock_irqsave (&dev->lock, flags);
+	spin_lock_irqsave(&dev->req_lock, flags);
 	req = container_of (dev->tx_reqs.next, struct usb_request, list);
 	list_del (&req->list);
 	if (list_empty (&dev->tx_reqs))
 		netif_stop_queue (net);
-	spin_unlock_irqrestore (&dev->lock, flags);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
 
 	/* no buffer copies needed, unless the network stack did it
 	 * or the hardware can't use skb buffers.
@@ -1955,11 +1971,11 @@ static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
 drop:
 		dev->stats.tx_dropped++;
 		dev_kfree_skb_any (skb);
-		spin_lock_irqsave (&dev->lock, flags);
+		spin_lock_irqsave(&dev->req_lock, flags);
 		if (list_empty (&dev->tx_reqs))
 			netif_start_queue (net);
 		list_add (&req->list, &dev->tx_reqs);
-		spin_unlock_irqrestore (&dev->lock, flags);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
 	return 0;
 }
@@ -2378,6 +2394,7 @@ autoconf_fail:
 		return status;
 	dev = netdev_priv(net);
 	spin_lock_init (&dev->lock);
+	spin_lock_init (&dev->req_lock);
 	INIT_WORK (&dev->work, eth_work, dev);
 	INIT_LIST_HEAD (&dev->tx_reqs);
 	INIT_LIST_HEAD (&dev->rx_reqs);