path: root/drivers/usb/musb/musb_host.c
author	Haavard Skinnemoen <haavard.skinnemoen@atmel.com>	2009-03-27 11:14:38 -0400
committer	Haavard Skinnemoen <haavard.skinnemoen@atmel.com>	2009-03-27 11:14:38 -0400
commit	b92efa9abffc4a634cd2e7a0f81f8aa6310d67c9 (patch)
tree	9847508d9b8d4e585f90db4a453bfbc3700c997e /drivers/usb/musb/musb_host.c
parent	a16fffdd8eb95ebab7dc22414896fe6493951e0e (diff)
parent	be0ea69674ed95e1e98cb3687a241badc756d228 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into avr32-arch
Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--	drivers/usb/musb/musb_host.c	237
1 file changed, 156 insertions(+), 81 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 99fa61234876..499c431a6d62 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -64,11 +64,8 @@
  *
  * - DMA (Mentor/OMAP) ...has at least toggle update problems
  *
- * - Still no traffic scheduling code to make NAKing for bulk or control
- *   transfers unable to starve other requests; or to make efficient use
- *   of hardware with periodic transfers.  (Note that network drivers
- *   commonly post bulk reads that stay pending for a long time; these
- *   would make very visible trouble.)
+ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
+ *   starvation ... nothing yet for TX, interrupt, or bulk.
  *
  * - Not tested with HNP, but some SRP paths seem to behave.
  *
@@ -88,11 +85,8 @@
  *
  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
- *
  * (Yes, bulk _could_ use more of the endpoints than that, and would even
- * benefit from it ... one remote device may easily be NAKing while others
- * need to perform transfers in that same direction.  The same thing could
- * be done in software though, assuming dma cooperates.)
+ * benefit from it.)
  *
  * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
  * So far that scheduling is both dumb and optimistic: the endpoint will be
@@ -201,8 +195,9 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 		len = urb->iso_frame_desc[0].length;
 		break;
 	default:		/* bulk, interrupt */
-		buf = urb->transfer_buffer;
-		len = urb->transfer_buffer_length;
+		/* actual_length may be nonzero on retry paths */
+		buf = urb->transfer_buffer + urb->actual_length;
+		len = urb->transfer_buffer_length - urb->actual_length;
 	}
 
 	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
@@ -335,16 +330,11 @@ musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
 static struct musb_qh *
 musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 {
-	int			is_in;
 	struct musb_hw_ep	*ep = qh->hw_ep;
 	struct musb		*musb = ep->musb;
+	int			is_in = usb_pipein(urb->pipe);
 	int			ready = qh->is_ready;
 
-	if (ep->is_shared_fifo)
-		is_in = 1;
-	else
-		is_in = usb_pipein(urb->pipe);
-
 	/* save toggle eagerly, for paranoia */
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_BULK:
@@ -400,7 +390,6 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 		 * de-allocated if it's tracked and allocated;
 		 * and where we'd update the schedule tree...
 		 */
-		musb->periodic[ep->epnum] = NULL;
 		kfree(qh);
 		qh = NULL;
 		break;
@@ -432,7 +421,7 @@ musb_advance_schedule(struct musb *musb, struct urb *urb,
 	else
 		qh = musb_giveback(qh, urb, urb->status);
 
-	if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+	if (qh != NULL && qh->is_ready) {
 		DBG(4, "... next ep%d %cX urb %p\n",
 				hw_ep->epnum, is_in ? 'R' : 'T',
 				next_urb(qh));
@@ -942,8 +931,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
 	switch (musb->ep0_stage) {
 	case MUSB_EP0_IN:
 		fifo_dest = urb->transfer_buffer + urb->actual_length;
-		fifo_count = min(len, ((u16) (urb->transfer_buffer_length
-					- urb->actual_length)));
+		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
+				   urb->actual_length);
 		if (fifo_count < len)
 			urb->status = -EOVERFLOW;
 
@@ -976,10 +965,9 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
 		}
 		/* FALLTHROUGH */
 	case MUSB_EP0_OUT:
-		fifo_count = min(qh->maxpacket, ((u16)
-				(urb->transfer_buffer_length
-				- urb->actual_length)));
-
+		fifo_count = min_t(size_t, qh->maxpacket,
+				   urb->transfer_buffer_length -
+				   urb->actual_length);
 		if (fifo_count) {
 			fifo_dest = (u8 *) (urb->transfer_buffer
 					+ urb->actual_length);
@@ -1051,7 +1039,8 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 
 		/* NOTE: this code path would be a good place to PAUSE a
 		 * control transfer, if another one is queued, so that
-		 * ep0 is more likely to stay busy.
+		 * ep0 is more likely to stay busy.  That's already done
+		 * for bulk RX transfers.
 		 *
 		 * if (qh->ring.next != &musb->control), then
 		 * we have a candidate... NAKing is *NOT* an error
@@ -1161,7 +1150,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	struct urb		*urb;
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
-	struct musb_qh		*qh = hw_ep->out_qh;
+	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
+							    : hw_ep->out_qh;
 	u32			status = 0;
 	void __iomem		*mbase = musb->mregs;
 	struct dma_channel	*dma;
@@ -1202,6 +1192,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 		/* NOTE: this code path would be a good place to PAUSE a
 		 * transfer, if there's some other (nonperiodic) tx urb
 		 * that could use this fifo.  (dma complicates it...)
+		 * That's already done for bulk RX transfers.
 		 *
 		 * if (bulk && qh->ring.next != &musb->out_bulk), then
 		 * we have a candidate... NAKing is *NOT* an error
@@ -1308,7 +1299,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	 * packets before updating TXCSR ... other docs disagree ...
 	 */
 	/* PIO: start next packet in this URB */
-	wLength = min(qh->maxpacket, (u16) wLength);
+	if (wLength > qh->maxpacket)
+		wLength = qh->maxpacket;
 	musb_write_fifo(hw_ep, wLength, buf);
 	qh->segsize = wLength;
 
@@ -1362,6 +1354,50 @@ finish:
 
 #endif
 
+/* Schedule next QH from musb->in_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
+{
+	struct dma_channel	*dma;
+	struct urb		*urb;
+	void __iomem		*mbase = musb->mregs;
+	void __iomem		*epio = ep->regs;
+	struct musb_qh		*cur_qh, *next_qh;
+	u16			rx_csr;
+
+	musb_ep_select(mbase, ep->epnum);
+	dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+	/* clear nak timeout bit */
+	rx_csr = musb_readw(epio, MUSB_RXCSR);
+	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+	rx_csr &= ~MUSB_RXCSR_DATAERROR;
+	musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+	cur_qh = first_qh(&musb->in_bulk);
+	if (cur_qh) {
+		urb = next_urb(cur_qh);
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			musb->dma_controller->channel_abort(dma);
+			urb->actual_length += dma->actual_len;
+			dma->actual_len = 0L;
+		}
+		musb_save_toggle(ep, 1, urb);
+
+		/* move cur_qh to end of queue */
+		list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+		/* get the next qh from musb->in_bulk */
+		next_qh = first_qh(&musb->in_bulk);
+
+		/* set rx_reinit and schedule the next qh */
+		ep->rx_reinit = 1;
+		musb_start_urb(musb, 1, next_qh);
+	}
+}
+
 /*
  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
  * and high-bandwidth IN transfer cases.
@@ -1425,18 +1461,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
 
 		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
-			/* NOTE this code path would be a good place to PAUSE a
-			 * transfer, if there's some other (nonperiodic) rx urb
-			 * that could use this fifo.  (dma complicates it...)
+			DBG(6, "RX end %d NAK timeout\n", epnum);
+
+			/* NOTE: NAKing is *NOT* an error, so we want to
+			 * continue.  Except ... if there's a request for
+			 * another QH, use that instead of starving it.
 			 *
-			 * if (bulk && qh->ring.next != &musb->in_bulk), then
-			 * we have a candidate... NAKing is *NOT* an error
+			 * Devices like Ethernet and serial adapters keep
+			 * reads posted at all times, which will starve
+			 * other devices without this logic.
 			 */
-			DBG(6, "RX end %d NAK timeout\n", epnum);
+			if (usb_pipebulk(urb->pipe)
+					&& qh->mux == 1
+					&& !list_is_singular(&musb->in_bulk)) {
+				musb_bulk_rx_nak_timeout(musb, hw_ep);
+				return;
+			}
 			musb_ep_select(mbase, epnum);
-			musb_writew(epio, MUSB_RXCSR,
-					MUSB_RXCSR_H_WZC_BITS
-					| MUSB_RXCSR_H_REQPKT);
+			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+			rx_csr &= ~MUSB_RXCSR_DATAERROR;
+			musb_writew(epio, MUSB_RXCSR, rx_csr);
 
 			goto finish;
 		} else {
@@ -1715,31 +1759,27 @@ static int musb_schedule(
 
 	/* else, periodic transfers get muxed to other endpoints */
 
-	/* FIXME this doesn't consider direction, so it can only
-	 * work for one half of the endpoint hardware, and assumes
-	 * the previous cases handled all non-shared endpoints...
-	 */
-
-	/* we know this qh hasn't been scheduled, so all we need to do
+	/*
+	 * We know this qh hasn't been scheduled, so all we need to do
 	 * is choose which hardware endpoint to put it on ...
 	 *
 	 * REVISIT what we really want here is a regular schedule tree
-	 * like e.g. OHCI uses, but for now musb->periodic is just an
-	 * array of the _single_ logical endpoint associated with a
-	 * given physical one (identity mapping logical->physical).
-	 *
-	 * that simplistic approach makes TT scheduling a lot simpler;
-	 * there is none, and thus none of its complexity...
+	 * like e.g. OHCI uses.
 	 */
 	best_diff = 4096;
 	best_end = -1;
 
-	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+	for (epnum = 1, hw_ep = musb->endpoints + 1;
+			epnum < musb->nr_endpoints;
+			epnum++, hw_ep++) {
 		int	diff;
 
-		if (musb->periodic[epnum])
+		if (is_in || hw_ep->is_shared_fifo) {
+			if (hw_ep->in_qh != NULL)
+				continue;
+		} else if (hw_ep->out_qh != NULL)
 			continue;
-		hw_ep = &musb->endpoints[epnum];
+
 		if (hw_ep == musb->bulk_ep)
 			continue;
 
@@ -1760,6 +1800,17 @@ static int musb_schedule(
 			head = &musb->in_bulk;
 		else
 			head = &musb->out_bulk;
+
+		/* Enable bulk RX NAK timeout scheme when bulk requests are
+		 * multiplexed. This scheme doen't work in high speed to full
+		 * speed scenario as NAK interrupts are not coming from a
+		 * full speed device connected to a high speed device.
+		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
+		 * 4 (8 frame or 8ms) for FS device.
+		 */
+		if (is_in && qh->dev)
+			qh->intv_reg =
+				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
 		goto success;
 	} else if (best_end < 0) {
 		return -ENOSPC;
@@ -1768,7 +1819,6 @@ static int musb_schedule(
 	idle = 1;
 	qh->mux = 0;
 	hw_ep = musb->endpoints + best_end;
-	musb->periodic[best_end] = qh;
 	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
 success:
 	if (head) {
@@ -1847,8 +1897,8 @@ static int musb_urb_enqueue(
 		goto done;
 	}
 
-	qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-	qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	qh->epnum = usb_endpoint_num(epd);
+	qh->type = usb_endpoint_type(epd);
 
 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
 	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
@@ -1867,19 +1917,21 @@ static int musb_urb_enqueue(
 	}
 	qh->type_reg = type_reg;
 
-	/* precompute rxinterval/txinterval register */
-	interval = min((u8)16, epd->bInterval);	/* log encoding */
+	/* Precompute RXINTERVAL/TXINTERVAL register */
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_INT:
-		/* fullspeed uses linear encoding */
-		if (USB_SPEED_FULL == urb->dev->speed) {
-			interval = epd->bInterval;
-			if (!interval)
-				interval = 1;
+		/*
+		 * Full/low speeds use the linear encoding,
+		 * high speed uses the logarithmic encoding.
+		 */
+		if (urb->dev->speed <= USB_SPEED_FULL) {
+			interval = max_t(u8, epd->bInterval, 1);
+			break;
 		}
 		/* FALLTHROUGH */
 	case USB_ENDPOINT_XFER_ISOC:
-		/* iso always uses log encoding */
+		/* ISO always uses logarithmic encoding */
+		interval = min_t(u8, epd->bInterval, 16);
 		break;
 	default:
 		/* REVISIT we actually want to use NAK limits, hinting to the
@@ -1890,13 +1942,11 @@ static int musb_urb_enqueue(
 		 *
 		 * The downside of disabling this is that transfer scheduling
 		 * gets VERY unfair for nonperiodic transfers; a misbehaving
-		 * peripheral could make that hurt.  Or for reads, one that's
-		 * perfectly normal:  network and other drivers keep reads
-		 * posted at all times, having one pending for a week should
-		 * be perfectly safe.
+		 * peripheral could make that hurt.  That's perfectly normal
+		 * for reads from network or serial adapters ... so we have
+		 * partial NAKlimit support for bulk RX.
 		 *
-		 * The upside of disabling it is avoidng transfer scheduling
-		 * code to put this aside for while.
+		 * The upside of disabling it is simpler transfer scheduling.
 		 */
 		interval = 0;
 	}
@@ -2037,9 +2087,9 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		goto done;
 
 	/* Any URB not actively programmed into endpoint hardware can be
-	 * immediately given back.  Such an URB must be at the head of its
+	 * immediately given back; that's any URB not at the head of an
 	 * endpoint queue, unless someday we get real DMA queues.  And even
-	 * then, it might not be known to the hardware...
+	 * if it's at the head, it might not be known to the hardware...
 	 *
 	 * Otherwise abort current transfer, pending dma, etc.; urb->status
 	 * has already been updated.  This is a synchronous abort; it'd be
@@ -2078,6 +2128,15 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		qh->is_ready = 0;
 		__musb_giveback(musb, urb, 0);
 		qh->is_ready = ready;
+
+		/* If nothing else (usually musb_giveback) is using it
+		 * and its URB list has emptied, recycle this qh.
+		 */
+		if (ready && list_empty(&qh->hep->urb_list)) {
+			qh->hep->hcpriv = NULL;
+			list_del(&qh->ring);
+			kfree(qh);
+		}
 	} else
 		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
 done:
@@ -2093,15 +2152,16 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 	unsigned long		flags;
 	struct musb		*musb = hcd_to_musb(hcd);
 	u8			is_in = epnum & USB_DIR_IN;
-	struct musb_qh		*qh = hep->hcpriv;
-	struct urb		*urb, *tmp;
+	struct musb_qh		*qh;
+	struct urb		*urb;
 	struct list_head	*sched;
 
-	if (!qh)
-		return;
-
 	spin_lock_irqsave(&musb->lock, flags);
 
+	qh = hep->hcpriv;
+	if (qh == NULL)
+		goto exit;
+
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_CONTROL:
 		sched = &musb->control;
@@ -2135,13 +2195,28 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 
 		/* cleanup */
 		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
-	} else
-		urb = NULL;
 
-	/* then just nuke all the others */
-	list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
-		musb_giveback(qh, urb, -ESHUTDOWN);
+		/* Then nuke all the others ... and advance the
+		 * queue on hw_ep (e.g. bulk ring) when we're done.
+		 */
+		while (!list_empty(&hep->urb_list)) {
+			urb = next_urb(qh);
+			urb->status = -ESHUTDOWN;
+			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
+		}
+	} else {
+		/* Just empty the queue; the hardware is busy with
+		 * other transfers, and since !qh->is_ready nothing
+		 * will activate any of these as it advances.
+		 */
+		while (!list_empty(&hep->urb_list))
+			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
 
+		hep->hcpriv = NULL;
+		list_del(&qh->ring);
+		kfree(qh);
+	}
+exit:
 	spin_unlock_irqrestore(&musb->lock, flags);
 }
 