Diffstat (limited to 'drivers/usb/musb/musb_host.c'):

 drivers/usb/musb/musb_host.c | 140 ++++++++++++++++++++++++++---------------
 1 file changed, 96 insertions(+), 44 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 6dbbd0786a6a..499c431a6d62 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -64,11 +64,8 @@
  *
  * - DMA (Mentor/OMAP) ...has at least toggle update problems
  *
- * - Still no traffic scheduling code to make NAKing for bulk or control
- *   transfers unable to starve other requests; or to make efficient use
- *   of hardware with periodic transfers.  (Note that network drivers
- *   commonly post bulk reads that stay pending for a long time; these
- *   would make very visible trouble.)
+ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
+ *   starvation ... nothing yet for TX, interrupt, or bulk.
  *
  * - Not tested with HNP, but some SRP paths seem to behave.
  *
@@ -88,11 +85,8 @@
  *
  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
- *
  * (Yes, bulk _could_ use more of the endpoints than that, and would even
- * benefit from it ... one remote device may easily be NAKing while others
- * need to perform transfers in that same direction.  The same thing could
- * be done in software though, assuming dma cooperates.)
+ * benefit from it.)
  *
  * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
  * So far that scheduling is both dumb and optimistic: the endpoint will be
@@ -201,8 +195,9 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 		len = urb->iso_frame_desc[0].length;
 		break;
 	default:		/* bulk, interrupt */
-		buf = urb->transfer_buffer;
-		len = urb->transfer_buffer_length;
+		/* actual_length may be nonzero on retry paths */
+		buf = urb->transfer_buffer + urb->actual_length;
+		len = urb->transfer_buffer_length - urb->actual_length;
 	}

 	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
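
This hunk makes a restarted bulk or interrupt URB continue from the bytes already completed instead of resending the whole buffer. For illustration, a minimal userspace sketch of that bookkeeping; urb_like and resume_transfer are hypothetical stand-ins for the struct urb fields and the musb_start_urb logic involved, not driver code:

#include <stdio.h>

/* Hypothetical stand-in for the few struct urb fields used above. */
struct urb_like {
	char		*transfer_buffer;
	unsigned	transfer_buffer_length;
	unsigned	actual_length;		/* bytes already done */
};

/* Resume where the last attempt stopped, as the hunk above now does. */
static void resume_transfer(struct urb_like *urb, char **buf, unsigned *len)
{
	*buf = urb->transfer_buffer + urb->actual_length;
	*len = urb->transfer_buffer_length - urb->actual_length;
}

int main(void)
{
	char data[512];
	struct urb_like urb = { data, sizeof(data), 128 };
	char *buf;
	unsigned len;

	resume_transfer(&urb, &buf, &len);
	/* 128 bytes already done: restart at offset 128, 384 to go */
	printf("restart at offset %td, %u bytes left\n", buf - data, len);
	return 0;
}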
@@ -395,7 +390,6 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 		 * de-allocated if it's tracked and allocated;
 		 * and where we'd update the schedule tree...
 		 */
-		musb->periodic[ep->epnum] = NULL;
 		kfree(qh);
 		qh = NULL;
 		break;
@@ -1045,7 +1039,8 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 
 		/* NOTE: this code path would be a good place to PAUSE a
 		 * control transfer, if another one is queued, so that
-		 * ep0 is more likely to stay busy.
+		 * ep0 is more likely to stay busy.  That's already done
+		 * for bulk RX transfers.
 		 *
 		 * if (qh->ring.next != &musb->control), then
 		 * we have a candidate... NAKing is *NOT* an error
@@ -1197,6 +1192,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 		/* NOTE: this code path would be a good place to PAUSE a
 		 * transfer, if there's some other (nonperiodic) tx urb
 		 * that could use this fifo.  (dma complicates it...)
+		 * That's already done for bulk RX transfers.
 		 *
 		 * if (bulk && qh->ring.next != &musb->out_bulk), then
 		 * we have a candidate... NAKing is *NOT* an error
@@ -1358,6 +1354,50 @@ finish:
 
 #endif
 
+/* Schedule next QH from musb->in_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
+{
+	struct dma_channel	*dma;
+	struct urb		*urb;
+	void __iomem		*mbase = musb->mregs;
+	void __iomem		*epio = ep->regs;
+	struct musb_qh		*cur_qh, *next_qh;
+	u16			rx_csr;
+
+	musb_ep_select(mbase, ep->epnum);
+	dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+	/* clear nak timeout bit */
+	rx_csr = musb_readw(epio, MUSB_RXCSR);
+	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+	rx_csr &= ~MUSB_RXCSR_DATAERROR;
+	musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+	cur_qh = first_qh(&musb->in_bulk);
+	if (cur_qh) {
+		urb = next_urb(cur_qh);
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			musb->dma_controller->channel_abort(dma);
+			urb->actual_length += dma->actual_len;
+			dma->actual_len = 0L;
+		}
+		musb_save_toggle(ep, 1, urb);
+
+		/* move cur_qh to end of queue */
+		list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+		/* get the next qh from musb->in_bulk */
+		next_qh = first_qh(&musb->in_bulk);
+
+		/* set rx_reinit and schedule the next qh */
+		ep->rx_reinit = 1;
+		musb_start_urb(musb, 1, next_qh);
+	}
+}
+
 /*
  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
  * and high-bandwidth IN transfer cases.
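
The helper added above is round-robin scheduling in miniature: the qh whose NAK timeout fired rotates to the tail of musb->in_bulk and the new head is started, so multiplexed bulk-IN devices take turns on the shared endpoint. A self-contained sketch of that rotation, with a minimal reimplementation of the kernel's list_head splicing and a hypothetical qh_like struct (the kernel's own list.h provides the real operations):

#include <stddef.h>
#include <stdio.h>

/* Minimal circular doubly-linked list, after the kernel's list_head. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* The splice musb_bulk_rx_nak_timeout() relies on. */
static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}

/* Hypothetical qh stand-in: just a name plus its ring linkage. */
struct qh_like { const char *name; struct list_head ring; };

#define first_qh_like(h) \
	((struct qh_like *)((char *)(h)->next - offsetof(struct qh_like, ring)))

int main(void)
{
	struct list_head in_bulk;
	struct qh_like a = { "usb-net" }, b = { "usb-serial" };
	int i;

	list_init(&in_bulk);
	list_add_tail(&a.ring, &in_bulk);
	list_add_tail(&b.ring, &in_bulk);

	/* each NAK timeout rotates the current head to the tail */
	for (i = 0; i < 4; i++) {
		struct qh_like *cur = first_qh_like(&in_bulk);

		printf("servicing %s\n", cur->name);
		list_move_tail(&cur->ring, &in_bulk);
	}
	return 0;
}

Run it and the two names alternate, which is exactly the fairness property the timeout handler is after.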
@@ -1421,18 +1461,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
 
 		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
-			/* NOTE this code path would be a good place to PAUSE a
-			 * transfer, if there's some other (nonperiodic) rx urb
-			 * that could use this fifo.  (dma complicates it...)
+			DBG(6, "RX end %d NAK timeout\n", epnum);
+
+			/* NOTE: NAKing is *NOT* an error, so we want to
+			 * continue.  Except ... if there's a request for
+			 * another QH, use that instead of starving it.
 			 *
-			 * if (bulk && qh->ring.next != &musb->in_bulk), then
-			 * we have a candidate... NAKing is *NOT* an error
+			 * Devices like Ethernet and serial adapters keep
+			 * reads posted at all times, which will starve
+			 * other devices without this logic.
 			 */
-			DBG(6, "RX end %d NAK timeout\n", epnum);
+			if (usb_pipebulk(urb->pipe)
+					&& qh->mux == 1
+					&& !list_is_singular(&musb->in_bulk)) {
+				musb_bulk_rx_nak_timeout(musb, hw_ep);
+				return;
+			}
 			musb_ep_select(mbase, epnum);
-			musb_writew(epio, MUSB_RXCSR,
-					MUSB_RXCSR_H_WZC_BITS
-					| MUSB_RXCSR_H_REQPKT);
+			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+			rx_csr &= ~MUSB_RXCSR_DATAERROR;
+			musb_writew(epio, MUSB_RXCSR, rx_csr);
 
 			goto finish;
 		} else {
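
The replacement register update is the usual read-modify-write idiom for write-zero-to-clear (WZC) status bits: force all WZC bits to 1 so the write leaves them untouched, then zero only the bit being acknowledged (DATAERROR here). A toy model of that idiom; the CSR_* names are hypothetical and not the real MUSB_RXCSR layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical CSR: the STATUS_* bits are "write zero to clear" (W0C),
 * i.e. writing 1 leaves them alone; control bits behave normally.
 */
#define CSR_STATUS_NAKTIMEOUT	(1u << 3)
#define CSR_STATUS_STALL	(1u << 2)
#define CSR_CTRL_REQPKT		(1u << 5)
#define CSR_W0C_BITS		(CSR_STATUS_NAKTIMEOUT | CSR_STATUS_STALL)

static uint16_t csr = CSR_STATUS_NAKTIMEOUT | CSR_STATUS_STALL | CSR_CTRL_REQPKT;

/* Model the hardware write: W0C status bits survive only where a 1 is
 * written back; plain control bits take the written value.
 */
static void csr_write(uint16_t val)
{
	csr = (val & ~CSR_W0C_BITS) | (csr & CSR_W0C_BITS & val);
}

int main(void)
{
	uint16_t v = csr;			/* read */

	v |= CSR_W0C_BITS;			/* preserve other status */
	v &= ~CSR_STATUS_NAKTIMEOUT;		/* clear just this one */
	csr_write(v);

	printf("stall kept: %d, nak timeout cleared: %d\n",
	       !!(csr & CSR_STATUS_STALL),
	       !(csr & CSR_STATUS_NAKTIMEOUT));
	return 0;
}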
@@ -1711,31 +1759,27 @@ static int musb_schedule(
 
 	/* else, periodic transfers get muxed to other endpoints */
 
-	/* FIXME this doesn't consider direction, so it can only
-	 * work for one half of the endpoint hardware, and assumes
-	 * the previous cases handled all non-shared endpoints...
-	 */
-
-	/* we know this qh hasn't been scheduled, so all we need to do
+	/*
+	 * We know this qh hasn't been scheduled, so all we need to do
 	 * is choose which hardware endpoint to put it on ...
 	 *
 	 * REVISIT what we really want here is a regular schedule tree
-	 * like e.g. OHCI uses, but for now musb->periodic is just an
-	 * array of the _single_ logical endpoint associated with a
-	 * given physical one (identity mapping logical->physical).
-	 *
-	 * that simplistic approach makes TT scheduling a lot simpler;
-	 * there is none, and thus none of its complexity...
-	 */
+	 * like e.g. OHCI uses.
+	 */
 	best_diff = 4096;
 	best_end = -1;
 
-	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+	for (epnum = 1, hw_ep = musb->endpoints + 1;
+			epnum < musb->nr_endpoints;
+			epnum++, hw_ep++) {
 		int diff;
 
-		if (musb->periodic[epnum])
+		if (is_in || hw_ep->is_shared_fifo) {
+			if (hw_ep->in_qh != NULL)
+				continue;
+		} else if (hw_ep->out_qh != NULL)
 			continue;
-		hw_ep = &musb->endpoints[epnum];
+
 		if (hw_ep == musb->bulk_ep)
 			continue;
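
The reworked loop addresses the old FIXME by testing availability per direction: in_qh and out_qh are claimed independently, except on shared-FIFO endpoints, where in_qh stands for both directions. A sketch of that test; qh, hw_ep, and ep_is_free here are hypothetical stand-ins, not the driver's types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct musb_hw_ep / struct musb_qh. */
struct qh { int unused; };
struct hw_ep {
	bool		is_shared_fifo;
	struct qh	*in_qh;		/* RX owner (both, if shared) */
	struct qh	*out_qh;	/* TX owner */
};

/* Mirror of the new scheduling test: an endpoint is free for this
 * direction unless that direction is taken, with a shared FIFO
 * tracked through in_qh for both directions.
 */
static bool ep_is_free(const struct hw_ep *ep, bool is_in)
{
	if (is_in || ep->is_shared_fifo)
		return ep->in_qh == NULL;
	return ep->out_qh == NULL;
}

int main(void)
{
	struct qh q;
	struct hw_ep split = { false, &q, NULL };	/* RX side busy */
	struct hw_ep shared = { true, &q, NULL };

	printf("split ep free for TX: %d\n", ep_is_free(&split, false));  /* 1 */
	printf("split ep free for RX: %d\n", ep_is_free(&split, true));   /* 0 */
	printf("shared ep free for TX: %d\n", ep_is_free(&shared, false));/* 0 */
	return 0;
}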
@@ -1756,6 +1800,17 @@ static int musb_schedule(
 			head = &musb->in_bulk;
 		else
 			head = &musb->out_bulk;
+
+		/* Enable bulk RX NAK timeout scheme when bulk requests are
+		 * multiplexed.  This scheme doesn't work in the high speed
+		 * to full speed scenario, as NAK interrupts are not coming
+		 * from a full speed device connected to a high speed device.
+		 * The NAK timeout interval is 8 (128 uframes or 16 ms) for
+		 * HS and 4 (8 frames or 8 ms) for FS devices.
+		 */
+		if (is_in && qh->dev)
+			qh->intv_reg =
+				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
 		goto success;
 	} else if (best_end < 0) {
 		return -ENOSPC;
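
The intervals quoted in that comment follow from the NAKlimit encoding, where a register value m counts 2^(m-1) frame units: 8 gives 128 high-speed microframes of 125 us (16 ms), and 4 gives 8 full-speed frames of 1 ms (8 ms). A quick check of the arithmetic; the encoding is inferred from the comment's own examples:

#include <stdio.h>

/* NAK timeout for NAKlimit value m: 2^(m-1) (micro)frames, where high
 * speed counts 125 us microframes and full speed 1 ms frames.
 */
static unsigned nak_timeout_us(unsigned m, int highspeed)
{
	return (1u << (m - 1)) * (highspeed ? 125 : 1000);
}

int main(void)
{
	printf("HS, m=8: %u us\n", nak_timeout_us(8, 1));	/* 16000 */
	printf("FS, m=4: %u us\n", nak_timeout_us(4, 0));	/* 8000 */
	return 0;
}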
@@ -1764,7 +1819,6 @@ static int musb_schedule(
 	idle = 1;
 	qh->mux = 0;
 	hw_ep = musb->endpoints + best_end;
-	musb->periodic[best_end] = qh;
 	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
 success:
 	if (head) {
@@ -1888,13 +1942,11 @@ static int musb_urb_enqueue(
 			 *
 			 * The downside of disabling this is that transfer scheduling
 			 * gets VERY unfair for nonperiodic transfers; a misbehaving
-			 * peripheral could make that hurt.  Or for reads, one that's
-			 * perfectly normal:  network and other drivers keep reads
-			 * posted at all times, having one pending for a week should
-			 * be perfectly safe.
+			 * peripheral could make that hurt.  That's perfectly normal
+			 * for reads from network or serial adapters ... so we have
+			 * partial NAKlimit support for bulk RX.
 			 *
-			 * The upside of disabling it is avoidng transfer scheduling
-			 * code to put this aside for while.
+			 * The upside of disabling it is simpler transfer scheduling.
 			 */
 			interval = 0;
 		}