path: root/drivers/usb/musb/musb_host.c
author    Ajay Kumar Gupta <ajay.gupta@ti.com>  2009-02-24 18:26:13 -0500
committer Greg Kroah-Hartman <gregkh@suse.de>   2009-03-24 19:20:36 -0400
commit    1e0320f0d46022d12ddc84516cbdb8865e8cd744 (patch)
tree      802251635d1bf0851ff3929c2691a34d26eb27a2 /drivers/usb/musb/musb_host.c
parent    5d67a851bca63d30cde0474bfc4fc4f03db1a1b8 (diff)
USB: musb: NAK timeout scheme on bulk RX endpoint
Fixes endpoint starvation issue when more than one bulk QH is multiplexed
on the reserved bulk RX endpoint, which is normal for cases like serial
and ethernet adapters. This patch sets the NAK timeout interval for such
QHs, and when a timeout triggers the next QH will be scheduled. (This
resembles the bulk scheduling done in hardware by EHCI, OHCI, and UHCI.)

This scheme doesn't work for devices which are connected to a high to
full speed tree (transaction translator) as there is no NAK timeout
interrupt from the musb controller from such devices.

Tested with PIO, Inventra DMA, CPPI DMA.

[ dbrownell@users.sourceforge.net: fold in start_urb() update; clarify
  only for bulk RX; don't accidentally clear WZC bits ]

Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
Cc: Felipe Balbi <felipe.balbi@nokia.com>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
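The core of the scheme is a software round-robin over the QHs multiplexed
on the one reserved bulk RX endpoint: when the hardware NAK-limit timer
fires, the current QH is rotated to the back of the queue and the new head
gets the endpoint. As a rough sketch of just that rotation step, using the
kernel's list API (the demo_qh type and demo_rotate name are illustrative
stand-ins, not driver code; the real rotation is in the
musb_bulk_rx_nak_timeout() hunk below):

#include <linux/list.h>

/* Illustrative stand-in for a host-side queue head; in the driver
 * the real struct musb_qh links into musb->in_bulk the same way.
 */
struct demo_qh {
	struct list_head ring;		/* links QHs sharing the endpoint */
	/* ... per-transfer state ... */
};

/* On a NAK timeout, send the head QH to the back of the line so the
 * next queued QH gets a turn; the caller then restarts the new head.
 */
static void demo_rotate(struct list_head *queue)
{
	struct demo_qh *cur;

	/* with zero or one QH queued there is nothing to starve */
	if (list_empty(queue) || list_is_singular(queue))
		return;

	cur = list_first_entry(queue, struct demo_qh, ring);
	list_move_tail(&cur->ring, queue);
}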
Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--  drivers/usb/musb/musb_host.c | 112
1 file changed, 85 insertions(+), 27 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 9489c8598686..499c431a6d62 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -64,11 +64,8 @@
  *
  * - DMA (Mentor/OMAP) ...has at least toggle update problems
  *
- * - Still no traffic scheduling code to make NAKing for bulk or control
- *   transfers unable to starve other requests; or to make efficient use
- *   of hardware with periodic transfers.  (Note that network drivers
- *   commonly post bulk reads that stay pending for a long time; these
- *   would make very visible trouble.)
+ * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
+ *   starvation ... nothing yet for TX, interrupt, or bulk.
  *
  * - Not tested with HNP, but some SRP paths seem to behave.
  *
@@ -88,11 +85,8 @@
  *
  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
- *
  * (Yes, bulk _could_ use more of the endpoints than that, and would even
- *   benefit from it ... one remote device may easily be NAKing while others
- *   need to perform transfers in that same direction.  The same thing could
- *   be done in software though, assuming dma cooperates.)
+ *   benefit from it.)
  *
  * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
  * So far that scheduling is both dumb and optimistic: the endpoint will be
@@ -201,8 +195,9 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 		len = urb->iso_frame_desc[0].length;
 		break;
 	default:		/* bulk, interrupt */
-		buf = urb->transfer_buffer;
-		len = urb->transfer_buffer_length;
+		/* actual_length may be nonzero on retry paths */
+		buf = urb->transfer_buffer + urb->actual_length;
+		len = urb->transfer_buffer_length - urb->actual_length;
 	}
 
 	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
@@ -1044,7 +1039,8 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 
 	/* NOTE: this code path would be a good place to PAUSE a
 	 * control transfer, if another one is queued, so that
-	 * ep0 is more likely to stay busy.
+	 * ep0 is more likely to stay busy.  That's already done
+	 * for bulk RX transfers.
 	 *
 	 * if (qh->ring.next != &musb->control), then
 	 * we have a candidate... NAKing is *NOT* an error
@@ -1196,6 +1192,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	/* NOTE: this code path would be a good place to PAUSE a
 	 * transfer, if there's some other (nonperiodic) tx urb
 	 * that could use this fifo.  (dma complicates it...)
+	 * That's already done for bulk RX transfers.
 	 *
 	 * if (bulk && qh->ring.next != &musb->out_bulk), then
 	 * we have a candidate... NAKing is *NOT* an error
@@ -1357,6 +1354,50 @@ finish:
 
 #endif
 
+/* Schedule next QH from musb->in_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
+{
+	struct dma_channel	*dma;
+	struct urb		*urb;
+	void __iomem		*mbase = musb->mregs;
+	void __iomem		*epio = ep->regs;
+	struct musb_qh		*cur_qh, *next_qh;
+	u16			rx_csr;
+
+	musb_ep_select(mbase, ep->epnum);
+	dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+	/* clear nak timeout bit */
+	rx_csr = musb_readw(epio, MUSB_RXCSR);
+	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+	rx_csr &= ~MUSB_RXCSR_DATAERROR;
+	musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+	cur_qh = first_qh(&musb->in_bulk);
+	if (cur_qh) {
+		urb = next_urb(cur_qh);
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			musb->dma_controller->channel_abort(dma);
+			urb->actual_length += dma->actual_len;
+			dma->actual_len = 0L;
+		}
+		musb_save_toggle(ep, 1, urb);
+
+		/* move cur_qh to end of queue */
+		list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+		/* get the next qh from musb->in_bulk */
+		next_qh = first_qh(&musb->in_bulk);
+
+		/* set rx_reinit and schedule the next qh */
+		ep->rx_reinit = 1;
+		musb_start_urb(musb, 1, next_qh);
+	}
+}
+
 /*
  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
  * and high-bandwidth IN transfer cases.
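Worth noting how the DMA abort above dovetails with the musb_start_urb()
change earlier in this patch: the aborted channel's completed byte count is
credited to urb->actual_length, so when this QH regains the endpoint the
transfer resumes at that offset instead of restarting the buffer. A minimal
model of that resume arithmetic (the demo_* names are illustrative, not
driver code):

#include <stddef.h>

/* Illustrative model of the URB fields involved in the resume. */
struct demo_urb {
	unsigned char	*transfer_buffer;
	size_t		transfer_buffer_length;
	size_t		actual_length;		/* bytes done so far */
};

/* Credit bytes completed by an aborted DMA channel, as the NAK
 * timeout path does before rotating the QH to the back of the queue.
 */
static void demo_credit_abort(struct demo_urb *urb, size_t dma_done)
{
	urb->actual_length += dma_done;
}

/* Compute the resume point the way the patched musb_start_urb()
 * does: skip what already completed rather than starting over.
 */
static unsigned char *demo_resume(struct demo_urb *urb, size_t *len)
{
	*len = urb->transfer_buffer_length - urb->actual_length;
	return urb->transfer_buffer + urb->actual_length;
}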
@@ -1420,18 +1461,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
 
 		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
-			/* NOTE this code path would be a good place to PAUSE a
-			 * transfer, if there's some other (nonperiodic) rx urb
-			 * that could use this fifo.  (dma complicates it...)
+			DBG(6, "RX end %d NAK timeout\n", epnum);
+
+			/* NOTE: NAKing is *NOT* an error, so we want to
+			 * continue.  Except ... if there's a request for
+			 * another QH, use that instead of starving it.
 			 *
-			 * if (bulk && qh->ring.next != &musb->in_bulk), then
-			 * we have a candidate... NAKing is *NOT* an error
+			 * Devices like Ethernet and serial adapters keep
+			 * reads posted at all times, which will starve
+			 * other devices without this logic.
 			 */
-			DBG(6, "RX end %d NAK timeout\n", epnum);
+			if (usb_pipebulk(urb->pipe)
+					&& qh->mux == 1
+					&& !list_is_singular(&musb->in_bulk)) {
+				musb_bulk_rx_nak_timeout(musb, hw_ep);
+				return;
+			}
 			musb_ep_select(mbase, epnum);
-			musb_writew(epio, MUSB_RXCSR,
-				MUSB_RXCSR_H_WZC_BITS
-				| MUSB_RXCSR_H_REQPKT);
+			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+			rx_csr &= ~MUSB_RXCSR_DATAERROR;
+			musb_writew(epio, MUSB_RXCSR, rx_csr);
 
 			goto finish;
 		} else {
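The register rewrite at the end of this hunk is the "don't accidentally
clear WZC bits" fix noted in the changelog: several RXCSR flags are
write-zero-to-clear, so a plain read-modify-write that wrote them back as 0
would silently ack unrelated events. Writing 1 to every WZC position
preserves them; only DATAERROR, the NAK-timeout flag actually being
handled, is written as 0. A generic model of the pattern (the DEMO_*
register layout is made up for illustration, not the MUSB definitions):

#include <stdint.h>

#define DEMO_WZC_MASK	0x00f0u		/* flags cleared by writing 0 */
#define DEMO_NAK_FLAG	0x0010u		/* the one flag we want to ack */

/* Build the value to write back so that only DEMO_NAK_FLAG is
 * acknowledged: all other WZC bits are forced to 1, which leaves
 * them pending in the hardware.
 */
static uint16_t demo_ack_nak_only(uint16_t csr)
{
	csr |= DEMO_WZC_MASK;		 /* preserve every WZC flag... */
	csr &= (uint16_t)~DEMO_NAK_FLAG; /* ...except the one handled here */
	return csr;
}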
@@ -1751,6 +1800,17 @@ static int musb_schedule(
 			head = &musb->in_bulk;
 		else
 			head = &musb->out_bulk;
+
+		/* Enable bulk RX NAK timeout scheme when bulk requests are
+		 * multiplexed.  This scheme doen't work in high speed to full
+		 * speed scenario as NAK interrupts are not coming from a
+		 * full speed device connected to a high speed device.
+		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
+		 * 4 (8 frame or 8ms) for FS device.
+		 */
+		if (is_in && qh->dev)
+			qh->intv_reg =
+				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
 		goto success;
 	} else if (best_end < 0) {
 		return -ENOSPC;
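The two interval values follow directly from the NAKlimit encoding
described in the comment: a register value m gives a timeout of 2^(m-1)
frame units, where a high speed microframe is 125 us and a full speed frame
is 1 ms. A quick standalone check of the arithmetic (assuming that
encoding, which matches the 128-uframe and 8-frame figures above):

#include <stdio.h>

/* Timeout in microseconds for NAKlimit register value m, given the
 * duration of one (micro)frame; encoding assumed to be 2^(m-1).
 */
static unsigned nak_timeout_us(unsigned m, unsigned frame_us)
{
	return (1u << (m - 1)) * frame_us;
}

int main(void)
{
	/* HS: m = 8 -> 128 uframes * 125 us = 16000 us (16 ms) */
	printf("HS: %u us\n", nak_timeout_us(8, 125));
	/* FS: m = 4 -> 8 frames * 1000 us = 8000 us (8 ms) */
	printf("FS: %u us\n", nak_timeout_us(4, 1000));
	return 0;
}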
@@ -1882,13 +1942,11 @@ static int musb_urb_enqueue(
 		 *
 		 * The downside of disabling this is that transfer scheduling
 		 * gets VERY unfair for nonperiodic transfers; a misbehaving
-		 * peripheral could make that hurt.  Or for reads, one that's
-		 * perfectly normal:  network and other drivers keep reads
-		 * posted at all times, having one pending for a week should
-		 * be perfectly safe.
+		 * peripheral could make that hurt.  That's perfectly normal
+		 * for reads from network or serial adapters ... so we have
+		 * partial NAKlimit support for bulk RX.
 		 *
-		 * The upside of disabling it is avoidng transfer scheduling
-		 * code to put this aside for while.
+		 * The upside of disabling it is simpler transfer scheduling.
 		 */
 		interval = 0;
 	}