author		H. Peter Anvin <hpa@zytor.com>	2009-03-04 00:05:42 -0500
committer	H. Peter Anvin <hpa@zytor.com>	2009-03-04 00:05:42 -0500
commit		2e22ea7cea0f7de86fd30df867fbf5b7e8eee0fd (patch)
tree		8b8e5583fb2787ff7107a6f59b114ddcd2c2f691 /drivers/usb/musb/musb_host.c
parent		638bee71c83a2837b48062fdc5b222163cf53d79 (diff)
parent		645af4e9e0e32481e3336dda813688732c7e5f0f (diff)
Merge branch 'x86/core' into x86/mce2
Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--	drivers/usb/musb/musb_host.c	93
1 file changed, 58 insertions(+), 35 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index a035ceccf950..6dbbd0786a6a 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -335,16 +335,11 @@ musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
 static struct musb_qh *
 musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 {
-	int			is_in;
 	struct musb_hw_ep	*ep = qh->hw_ep;
 	struct musb		*musb = ep->musb;
+	int			is_in = usb_pipein(urb->pipe);
 	int			ready = qh->is_ready;
 
-	if (ep->is_shared_fifo)
-		is_in = 1;
-	else
-		is_in = usb_pipein(urb->pipe);
-
 	/* save toggle eagerly, for paranoia */
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_BULK:
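The special case for shared-FIFO endpoints is gone: the direction used for toggle save and giveback accounting now comes from the URB itself. For reference, usb_pipein() (from <linux/usb.h>) simply tests the direction bit encoded in urb->pipe, so an IN URB reports IN even when the endpoint shares a FIFO:

	/* From <linux/usb.h>: nonzero when the pipe's direction bit is IN */
	#define usb_pipein(pipe)	((pipe) & USB_DIR_IN)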
@@ -432,7 +427,7 @@ musb_advance_schedule(struct musb *musb, struct urb *urb,
 	else
 		qh = musb_giveback(qh, urb, urb->status);
 
-	if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+	if (qh != NULL && qh->is_ready) {
 		DBG(4, "... next ep%d %cX urb %p\n",
 				hw_ep->epnum, is_in ? 'R' : 'T',
 				next_urb(qh));
@@ -942,8 +937,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
 	switch (musb->ep0_stage) {
 	case MUSB_EP0_IN:
 		fifo_dest = urb->transfer_buffer + urb->actual_length;
-		fifo_count = min(len, ((u16) (urb->transfer_buffer_length
-				- urb->actual_length)));
+		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
+				   urb->actual_length);
 		if (fifo_count < len)
 			urb->status = -EOVERFLOW;
 
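The old min(len, (u16)(...)) forced the remaining-byte count into a u16 before comparing, which can truncate when transfer_buffer_length is large; min_t(size_t, ...) promotes both operands to one wide type instead. A minimal userspace sketch of the semantics (a hypothetical stand-in macro, not the kernel's <linux/kernel.h> definition verbatim):

	#include <stdio.h>

	/* Stand-in for the kernel's min_t(): cast both operands to the
	 * named type before comparing, so neither is silently truncated
	 * the way the old explicit (u16) cast could be.
	 */
	#define min_t(type, x, y) ({		\
		type __x = (type)(x);		\
		type __y = (type)(y);		\
		__x < __y ? __x : __y; })

	int main(void)
	{
		unsigned int buffer_len = 70000;	/* > 65535: would wrap in a u16 */
		unsigned int done = 0;
		unsigned short len = 64;

		/* With a u16 cast, 70000 would truncate to 4464 before the compare. */
		size_t fifo_count = min_t(size_t, len, buffer_len - done);
		printf("fifo_count = %zu\n", fifo_count);	/* prints 64 */
		return 0;
	}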
@@ -976,10 +971,9 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
 		}
 		/* FALLTHROUGH */
 	case MUSB_EP0_OUT:
-		fifo_count = min(qh->maxpacket, ((u16)
-				(urb->transfer_buffer_length
-				- urb->actual_length)));
-
+		fifo_count = min_t(size_t, qh->maxpacket,
+				   urb->transfer_buffer_length -
+				   urb->actual_length);
 		if (fifo_count) {
 			fifo_dest = (u8 *) (urb->transfer_buffer
 					+ urb->actual_length);
@@ -1161,7 +1155,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	struct urb		*urb;
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
-	struct musb_qh		*qh = hw_ep->out_qh;
+	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
+							    : hw_ep->out_qh;
 	u32			status = 0;
 	void __iomem		*mbase = musb->mregs;
 	struct dma_channel	*dma;
@@ -1308,7 +1303,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	 * packets before updating TXCSR ... other docs disagree ...
 	 */
 	/* PIO: start next packet in this URB */
-	wLength = min(qh->maxpacket, (u16) wLength);
+	if (wLength > qh->maxpacket)
+		wLength = qh->maxpacket;
 	musb_write_fifo(hw_ep, wLength, buf);
 	qh->segsize = wLength;
 
@@ -1867,19 +1863,21 @@ static int musb_urb_enqueue(
 	}
 	qh->type_reg = type_reg;
 
-	/* precompute rxinterval/txinterval register */
-	interval = min((u8)16, epd->bInterval);	/* log encoding */
+	/* Precompute RXINTERVAL/TXINTERVAL register */
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_INT:
-		/* fullspeed uses linear encoding */
-		if (USB_SPEED_FULL == urb->dev->speed) {
-			interval = epd->bInterval;
-			if (!interval)
-				interval = 1;
+		/*
+		 * Full/low speeds use the linear encoding,
+		 * high speed uses the logarithmic encoding.
+		 */
+		if (urb->dev->speed <= USB_SPEED_FULL) {
+			interval = max_t(u8, epd->bInterval, 1);
+			break;
 		}
 		/* FALLTHROUGH */
 	case USB_ENDPOINT_XFER_ISOC:
-		/* iso always uses log encoding */
+		/* ISO always uses logarithmic encoding */
+		interval = min_t(u8, epd->bInterval, 16);
 		break;
 	default:
 		/* REVISIT we actually want to use NAK limits, hinting to the
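The reordered switch makes the encoding choice explicit: full- and low-speed interrupt endpoints program the interval linearly in frames, while high-speed interrupt and all isochronous endpoints use the logarithmic encoding, where a register value of n means a period of 2^(n-1) (micro)frames. A small standalone illustration of the clamping (an assumed helper for demonstration, not driver code):

	#include <stdio.h>

	/* Mirror the two bInterval encodings selected in the hunk above:
	 * linear (clamped up to 1) for full/low speed interrupt endpoints,
	 * logarithmic (clamped down to 16) for high speed and isochronous.
	 */
	static unsigned interval_reg(int uses_log_encoding, unsigned char bInterval)
	{
		if (!uses_log_encoding)
			return bInterval ? bInterval : 1;	/* linear: frames */
		return bInterval < 16 ? bInterval : 16;		/* log: 2^(n-1) units */
	}

	int main(void)
	{
		printf("FS int, bInterval=0 -> %u\n", interval_reg(0, 0));	/* 1 */
		printf("HS iso, bInterval=4 -> %u (period 2^%u units)\n",
		       interval_reg(1, 4), interval_reg(1, 4) - 1);
		return 0;
	}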
@@ -2037,9 +2035,9 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		goto done;
 
 	/* Any URB not actively programmed into endpoint hardware can be
-	 * immediately given back.  Such an URB must be at the head of its
+	 * immediately given back; that's any URB not at the head of an
 	 * endpoint queue, unless someday we get real DMA queues.  And even
-	 * then, it might not be known to the hardware...
+	 * if it's at the head, it might not be known to the hardware...
 	 *
 	 * Otherwise abort current transfer, pending dma, etc.; urb->status
 	 * has already been updated.  This is a synchronous abort; it'd be
@@ -2078,6 +2076,15 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		qh->is_ready = 0;
 		__musb_giveback(musb, urb, 0);
 		qh->is_ready = ready;
+
+		/* If nothing else (usually musb_giveback) is using it
+		 * and its URB list has emptied, recycle this qh.
+		 */
+		if (ready && list_empty(&qh->hep->urb_list)) {
+			qh->hep->hcpriv = NULL;
+			list_del(&qh->ring);
+			kfree(qh);
+		}
 	} else
 		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
 done:
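The new block settles the qh's lifetime on dequeue: once the endpoint's URB list has drained and no giveback is still walking the qh (ready is the saved is_ready flag), nothing references it, so it can be unhooked from both the endpoint and the scheduling ring and freed. A hedged restatement as a hypothetical helper:

	/* Hypothetical helper restating the recycling rule added above:
	 * an idle qh is detached from hep->hcpriv (so a later enqueue
	 * allocates a fresh one), removed from the schedule ring, freed.
	 */
	static void musb_recycle_qh_if_idle(struct musb_qh *qh, int ready)
	{
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	}

This also explains the earlier hunk in musb_advance_schedule(): a qh that survives giveback is now guaranteed a non-empty URB list, so the !list_empty() test there became redundant.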
@@ -2093,15 +2100,16 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 	unsigned long		flags;
 	struct musb		*musb = hcd_to_musb(hcd);
 	u8			is_in = epnum & USB_DIR_IN;
-	struct musb_qh		*qh = hep->hcpriv;
-	struct urb		*urb, *tmp;
+	struct musb_qh		*qh;
+	struct urb		*urb;
 	struct list_head	*sched;
 
-	if (!qh)
-		return;
-
 	spin_lock_irqsave(&musb->lock, flags);
 
+	qh = hep->hcpriv;
+	if (qh == NULL)
+		goto exit;
+
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_CONTROL:
 		sched = &musb->control;
@@ -2135,13 +2143,28 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 
 		/* cleanup */
 		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
-	} else
-		urb = NULL;
 
-	/* then just nuke all the others */
-	list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
-		musb_giveback(qh, urb, -ESHUTDOWN);
+		/* Then nuke all the others ... and advance the
+		 * queue on hw_ep (e.g. bulk ring) when we're done.
+		 */
+		while (!list_empty(&hep->urb_list)) {
+			urb = next_urb(qh);
+			urb->status = -ESHUTDOWN;
+			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
+		}
+	} else {
+		/* Just empty the queue; the hardware is busy with
+		 * other transfers, and since !qh->is_ready nothing
+		 * will activate any of these as it advances.
+		 */
+		while (!list_empty(&hep->urb_list))
+			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
 
+		hep->hcpriv = NULL;
+		list_del(&qh->ring);
+		kfree(qh);
+	}
+exit:
 	spin_unlock_irqrestore(&musb->lock, flags);
 }
 
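musb_h_disable() previously dereferenced hep->hcpriv before taking musb->lock, racing with a concurrent dequeue that may have freed and recycled the qh; the fix reads the pointer only under the lock and leaves through a common unlock path. A userspace analogue of the check-under-lock pattern (hypothetical names, pthreads used purely for illustration):

	#include <pthread.h>
	#include <stdlib.h>

	struct endpoint {
		pthread_mutex_t	lock;	/* stands in for musb->lock */
		void		*priv;	/* stands in for hep->hcpriv */
	};

	static void disable_endpoint(struct endpoint *ep)
	{
		pthread_mutex_lock(&ep->lock);
		/* Read the shared pointer only while holding the lock;
		 * a concurrent "dequeue" may clear and free it otherwise.
		 */
		void *qh = ep->priv;
		if (qh == NULL)
			goto out;
		free(qh);		/* teardown work happens under the lock */
		ep->priv = NULL;
	out:
		pthread_mutex_unlock(&ep->lock);
	}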