author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-17 21:16:55 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-17 21:16:55 -0400
commit     4b337c5f245b6587ba844ac7bb13c313a2912f7b (patch)
tree       999c6a6580b76a083c8efb9dabff709d1c49fcd0 /drivers/usb/musb/musb_host.c
parent     492b057c426e4aa747484958e18e9da29003985d (diff)
parent     3fe0344faf7fdcb158bd5c1a9aec960a8d70c8e8 (diff)
Merge commit 'origin/master' into next
Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--  drivers/usb/musb/musb_host.c | 273
1 file changed, 108 insertions(+), 165 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index db1b57415ec7..94a2a350a414 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -181,6 +181,19 @@ static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
 	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
 }
 
+static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
+{
+	if (is_in != 0 || ep->is_shared_fifo)
+		ep->in_qh = qh;
+	if (is_in == 0 || ep->is_shared_fifo)
+		ep->out_qh = qh;
+}
+
+static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
+{
+	return is_in ? ep->in_qh : ep->out_qh;
+}
+
 /*
  * Start the URB at the front of an endpoint's queue
  * end must be claimed from the caller.
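The two helpers added above replace open-coded is_shared_fifo tests scattered through the file. A minimal userspace sketch (stand-in types, not the kernel structs) of the invariant they maintain:

#include <assert.h>

struct qh { int unused; };         /* stand-in for struct musb_qh */

struct hw_ep {                     /* trimmed stand-in for struct musb_hw_ep */
	unsigned is_shared_fifo;
	struct qh *in_qh, *out_qh;
};

static void ep_set_qh(struct hw_ep *ep, int is_in, struct qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct qh *ep_get_qh(struct hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

int main(void)
{
	struct qh qh;
	struct hw_ep shared = { .is_shared_fifo = 1 };

	/* On a shared-FIFO endpoint the setter fills BOTH slots, so a
	 * later lookup in either direction finds the same qh.  That is
	 * why musb_host_tx() later in this patch can read hw_ep->out_qh
	 * directly instead of testing is_shared_fifo first. */
	ep_set_qh(&shared, 0, &qh);
	assert(ep_get_qh(&shared, 0) == &qh);
	assert(ep_get_qh(&shared, 1) == &qh);
	return 0;
}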
@@ -210,7 +223,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 	case USB_ENDPOINT_XFER_CONTROL:
 		/* control transfers always start with SETUP */
 		is_in = 0;
-		hw_ep->out_qh = qh;
 		musb->ep0_stage = MUSB_EP0_START;
 		buf = urb->setup_packet;
 		len = 8;
@@ -239,10 +251,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
 			epnum, buf + offset, len);
 
 	/* Configure endpoint */
-	if (is_in || hw_ep->is_shared_fifo)
-		hw_ep->in_qh = qh;
-	else
-		hw_ep->out_qh = qh;
+	musb_ep_set_qh(hw_ep, is_in, qh);
 	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
 
 	/* transmit may have more work: start it when it is time */
@@ -286,9 +295,8 @@ start:
 	}
 }
 
-/* caller owns controller lock, irqs are blocked */
-static void
-__musb_giveback(struct musb *musb, struct urb *urb, int status)
+/* Context: caller owns controller lock, IRQs are blocked */
+static void musb_giveback(struct musb *musb, struct urb *urb, int status)
 __releases(musb->lock)
 __acquires(musb->lock)
 {
@@ -321,60 +329,57 @@ __acquires(musb->lock)
 	spin_lock(&musb->lock);
 }
 
-/* for bulk/interrupt endpoints only */
-static inline void
-musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+/* For bulk/interrupt endpoints only */
+static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
+				    struct urb *urb)
 {
-	struct usb_device *udev = urb->dev;
+	void __iomem *epio = qh->hw_ep->regs;
 	u16 csr;
-	void __iomem *epio = ep->regs;
-	struct musb_qh *qh;
 
-	/* FIXME: the current Mentor DMA code seems to have
+	/*
+	 * FIXME: the current Mentor DMA code seems to have
 	 * problems getting toggle correct.
 	 */
 
-	if (is_in || ep->is_shared_fifo)
-		qh = ep->in_qh;
+	if (is_in)
+		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
 	else
-		qh = ep->out_qh;
+		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
 
-	if (!is_in) {
-		csr = musb_readw(epio, MUSB_TXCSR);
-		usb_settoggle(udev, qh->epnum, 1,
-			(csr & MUSB_TXCSR_H_DATATOGGLE)
-				? 1 : 0);
-	} else {
-		csr = musb_readw(epio, MUSB_RXCSR);
-		usb_settoggle(udev, qh->epnum, 0,
-			(csr & MUSB_RXCSR_H_DATATOGGLE)
-				? 1 : 0);
-	}
+	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
 }
 
-/* caller owns controller lock, irqs are blocked */
-static struct musb_qh *
-musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+/*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+ * advancing to either the next URB queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, IRQs are blocked
+ */
+static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+				  struct musb_hw_ep *hw_ep, int is_in)
 {
+	struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
 	struct musb_hw_ep *ep = qh->hw_ep;
-	struct musb *musb = ep->musb;
-	int is_in = usb_pipein(urb->pipe);
 	int ready = qh->is_ready;
+	int status;
+
+	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
 
 	/* save toggle eagerly, for paranoia */
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_BULK:
 	case USB_ENDPOINT_XFER_INT:
-		musb_save_toggle(ep, is_in, urb);
+		musb_save_toggle(qh, is_in, urb);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
-		if (status == 0 && urb->error_count)
+		if (urb->error_count)
 			status = -EXDEV;
 		break;
 	}
 
 	qh->is_ready = 0;
-	__musb_giveback(musb, urb, status);
+	musb_giveback(musb, urb, status);
 	qh->is_ready = ready;
 
 	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
@@ -388,11 +393,8 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 	else
 		ep->tx_reinit = 1;
 
-	/* clobber old pointers to this qh */
-	if (is_in || ep->is_shared_fifo)
-		ep->in_qh = NULL;
-	else
-		ep->out_qh = NULL;
+	/* Clobber old pointers to this qh */
+	musb_ep_set_qh(ep, is_in, NULL);
 	qh->hep->hcpriv = NULL;
 
 	switch (qh->type) {
@@ -421,36 +423,10 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 			break;
 		}
 	}
-	return qh;
-}
-
-/*
- * Advance this hardware endpoint's queue, completing the specified urb and
- * advancing to either the next urb queued to that qh, or else invalidating
- * that qh and advancing to the next qh scheduled after the current one.
- *
- * Context: caller owns controller lock, irqs are blocked
- */
-static void
-musb_advance_schedule(struct musb *musb, struct urb *urb,
-		struct musb_hw_ep *hw_ep, int is_in)
-{
-	struct musb_qh *qh;
-
-	if (is_in || hw_ep->is_shared_fifo)
-		qh = hw_ep->in_qh;
-	else
-		qh = hw_ep->out_qh;
-
-	if (urb->status == -EINPROGRESS)
-		qh = musb_giveback(qh, urb, 0);
-	else
-		qh = musb_giveback(qh, urb, urb->status);
 
 	if (qh != NULL && qh->is_ready) {
 		DBG(4, "... next ep%d %cX urb %p\n",
-			hw_ep->epnum, is_in ? 'R' : 'T',
-			next_urb(qh));
+			hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
 		musb_start_urb(musb, is_in, qh);
 	}
 }
@@ -629,7 +605,8 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
 	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
 	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
 	/* NOTE: bulk combining rewrites high bits of maxpacket */
-	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+	musb_writew(ep->regs, MUSB_RXMAXP,
+			qh->maxpacket | ((qh->hb_mult - 1) << 11));
 
 	ep->rx_reinit = 0;
 }
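With high-bandwidth ISO support, RXMAXP no longer holds just the payload size: per the hunk above, bits 11-12 carry the per-microframe packet count minus one. A standalone sketch of the arithmetic (the register layout is as implied by the shift in the patch):

#include <stdint.h>
#include <stdio.h>

/* RXMAXP encoding as written above: low 11 bits = base maxpacket,
 * bits 11-12 = (hb_mult - 1), i.e. extra packets per microframe. */
static uint16_t rxmaxp_val(uint16_t maxpacket, unsigned hb_mult)
{
	return maxpacket | (uint16_t)((hb_mult - 1) << 11);
}

int main(void)
{
	/* High-bandwidth ISO IN: 3 transactions x 1024 bytes. */
	printf("RXMAXP = 0x%04x\n", rxmaxp_val(1024, 3)); /* 0x1400 */
	return 0;
}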
@@ -651,9 +628,10 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
 	csr = musb_readw(epio, MUSB_TXCSR);
 	if (length > pkt_size) {
 		mode = 1;
-		csr |= MUSB_TXCSR_AUTOSET
-			| MUSB_TXCSR_DMAMODE
-			| MUSB_TXCSR_DMAENAB;
+		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
+		/* autoset shouldn't be set in high bandwidth */
+		if (qh->hb_mult == 1)
+			csr |= MUSB_TXCSR_AUTOSET;
 	} else {
 		mode = 0;
 		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
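The TX-DMA change above splits the old unconditional flag set: DMAMODE and DMAENAB stay, but AUTOSET (the hardware setting TXPKTRDY by itself once a full maxpacket sits in the FIFO) is now reserved for hb_mult == 1, per the hunk's comment. A sketch of the resulting flag logic; the bit positions are illustrative constants, not taken from musb_regs.h:

#include <stddef.h>
#include <stdint.h>

#define TXCSR_AUTOSET	(1 << 15)	/* illustrative bit positions */
#define TXCSR_DMAMODE	(1 << 10)
#define TXCSR_DMAENAB	(1 << 12)

/* Mirrors the patched branch: DMA mode 1 for multi-packet transfers;
 * per the hunk's comment, AUTOSET is only safe when the endpoint is
 * not high bandwidth (hb_mult == 1). */
static uint16_t tx_dma_flags(uint16_t csr, size_t length, size_t pkt_size,
			     unsigned hb_mult)
{
	if (length > pkt_size) {
		csr |= TXCSR_DMAMODE | TXCSR_DMAENAB;
		if (hb_mult == 1)
			csr |= TXCSR_AUTOSET;
	} else {
		csr &= (uint16_t)~(TXCSR_AUTOSET | TXCSR_DMAMODE);
	}
	return csr;
}

int main(void)
{
	uint16_t csr = tx_dma_flags(0, 4096, 512, 3);
	return !(csr & TXCSR_DMAENAB);	/* hb case: DMAENAB set, AUTOSET not */
}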
@@ -703,15 +681,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 	void __iomem *mbase = musb->mregs;
 	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
 	void __iomem *epio = hw_ep->regs;
-	struct musb_qh *qh;
-	u16 packet_sz;
-
-	if (!is_out || hw_ep->is_shared_fifo)
-		qh = hw_ep->in_qh;
-	else
-		qh = hw_ep->out_qh;
-
-	packet_sz = qh->maxpacket;
+	struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
+	u16 packet_sz = qh->maxpacket;
 
 	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
 			"h_addr%02x h_port%02x bytes %d\n",
@@ -1129,17 +1100,14 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	u16 tx_csr;
 	size_t length = 0;
 	size_t offset = 0;
-	struct urb *urb;
 	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
 	void __iomem *epio = hw_ep->regs;
-	struct musb_qh *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
-							: hw_ep->out_qh;
+	struct musb_qh *qh = hw_ep->out_qh;
+	struct urb *urb = next_urb(qh);
 	u32 status = 0;
 	void __iomem *mbase = musb->mregs;
 	struct dma_channel *dma;
 
-	urb = next_urb(qh);
-
 	musb_ep_select(mbase, epnum);
 	tx_csr = musb_readw(epio, MUSB_TXCSR);
 
@@ -1427,7 +1395,7 @@ static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
 		urb->actual_length += dma->actual_len;
 		dma->actual_len = 0L;
 	}
-	musb_save_toggle(ep, 1, urb);
+	musb_save_toggle(cur_qh, 1, urb);
 
 	/* move cur_qh to end of queue */
 	list_move_tail(&cur_qh->ring, &musb->in_bulk);
@@ -1531,6 +1499,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 			/* packet error reported later */
 			iso_err = true;
 		}
+	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
+		DBG(3, "end %d high bandwidth incomplete ISO packet RX\n",
+				epnum);
+		status = -EPROTO;
 	}
 
 	/* faults abort the transfer */
@@ -1738,7 +1710,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 				val &= ~MUSB_RXCSR_H_AUTOREQ;
 			else
 				val |= MUSB_RXCSR_H_AUTOREQ;
-			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+			val |= MUSB_RXCSR_DMAENAB;
+
+			/* autoclear shouldn't be set in high bandwidth */
+			if (qh->hb_mult == 1)
+				val |= MUSB_RXCSR_AUTOCLEAR;
 
 			musb_writew(epio, MUSB_RXCSR,
 				MUSB_RXCSR_H_WZC_BITS | val);
@@ -1817,19 +1793,17 @@ static int musb_schedule(
 			epnum++, hw_ep++) {
 		int diff;
 
-		if (is_in || hw_ep->is_shared_fifo) {
-			if (hw_ep->in_qh != NULL)
-				continue;
-		} else if (hw_ep->out_qh != NULL)
+		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
 			continue;
 
 		if (hw_ep == musb->bulk_ep)
 			continue;
 
 		if (is_in)
-			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+			diff = hw_ep->max_packet_sz_rx;
 		else
-			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+			diff = hw_ep->max_packet_sz_tx;
+		diff -= (qh->maxpacket * qh->hb_mult);
 
 		if (diff >= 0 && best_diff > diff) {
 			best_diff = diff;
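musb_schedule() claims the idle hardware endpoint whose FIFO wastes the least space; with the hunk above, the space an URB needs is maxpacket * hb_mult rather than maxpacket alone. A toy best-fit pass over invented FIFO sizes shows the effect:

#include <stdio.h>

int main(void)
{
	int fifo_sz[] = { 512, 4096, 1024 };	/* per-endpoint FIFO sizes (made up) */
	int maxpacket = 1024, hb_mult = 3;	/* high-bandwidth ISO endpoint */
	int best_diff = 0x7fffffff, best_end = -1;

	for (int i = 0; i < 3; i++) {
		/* reserve room for the whole microframe, not one packet */
		int diff = fifo_sz[i] - maxpacket * hb_mult;
		if (diff >= 0 && diff < best_diff) {
			best_diff = diff;
			best_end = i;
		}
	}
	/* only the 4096-byte FIFO can hold 3 x 1024 bytes */
	printf("claim endpoint %d (waste %d bytes)\n", best_end, best_diff);
	return 0;
}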
@@ -1932,15 +1906,27 @@ static int musb_urb_enqueue(
 	qh->is_ready = 1;
 
 	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+	qh->type = usb_endpoint_type(epd);
 
-	/* no high bandwidth support yet */
-	if (qh->maxpacket & ~0x7ff) {
-		ret = -EMSGSIZE;
-		goto done;
+	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
+	 * Some musb cores don't support high bandwidth ISO transfers; and
+	 * we don't (yet!) support high bandwidth interrupt transfers.
+	 */
+	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+	if (qh->hb_mult > 1) {
+		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
+
+		if (ok)
+			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
+				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
+		if (!ok) {
+			ret = -EMSGSIZE;
+			goto done;
+		}
+		qh->maxpacket &= 0x7ff;
 	}
 
 	qh->epnum = usb_endpoint_num(epd);
-	qh->type = usb_endpoint_type(epd);
 
 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
 	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
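The enqueue path now decodes the wMaxPacketSize layout instead of rejecting anything above 0x7ff: bits 10..0 are the base packet size and bits 12..11 the additional-transactions-per-microframe field, exactly as the hunk's comment states. A quick standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* wMaxPacketSize for a typical high-bandwidth ISO endpoint:
	 * bits 10..0 = 1024-byte payload, bits 12..11 = 2 -> 3 packets. */
	uint16_t wMaxPacketSize = 0x1400;

	unsigned hb_mult = 1 + ((wMaxPacketSize >> 11) & 0x03);
	unsigned maxpacket = wMaxPacketSize & 0x7ff;

	/* 3 x 1024 = up to 3072 bytes per microframe */
	printf("hb_mult=%u maxpacket=%u\n", hb_mult, maxpacket);
	return 0;
}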
@@ -2052,14 +2038,15 @@ done:
  * called with controller locked, irqs blocked
  * that hardware queue advances to the next transfer, unless prevented
  */
-static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
 {
 	struct musb_hw_ep *ep = qh->hw_ep;
 	void __iomem *epio = ep->regs;
 	unsigned hw_end = ep->epnum;
 	void __iomem *regs = ep->musb->mregs;
-	u16 csr;
+	int is_in = usb_pipein(urb->pipe);
 	int status = 0;
+	u16 csr;
 
 	musb_ep_select(regs, hw_end);
 
@@ -2112,14 +2099,14 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
 	struct musb *musb = hcd_to_musb(hcd);
 	struct musb_qh *qh;
-	struct list_head *sched;
 	unsigned long flags;
+	int is_in = usb_pipein(urb->pipe);
 	int ret;
 
 	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
 			usb_pipedevice(urb->pipe),
 			usb_pipeendpoint(urb->pipe),
-			usb_pipein(urb->pipe) ? "in" : "out");
+			is_in ? "in" : "out");
 
 	spin_lock_irqsave(&musb->lock, flags);
 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -2130,47 +2117,25 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	if (!qh)
 		goto done;
 
-	/* Any URB not actively programmed into endpoint hardware can be
+	/*
+	 * Any URB not actively programmed into endpoint hardware can be
 	 * immediately given back; that's any URB not at the head of an
 	 * endpoint queue, unless someday we get real DMA queues.  And even
 	 * if it's at the head, it might not be known to the hardware...
 	 *
-	 * Otherwise abort current transfer, pending dma, etc.; urb->status
+	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
 	 * has already been updated.  This is a synchronous abort; it'd be
 	 * OK to hold off until after some IRQ, though.
+	 *
+	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
 	 */
-	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
-		ret = -EINPROGRESS;
-	else {
-		switch (qh->type) {
-		case USB_ENDPOINT_XFER_CONTROL:
-			sched = &musb->control;
-			break;
-		case USB_ENDPOINT_XFER_BULK:
-			if (qh->mux == 1) {
-				if (usb_pipein(urb->pipe))
-					sched = &musb->in_bulk;
-				else
-					sched = &musb->out_bulk;
-				break;
-			}
-		default:
-			/* REVISIT when we get a schedule tree, periodic
-			 * transfers won't always be at the head of a
-			 * singleton queue...
-			 */
-			sched = NULL;
-			break;
-		}
-	}
-
-	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
-	if (ret < 0 || (sched && qh != first_qh(sched))) {
+	if (!qh->is_ready
+			|| urb->urb_list.prev != &qh->hep->urb_list
+			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
 		int ready = qh->is_ready;
 
-		ret = 0;
 		qh->is_ready = 0;
-		__musb_giveback(musb, urb, 0);
+		musb_giveback(musb, urb, 0);
 		qh->is_ready = ready;
 
 		/* If nothing else (usually musb_giveback) is using it
@@ -2182,7 +2147,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 			kfree(qh);
 		}
 	} else
-		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+		ret = musb_cleanup_urb(urb, qh);
 done:
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return ret;
@@ -2192,13 +2157,11 @@ done:
 static void
 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 {
-	u8 epnum = hep->desc.bEndpointAddress;
+	u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
 	unsigned long flags;
 	struct musb *musb = hcd_to_musb(hcd);
-	u8 is_in = epnum & USB_DIR_IN;
 	struct musb_qh *qh;
 	struct urb *urb;
-	struct list_head *sched;
 
 	spin_lock_irqsave(&musb->lock, flags);
 
@@ -2206,31 +2169,11 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 	if (qh == NULL)
 		goto exit;
 
-	switch (qh->type) {
-	case USB_ENDPOINT_XFER_CONTROL:
-		sched = &musb->control;
-		break;
-	case USB_ENDPOINT_XFER_BULK:
-		if (qh->mux == 1) {
-			if (is_in)
-				sched = &musb->in_bulk;
-			else
-				sched = &musb->out_bulk;
-			break;
-		}
-	default:
-		/* REVISIT when we get a schedule tree, periodic transfers
-		 * won't always be at the head of a singleton queue...
-		 */
-		sched = NULL;
-		break;
-	}
-
-	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
 
-	/* kick first urb off the hardware, if needed */
+	/* Kick the first URB off the hardware, if needed */
 	qh->is_ready = 0;
-	if (!sched || qh == first_qh(sched)) {
+	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
 		urb = next_urb(qh);
 
 		/* make software (then hardware) stop ASAP */
@@ -2238,7 +2181,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 			urb->status = -ESHUTDOWN;
 
 		/* cleanup */
-		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+		musb_cleanup_urb(urb, qh);
 
 		/* Then nuke all the others ... and advance the
 		 * queue on hw_ep (e.g. bulk ring) when we're done.
@@ -2254,7 +2197,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 		 * will activate any of these as it advances.
 		 */
 		while (!list_empty(&hep->urb_list))
-			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
 
 		hep->hcpriv = NULL;
 		list_del(&qh->ring);
@@ -2293,7 +2236,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
 {
 	struct musb *musb = hcd_to_musb(hcd);
 
-	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+	if (musb->xceiv->state == OTG_STATE_A_SUSPEND)
 		return 0;
 
 	if (is_host_active(musb) && musb->is_active) {