diff options
author | Roger Quadros <rogerq@ti.com> | 2012-08-07 09:09:56 -0400 |
---|---|---|
committer | Felipe Balbi <balbi@ti.com> | 2012-08-07 09:16:34 -0400 |
commit | 4f3e8d263d34e52e75b5adfa14811467d3033d8e (patch) | |
tree | 4316c907e4fcc85fa01bb80ed800a4180f1adc1b /drivers/usb/musb | |
parent | 8e8a55165469c99af0c24a276d997f9473dc89ab (diff) |
usb: musb: use DMA mode 1 whenever possible
Do not rely on any hints from gadget drivers and use DMA mode 1
whenever we expect data of at least the endpoint's packet size and
have not yet received a short packet.
The last packet, if short, is always transferred using DMA mode 0.
This patch fixes USB throughput issues in mass storage mode for
host to device transfers.
Signed-off-by: Roger Quadros <rogerq@ti.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
Diffstat (limited to 'drivers/usb/musb')
-rw-r--r-- | drivers/usb/musb/musb_gadget.c | 30 |
1 file changed, 4 insertions, 26 deletions
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 8d2cce106f04..5c4392b02169 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -707,12 +707,11 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
707 | fifo_count = musb_readw(epio, MUSB_RXCOUNT); | 707 | fifo_count = musb_readw(epio, MUSB_RXCOUNT); |
708 | 708 | ||
709 | /* | 709 | /* |
710 | * Enable Mode 1 on RX transfers only when short_not_ok flag | 710 | * use mode 1 only if we expect data of at least ep packet_sz |
711 | * is set. Currently short_not_ok flag is set only from | 711 | * and have not yet received a short packet |
712 | * file_storage and f_mass_storage drivers | ||
713 | */ | 712 | */ |
714 | 713 | if ((request->length - request->actual >= musb_ep->packet_sz) && | |
715 | if (request->short_not_ok && fifo_count == musb_ep->packet_sz) | 714 | (fifo_count >= musb_ep->packet_sz)) |
716 | use_mode_1 = 1; | 715 | use_mode_1 = 1; |
717 | else | 716 | else |
718 | use_mode_1 = 0; | 717 | use_mode_1 = 0; |
@@ -727,27 +726,6 @@ static void rxstate(struct musb *musb, struct musb_request *req) | |||
727 | c = musb->dma_controller; | 726 | c = musb->dma_controller; |
728 | channel = musb_ep->dma; | 727 | channel = musb_ep->dma; |
729 | 728 | ||
730 | /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in | ||
731 | * mode 0 only. So we do not get endpoint interrupts due to DMA | ||
732 | * completion. We only get interrupts from DMA controller. | ||
733 | * | ||
734 | * We could operate in DMA mode 1 if we knew the size of the tranfer | ||
735 | * in advance. For mass storage class, request->length = what the host | ||
736 | * sends, so that'd work. But for pretty much everything else, | ||
737 | * request->length is routinely more than what the host sends. For | ||
738 | * most these gadgets, end of is signified either by a short packet, | ||
739 | * or filling the last byte of the buffer. (Sending extra data in | ||
740 | * that last pckate should trigger an overflow fault.) But in mode 1, | ||
741 | * we don't get DMA completion interrupt for short packets. | ||
742 | * | ||
743 | * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), | ||
744 | * to get endpoint interrupt on every DMA req, but that didn't seem | ||
745 | * to work reliably. | ||
746 | * | ||
747 | * REVISIT an updated g_file_storage can set req->short_not_ok, which | ||
748 | * then becomes usable as a runtime "use mode 1" hint... | ||
749 | */ | ||
750 | |||
751 | /* Experimental: Mode1 works with mass storage use cases */ | 729 | /* Experimental: Mode1 works with mass storage use cases */ |
752 | if (use_mode_1) { | 730 | if (use_mode_1) { |
753 | csr |= MUSB_RXCSR_AUTOCLEAR; | 731 | csr |= MUSB_RXCSR_AUTOCLEAR; |