Diffstat (limited to 'drivers/usb/musb/musb_gadget.c')
 -rw-r--r--  drivers/usb/musb/musb_gadget.c | 71
 1 file changed, 49 insertions(+), 22 deletions(-)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ed58c6c8f15c..2fe304611dcf 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,33 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+	(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 
 static inline void map_dma_buffer(struct musb_request *request,
-			struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	int compatible = true;
+	struct dma_controller *dma = musb->dma_controller;
+
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
+	/* Check if DMA engine can handle this request.
+	 * DMA code must reject the USB request explicitly.
+	 * Default behaviour is to map the request.
+	 */
+	if (dma->is_compatible)
+		compatible = dma->is_compatible(musb_ep->dma,
+				musb_ep->packet_sz, request->request.buf,
+				request->request.length);
+	if (!compatible)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
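The compatibility check added above relies on an optional is_compatible hook on struct dma_controller. A sketch of the hook as invoked here (the real declaration lives in musb_dma.h; its exact wording is an assumption of this note, not part of the diff):

	/* Optional per-controller veto. Returns zero when the DMA engine
	 * cannot handle this buffer for the given channel and packet size;
	 * when the hook is absent, the request is mapped by default. */
	int (*is_compatible)(struct dma_channel *channel,
			u16 maxpacket, void *buf, u32 length);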
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
 
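map_dma_buffer() now records one of three mapping states in place of the old request->mapped flag. A minimal sketch of the enum and field these hunks assume (defined in musb_gadget.h by the same patch; the value names match the identifiers used above, the exact header layout is inferred):

	enum buffer_map_state {
		UN_MAPPED = 0,	/* not DMA-mapped; data paths fall back to PIO */
		PRE_MAPPED,	/* gadget driver supplied request.dma itself */
		MUSB_MAPPED	/* mapped here via dma_map_single() */
	};

	/* struct musb_request gains the matching member:
	 *	enum buffer_map_state map_state;
	 */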
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
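Taken together, the two helpers form a small state machine. A sketch of the transitions implemented above:

	/*
	 * map_dma_buffer():   UN_MAPPED -> MUSB_MAPPED (buffer mapped via
	 *                     dma_map_single()) or PRE_MAPPED (caller gave
	 *                     a DMA address); stays UN_MAPPED when DMA is
	 *                     unavailable or is_compatible() rejects it.
	 *
	 * unmap_dma_buffer(): MUSB_MAPPED or PRE_MAPPED -> UN_MAPPED;
	 *                     early-returns when already UN_MAPPED, which
	 *                     is what lets callers drop their guards.
	 */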
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			csr);
 
 #ifndef CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller *c = musb->dma_controller;
 		size_t request_size;
 
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller *c = musb->dma_controller;
 		struct dma_channel *channel = musb_ep->dma;
 
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller *c;
 				struct dma_channel *channel;
 				int use_dma = 0;
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
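In txstate() and rxstate() each DMA path is now gated on is_buffer_mapped(req) rather than on raw controller capability, so a request left UN_MAPPED (no channel, or vetoed by is_compatible()) falls through to FIFO PIO. Schematically (a sketch of the control flow, not a quote of the driver):

	if (is_buffer_mapped(req)) {
		/* program a DMA channel for this request */
	} else {
		/* PIO path: musb_write_fifo() / musb_read_fifo() */
	}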
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set TXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
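Both MAXP hunks implement the same selection. A hypothetical helper, musb_maxp_value(), showing the logic under the fields the diff uses (double_buffer_not_ok, packet_sz, hb_mult, max_packet_sz_tx); note that, exactly as in the hunk above, the RX branch also writes hw_ep->max_packet_sz_tx:

	/* Sketch only: choose the MAXP register value. Writing the full
	 * FIFO size disables double buffering on controllers where it is
	 * broken; otherwise program the packet size with the high-bandwidth
	 * multiplier shifted into the upper bits. */
	static u16 musb_maxp_value(struct musb *musb, struct musb_ep *musb_ep,
				   struct musb_hw_ep *hw_ep)
	{
		if (musb->double_buffer_not_ok)
			return hw_ep->max_packet_sz_tx;
		return musb_ep->packet_sz | (musb_ep->hb_mult << 11);
	}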
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
 
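With map_dma_buffer() performing its own capability and compatibility checks, the queue path needs neither the is_dma_capable()/musb_ep->dma guard nor the manual request->mapped bookkeeping; the single unconditional call above is the whole call-site contract, and a request that could not be mapped is simply serviced by PIO.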