author    Hema Kalliguddi <hemahk@ti.com>    2010-11-15 05:24:01 -0500
committer Felipe Balbi <balbi@ti.com>        2010-11-22 05:36:48 -0500
commit    92d2711f5dc15bf956546923a5718e74853f9912 (patch)
tree      40d27b4757171130ccc03106b579f5a4d3425871 /drivers/usb
parent    3561d43fd289f590fdae672e5eb831b8d5cf0bf6 (diff)
usb: musb: unmap dma buffer when switching to PIO
The buffer is mapped for DMA when the DMA channel is allocated. If, for some
reason, DMA channel programming fails, the musb code falls back to PIO mode
to transfer that request. In that case, we need to unmap the buffer back to
the CPU.

MUSB RTL 1.8 and above cannot handle buffers which are not 32-bit aligned,
and that is the case for every request sent by the g_ether gadget driver.
Since the buffers sent were unaligned, we had to fall back to PIO, and
because of the missing buffer unmapping, g_ether was failing.

With this patch and [1], g_ether works fine with all MUSB revisions.
Verified on an OMAP3630 board, which has MUSB RTL 1.8, using g_ether and
g_zero.

[1] http://www.spinics.net/lists/linux-usb/msg38400.html

Signed-off-by: Hema HK <hemahk@ti.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
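For background, the helpers this patch introduces follow the kernel's
streaming DMA-mapping idiom: map the request buffer for the device before
programming the DMA channel, and unmap it (or sync it) back to the CPU
before any PIO code touches it again. A minimal, self-contained sketch of
that idiom follows; struct pio_fallback_req, map_for_dma() and
unmap_for_pio() are hypothetical names for illustration only, not MUSB
driver symbols, and the sketch detects mapping failure with
dma_mapping_error() where this driver instead tracks the DMA_ADDR_INVALID
sentinel.

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Hypothetical request type standing in for struct musb_request. */
struct pio_fallback_req {
	void		*buf;	/* CPU-visible buffer */
	size_t		len;
	dma_addr_t	dma;	/* valid only while mapped */
	bool		mapped;	/* true if we own the mapping */
};

/* Map the buffer for device access before programming the DMA channel. */
static int map_for_dma(struct device *dev, struct pio_fallback_req *req,
		       enum dma_data_direction dir)
{
	req->dma = dma_map_single(dev, req->buf, req->len, dir);
	if (dma_mapping_error(dev, req->dma))
		return -ENOMEM;	/* caller falls back to PIO */
	req->mapped = true;
	return 0;
}

/*
 * Hand the buffer back to the CPU before PIO reads or writes it;
 * skipping this step on the DMA-to-PIO fallback path is the bug
 * this patch fixes.
 */
static void unmap_for_pio(struct device *dev, struct pio_fallback_req *req,
			  enum dma_data_direction dir)
{
	if (!req->mapped)
		return;
	dma_unmap_single(dev, req->dma, req->len, dir);
	req->mapped = false;
}

In the patch, txstate() and rxstate() gain exactly this kind of unmap call
on their !use_dma paths, so PIO never touches a buffer that is still mapped
to the device.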
Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 117
1 file changed, 79 insertions(+), 38 deletions(-)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 36cfd060dbe5..0169dcf3a6ff 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,6 +92,59 @@
 
 /* ----------------------------------------------------------------------- */
 
+/* Maps the buffer to dma */
+
+static inline void map_dma_buffer(struct musb_request *request,
+			struct musb *musb)
+{
+	if (request->request.dma == DMA_ADDR_INVALID) {
+		request->request.dma = dma_map_single(
+				musb->controller,
+				request->request.buf,
+				request->request.length,
+				request->tx
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+		request->mapped = 1;
+	} else {
+		dma_sync_single_for_device(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		request->mapped = 0;
+	}
+}
+
+/* Unmap the buffer from dma and maps it back to cpu */
+static inline void unmap_dma_buffer(struct musb_request *request,
+			struct musb *musb)
+{
+	if (request->request.dma == DMA_ADDR_INVALID) {
+		DBG(20, "not unmapping a never mapped buffer\n");
+		return;
+	}
+	if (request->mapped) {
+		dma_unmap_single(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		request->request.dma = DMA_ADDR_INVALID;
+		request->mapped = 0;
+	} else {
+		dma_sync_single_for_cpu(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+
+	}
+}
+
 /*
  * Immediately complete a request.
  *
@@ -119,24 +172,8 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable()) {
-		if (req->mapped) {
-			dma_unmap_single(musb->controller,
-				req->request.dma,
-				req->request.length,
-				req->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-			req->request.dma = DMA_ADDR_INVALID;
-			req->mapped = 0;
-		} else if (req->request.dma != DMA_ADDR_INVALID)
-			dma_sync_single_for_cpu(musb->controller,
-				req->request.dma,
-				req->request.length,
-				req->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-	}
+	if (is_dma_capable() && ep->dma)
+		unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 			ep->end_point.name, request,
@@ -395,6 +432,13 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #endif
 
 	if (!use_dma) {
+		/*
+		 * Unmap the dma buffer back to cpu if dma channel
+		 * programming fails
+		 */
+		if (is_dma_capable() && musb_ep->dma)
+			unmap_dma_buffer(req, musb);
+
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
 		request->actual += fifo_count;
@@ -713,6 +757,20 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			return;
 		}
 #endif
+		/*
+		 * Unmap the dma buffer back to cpu if dma channel
+		 * programming fails. This buffer is mapped if the
+		 * channel allocation is successful
+		 */
+		if (is_dma_capable() && musb_ep->dma) {
+			unmap_dma_buffer(req, musb);
+
+			/* Clear DMAENAB for the
+			 * PIO mode transfer
+			 */
+			csr &= ~MUSB_RXCSR_DMAENAB;
+			musb_writew(epio, MUSB_RXCSR, csr);
+		}
 
 		musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
 				(request->buf + request->actual));
@@ -1150,26 +1208,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma) {
-		if (request->request.dma == DMA_ADDR_INVALID) {
-			request->request.dma = dma_map_single(
-					musb->controller,
-					request->request.buf,
-					request->request.length,
-					request->tx
-						? DMA_TO_DEVICE
-						: DMA_FROM_DEVICE);
-			request->mapped = 1;
-		} else {
-			dma_sync_single_for_device(musb->controller,
-				request->request.dma,
-				request->request.length,
-				request->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-			request->mapped = 0;
-		}
-	} else
+	if (is_dma_capable() && musb_ep->dma)
+		map_dma_buffer(request, musb);
+	else
 		request->mapped = 0;
 
 	spin_lock_irqsave(&musb->lock, lockflags);