author    Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>    2011-01-04 06:47:02 -0500
committer Felipe Balbi <balbi@ti.com>                               2011-02-01 03:41:30 -0500
commit    c65bfa62b7185bdeb063c2a637f501f00997d068 (patch)
tree      f2b0a848f0994f9570d149dc60948dfbd32abcf4 /drivers/usb
parent    0662481855c389b75a0a54c32870cc90563d80a9 (diff)
usb: musb: maintain three states for buffer mappings instead of two
If DMA buffers are mapped by a higher layer, then with a boolean
musb_request.mapped it is still possible for dma_sync_single_for_device()
to be called from musb_g_giveback() even though txstate()/rxstate() has
already called unmap_dma_buffer() before falling back to PIO mode.
Maintain three states (UN_MAPPED, PRE_MAPPED, MUSB_MAPPED) instead of
two, so that an already-unmapped buffer is never synced again.

Moreover, the check for musb_ep->dma is moved into map_dma_buffer(), so
the corresponding checks at its call sites are removed. Where possible,
checks for is_dma_capable() are merged with the buffer map state check
via the new is_buffer_mapped() macro.
Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
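
For reference, the request life cycle this patch establishes can be reduced
to a standalone sketch like the one below. It is illustrative only: the enum
values mirror the patch, but struct req, the dma_capable/premapped flags, and
the puts() calls standing in for dma_unmap_single()/dma_sync_single_for_cpu()
are simplified assumptions, not the driver code.

    /*
     * Illustrative sketch only -- not the driver code. The enum mirrors
     * the patch; struct req and the puts() stand-ins for the DMA API
     * are simplified assumptions.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED };

    struct req {
        bool premapped;                 /* mapped by a higher layer? */
        enum buffer_map_state map_state;
    };

    static void map_dma_buffer(struct req *r, bool dma_capable)
    {
        r->map_state = UN_MAPPED;
        if (!dma_capable)
            return;                     /* PIO-only: nothing to undo later */
        /* driver: dma_map_single() vs. dma_sync_single_for_device() */
        r->map_state = r->premapped ? PRE_MAPPED : MUSB_MAPPED;
    }

    static void unmap_dma_buffer(struct req *r)
    {
        if (r->map_state == UN_MAPPED)
            return;                     /* the case a bare boolean loses */
        if (r->map_state == MUSB_MAPPED)
            puts("dma_unmap_single()");         /* we created the mapping */
        else    /* PRE_MAPPED: the owner keeps the mapping, only sync it */
            puts("dma_sync_single_for_cpu()");
        r->map_state = UN_MAPPED;       /* giveback now sees it as unmapped */
    }

    int main(void)
    {
        struct req r = { .premapped = true };

        map_dma_buffer(&r, true);       /* as in musb_gadget_queue() */
        unmap_dma_buffer(&r);           /* txstate() falls back to PIO */
        unmap_dma_buffer(&r);           /* musb_g_giveback(): returns early */
        return 0;
    }

With the old boolean, the second unmap_dma_buffer() call above had no way to
tell that the buffer was already synced back for the CPU; with three states
it simply returns early.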
Diffstat (limited to 'drivers/usb')
 -rw-r--r--  drivers/usb/musb/musb_gadget.c | 45
 -rw-r--r--  drivers/usb/musb/musb_gadget.h |  8
 2 files changed, 32 insertions, 21 deletions
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 23dad4c8785d..65775039d6b3 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,19 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+		(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma */
 
 static inline void map_dma_buffer(struct musb_request *request,
-				struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +113,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +121,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
 
@@ -121,11 +129,14 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +144,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +182,7 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +344,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			csr);
 
 #ifndef CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
 
@@ -436,8 +445,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +635,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
 
@@ -658,7 +666,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -742,7 +750,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +770,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
@@ -1222,10 +1230,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
 
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index dec8dc008191..a55354fbccf5 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H
 
+enum buffer_map_state {
+	UN_MAPPED = 0,
+	PRE_MAPPED,
+	MUSB_MAPPED
+};
+
 struct musb_request {
 	struct usb_request	request;
 	struct musb_ep		*ep;
 	struct musb		*musb;
 	u8 tx;			/* endpoint direction */
 	u8 epnum;
-	u8 mapped;
+	enum buffer_map_state map_state;
 };
 
 static inline struct musb_request *to_musb_request(struct usb_request *req)