author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2011-01-10 23:10:08 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2011-01-10 23:10:08 -0500
commit     eed0ba0b4ab2d1668588219a8efa81bf8636a12d (patch)
tree       f5aa3c732e7830a1b24e6071f8bed0f799881187 /drivers/usb/musb/musb_gadget.c
parent     98b14d6b290d96b24ae993ceaccc59b2aa4b130c (diff)
parent     c9de9333f5a860cab82052bce6ac28bcac9b2c26 (diff)
Merge remote branch 'gcl/next' into next
Diffstat (limited to 'drivers/usb/musb/musb_gadget.c')
 -rw-r--r--  drivers/usb/musb/musb_gadget.c  124
 1 file changed, 86 insertions, 38 deletions
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 36cfd060dbe5..9d6ade82b9f2 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,6 +92,59 @@
 
 /* ----------------------------------------------------------------------- */
 
+/* Maps the buffer to dma  */
+
+static inline void map_dma_buffer(struct musb_request *request,
+                                struct musb *musb)
+{
+        if (request->request.dma == DMA_ADDR_INVALID) {
+                request->request.dma = dma_map_single(
+                                musb->controller,
+                                request->request.buf,
+                                request->request.length,
+                                request->tx
+                                        ? DMA_TO_DEVICE
+                                        : DMA_FROM_DEVICE);
+                request->mapped = 1;
+        } else {
+                dma_sync_single_for_device(musb->controller,
+                        request->request.dma,
+                        request->request.length,
+                        request->tx
+                                ? DMA_TO_DEVICE
+                                : DMA_FROM_DEVICE);
+                request->mapped = 0;
+        }
+}
+
+/* Unmap the buffer from dma and maps it back to cpu */
+static inline void unmap_dma_buffer(struct musb_request *request,
+                                struct musb *musb)
+{
+        if (request->request.dma == DMA_ADDR_INVALID) {
+                DBG(20, "not unmapping a never mapped buffer\n");
+                return;
+        }
+        if (request->mapped) {
+                dma_unmap_single(musb->controller,
+                        request->request.dma,
+                        request->request.length,
+                        request->tx
+                                ? DMA_TO_DEVICE
+                                : DMA_FROM_DEVICE);
+                request->request.dma = DMA_ADDR_INVALID;
+                request->mapped = 0;
+        } else {
+                dma_sync_single_for_cpu(musb->controller,
+                        request->request.dma,
+                        request->request.length,
+                        request->tx
+                                ? DMA_TO_DEVICE
+                                : DMA_FROM_DEVICE);
+
+        }
+}
+
 /*
  * Immediately complete a request.
  *
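The two helpers above centralize the streaming-DMA handling that was previously open-coded in musb_g_giveback() and musb_gadget_queue(). As background for readers new to that API, here is a minimal, hypothetical sketch (example_dma_roundtrip(), dev, buf and len are illustrative names, not part of this patch) of the ownership cycle the helpers encode: the CPU hands a buffer to the device with dma_map_single(), and must take it back with dma_unmap_single() before touching the memory again.

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Illustrative sketch only -- not part of musb_gadget.c.  It shows the
 * map -> device-owns -> unmap -> cpu-owns cycle that map_dma_buffer()
 * and unmap_dma_buffer() implement for a usb_request.
 */
static int example_dma_roundtrip(struct device *dev, void *buf,
                                 size_t len, bool tx)
{
        enum dma_data_direction dir = tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        dma_addr_t handle;

        /* CPU hands the buffer to the device; no CPU access from here on. */
        handle = dma_map_single(dev, buf, len, dir);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... program the DMA engine with "handle" and wait for it ... */

        /* Device hands the buffer back; the CPU may read/write it again. */
        dma_unmap_single(dev, handle, len, dir);
        return 0;
}

The else branches cover a request whose buffer was already mapped by the gadget driver itself (request.dma valid on entry); there only a cache sync is needed, which is why request->mapped stays 0 and unmap_dma_buffer() ends in dma_sync_single_for_cpu() rather than dma_unmap_single().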
@@ -119,24 +172,8 @@ __acquires(ep->musb->lock)
 
         ep->busy = 1;
         spin_unlock(&musb->lock);
-        if (is_dma_capable()) {
-                if (req->mapped) {
-                        dma_unmap_single(musb->controller,
-                                req->request.dma,
-                                req->request.length,
-                                req->tx
-                                        ? DMA_TO_DEVICE
-                                        : DMA_FROM_DEVICE);
-                        req->request.dma = DMA_ADDR_INVALID;
-                        req->mapped = 0;
-                } else if (req->request.dma != DMA_ADDR_INVALID)
-                        dma_sync_single_for_cpu(musb->controller,
-                                req->request.dma,
-                                req->request.length,
-                                req->tx
-                                        ? DMA_TO_DEVICE
-                                        : DMA_FROM_DEVICE);
-        }
+        if (is_dma_capable() && ep->dma)
+                unmap_dma_buffer(req, musb);
         if (request->status == 0)
                 DBG(5, "%s done request %p, %d/%d\n",
                         ep->end_point.name, request,
@@ -395,6 +432,13 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #endif
 
         if (!use_dma) {
+                /*
+                 * Unmap the dma buffer back to cpu if dma channel
+                 * programming fails
+                 */
+                if (is_dma_capable() && musb_ep->dma)
+                        unmap_dma_buffer(req, musb);
+
                 musb_write_fifo(musb_ep->hw_ep, fifo_count,
                                 (u8 *) (request->buf + request->actual));
                 request->actual += fifo_count;
@@ -713,6 +757,21 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                         return;
                 }
 #endif
+        /*
+         * Unmap the dma buffer back to cpu if dma channel
+         * programming fails. This buffer is mapped if the
+         * channel allocation is successful
+         */
+        if (is_dma_capable() && musb_ep->dma) {
+                unmap_dma_buffer(req, musb);
+
+                /*
+                 * Clear DMAENAB and AUTOCLEAR for the
+                 * PIO mode transfer
+                 */
+                csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
+                musb_writew(epio, MUSB_RXCSR, csr);
+        }
 
         musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
                         (request->buf + request->actual));
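The RXCSR update in this hunk is the usual read-modify-write of a MUSB endpoint control/status register: before draining the FIFO by PIO the driver clears DMAENAB, so the endpoint stops raising DMA requests, and AUTOCLEAR, so the controller does not clear RxPktRdy on its own once the packet has been unloaded. A small hypothetical helper sketch of that pattern (musb_clear_rxcsr_bits() is an illustrative name, not in the driver; epio is the per-endpoint register base as used in rxstate()):

/*
 * Illustrative sketch only: clear bits in an endpoint RXCSR before
 * falling back to a PIO transfer.
 */
static inline void musb_clear_rxcsr_bits(void __iomem *epio, u16 bits)
{
        u16 csr = musb_readw(epio, MUSB_RXCSR);

        csr &= ~bits;
        musb_writew(epio, MUSB_RXCSR, csr);
}

/* e.g. musb_clear_rxcsr_bits(epio, MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); */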
@@ -837,7 +896,9 @@ void musb_g_rx(struct musb *musb, u8 epnum)
                 if (!request)
                         return;
         }
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
 exit:
+#endif
         /* Analyze request */
         rxstate(musb, to_musb_request(request));
 }
@@ -1150,26 +1211,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
         request->epnum = musb_ep->current_epnum;
         request->tx = musb_ep->is_in;
 
-        if (is_dma_capable() && musb_ep->dma) {
-                if (request->request.dma == DMA_ADDR_INVALID) {
-                        request->request.dma = dma_map_single(
-                                        musb->controller,
-                                        request->request.buf,
-                                        request->request.length,
-                                        request->tx
-                                                ? DMA_TO_DEVICE
-                                                : DMA_FROM_DEVICE);
-                        request->mapped = 1;
-                } else {
-                        dma_sync_single_for_device(musb->controller,
-                                request->request.dma,
-                                request->request.length,
-                                request->tx
-                                        ? DMA_TO_DEVICE
-                                        : DMA_FROM_DEVICE);
-                        request->mapped = 0;
-                }
-        } else
+        if (is_dma_capable() && musb_ep->dma)
+                map_dma_buffer(request, musb);
+        else
                 request->mapped = 0;
 
         spin_lock_irqsave(&musb->lock, lockflags);
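With the mapping folded into map_dma_buffer(), the contract seen from a gadget function driver is unchanged: it fills in buf, length and complete and queues the request; whether musb then maps the buffer for DMA or ends up doing PIO is decided entirely inside musb_gadget_queue() and txstate()/rxstate(). A hypothetical caller-side sketch (example_submit() and its parameters are illustrative, not from the kernel tree):

#include <linux/usb/gadget.h>
#include <linux/slab.h>
#include <linux/errno.h>

/*
 * Illustrative sketch only: a function driver queues a request without
 * caring whether the UDC below (here musb) will use DMA or PIO.
 */
static int example_submit(struct usb_ep *ep, void *data, unsigned int len,
                          void (*done)(struct usb_ep *, struct usb_request *))
{
        struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

        if (!req)
                return -ENOMEM;

        req->buf = data;
        req->length = len;
        req->complete = done;

        return usb_ep_queue(ep, req, GFP_ATOMIC);
}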
@@ -1789,6 +1833,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
         spin_unlock_irqrestore(&musb->lock, flags);
 
         if (is_otg_enabled(musb)) {
+                struct usb_hcd *hcd = musb_to_hcd(musb);
+
                 DBG(3, "OTG startup...\n");
 
                 /* REVISIT: funcall to other code, which also
@@ -1803,6 +1849,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
                         musb->gadget_driver = NULL;
                         musb->g.dev.driver = NULL;
                         spin_unlock_irqrestore(&musb->lock, flags);
+                } else {
+                        hcd->self.uses_pio_for_control = 1;
                 }
         }
 }
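The last two hunks are the host-side counterpart of the PIO handling: when musb starts in OTG mode it registers an HCD, and setting uses_pio_for_control tells the USB core not to DMA-map control transfers on this bus, since musb drives endpoint 0 by PIO. A hypothetical illustration of how such a flag is meant to be consulted (example_needs_dma_map() is an illustrative name; the real check lives in the USB core's URB mapping path):

#include <linux/usb.h>
#include <linux/usb/hcd.h>

/*
 * Illustrative sketch only: a bus that sets uses_pio_for_control asks
 * the core to skip DMA mapping for control transfers.
 */
static bool example_needs_dma_map(struct usb_hcd *hcd, struct urb *urb)
{
        if (hcd->self.uses_pio_for_control &&
            usb_endpoint_xfer_control(&urb->ep->desc))
                return false;   /* ep0 data will be moved by PIO */

        return true;
}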