Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r-- | drivers/usb/musb/musb_host.c | 2170
1 file changed, 2170 insertions, 0 deletions
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 000000000000..8b4be012669a
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2170 @@
1 | /* | ||
2 | * MUSB OTG driver host support | ||
3 | * | ||
4 | * Copyright 2005 Mentor Graphics Corporation | ||
5 | * Copyright (C) 2005-2006 by Texas Instruments | ||
6 | * Copyright (C) 2006-2007 Nokia Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | ||
23 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
24 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
25 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
27 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
28 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
29 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
31 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | #include <linux/module.h> | ||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <linux/sched.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/errno.h> | ||
41 | #include <linux/init.h> | ||
42 | #include <linux/list.h> | ||
43 | |||
44 | #include "musb_core.h" | ||
45 | #include "musb_host.h" | ||
46 | |||
47 | |||
48 | /* MUSB HOST status 22-mar-2006 | ||
49 | * | ||
50 | * - There's still lots of partial code duplication for fault paths, so | ||
51 | * they aren't handled as consistently as they need to be. | ||
52 | * | ||
53 | * - PIO mostly behaved when last tested. | ||
54 | * + including ep0, with all usbtest cases 9, 10 | ||
55 | * + usbtest 14 (ep0out) doesn't seem to run at all | ||
56 | * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest | ||
57 | * configurations, but otherwise double buffering passes basic tests. | ||
58 | * + for 2.6.N, for N > ~10, needs API changes for hcd framework. | ||
59 | * | ||
60 | * - DMA (CPPI) ... partially behaves, not currently recommended | ||
61 | * + about 1/15 the speed of typical EHCI implementations (PCI) | ||
62 | * + RX, all too often reqpkt seems to misbehave after tx | ||
63 | * + TX, no known issues (other than evident silicon issue) | ||
64 | * | ||
65 | * - DMA (Mentor/OMAP) ...has at least toggle update problems | ||
66 | * | ||
67 | * - Still no traffic scheduling code to make NAKing for bulk or control | ||
68 | * transfers unable to starve other requests; or to make efficient use | ||
69 | * of hardware with periodic transfers. (Note that network drivers | ||
70 | * commonly post bulk reads that stay pending for a long time; these | ||
71 | * would make very visible trouble.) | ||
72 | * | ||
73 | * - Not tested with HNP, but some SRP paths seem to behave. | ||
74 | * | ||
75 | * NOTE 24-August-2006: | ||
76 | * | ||
77 | * - Bulk traffic finally uses both sides of hardware ep1, freeing up an | ||
78 | * extra endpoint for periodic use enabling hub + keybd + mouse. That | ||
79 | * mostly works, except that with "usbnet" it's easy to trigger cases | ||
80 | * with "ping" where RX loses. (a) ping to davinci, even "ping -f", | ||
81 | * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses | ||
82 | * although ARP RX wins. (That test was done with a full speed link.) | ||
83 | */ | ||
84 | |||
85 | |||
86 | /* | ||
87 | * NOTE on endpoint usage: | ||
88 | * | ||
89 | * CONTROL transfers all go through ep0. BULK ones go through dedicated IN | ||
90 | * and OUT endpoints ... hardware is dedicated for those "async" queue(s). | ||
91 | * | ||
92 | * (Yes, bulk _could_ use more of the endpoints than that, and would even | ||
93 | * benefit from it ... one remote device may easily be NAKing while others | ||
94 | * need to perform transfers in that same direction. The same thing could | ||
95 | * be done in software though, assuming dma cooperates.) | ||
96 | * | ||
97 | * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints. | ||
98 | * So far that scheduling is both dumb and optimistic: the endpoint will be | ||
99 | * "claimed" until its software queue is no longer refilled. No multiplexing | ||
100 | * of transfers between endpoints, or anything clever. | ||
101 | */ | ||
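/*
 * Editor's note -- a minimal sketch of the "claimed until its software
 * queue is no longer refilled" policy described above. The helper below
 * is hypothetical and never built (hence #if 0); it assumes the
 * nr_endpoints count and per-endpoint qh pointers from musb_core.h.
 */
#if 0
static struct musb_hw_ep *find_unclaimed_ep_sketch(struct musb *musb)
{
	u8 epnum;

	/* skip ep0; the real scheduler also skips the bulk endpoints */
	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
		struct musb_hw_ep *hw_ep = musb->endpoints + epnum;

		/* "claimed" just means a qh is still attached */
		if (hw_ep->in_qh || hw_ep->out_qh)
			continue;
		return hw_ep;
	}
	return NULL;	/* nothing free; caller waits for a queue to drain */
}
#endif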
102 | |||
103 | |||
104 | static void musb_ep_program(struct musb *musb, u8 epnum, | ||
105 | struct urb *urb, unsigned int is_out, | ||
106 | u8 *buf, u32 len); | ||
107 | |||
108 | /* | ||
109 | * Clear TX fifo. Needed to avoid BABBLE errors. | ||
110 | */ | ||
111 | static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | ||
112 | { | ||
113 | void __iomem *epio = ep->regs; | ||
114 | u16 csr; | ||
115 | int retries = 1000; | ||
116 | |||
117 | csr = musb_readw(epio, MUSB_TXCSR); | ||
118 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { | ||
119 | DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr); | ||
120 | csr |= MUSB_TXCSR_FLUSHFIFO; | ||
121 | musb_writew(epio, MUSB_TXCSR, csr); | ||
122 | csr = musb_readw(epio, MUSB_TXCSR); | ||
123 | if (retries-- < 1) { | ||
124 | ERR("Could not flush host TX fifo: csr: %04x\n", csr); | ||
125 | return; | ||
126 | } | ||
127 | mdelay(1); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Start transmit. Caller is responsible for locking shared resources. | ||
133 | * musb must be locked. | ||
134 | */ | ||
135 | static inline void musb_h_tx_start(struct musb_hw_ep *ep) | ||
136 | { | ||
137 | u16 txcsr; | ||
138 | |||
139 | /* NOTE: no locks here; caller should lock and select EP */ | ||
140 | if (ep->epnum) { | ||
141 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | ||
142 | txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; | ||
143 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | ||
144 | } else { | ||
145 | txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; | ||
146 | musb_writew(ep->regs, MUSB_CSR0, txcsr); | ||
147 | } | ||
148 | |||
149 | } | ||
150 | |||
151 | static inline void cppi_host_txdma_start(struct musb_hw_ep *ep) | ||
152 | { | ||
153 | u16 txcsr; | ||
154 | |||
155 | /* NOTE: no locks here; caller should lock and select EP */ | ||
156 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | ||
157 | txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; | ||
158 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Start the URB at the front of an endpoint's queue | ||
163 | * end must be claimed from the caller. | ||
164 | * | ||
165 | * Context: controller locked, irqs blocked | ||
166 | */ | ||
167 | static void | ||
168 | musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | ||
169 | { | ||
170 | u16 frame; | ||
171 | u32 len; | ||
172 | void *buf; | ||
173 | void __iomem *mbase = musb->mregs; | ||
174 | struct urb *urb = next_urb(qh); | ||
175 | struct musb_hw_ep *hw_ep = qh->hw_ep; | ||
176 | unsigned pipe = urb->pipe; | ||
177 | u8 address = usb_pipedevice(pipe); | ||
178 | int epnum = hw_ep->epnum; | ||
179 | |||
180 | /* initialize software qh state */ | ||
181 | qh->offset = 0; | ||
182 | qh->segsize = 0; | ||
183 | |||
184 | /* gather right source of data */ | ||
185 | switch (qh->type) { | ||
186 | case USB_ENDPOINT_XFER_CONTROL: | ||
187 | /* control transfers always start with SETUP */ | ||
188 | is_in = 0; | ||
189 | hw_ep->out_qh = qh; | ||
190 | musb->ep0_stage = MUSB_EP0_START; | ||
191 | buf = urb->setup_packet; | ||
192 | len = 8; | ||
193 | break; | ||
194 | case USB_ENDPOINT_XFER_ISOC: | ||
195 | qh->iso_idx = 0; | ||
196 | qh->frame = 0; | ||
197 | buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset; | ||
198 | len = urb->iso_frame_desc[0].length; | ||
199 | break; | ||
200 | default: /* bulk, interrupt */ | ||
201 | buf = urb->transfer_buffer; | ||
202 | len = urb->transfer_buffer_length; | ||
203 | } | ||
204 | |||
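	/* the ({ ... }) construct below is a gcc statement expression,
	 * used only to pick a transfer-type suffix for the debug line
	 */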
205 | DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", | ||
206 | qh, urb, address, qh->epnum, | ||
207 | is_in ? "in" : "out", | ||
208 | ({char *s; switch (qh->type) { | ||
209 | case USB_ENDPOINT_XFER_CONTROL: s = ""; break; | ||
210 | case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; | ||
211 | case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; | ||
212 | default: s = "-intr"; break; | ||
213 | }; s; }), | ||
214 | epnum, buf, len); | ||
215 | |||
216 | /* Configure endpoint */ | ||
217 | if (is_in || hw_ep->is_shared_fifo) | ||
218 | hw_ep->in_qh = qh; | ||
219 | else | ||
220 | hw_ep->out_qh = qh; | ||
221 | musb_ep_program(musb, epnum, urb, !is_in, buf, len); | ||
222 | |||
223 | /* transmit may have more work: start it when it is time */ | ||
224 | if (is_in) | ||
225 | return; | ||
226 | |||
227 | /* determine if the time is right for a periodic transfer */ | ||
228 | switch (qh->type) { | ||
229 | case USB_ENDPOINT_XFER_ISOC: | ||
230 | case USB_ENDPOINT_XFER_INT: | ||
231 | DBG(3, "check whether there's still time for periodic Tx\n"); | ||
232 | qh->iso_idx = 0; | ||
233 | frame = musb_readw(mbase, MUSB_FRAME); | ||
234 | /* FIXME this doesn't implement that scheduling policy ... | ||
235 | * or handle framecounter wrapping | ||
236 | */ | ||
237 | if ((urb->transfer_flags & URB_ISO_ASAP) | ||
238 | || (frame >= urb->start_frame)) { | ||
239 | /* REVISIT the SOF irq handler shouldn't duplicate | ||
240 | * this code; and we don't init urb->start_frame... | ||
241 | */ | ||
242 | qh->frame = 0; | ||
243 | goto start; | ||
244 | } else { | ||
245 | qh->frame = urb->start_frame; | ||
246 | /* enable SOF interrupt so we can count down */ | ||
247 | DBG(1, "SOF for %d\n", epnum); | ||
248 | #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ | ||
249 | musb_writeb(mbase, MUSB_INTRUSBE, 0xff); | ||
250 | #endif | ||
251 | } | ||
252 | break; | ||
253 | default: | ||
254 | start: | ||
255 | DBG(4, "Start TX%d %s\n", epnum, | ||
256 | hw_ep->tx_channel ? "dma" : "pio"); | ||
257 | |||
258 | if (!hw_ep->tx_channel) | ||
259 | musb_h_tx_start(hw_ep); | ||
260 | else if (is_cppi_enabled() || tusb_dma_omap()) | ||
261 | cppi_host_txdma_start(hw_ep); | ||
262 | } | ||
263 | } | ||
264 | |||
265 | /* caller owns controller lock, irqs are blocked */ | ||
266 | static void | ||
267 | __musb_giveback(struct musb *musb, struct urb *urb, int status) | ||
268 | __releases(musb->lock) | ||
269 | __acquires(musb->lock) | ||
270 | { | ||
271 | DBG(({ int level; switch (urb->status) { | ||
272 | case 0: | ||
273 | level = 4; | ||
274 | break; | ||
275 | /* common/boring faults */ | ||
276 | case -EREMOTEIO: | ||
277 | case -ESHUTDOWN: | ||
278 | case -ECONNRESET: | ||
279 | case -EPIPE: | ||
280 | level = 3; | ||
281 | break; | ||
282 | default: | ||
283 | level = 2; | ||
284 | break; | ||
285 | }; level; }), | ||
286 | "complete %p (%d), dev%d ep%d%s, %d/%d\n", | ||
287 | urb, urb->status, | ||
288 | usb_pipedevice(urb->pipe), | ||
289 | usb_pipeendpoint(urb->pipe), | ||
290 | usb_pipein(urb->pipe) ? "in" : "out", | ||
291 | urb->actual_length, urb->transfer_buffer_length | ||
292 | ); | ||
293 | |||
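	/* a completion handler may resubmit this URB or queue new ones;
	 * drop the controller lock so that re-entering this HCD from
	 * the callback is safe
	 */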
294 | spin_unlock(&musb->lock); | ||
295 | usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); | ||
296 | spin_lock(&musb->lock); | ||
297 | } | ||
298 | |||
299 | /* for bulk/interrupt endpoints only */ | ||
300 | static inline void | ||
301 | musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb) | ||
302 | { | ||
303 | struct usb_device *udev = urb->dev; | ||
304 | u16 csr; | ||
305 | void __iomem *epio = ep->regs; | ||
306 | struct musb_qh *qh; | ||
307 | |||
308 | /* FIXME: the current Mentor DMA code seems to have | ||
309 | * problems getting toggle correct. | ||
310 | */ | ||
311 | |||
312 | if (is_in || ep->is_shared_fifo) | ||
313 | qh = ep->in_qh; | ||
314 | else | ||
315 | qh = ep->out_qh; | ||
316 | |||
317 | if (!is_in) { | ||
318 | csr = musb_readw(epio, MUSB_TXCSR); | ||
319 | usb_settoggle(udev, qh->epnum, 1, | ||
320 | (csr & MUSB_TXCSR_H_DATATOGGLE) | ||
321 | ? 1 : 0); | ||
322 | } else { | ||
323 | csr = musb_readw(epio, MUSB_RXCSR); | ||
324 | usb_settoggle(udev, qh->epnum, 0, | ||
325 | (csr & MUSB_RXCSR_H_DATATOGGLE) | ||
326 | ? 1 : 0); | ||
327 | } | ||
328 | } | ||
329 | |||
330 | /* caller owns controller lock, irqs are blocked */ | ||
331 | static struct musb_qh * | ||
332 | musb_giveback(struct musb_qh *qh, struct urb *urb, int status) | ||
333 | { | ||
334 | int is_in; | ||
335 | struct musb_hw_ep *ep = qh->hw_ep; | ||
336 | struct musb *musb = ep->musb; | ||
337 | int ready = qh->is_ready; | ||
338 | |||
339 | if (ep->is_shared_fifo) | ||
340 | is_in = 1; | ||
341 | else | ||
342 | is_in = usb_pipein(urb->pipe); | ||
343 | |||
344 | /* save toggle eagerly, for paranoia */ | ||
345 | switch (qh->type) { | ||
346 | case USB_ENDPOINT_XFER_BULK: | ||
347 | case USB_ENDPOINT_XFER_INT: | ||
348 | musb_save_toggle(ep, is_in, urb); | ||
349 | break; | ||
350 | case USB_ENDPOINT_XFER_ISOC: | ||
351 | if (status == 0 && urb->error_count) | ||
352 | status = -EXDEV; | ||
353 | break; | ||
354 | } | ||
355 | |||
356 | usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); | ||
357 | |||
358 | qh->is_ready = 0; | ||
359 | __musb_giveback(musb, urb, status); | ||
360 | qh->is_ready = ready; | ||
361 | |||
362 | /* reclaim resources (and bandwidth) ASAP; deschedule it, and | ||
363 | * invalidate qh as soon as list_empty(&hep->urb_list) | ||
364 | */ | ||
365 | if (list_empty(&qh->hep->urb_list)) { | ||
366 | struct list_head *head; | ||
367 | |||
368 | if (is_in) | ||
369 | ep->rx_reinit = 1; | ||
370 | else | ||
371 | ep->tx_reinit = 1; | ||
372 | |||
373 | /* clobber old pointers to this qh */ | ||
374 | if (is_in || ep->is_shared_fifo) | ||
375 | ep->in_qh = NULL; | ||
376 | else | ||
377 | ep->out_qh = NULL; | ||
378 | qh->hep->hcpriv = NULL; | ||
379 | |||
380 | switch (qh->type) { | ||
381 | |||
382 | case USB_ENDPOINT_XFER_ISOC: | ||
383 | case USB_ENDPOINT_XFER_INT: | ||
384 | /* this is where periodic bandwidth should be | ||
385 | * de-allocated if it's tracked and allocated; | ||
386 | * and where we'd update the schedule tree... | ||
387 | */ | ||
388 | musb->periodic[ep->epnum] = NULL; | ||
389 | kfree(qh); | ||
390 | qh = NULL; | ||
391 | break; | ||
392 | |||
393 | case USB_ENDPOINT_XFER_CONTROL: | ||
394 | case USB_ENDPOINT_XFER_BULK: | ||
395 | /* fifo policy for these lists, except that NAKing | ||
396 | * should rotate a qh to the end (for fairness). | ||
397 | */ | ||
398 | head = qh->ring.prev; | ||
399 | list_del(&qh->ring); | ||
400 | kfree(qh); | ||
401 | qh = first_qh(head); | ||
402 | break; | ||
403 | } | ||
404 | } | ||
405 | return qh; | ||
406 | } | ||
407 | |||
408 | /* | ||
409 | * Advance this hardware endpoint's queue, completing the specified urb and | ||
410 | * advancing to either the next urb queued to that qh, or else invalidating | ||
411 | * that qh and advancing to the next qh scheduled after the current one. | ||
412 | * | ||
413 | * Context: caller owns controller lock, irqs are blocked | ||
414 | */ | ||
415 | static void | ||
416 | musb_advance_schedule(struct musb *musb, struct urb *urb, | ||
417 | struct musb_hw_ep *hw_ep, int is_in) | ||
418 | { | ||
419 | struct musb_qh *qh; | ||
420 | |||
421 | if (is_in || hw_ep->is_shared_fifo) | ||
422 | qh = hw_ep->in_qh; | ||
423 | else | ||
424 | qh = hw_ep->out_qh; | ||
425 | |||
426 | if (urb->status == -EINPROGRESS) | ||
427 | qh = musb_giveback(qh, urb, 0); | ||
428 | else | ||
429 | qh = musb_giveback(qh, urb, urb->status); | ||
430 | |||
431 | if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) { | ||
432 | DBG(4, "... next ep%d %cX urb %p\n", | ||
433 | hw_ep->epnum, is_in ? 'R' : 'T', | ||
434 | next_urb(qh)); | ||
435 | musb_start_urb(musb, is_in, qh); | ||
436 | } | ||
437 | } | ||
438 | |||
439 | static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) | ||
440 | { | ||
441 | /* we don't want fifo to fill itself again; | ||
442 | * ignore dma (various models), | ||
443 | * leave toggle alone (may not have been saved yet) | ||
444 | */ | ||
445 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; | ||
446 | csr &= ~(MUSB_RXCSR_H_REQPKT | ||
447 | | MUSB_RXCSR_H_AUTOREQ | ||
448 | | MUSB_RXCSR_AUTOCLEAR); | ||
449 | |||
450 | /* write 2x to allow double buffering */ | ||
451 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
452 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
453 | |||
454 | /* flush writebuffer */ | ||
455 | return musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
456 | } | ||
457 | |||
458 | /* | ||
459 | * PIO RX for a packet (or part of it). | ||
460 | */ | ||
461 | static bool | ||
462 | musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) | ||
463 | { | ||
464 | u16 rx_count; | ||
465 | u8 *buf; | ||
466 | u16 csr; | ||
467 | bool done = false; | ||
468 | u32 length; | ||
469 | int do_flush = 0; | ||
470 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
471 | void __iomem *epio = hw_ep->regs; | ||
472 | struct musb_qh *qh = hw_ep->in_qh; | ||
473 | int pipe = urb->pipe; | ||
474 | void *buffer = urb->transfer_buffer; | ||
475 | |||
476 | /* musb_ep_select(mbase, epnum); */ | ||
477 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | ||
478 | DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, | ||
479 | urb->transfer_buffer, qh->offset, | ||
480 | urb->transfer_buffer_length); | ||
481 | |||
482 | /* unload FIFO */ | ||
483 | if (usb_pipeisoc(pipe)) { | ||
484 | int status = 0; | ||
485 | struct usb_iso_packet_descriptor *d; | ||
486 | |||
487 | if (iso_err) { | ||
488 | status = -EILSEQ; | ||
489 | urb->error_count++; | ||
490 | } | ||
491 | |||
492 | d = urb->iso_frame_desc + qh->iso_idx; | ||
493 | buf = buffer + d->offset; | ||
494 | length = d->length; | ||
495 | if (rx_count > length) { | ||
496 | if (status == 0) { | ||
497 | status = -EOVERFLOW; | ||
498 | urb->error_count++; | ||
499 | } | ||
500 | DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); | ||
501 | do_flush = 1; | ||
502 | } else | ||
503 | length = rx_count; | ||
504 | urb->actual_length += length; | ||
505 | d->actual_length = length; | ||
506 | |||
507 | d->status = status; | ||
508 | |||
509 | /* see if we are done */ | ||
510 | done = (++qh->iso_idx >= urb->number_of_packets); | ||
511 | } else { | ||
512 | /* non-isoch */ | ||
513 | buf = buffer + qh->offset; | ||
514 | length = urb->transfer_buffer_length - qh->offset; | ||
515 | if (rx_count > length) { | ||
516 | if (urb->status == -EINPROGRESS) | ||
517 | urb->status = -EOVERFLOW; | ||
518 | DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); | ||
519 | do_flush = 1; | ||
520 | } else | ||
521 | length = rx_count; | ||
522 | urb->actual_length += length; | ||
523 | qh->offset += length; | ||
524 | |||
525 | /* see if we are done */ | ||
526 | done = (urb->actual_length == urb->transfer_buffer_length) | ||
527 | || (rx_count < qh->maxpacket) | ||
528 | || (urb->status != -EINPROGRESS); | ||
529 | if (done | ||
530 | && (urb->status == -EINPROGRESS) | ||
531 | && (urb->transfer_flags & URB_SHORT_NOT_OK) | ||
532 | && (urb->actual_length | ||
533 | < urb->transfer_buffer_length)) | ||
534 | urb->status = -EREMOTEIO; | ||
535 | } | ||
536 | |||
537 | musb_read_fifo(hw_ep, length, buf); | ||
538 | |||
539 | csr = musb_readw(epio, MUSB_RXCSR); | ||
540 | csr |= MUSB_RXCSR_H_WZC_BITS; | ||
541 | if (unlikely(do_flush)) | ||
542 | musb_h_flush_rxfifo(hw_ep, csr); | ||
543 | else { | ||
544 | /* REVISIT this assumes AUTOCLEAR is never set */ | ||
545 | csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); | ||
546 | if (!done) | ||
547 | csr |= MUSB_RXCSR_H_REQPKT; | ||
548 | musb_writew(epio, MUSB_RXCSR, csr); | ||
549 | } | ||
550 | |||
551 | return done; | ||
552 | } | ||
553 | |||
554 | /* we don't always need to reinit a given side of an endpoint... | ||
555 | * when we do, use tx/rx reinit routine and then construct a new CSR | ||
556 | * to address data toggle, NYET, and DMA or PIO. | ||
557 | * | ||
558 | * it's possible that driver bugs (especially for DMA) or aborting a | ||
559 | * transfer might have left the endpoint busier than it should be. | ||
560 | * the busy/not-empty tests are basically paranoia. | ||
561 | */ | ||
562 | static void | ||
563 | musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) | ||
564 | { | ||
565 | u16 csr; | ||
566 | |||
567 | /* NOTE: we know the "rx" fifo reinit never triggers for ep0. | ||
568 | * That always uses tx_reinit since ep0 repurposes TX register | ||
569 | * offsets; the initial SETUP packet is also a kind of OUT. | ||
570 | */ | ||
571 | |||
572 | /* if programmed for Tx, put it in RX mode */ | ||
573 | if (ep->is_shared_fifo) { | ||
574 | csr = musb_readw(ep->regs, MUSB_TXCSR); | ||
575 | if (csr & MUSB_TXCSR_MODE) { | ||
576 | musb_h_tx_flush_fifo(ep); | ||
577 | musb_writew(ep->regs, MUSB_TXCSR, | ||
578 | MUSB_TXCSR_FRCDATATOG); | ||
579 | } | ||
580 | /* clear mode (and everything else) to enable Rx */ | ||
581 | musb_writew(ep->regs, MUSB_TXCSR, 0); | ||
582 | |||
583 | /* scrub all previous state, clearing toggle */ | ||
584 | } else { | ||
585 | csr = musb_readw(ep->regs, MUSB_RXCSR); | ||
586 | if (csr & MUSB_RXCSR_RXPKTRDY) | ||
587 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, | ||
588 | musb_readw(ep->regs, MUSB_RXCOUNT)); | ||
589 | |||
590 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); | ||
591 | } | ||
592 | |||
593 | /* target addr and (for multipoint) hub addr/port */ | ||
594 | if (musb->is_multipoint) { | ||
595 | musb_writeb(ep->target_regs, MUSB_RXFUNCADDR, | ||
596 | qh->addr_reg); | ||
597 | musb_writeb(ep->target_regs, MUSB_RXHUBADDR, | ||
598 | qh->h_addr_reg); | ||
599 | musb_writeb(ep->target_regs, MUSB_RXHUBPORT, | ||
600 | qh->h_port_reg); | ||
601 | } else | ||
602 | musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); | ||
603 | |||
604 | /* protocol/endpoint, interval/NAKlimit, i/o size */ | ||
605 | musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); | ||
606 | musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); | ||
607 | /* NOTE: bulk combining rewrites high bits of maxpacket */ | ||
608 | musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket); | ||
609 | |||
610 | ep->rx_reinit = 0; | ||
611 | } | ||
612 | |||
613 | |||
614 | /* | ||
615 | * Program an HDRC endpoint as per the given URB | ||
616 | * Context: irqs blocked, controller lock held | ||
617 | */ | ||
618 | static void musb_ep_program(struct musb *musb, u8 epnum, | ||
619 | struct urb *urb, unsigned int is_out, | ||
620 | u8 *buf, u32 len) | ||
621 | { | ||
622 | struct dma_controller *dma_controller; | ||
623 | struct dma_channel *dma_channel; | ||
624 | u8 dma_ok; | ||
625 | void __iomem *mbase = musb->mregs; | ||
626 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
627 | void __iomem *epio = hw_ep->regs; | ||
628 | struct musb_qh *qh; | ||
629 | u16 packet_sz; | ||
630 | |||
631 | if (!is_out || hw_ep->is_shared_fifo) | ||
632 | qh = hw_ep->in_qh; | ||
633 | else | ||
634 | qh = hw_ep->out_qh; | ||
635 | |||
636 | packet_sz = qh->maxpacket; | ||
637 | |||
638 | DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s " | ||
639 | "h_addr%02x h_port%02x bytes %d\n", | ||
640 | is_out ? "-->" : "<--", | ||
641 | epnum, urb, urb->dev->speed, | ||
642 | qh->addr_reg, qh->epnum, is_out ? "out" : "in", | ||
643 | qh->h_addr_reg, qh->h_port_reg, | ||
644 | len); | ||
645 | |||
646 | musb_ep_select(mbase, epnum); | ||
647 | |||
648 | /* candidate for DMA? */ | ||
649 | dma_controller = musb->dma_controller; | ||
650 | if (is_dma_capable() && epnum && dma_controller) { | ||
651 | dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; | ||
652 | if (!dma_channel) { | ||
653 | dma_channel = dma_controller->channel_alloc( | ||
654 | dma_controller, hw_ep, is_out); | ||
655 | if (is_out) | ||
656 | hw_ep->tx_channel = dma_channel; | ||
657 | else | ||
658 | hw_ep->rx_channel = dma_channel; | ||
659 | } | ||
660 | } else | ||
661 | dma_channel = NULL; | ||
662 | |||
663 | /* make sure we clear DMAEnab, autoSet bits from previous run */ | ||
664 | |||
665 | /* OUT/transmit/EP0 or IN/receive? */ | ||
666 | if (is_out) { | ||
667 | u16 csr; | ||
668 | u16 int_txe; | ||
669 | u16 load_count; | ||
670 | |||
671 | csr = musb_readw(epio, MUSB_TXCSR); | ||
672 | |||
673 | /* disable interrupt in case we flush */ | ||
674 | int_txe = musb_readw(mbase, MUSB_INTRTXE); | ||
675 | musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); | ||
676 | |||
677 | /* general endpoint setup */ | ||
678 | if (epnum) { | ||
679 | /* ASSERT: TXCSR_DMAENAB was already cleared */ | ||
680 | |||
681 | /* flush all old state, set default */ | ||
682 | musb_h_tx_flush_fifo(hw_ep); | ||
683 | csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT | ||
684 | | MUSB_TXCSR_DMAMODE | ||
685 | | MUSB_TXCSR_FRCDATATOG | ||
686 | | MUSB_TXCSR_H_RXSTALL | ||
687 | | MUSB_TXCSR_H_ERROR | ||
688 | | MUSB_TXCSR_TXPKTRDY | ||
689 | ); | ||
690 | csr |= MUSB_TXCSR_MODE; | ||
691 | |||
692 | if (usb_gettoggle(urb->dev, | ||
693 | qh->epnum, 1)) | ||
694 | csr |= MUSB_TXCSR_H_WR_DATATOGGLE | ||
695 | | MUSB_TXCSR_H_DATATOGGLE; | ||
696 | else | ||
697 | csr |= MUSB_TXCSR_CLRDATATOG; | ||
698 | |||
699 | /* twice in case of double packet buffering */ | ||
700 | musb_writew(epio, MUSB_TXCSR, csr); | ||
701 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
702 | musb_writew(epio, MUSB_TXCSR, csr); | ||
703 | csr = musb_readw(epio, MUSB_TXCSR); | ||
704 | } else { | ||
705 | /* endpoint 0: just flush */ | ||
706 | musb_writew(epio, MUSB_CSR0, | ||
707 | csr | MUSB_CSR0_FLUSHFIFO); | ||
708 | musb_writew(epio, MUSB_CSR0, | ||
709 | csr | MUSB_CSR0_FLUSHFIFO); | ||
710 | } | ||
711 | |||
712 | /* target addr and (for multipoint) hub addr/port */ | ||
713 | if (musb->is_multipoint) { | ||
714 | musb_writeb(mbase, | ||
715 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR), | ||
716 | qh->addr_reg); | ||
717 | musb_writeb(mbase, | ||
718 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR), | ||
719 | qh->h_addr_reg); | ||
720 | musb_writeb(mbase, | ||
721 | MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT), | ||
722 | qh->h_port_reg); | ||
723 | /* FIXME if !epnum, do the same for RX ... */ | ||
724 | } else | ||
725 | musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); | ||
726 | |||
727 | /* protocol/endpoint/interval/NAKlimit */ | ||
728 | if (epnum) { | ||
729 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); | ||
730 | if (can_bulk_split(musb, qh->type)) | ||
731 | musb_writew(epio, MUSB_TXMAXP, | ||
732 | packet_sz | ||
733 | | ((hw_ep->max_packet_sz_tx / | ||
734 | packet_sz) - 1) << 11); | ||
735 | else | ||
736 | musb_writew(epio, MUSB_TXMAXP, | ||
737 | packet_sz); | ||
738 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); | ||
739 | } else { | ||
740 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); | ||
741 | if (musb->is_multipoint) | ||
742 | musb_writeb(epio, MUSB_TYPE0, | ||
743 | qh->type_reg); | ||
744 | } | ||
745 | |||
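		/* a fifo sized for bulk combining/splitting can be
		 * preloaded with more than one usb-size packet;
		 * otherwise load at most one packet now
		 */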
746 | if (can_bulk_split(musb, qh->type)) | ||
747 | load_count = min((u32) hw_ep->max_packet_sz_tx, | ||
748 | len); | ||
749 | else | ||
750 | load_count = min((u32) packet_sz, len); | ||
751 | |||
752 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
753 | if (dma_channel) { | ||
754 | |||
755 | /* clear previous state */ | ||
756 | csr = musb_readw(epio, MUSB_TXCSR); | ||
757 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
758 | | MUSB_TXCSR_DMAMODE | ||
759 | | MUSB_TXCSR_DMAENAB); | ||
760 | csr |= MUSB_TXCSR_MODE; | ||
761 | musb_writew(epio, MUSB_TXCSR, | ||
762 | csr | MUSB_TXCSR_MODE); | ||
763 | |||
764 | qh->segsize = min(len, dma_channel->max_len); | ||
765 | |||
766 | if (qh->segsize <= packet_sz) | ||
767 | dma_channel->desired_mode = 0; | ||
768 | else | ||
769 | dma_channel->desired_mode = 1; | ||
770 | |||
771 | |||
772 | if (dma_channel->desired_mode == 0) { | ||
773 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
774 | | MUSB_TXCSR_DMAMODE); | ||
775 | csr |= (MUSB_TXCSR_DMAENAB); | ||
776 | /* against programming guide */ | ||
777 | } else | ||
778 | csr |= (MUSB_TXCSR_AUTOSET | ||
779 | | MUSB_TXCSR_DMAENAB | ||
780 | | MUSB_TXCSR_DMAMODE); | ||
781 | |||
782 | musb_writew(epio, MUSB_TXCSR, csr); | ||
783 | |||
784 | dma_ok = dma_controller->channel_program( | ||
785 | dma_channel, packet_sz, | ||
786 | dma_channel->desired_mode, | ||
787 | urb->transfer_dma, | ||
788 | qh->segsize); | ||
789 | if (dma_ok) { | ||
790 | load_count = 0; | ||
791 | } else { | ||
792 | dma_controller->channel_release(dma_channel); | ||
793 | if (is_out) | ||
794 | hw_ep->tx_channel = NULL; | ||
795 | else | ||
796 | hw_ep->rx_channel = NULL; | ||
797 | dma_channel = NULL; | ||
798 | } | ||
799 | } | ||
800 | #endif | ||
801 | |||
802 | /* candidate for DMA */ | ||
803 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | ||
804 | |||
805 | /* program endpoint CSRs first, then setup DMA. | ||
806 | * assume CPPI setup succeeds. | ||
807 | * defer enabling dma. | ||
808 | */ | ||
809 | csr = musb_readw(epio, MUSB_TXCSR); | ||
810 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
811 | | MUSB_TXCSR_DMAMODE | ||
812 | | MUSB_TXCSR_DMAENAB); | ||
813 | csr |= MUSB_TXCSR_MODE; | ||
814 | musb_writew(epio, MUSB_TXCSR, | ||
815 | csr | MUSB_TXCSR_MODE); | ||
816 | |||
817 | dma_channel->actual_len = 0L; | ||
818 | qh->segsize = len; | ||
819 | |||
820 | /* TX uses "rndis" mode automatically, but needs help | ||
821 | * to identify the zero-length-final-packet case. | ||
822 | */ | ||
823 | dma_ok = dma_controller->channel_program( | ||
824 | dma_channel, packet_sz, | ||
825 | (urb->transfer_flags | ||
826 | & URB_ZERO_PACKET) | ||
827 | == URB_ZERO_PACKET, | ||
828 | urb->transfer_dma, | ||
829 | qh->segsize); | ||
830 | if (dma_ok) { | ||
831 | load_count = 0; | ||
832 | } else { | ||
833 | dma_controller->channel_release(dma_channel); | ||
834 | hw_ep->tx_channel = NULL; | ||
835 | dma_channel = NULL; | ||
836 | |||
837 | /* REVISIT there's an error path here that | ||
838 | * needs handling: can't do dma, but | ||
839 | * there's no pio buffer address... | ||
840 | */ | ||
841 | } | ||
842 | } | ||
843 | |||
844 | if (load_count) { | ||
845 | /* ASSERT: TXCSR_DMAENAB was already cleared */ | ||
846 | |||
847 | /* PIO to load FIFO */ | ||
848 | qh->segsize = load_count; | ||
849 | musb_write_fifo(hw_ep, load_count, buf); | ||
850 | csr = musb_readw(epio, MUSB_TXCSR); | ||
851 | csr &= ~(MUSB_TXCSR_DMAENAB | ||
852 | | MUSB_TXCSR_DMAMODE | ||
853 | | MUSB_TXCSR_AUTOSET); | ||
854 | /* write CSR */ | ||
855 | csr |= MUSB_TXCSR_MODE; | ||
856 | |||
857 | if (epnum) | ||
858 | musb_writew(epio, MUSB_TXCSR, csr); | ||
859 | } | ||
860 | |||
861 | /* re-enable interrupt */ | ||
862 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | ||
863 | |||
864 | /* IN/receive */ | ||
865 | } else { | ||
866 | u16 csr; | ||
867 | |||
868 | if (hw_ep->rx_reinit) { | ||
869 | musb_rx_reinit(musb, qh, hw_ep); | ||
870 | |||
871 | /* init new state: toggle and NYET, maybe DMA later */ | ||
872 | if (usb_gettoggle(urb->dev, qh->epnum, 0)) | ||
873 | csr = MUSB_RXCSR_H_WR_DATATOGGLE | ||
874 | | MUSB_RXCSR_H_DATATOGGLE; | ||
875 | else | ||
876 | csr = 0; | ||
877 | if (qh->type == USB_ENDPOINT_XFER_INT) | ||
878 | csr |= MUSB_RXCSR_DISNYET; | ||
879 | |||
880 | } else { | ||
881 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
882 | |||
883 | if (csr & (MUSB_RXCSR_RXPKTRDY | ||
884 | | MUSB_RXCSR_DMAENAB | ||
885 | | MUSB_RXCSR_H_REQPKT)) | ||
886 | ERR("broken !rx_reinit, ep%d csr %04x\n", | ||
887 | hw_ep->epnum, csr); | ||
888 | |||
889 | /* scrub any stale state, leaving toggle alone */ | ||
890 | csr &= MUSB_RXCSR_DISNYET; | ||
891 | } | ||
892 | |||
893 | /* kick things off */ | ||
894 | |||
895 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | ||
896 | /* candidate for DMA */ | ||
897 | if (dma_channel) { | ||
898 | dma_channel->actual_len = 0L; | ||
899 | qh->segsize = len; | ||
900 | |||
901 | /* AUTOREQ is in a DMA register */ | ||
902 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
903 | csr = musb_readw(hw_ep->regs, | ||
904 | MUSB_RXCSR); | ||
905 | |||
906 | /* unless caller treats short rx transfers as | ||
907 | * errors, we dare not queue multiple transfers. | ||
908 | */ | ||
909 | dma_ok = dma_controller->channel_program( | ||
910 | dma_channel, packet_sz, | ||
911 | !(urb->transfer_flags | ||
912 | & URB_SHORT_NOT_OK), | ||
913 | urb->transfer_dma, | ||
914 | qh->segsize); | ||
915 | if (!dma_ok) { | ||
916 | dma_controller->channel_release( | ||
917 | dma_channel); | ||
918 | hw_ep->rx_channel = NULL; | ||
919 | dma_channel = NULL; | ||
920 | } else | ||
921 | csr |= MUSB_RXCSR_DMAENAB; | ||
922 | } | ||
923 | } | ||
924 | |||
925 | csr |= MUSB_RXCSR_H_REQPKT; | ||
926 | DBG(7, "RXCSR%d := %04x\n", epnum, csr); | ||
927 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | ||
928 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | ||
929 | } | ||
930 | } | ||
931 | |||
932 | |||
933 | /* | ||
934 | * Service the default endpoint (ep0) as host. | ||
935 | * Return true until it's time to start the status stage. | ||
936 | */ | ||
937 | static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) | ||
938 | { | ||
939 | bool more = false; | ||
940 | u8 *fifo_dest = NULL; | ||
941 | u16 fifo_count = 0; | ||
942 | struct musb_hw_ep *hw_ep = musb->control_ep; | ||
943 | struct musb_qh *qh = hw_ep->in_qh; | ||
944 | struct usb_ctrlrequest *request; | ||
945 | |||
946 | switch (musb->ep0_stage) { | ||
947 | case MUSB_EP0_IN: | ||
948 | fifo_dest = urb->transfer_buffer + urb->actual_length; | ||
949 | fifo_count = min(len, ((u16) (urb->transfer_buffer_length | ||
950 | - urb->actual_length))); | ||
951 | if (fifo_count < len) | ||
952 | urb->status = -EOVERFLOW; | ||
953 | |||
954 | musb_read_fifo(hw_ep, fifo_count, fifo_dest); | ||
955 | |||
956 | urb->actual_length += fifo_count; | ||
957 | if (len < qh->maxpacket) { | ||
958 | /* always terminate on short read; it's | ||
959 | * rarely reported as an error. | ||
960 | */ | ||
961 | } else if (urb->actual_length < | ||
962 | urb->transfer_buffer_length) | ||
963 | more = true; | ||
964 | break; | ||
965 | case MUSB_EP0_START: | ||
966 | request = (struct usb_ctrlrequest *) urb->setup_packet; | ||
967 | |||
968 | if (!request->wLength) { | ||
969 | DBG(4, "start no-DATA\n"); | ||
970 | break; | ||
971 | } else if (request->bRequestType & USB_DIR_IN) { | ||
972 | DBG(4, "start IN-DATA\n"); | ||
973 | musb->ep0_stage = MUSB_EP0_IN; | ||
974 | more = true; | ||
975 | break; | ||
976 | } else { | ||
977 | DBG(4, "start OUT-DATA\n"); | ||
978 | musb->ep0_stage = MUSB_EP0_OUT; | ||
979 | more = true; | ||
980 | } | ||
981 | /* FALLTHROUGH */ | ||
982 | case MUSB_EP0_OUT: | ||
983 | fifo_count = min(qh->maxpacket, ((u16) | ||
984 | (urb->transfer_buffer_length | ||
985 | - urb->actual_length))); | ||
986 | |||
987 | if (fifo_count) { | ||
988 | fifo_dest = (u8 *) (urb->transfer_buffer | ||
989 | + urb->actual_length); | ||
990 | DBG(3, "Sending %d bytes to %p\n", | ||
991 | fifo_count, fifo_dest); | ||
992 | musb_write_fifo(hw_ep, fifo_count, fifo_dest); | ||
993 | |||
994 | urb->actual_length += fifo_count; | ||
995 | more = true; | ||
996 | } | ||
997 | break; | ||
998 | default: | ||
999 | ERR("bogus ep0 stage %d\n", musb->ep0_stage); | ||
1000 | break; | ||
1001 | } | ||
1002 | |||
1003 | return more; | ||
1004 | } | ||
1005 | |||
1006 | /* | ||
1007 | * Handle default endpoint interrupt as host. Only called in IRQ time | ||
1008 | * from the LinuxIsr() interrupt service routine. | ||
1009 | * | ||
1010 | * called with controller irqlocked | ||
1011 | */ | ||
1012 | irqreturn_t musb_h_ep0_irq(struct musb *musb) | ||
1013 | { | ||
1014 | struct urb *urb; | ||
1015 | u16 csr, len; | ||
1016 | int status = 0; | ||
1017 | void __iomem *mbase = musb->mregs; | ||
1018 | struct musb_hw_ep *hw_ep = musb->control_ep; | ||
1019 | void __iomem *epio = hw_ep->regs; | ||
1020 | struct musb_qh *qh = hw_ep->in_qh; | ||
1021 | bool complete = false; | ||
1022 | irqreturn_t retval = IRQ_NONE; | ||
1023 | |||
1024 | /* ep0 only has one queue, "in" */ | ||
1025 | urb = next_urb(qh); | ||
1026 | |||
1027 | musb_ep_select(mbase, 0); | ||
1028 | csr = musb_readw(epio, MUSB_CSR0); | ||
1029 | len = (csr & MUSB_CSR0_RXPKTRDY) | ||
1030 | ? musb_readb(epio, MUSB_COUNT0) | ||
1031 | : 0; | ||
1032 | |||
1033 | DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", | ||
1034 | csr, qh, len, urb, musb->ep0_stage); | ||
1035 | |||
1036 | /* if we just did status stage, we are done */ | ||
1037 | if (MUSB_EP0_STATUS == musb->ep0_stage) { | ||
1038 | retval = IRQ_HANDLED; | ||
1039 | complete = true; | ||
1040 | } | ||
1041 | |||
1042 | /* prepare status */ | ||
1043 | if (csr & MUSB_CSR0_H_RXSTALL) { | ||
1044 | DBG(6, "STALLING ENDPOINT\n"); | ||
1045 | status = -EPIPE; | ||
1046 | |||
1047 | } else if (csr & MUSB_CSR0_H_ERROR) { | ||
1048 | DBG(2, "no response, csr0 %04x\n", csr); | ||
1049 | status = -EPROTO; | ||
1050 | |||
1051 | } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { | ||
1052 | DBG(2, "control NAK timeout\n"); | ||
1053 | |||
1054 | /* NOTE: this code path would be a good place to PAUSE a | ||
1055 | * control transfer, if another one is queued, so that | ||
1056 | * ep0 is more likely to stay busy. | ||
1057 | * | ||
1058 | * if (qh->ring.next != &musb->control), then | ||
1059 | * we have a candidate... NAKing is *NOT* an error | ||
1060 | */ | ||
1061 | musb_writew(epio, MUSB_CSR0, 0); | ||
1062 | retval = IRQ_HANDLED; | ||
1063 | } | ||
1064 | |||
1065 | if (status) { | ||
1066 | DBG(6, "aborting\n"); | ||
1067 | retval = IRQ_HANDLED; | ||
1068 | if (urb) | ||
1069 | urb->status = status; | ||
1070 | complete = true; | ||
1071 | |||
1072 | /* use the proper sequence to abort the transfer */ | ||
1073 | if (csr & MUSB_CSR0_H_REQPKT) { | ||
1074 | csr &= ~MUSB_CSR0_H_REQPKT; | ||
1075 | musb_writew(epio, MUSB_CSR0, csr); | ||
1076 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | ||
1077 | musb_writew(epio, MUSB_CSR0, csr); | ||
1078 | } else { | ||
1079 | csr |= MUSB_CSR0_FLUSHFIFO; | ||
1080 | musb_writew(epio, MUSB_CSR0, csr); | ||
1081 | musb_writew(epio, MUSB_CSR0, csr); | ||
1082 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | ||
1083 | musb_writew(epio, MUSB_CSR0, csr); | ||
1084 | } | ||
1085 | |||
1086 | musb_writeb(epio, MUSB_NAKLIMIT0, 0); | ||
1087 | |||
1088 | /* clear it */ | ||
1089 | musb_writew(epio, MUSB_CSR0, 0); | ||
1090 | } | ||
1091 | |||
1092 | if (unlikely(!urb)) { | ||
1093 | /* stop endpoint since we have no place for its data, this | ||
1094 | * SHOULD NEVER HAPPEN! */ | ||
1095 | ERR("no URB for end 0\n"); | ||
1096 | |||
1097 | musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); | ||
1098 | musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); | ||
1099 | musb_writew(epio, MUSB_CSR0, 0); | ||
1100 | |||
1101 | goto done; | ||
1102 | } | ||
1103 | |||
1104 | if (!complete) { | ||
1105 | /* call common logic and prepare response */ | ||
1106 | if (musb_h_ep0_continue(musb, len, urb)) { | ||
1107 | /* more packets required */ | ||
1108 | csr = (MUSB_EP0_IN == musb->ep0_stage) | ||
1109 | ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; | ||
1110 | } else { | ||
1111 | /* data transfer complete; perform status phase */ | ||
1112 | if (usb_pipeout(urb->pipe) | ||
1113 | || !urb->transfer_buffer_length) | ||
1114 | csr = MUSB_CSR0_H_STATUSPKT | ||
1115 | | MUSB_CSR0_H_REQPKT; | ||
1116 | else | ||
1117 | csr = MUSB_CSR0_H_STATUSPKT | ||
1118 | | MUSB_CSR0_TXPKTRDY; | ||
1119 | |||
1120 | /* flag status stage */ | ||
1121 | musb->ep0_stage = MUSB_EP0_STATUS; | ||
1122 | |||
1123 | DBG(5, "ep0 STATUS, csr %04x\n", csr); | ||
1124 | |||
1125 | } | ||
1126 | musb_writew(epio, MUSB_CSR0, csr); | ||
1127 | retval = IRQ_HANDLED; | ||
1128 | } else | ||
1129 | musb->ep0_stage = MUSB_EP0_IDLE; | ||
1130 | |||
1131 | /* call completion handler if done */ | ||
1132 | if (complete) | ||
1133 | musb_advance_schedule(musb, urb, hw_ep, 1); | ||
1134 | done: | ||
1135 | return retval; | ||
1136 | } | ||
1137 | |||
1138 | |||
1139 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
1140 | |||
1141 | /* Host side TX (OUT) using Mentor DMA works as follows: | ||
1142 | submit_urb -> | ||
1143 | - if queue was empty, Program Endpoint | ||
1144 | - ... which starts DMA to fifo in mode 1 or 0 | ||
1145 | |||
1146 | DMA Isr (transfer complete) -> TxAvail() | ||
1147 | - Stop DMA (~DmaEnab) (<--- Alert ... currently happens | ||
1148 | only in musb_cleanup_urb) | ||
1149 | - TxPktRdy has to be set in mode 0 or for | ||
1150 | short packets in mode 1. | ||
1151 | */ | ||
1152 | |||
1153 | #endif | ||
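/*
 * Editor's note -- a sketch of the TxAvail() rule stated above, as a
 * hypothetical helper (never built). Mode 0 always needs TxPktRdy set
 * by software; mode 1 needs it only for a short final packet.
 */
#if 0
static void tx_dma_complete_sketch(struct musb_hw_ep *hw_ep,
		struct dma_channel *dma, u16 packet_sz)
{
	u16 csr = musb_readw(hw_ep->regs, MUSB_TXCSR);

	/* stop DMA (~DmaEnab) first, per the alert above */
	csr &= ~MUSB_TXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_TXCSR, csr);

	/* then hand-set TXPKTRDY where the hardware won't do it */
	if (dma->desired_mode == 0 || (dma->actual_len % packet_sz))
		musb_writew(hw_ep->regs, MUSB_TXCSR,
				csr | MUSB_TXCSR_TXPKTRDY);
}
#endif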
1154 | |||
1155 | /* Service a Tx-Available or dma completion irq for the endpoint */ | ||
1156 | void musb_host_tx(struct musb *musb, u8 epnum) | ||
1157 | { | ||
1158 | int pipe; | ||
1159 | bool done = false; | ||
1160 | u16 tx_csr; | ||
1161 | size_t wLength = 0; | ||
1162 | u8 *buf = NULL; | ||
1163 | struct urb *urb; | ||
1164 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
1165 | void __iomem *epio = hw_ep->regs; | ||
1166 | struct musb_qh *qh = hw_ep->out_qh; | ||
1167 | u32 status = 0; | ||
1168 | void __iomem *mbase = musb->mregs; | ||
1169 | struct dma_channel *dma; | ||
1170 | |||
1171 | urb = next_urb(qh); | ||
1172 | |||
1173 | musb_ep_select(mbase, epnum); | ||
1174 | tx_csr = musb_readw(epio, MUSB_TXCSR); | ||
1175 | |||
1176 | /* with CPPI, DMA sometimes triggers "extra" irqs */ | ||
1177 | if (!urb) { | ||
1178 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | ||
1179 | goto finish; | ||
1180 | } | ||
1181 | |||
1182 | pipe = urb->pipe; | ||
1183 | dma = is_dma_capable() ? hw_ep->tx_channel : NULL; | ||
1184 | DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, | ||
1185 | dma ? ", dma" : ""); | ||
1186 | |||
1187 | /* check for errors */ | ||
1188 | if (tx_csr & MUSB_TXCSR_H_RXSTALL) { | ||
1189 | /* dma was disabled, fifo flushed */ | ||
1190 | DBG(3, "TX end %d stall\n", epnum); | ||
1191 | |||
1192 | /* stall; record URB status */ | ||
1193 | status = -EPIPE; | ||
1194 | |||
1195 | } else if (tx_csr & MUSB_TXCSR_H_ERROR) { | ||
1196 | /* (NON-ISO) dma was disabled, fifo flushed */ | ||
1197 | DBG(3, "TX 3strikes on ep=%d\n", epnum); | ||
1198 | |||
1199 | status = -ETIMEDOUT; | ||
1200 | |||
1201 | } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { | ||
1202 | DBG(6, "TX end=%d device not responding\n", epnum); | ||
1203 | |||
1204 | /* NOTE: this code path would be a good place to PAUSE a | ||
1205 | * transfer, if there's some other (nonperiodic) tx urb | ||
1206 | * that could use this fifo. (dma complicates it...) | ||
1207 | * | ||
1208 | * if (bulk && qh->ring.next != &musb->out_bulk), then | ||
1209 | * we have a candidate... NAKing is *NOT* an error | ||
1210 | */ | ||
1211 | musb_ep_select(mbase, epnum); | ||
1212 | musb_writew(epio, MUSB_TXCSR, | ||
1213 | MUSB_TXCSR_H_WZC_BITS | ||
1214 | | MUSB_TXCSR_TXPKTRDY); | ||
1215 | goto finish; | ||
1216 | } | ||
1217 | |||
1218 | if (status) { | ||
1219 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
1220 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
1221 | (void) musb->dma_controller->channel_abort(dma); | ||
1222 | } | ||
1223 | |||
1224 | /* do the proper sequence to abort the transfer in the | ||
1225 | * usb core; the dma engine should already be stopped. | ||
1226 | */ | ||
1227 | musb_h_tx_flush_fifo(hw_ep); | ||
1228 | tx_csr &= ~(MUSB_TXCSR_AUTOSET | ||
1229 | | MUSB_TXCSR_DMAENAB | ||
1230 | | MUSB_TXCSR_H_ERROR | ||
1231 | | MUSB_TXCSR_H_RXSTALL | ||
1232 | | MUSB_TXCSR_H_NAKTIMEOUT | ||
1233 | ); | ||
1234 | |||
1235 | musb_ep_select(mbase, epnum); | ||
1236 | musb_writew(epio, MUSB_TXCSR, tx_csr); | ||
1237 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
1238 | musb_writew(epio, MUSB_TXCSR, tx_csr); | ||
1239 | musb_writeb(epio, MUSB_TXINTERVAL, 0); | ||
1240 | |||
1241 | done = true; | ||
1242 | } | ||
1243 | |||
1244 | /* second cppi case */ | ||
1245 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
1246 | DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | ||
1247 | goto finish; | ||
1248 | |||
1249 | } | ||
1250 | |||
1251 | /* REVISIT this looks wrong... */ | ||
1252 | if (!status || dma || usb_pipeisoc(pipe)) { | ||
1253 | if (dma) | ||
1254 | wLength = dma->actual_len; | ||
1255 | else | ||
1256 | wLength = qh->segsize; | ||
1257 | qh->offset += wLength; | ||
1258 | |||
1259 | if (usb_pipeisoc(pipe)) { | ||
1260 | struct usb_iso_packet_descriptor *d; | ||
1261 | |||
1262 | d = urb->iso_frame_desc + qh->iso_idx; | ||
1263 | d->actual_length = qh->segsize; | ||
1264 | if (++qh->iso_idx >= urb->number_of_packets) { | ||
1265 | done = true; | ||
1266 | } else { | ||
1267 | d++; | ||
1268 | buf = urb->transfer_buffer + d->offset; | ||
1269 | wLength = d->length; | ||
1270 | } | ||
1271 | } else if (dma) { | ||
1272 | done = true; | ||
1273 | } else { | ||
1274 | /* see if we need to send more data, or ZLP */ | ||
1275 | if (qh->segsize < qh->maxpacket) | ||
1276 | done = true; | ||
1277 | else if (qh->offset == urb->transfer_buffer_length | ||
1278 | && !(urb->transfer_flags | ||
1279 | & URB_ZERO_PACKET)) | ||
1280 | done = true; | ||
1281 | if (!done) { | ||
1282 | buf = urb->transfer_buffer | ||
1283 | + qh->offset; | ||
1284 | wLength = urb->transfer_buffer_length | ||
1285 | - qh->offset; | ||
1286 | } | ||
1287 | } | ||
1288 | } | ||
1289 | |||
1290 | /* urb->status != -EINPROGRESS means request has been faulted, | ||
1291 | * so we must abort this transfer after cleanup | ||
1292 | */ | ||
1293 | if (urb->status != -EINPROGRESS) { | ||
1294 | done = true; | ||
1295 | if (status == 0) | ||
1296 | status = urb->status; | ||
1297 | } | ||
1298 | |||
1299 | if (done) { | ||
1300 | /* set status */ | ||
1301 | urb->status = status; | ||
1302 | urb->actual_length = qh->offset; | ||
1303 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); | ||
1304 | |||
1305 | } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) { | ||
1306 | /* WARN_ON(!buf); */ | ||
1307 | |||
1308 | /* REVISIT: some docs say that when hw_ep->tx_double_buffered, | ||
1309 | * (and presumably, fifo is not half-full) we should write TWO | ||
1310 | * packets before updating TXCSR ... other docs disagree ... | ||
1311 | */ | ||
1312 | /* PIO: start next packet in this URB */ | ||
1313 | wLength = min(qh->maxpacket, (u16) wLength); | ||
1314 | musb_write_fifo(hw_ep, wLength, buf); | ||
1315 | qh->segsize = wLength; | ||
1316 | |||
1317 | musb_ep_select(mbase, epnum); | ||
1318 | musb_writew(epio, MUSB_TXCSR, | ||
1319 | MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); | ||
1320 | } else | ||
1321 | DBG(1, "not complete, but dma enabled?\n"); | ||
1322 | |||
1323 | finish: | ||
1324 | return; | ||
1325 | } | ||
1326 | |||
1327 | |||
1328 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
1329 | |||
1330 | /* Host side RX (IN) using Mentor DMA works as follows: | ||
1331 | submit_urb -> | ||
1332 | - if queue was empty, ProgramEndpoint | ||
1333 | - first IN token is sent out (by setting ReqPkt) | ||
1334 | LinuxIsr -> RxReady() | ||
1335 | /\ => first packet is received | ||
1336 | | - Set in mode 0 (DmaEnab, ~ReqPkt) | ||
1337 | | -> DMA Isr (transfer complete) -> RxReady() | ||
1338 | | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) | ||
1339 | | - if urb not complete, send next IN token (ReqPkt) | ||
1340 | | | else complete urb. | ||
1341 | | | | ||
1342 | --------------------------- | ||
1343 | * | ||
1344 | * Nuances of mode 1: | ||
1345 | * For short packets, no ack (+RxPktRdy) is sent automatically | ||
1346 | * (even if AutoClear is ON) | ||
1347 | * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent | ||
1348 | * automatically => major problem, as collecting the next packet becomes | ||
1349 | * difficult. Hence mode 1 is not used. | ||
1350 | * | ||
1351 | * REVISIT | ||
1352 | * All we care about at this driver level is that | ||
1353 | * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; | ||
1354 | * (b) termination conditions are: short RX, or buffer full; | ||
1355 | * (c) fault modes include | ||
1356 | * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. | ||
1357 | * (and that endpoint's dma queue stops immediately) | ||
1358 | * - overflow (full, PLUS more bytes in the terminal packet) | ||
1359 | * | ||
1360 | * So for example, usb-storage sets URB_SHORT_NOT_OK, and would | ||
1361 | * thus be a great candidate for using mode 1 ... for all but the | ||
1362 | * last packet of one URB's transfer. | ||
1363 | */ | ||
1364 | |||
1365 | #endif | ||
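/*
 * Editor's note -- one mode 0 step from the diagram above, restated as
 * a hypothetical helper (never built): ack the packet, keep DMA off,
 * then either request the next IN token or let the caller complete
 * the urb.
 */
#if 0
static void rx_dma_mode0_step_sketch(struct musb_hw_ep *hw_ep, bool done)
{
	u16 csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

	/* ack receive (~RxPktRdy) and turn off DMA (~DmaEnab) */
	csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_DMAENAB);

	/* if the urb isn't complete, send the next IN token (ReqPkt) */
	if (!done)
		csr |= MUSB_RXCSR_H_REQPKT;

	musb_writew(hw_ep->regs, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | csr);
}
#endif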
1366 | |||
1367 | /* | ||
1368 | * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, | ||
1369 | * and high-bandwidth IN transfer cases. | ||
1370 | */ | ||
1371 | void musb_host_rx(struct musb *musb, u8 epnum) | ||
1372 | { | ||
1373 | struct urb *urb; | ||
1374 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | ||
1375 | void __iomem *epio = hw_ep->regs; | ||
1376 | struct musb_qh *qh = hw_ep->in_qh; | ||
1377 | size_t xfer_len; | ||
1378 | void __iomem *mbase = musb->mregs; | ||
1379 | int pipe; | ||
1380 | u16 rx_csr, val; | ||
1381 | bool iso_err = false; | ||
1382 | bool done = false; | ||
1383 | u32 status; | ||
1384 | struct dma_channel *dma; | ||
1385 | |||
1386 | musb_ep_select(mbase, epnum); | ||
1387 | |||
1388 | urb = next_urb(qh); | ||
1389 | dma = is_dma_capable() ? hw_ep->rx_channel : NULL; | ||
1390 | status = 0; | ||
1391 | xfer_len = 0; | ||
1392 | |||
1393 | rx_csr = musb_readw(epio, MUSB_RXCSR); | ||
1394 | val = rx_csr; | ||
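	/* "val" is the scratch copy used for the register writes below;
	 * rx_csr keeps the as-read status bits for the error checks
	 */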
1395 | |||
1396 | if (unlikely(!urb)) { | ||
1397 | /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least | ||
1398 | * usbtest #11 (unlinks) triggers it regularly, sometimes | ||
1399 | * with fifo full. (Only with DMA??) | ||
1400 | */ | ||
1401 | DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, | ||
1402 | musb_readw(epio, MUSB_RXCOUNT)); | ||
1403 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | ||
1404 | return; | ||
1405 | } | ||
1406 | |||
1407 | pipe = urb->pipe; | ||
1408 | |||
1409 | DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", | ||
1410 | epnum, rx_csr, urb->actual_length, | ||
1411 | dma ? dma->actual_len : 0); | ||
1412 | |||
1413 | /* check for errors, concurrent stall & unlink is not really | ||
1414 | * handled yet! */ | ||
1415 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { | ||
1416 | DBG(3, "RX end %d STALL\n", epnum); | ||
1417 | |||
1418 | /* stall; record URB status */ | ||
1419 | status = -EPIPE; | ||
1420 | |||
1421 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { | ||
1422 | DBG(3, "end %d RX proto error\n", epnum); | ||
1423 | |||
1424 | status = -EPROTO; | ||
1425 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | ||
1426 | |||
1427 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | ||
1428 | |||
1429 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | ||
1430 | /* NOTE this code path would be a good place to PAUSE a | ||
1431 | * transfer, if there's some other (nonperiodic) rx urb | ||
1432 | * that could use this fifo. (dma complicates it...) | ||
1433 | * | ||
1434 | * if (bulk && qh->ring.next != &musb->in_bulk), then | ||
1435 | * we have a candidate... NAKing is *NOT* an error | ||
1436 | */ | ||
1437 | DBG(6, "RX end %d NAK timeout\n", epnum); | ||
1438 | musb_ep_select(mbase, epnum); | ||
1439 | musb_writew(epio, MUSB_RXCSR, | ||
1440 | MUSB_RXCSR_H_WZC_BITS | ||
1441 | | MUSB_RXCSR_H_REQPKT); | ||
1442 | |||
1443 | goto finish; | ||
1444 | } else { | ||
1445 | DBG(4, "RX end %d ISO data error\n", epnum); | ||
1446 | /* packet error reported later */ | ||
1447 | iso_err = true; | ||
1448 | } | ||
1449 | } | ||
1450 | |||
1451 | /* faults abort the transfer */ | ||
1452 | if (status) { | ||
1453 | /* clean up dma and collect transfer count */ | ||
1454 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
1455 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
1456 | (void) musb->dma_controller->channel_abort(dma); | ||
1457 | xfer_len = dma->actual_len; | ||
1458 | } | ||
1459 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | ||
1460 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | ||
1461 | done = true; | ||
1462 | goto finish; | ||
1463 | } | ||
1464 | |||
1465 | if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { | ||
1466 | /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ | ||
1467 | ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); | ||
1468 | goto finish; | ||
1469 | } | ||
1470 | |||
1471 | /* thorough shutdown for now ... given more precise fault handling | ||
1472 | * and better queueing support, we might keep a DMA pipeline going | ||
1473 | * while processing this irq for earlier completions. | ||
1474 | */ | ||
1475 | |||
1476 | /* FIXME this is _way_ too much in-line logic for Mentor DMA */ | ||
1477 | |||
1478 | #ifndef CONFIG_USB_INVENTRA_DMA | ||
1479 | if (rx_csr & MUSB_RXCSR_H_REQPKT) { | ||
1480 | /* REVISIT this happened for a while on some short reads... | ||
1481 | * the cleanup still needs investigation... looks bad... | ||
1482 | * and also duplicates dma cleanup code above ... plus, | ||
1483 | * shouldn't this be the "half full" double buffer case? | ||
1484 | */ | ||
1485 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | ||
1486 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | ||
1487 | (void) musb->dma_controller->channel_abort(dma); | ||
1488 | xfer_len = dma->actual_len; | ||
1489 | done = true; | ||
1490 | } | ||
1491 | |||
1492 | DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, | ||
1493 | xfer_len, dma ? ", dma" : ""); | ||
1494 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; | ||
1495 | |||
1496 | musb_ep_select(mbase, epnum); | ||
1497 | musb_writew(epio, MUSB_RXCSR, | ||
1498 | MUSB_RXCSR_H_WZC_BITS | rx_csr); | ||
1499 | } | ||
1500 | #endif | ||
1501 | if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { | ||
1502 | xfer_len = dma->actual_len; | ||
1503 | |||
1504 | val &= ~(MUSB_RXCSR_DMAENAB | ||
1505 | | MUSB_RXCSR_H_AUTOREQ | ||
1506 | | MUSB_RXCSR_AUTOCLEAR | ||
1507 | | MUSB_RXCSR_RXPKTRDY); | ||
1508 | musb_writew(hw_ep->regs, MUSB_RXCSR, val); | ||
1509 | |||
1510 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
1511 | /* done if urb buffer is full or short packet is recd */ | ||
1512 | done = (urb->actual_length + xfer_len >= | ||
1513 | urb->transfer_buffer_length | ||
1514 | || dma->actual_len < qh->maxpacket); | ||
1515 | |||
1516 | /* send IN token for next packet, without AUTOREQ */ | ||
1517 | if (!done) { | ||
1518 | val |= MUSB_RXCSR_H_REQPKT; | ||
1519 | musb_writew(epio, MUSB_RXCSR, | ||
1520 | MUSB_RXCSR_H_WZC_BITS | val); | ||
1521 | } | ||
1522 | |||
1523 | DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, | ||
1524 | done ? "off" : "reset", | ||
1525 | musb_readw(epio, MUSB_RXCSR), | ||
1526 | musb_readw(epio, MUSB_RXCOUNT)); | ||
1527 | #else | ||
1528 | done = true; | ||
1529 | #endif | ||
1530 | } else if (urb->status == -EINPROGRESS) { | ||
1531 | /* if no errors, be sure a packet is ready for unloading */ | ||
1532 | if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { | ||
1533 | status = -EPROTO; | ||
1534 | ERR("Rx interrupt with no errors or packet!\n"); | ||
1535 | |||
1536 | /* FIXME this is another "SHOULD NEVER HAPPEN" */ | ||
1537 | |||
1538 | /* SCRUB (RX) */ | ||
1539 | /* do the proper sequence to abort the transfer */ | ||
1540 | musb_ep_select(mbase, epnum); | ||
1541 | val &= ~MUSB_RXCSR_H_REQPKT; | ||
1542 | musb_writew(epio, MUSB_RXCSR, val); | ||
1543 | goto finish; | ||
1544 | } | ||
1545 | |||
1546 | /* we are expecting IN packets */ | ||
1547 | #ifdef CONFIG_USB_INVENTRA_DMA | ||
1548 | if (dma) { | ||
1549 | struct dma_controller *c; | ||
1550 | u16 rx_count; | ||
1551 | int ret; | ||
1552 | |||
1553 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | ||
1554 | |||
1555 | DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", | ||
1556 | epnum, rx_count, | ||
1557 | urb->transfer_dma | ||
1558 | + urb->actual_length, | ||
1559 | qh->offset, | ||
1560 | urb->transfer_buffer_length); | ||
1561 | |||
1562 | c = musb->dma_controller; | ||
1563 | |||
1564 | dma->desired_mode = 0; | ||
1565 | #ifdef USE_MODE1 | ||
1566 | /* because of the issue below, mode 1 will | ||
1567 | * only rarely behave with correct semantics. | ||
1568 | */ | ||
1569 | if ((urb->transfer_flags & | ||
1570 | URB_SHORT_NOT_OK) | ||
1571 | && (urb->transfer_buffer_length - | ||
1572 | urb->actual_length) | ||
1573 | > qh->maxpacket) | ||
1574 | dma->desired_mode = 1; | ||
1575 | #endif | ||
1576 | |||
1577 | /* Disadvantage of using mode 1: | ||
1578 |  * it's basically usable only for mass storage class; essentially all | ||
1579 |  * other protocols also terminate transfers on short packets. | ||
1580 |  * | ||
1581 |  * Details: | ||
1582 |  * an extra IN token is sent at the end of the transfer (due to AUTOREQ). | ||
1583 |  * If you try to use mode 1 for (transfer_buffer_length - 512) and use | ||
1584 |  * the extra IN token to grab the last packet using mode 0, the problem | ||
1585 |  * is that you cannot be sure when the device will send the last packet | ||
1586 |  * and set RxPktRdy.  Sometimes the packet arrives too soon and is lost | ||
1587 |  * when RxCSR is re-programmed at the end of the mode 1 transfer; other | ||
1588 |  * times it arrives just a little late, so that configuring for mode 0 | ||
1589 |  * right after the mode 1 transfer completes finds rxcount 0.  Nor can | ||
1590 |  * you simply wait for an interrupt when the packet arrives: none is | ||
1591 |  * ever raised. | ||
1592 |  */ | ||
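			/* Policy sketch (editorial): with USE_MODE1 unset, the
			 * driver always takes mode 0; otherwise mode 1 is used
			 * only when URB_SHORT_NOT_OK rules out early termination
			 * and more than one full packet remains.
			 */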
1593 | |||
1594 | val = musb_readw(epio, MUSB_RXCSR); | ||
1595 | val &= ~MUSB_RXCSR_H_REQPKT; | ||
1596 | |||
1597 | if (dma->desired_mode == 0) | ||
1598 | val &= ~MUSB_RXCSR_H_AUTOREQ; | ||
1599 | else | ||
1600 | val |= MUSB_RXCSR_H_AUTOREQ; | ||
1601 | val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; | ||
1602 | |||
1603 | musb_writew(epio, MUSB_RXCSR, | ||
1604 | MUSB_RXCSR_H_WZC_BITS | val); | ||
1605 | |||
1606 | 				/* REVISIT when actual_length != 0, | ||
1607 | 				 * transfer_buffer_length needs to be | ||
1608 | 				 * adjusted first... | ||
1609 | 				 */ | ||
1610 | ret = c->channel_program( | ||
1611 | dma, qh->maxpacket, | ||
1612 | dma->desired_mode, | ||
1613 | urb->transfer_dma | ||
1614 | + urb->actual_length, | ||
1615 | (dma->desired_mode == 0) | ||
1616 | ? rx_count | ||
1617 | : urb->transfer_buffer_length); | ||
1618 | |||
1619 | if (!ret) { | ||
1620 | c->channel_release(dma); | ||
1621 | hw_ep->rx_channel = NULL; | ||
1622 | dma = NULL; | ||
1623 | /* REVISIT reset CSR */ | ||
1624 | } | ||
1625 | } | ||
1626 | #endif /* Mentor DMA */ | ||
1627 | |||
1628 | if (!dma) { | ||
1629 | done = musb_host_packet_rx(musb, urb, | ||
1630 | epnum, iso_err); | ||
1631 | DBG(6, "read %spacket\n", done ? "last " : ""); | ||
1632 | } | ||
1633 | } | ||
1634 | |||
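	/* Editorial note: for isochronous IN, status is tracked per frame
	 * descriptor; an error marks just this packet (-EILSEQ) and bumps
	 * urb->error_count instead of failing the whole URB.
	 */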
1635 | if (dma && usb_pipeisoc(pipe)) { | ||
1636 | struct usb_iso_packet_descriptor *d; | ||
1637 | int iso_stat = status; | ||
1638 | |||
1639 | d = urb->iso_frame_desc + qh->iso_idx; | ||
1640 | d->actual_length += xfer_len; | ||
1641 | if (iso_err) { | ||
1642 | iso_stat = -EILSEQ; | ||
1643 | urb->error_count++; | ||
1644 | } | ||
1645 | d->status = iso_stat; | ||
1646 | } | ||
1647 | |||
1648 | finish: | ||
1649 | urb->actual_length += xfer_len; | ||
1650 | qh->offset += xfer_len; | ||
1651 | if (done) { | ||
1652 | if (urb->status == -EINPROGRESS) | ||
1653 | urb->status = status; | ||
1654 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); | ||
1655 | } | ||
1656 | } | ||
1657 | |||
1658 | /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. | ||
1659 | * the software schedule associates multiple such nodes with a given | ||
1660 | * host side hardware endpoint + direction; scheduling may activate | ||
1661 | * that hardware endpoint. | ||
1662 | */ | ||
1663 | static int musb_schedule( | ||
1664 | struct musb *musb, | ||
1665 | struct musb_qh *qh, | ||
1666 | int is_in) | ||
1667 | { | ||
1668 | int idle; | ||
1669 | int best_diff; | ||
1670 | int best_end, epnum; | ||
1671 | struct musb_hw_ep *hw_ep = NULL; | ||
1672 | struct list_head *head = NULL; | ||
1673 | |||
1674 | /* use fixed hardware for control and bulk */ | ||
1675 | switch (qh->type) { | ||
1676 | case USB_ENDPOINT_XFER_CONTROL: | ||
1677 | head = &musb->control; | ||
1678 | hw_ep = musb->control_ep; | ||
1679 | break; | ||
1680 | case USB_ENDPOINT_XFER_BULK: | ||
1681 | hw_ep = musb->bulk_ep; | ||
1682 | if (is_in) | ||
1683 | head = &musb->in_bulk; | ||
1684 | else | ||
1685 | head = &musb->out_bulk; | ||
1686 | break; | ||
1687 | } | ||
1688 | if (head) { | ||
1689 | idle = list_empty(head); | ||
1690 | list_add_tail(&qh->ring, head); | ||
1691 | goto success; | ||
1692 | } | ||
1693 | |||
1694 | /* else, periodic transfers get muxed to other endpoints */ | ||
1695 | |||
1696 | /* FIXME this doesn't consider direction, so it can only | ||
1697 | * work for one half of the endpoint hardware, and assumes | ||
1698 | * the previous cases handled all non-shared endpoints... | ||
1699 | */ | ||
1700 | |||
1701 | /* we know this qh hasn't been scheduled, so all we need to do | ||
1702 | * is choose which hardware endpoint to put it on ... | ||
1703 | * | ||
1704 | * REVISIT what we really want here is a regular schedule tree | ||
1705 | * like e.g. OHCI uses, but for now musb->periodic is just an | ||
1706 | * array of the _single_ logical endpoint associated with a | ||
1707 | * given physical one (identity mapping logical->physical). | ||
1708 | * | ||
1709 | * that simplistic approach makes TT scheduling a lot simpler; | ||
1710 | * there is none, and thus none of its complexity... | ||
1711 | */ | ||
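	/* Editorial note: the loop below is a best-fit search, picking the
	 * unclaimed periodic endpoint whose FIFO is strictly larger than
	 * qh->maxpacket by the smallest margin; best_diff starts at 4096,
	 * which exceeds any legal margin, so any usable endpoint wins.
	 */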
1712 | best_diff = 4096; | ||
1713 | best_end = -1; | ||
1714 | |||
1715 | for (epnum = 1; epnum < musb->nr_endpoints; epnum++) { | ||
1716 | int diff; | ||
1717 | |||
1718 | if (musb->periodic[epnum]) | ||
1719 | continue; | ||
1720 | hw_ep = &musb->endpoints[epnum]; | ||
1721 | if (hw_ep == musb->bulk_ep) | ||
1722 | continue; | ||
1723 | |||
1724 | if (is_in) | ||
1725 | diff = hw_ep->max_packet_sz_rx - qh->maxpacket; | ||
1726 | else | ||
1727 | diff = hw_ep->max_packet_sz_tx - qh->maxpacket; | ||
1728 | |||
1729 | if (diff > 0 && best_diff > diff) { | ||
1730 | best_diff = diff; | ||
1731 | best_end = epnum; | ||
1732 | } | ||
1733 | } | ||
1734 | if (best_end < 0) | ||
1735 | return -ENOSPC; | ||
1736 | |||
1737 | idle = 1; | ||
1738 | hw_ep = musb->endpoints + best_end; | ||
1739 | musb->periodic[best_end] = qh; | ||
1740 | DBG(4, "qh %p periodic slot %d\n", qh, best_end); | ||
1741 | success: | ||
1742 | qh->hw_ep = hw_ep; | ||
1743 | qh->hep->hcpriv = qh; | ||
1744 | if (idle) | ||
1745 | musb_start_urb(musb, is_in, qh); | ||
1746 | return 0; | ||
1747 | } | ||
1748 | |||
1749 | static int musb_urb_enqueue( | ||
1750 | struct usb_hcd *hcd, | ||
1751 | struct urb *urb, | ||
1752 | gfp_t mem_flags) | ||
1753 | { | ||
1754 | unsigned long flags; | ||
1755 | struct musb *musb = hcd_to_musb(hcd); | ||
1756 | struct usb_host_endpoint *hep = urb->ep; | ||
1757 | struct musb_qh *qh = hep->hcpriv; | ||
1758 | struct usb_endpoint_descriptor *epd = &hep->desc; | ||
1759 | int ret; | ||
1760 | unsigned type_reg; | ||
1761 | unsigned interval; | ||
1762 | |||
1763 | /* host role must be active */ | ||
1764 | if (!is_host_active(musb) || !musb->is_active) | ||
1765 | return -ENODEV; | ||
1766 | |||
1767 | spin_lock_irqsave(&musb->lock, flags); | ||
1768 | ret = usb_hcd_link_urb_to_ep(hcd, urb); | ||
1769 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1770 | if (ret) | ||
1771 | return ret; | ||
1772 | |||
1773 | /* DMA mapping was already done, if needed, and this urb is on | ||
1774 | * hep->urb_list ... so there's little to do unless hep wasn't | ||
1775 | * yet scheduled onto a live qh. | ||
1776 | * | ||
1777 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets | ||
1778 | * disabled, testing for empty qh->ring and avoiding qh setup costs | ||
1779 | * except for the first urb queued after a config change. | ||
1780 | */ | ||
1781 | if (qh) { | ||
1782 | urb->hcpriv = qh; | ||
1783 | return 0; | ||
1784 | } | ||
1785 | |||
1786 | /* Allocate and initialize qh, minimizing the work done each time | ||
1787 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. | ||
1788 | * | ||
1789 | * REVISIT consider a dedicated qh kmem_cache, so it's harder | ||
1790 | * for bugs in other kernel code to break this driver... | ||
1791 | */ | ||
1792 | qh = kzalloc(sizeof *qh, mem_flags); | ||
1793 | if (!qh) { | ||
1794 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
1795 | return -ENOMEM; | ||
1796 | } | ||
1797 | |||
1798 | qh->hep = hep; | ||
1799 | qh->dev = urb->dev; | ||
1800 | INIT_LIST_HEAD(&qh->ring); | ||
1801 | qh->is_ready = 1; | ||
1802 | |||
1803 | qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); | ||
1804 | |||
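	/* Editorial note: wMaxPacketSize bits 10:0 hold the packet size; in
	 * USB 2.0, bits 12:11 request extra (high bandwidth) transactions
	 * per microframe, which is what the ~0x7ff test below rejects.
	 */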
1805 | /* no high bandwidth support yet */ | ||
1806 | if (qh->maxpacket & ~0x7ff) { | ||
1807 | ret = -EMSGSIZE; | ||
1808 | goto done; | ||
1809 | } | ||
1810 | |||
1811 | qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | ||
1812 | qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; | ||
1813 | |||
1814 | /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ | ||
1815 | qh->addr_reg = (u8) usb_pipedevice(urb->pipe); | ||
1816 | |||
1817 | /* precompute rxtype/txtype/type0 register */ | ||
1818 | type_reg = (qh->type << 4) | qh->epnum; | ||
1819 | switch (urb->dev->speed) { | ||
1820 | case USB_SPEED_LOW: | ||
1821 | type_reg |= 0xc0; | ||
1822 | break; | ||
1823 | case USB_SPEED_FULL: | ||
1824 | type_reg |= 0x80; | ||
1825 | break; | ||
1826 | default: | ||
1827 | type_reg |= 0x40; | ||
1828 | } | ||
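	/* Editorial note (hedged): this matches the TXTYPE/RXTYPE layout on
	 * multipoint parts: bits 7:6 select speed (11 low, 10 full, 01 high),
	 * bits 5:4 the protocol, and bits 3:0 the target endpoint number.
	 */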
1829 | qh->type_reg = type_reg; | ||
1830 | |||
1831 | /* precompute rxinterval/txinterval register */ | ||
1832 | interval = min((u8)16, epd->bInterval); /* log encoding */ | ||
1833 | switch (qh->type) { | ||
1834 | case USB_ENDPOINT_XFER_INT: | ||
1835 | /* fullspeed uses linear encoding */ | ||
1836 | if (USB_SPEED_FULL == urb->dev->speed) { | ||
1837 | interval = epd->bInterval; | ||
1838 | if (!interval) | ||
1839 | interval = 1; | ||
1840 | } | ||
1841 | /* FALLTHROUGH */ | ||
1842 | case USB_ENDPOINT_XFER_ISOC: | ||
1843 | /* iso always uses log encoding */ | ||
1844 | break; | ||
1845 | default: | ||
1846 | /* REVISIT we actually want to use NAK limits, hinting to the | ||
1847 | * transfer scheduling logic to try some other qh, e.g. try | ||
1848 | * for 2 msec first: | ||
1849 | * | ||
1850 | * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; | ||
1851 | * | ||
1852 | * The downside of disabling this is that transfer scheduling | ||
1853 | * gets VERY unfair for nonperiodic transfers; a misbehaving | ||
1854 | 		 * peripheral could make that hurt.  So could a perfectly | ||
1855 | 		 * normal one, for reads: network and other drivers keep | ||
1856 | 		 * reads posted at all times, and having one pending for a | ||
1857 | 		 * week should be perfectly safe. | ||
1858 | 		 * | ||
1859 | 		 * The upside of disabling it is avoiding transfer scheduling | ||
1860 | 		 * code that would have to put this qh aside for a while. | ||
1861 | */ | ||
1862 | interval = 0; | ||
1863 | } | ||
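	/* Editorial note (hedged): in the log encoding, a register value m
	 * polls every 2^(m-1) (micro)frames, so the min() above caps m at
	 * 16, the largest encoding the interval registers accept.
	 */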
1864 | qh->intv_reg = interval; | ||
1865 | |||
1866 | /* precompute addressing for external hub/tt ports */ | ||
1867 | if (musb->is_multipoint) { | ||
1868 | struct usb_device *parent = urb->dev->parent; | ||
1869 | |||
1870 | if (parent != hcd->self.root_hub) { | ||
1871 | qh->h_addr_reg = (u8) parent->devnum; | ||
1872 | |||
1873 | /* set up tt info if needed */ | ||
1874 | if (urb->dev->tt) { | ||
1875 | qh->h_port_reg = (u8) urb->dev->ttport; | ||
1876 | qh->h_addr_reg |= 0x80; | ||
1877 | } | ||
1878 | } | ||
1879 | } | ||
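	/* Editorial note (hedged): bit 7 of the hub address register appears
	 * to be the core's multiple-translators flag; this code sets it
	 * whenever the device sits behind a transaction translator at all.
	 */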
1880 | |||
1881 | /* invariant: hep->hcpriv is null OR the qh that's already scheduled. | ||
1882 | * until we get real dma queues (with an entry for each urb/buffer), | ||
1883 | * we only have work to do in the former case. | ||
1884 | */ | ||
1885 | spin_lock_irqsave(&musb->lock, flags); | ||
1886 | if (hep->hcpriv) { | ||
1887 | /* some concurrent activity submitted another urb to hep... | ||
1888 | * odd, rare, error prone, but legal. | ||
1889 | */ | ||
1890 | kfree(qh); | ||
1891 | ret = 0; | ||
1892 | } else | ||
1893 | ret = musb_schedule(musb, qh, | ||
1894 | epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); | ||
1895 | |||
1896 | if (ret == 0) { | ||
1897 | urb->hcpriv = qh; | ||
1898 | /* FIXME set urb->start_frame for iso/intr, it's tested in | ||
1899 | * musb_start_urb(), but otherwise only konicawc cares ... | ||
1900 | */ | ||
1901 | } | ||
1902 | spin_unlock_irqrestore(&musb->lock, flags); | ||
1903 | |||
1904 | done: | ||
1905 | if (ret != 0) { | ||
1906 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
1907 | kfree(qh); | ||
1908 | } | ||
1909 | return ret; | ||
1910 | } | ||
1911 | |||
1912 | |||
1913 | /* | ||
1914 |  * Abort a transfer that's at the head of a hardware queue. | ||
1915 |  * Called with controller locked, irqs blocked. | ||
1916 |  * The hardware queue then advances to the next transfer, unless prevented. | ||
1917 | */ | ||
1918 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) | ||
1919 | { | ||
1920 | struct musb_hw_ep *ep = qh->hw_ep; | ||
1921 | void __iomem *epio = ep->regs; | ||
1922 | unsigned hw_end = ep->epnum; | ||
1923 | void __iomem *regs = ep->musb->mregs; | ||
1924 | u16 csr; | ||
1925 | int status = 0; | ||
1926 | |||
1927 | musb_ep_select(regs, hw_end); | ||
1928 | |||
1929 | if (is_dma_capable()) { | ||
1930 | struct dma_channel *dma; | ||
1931 | |||
1932 | dma = is_in ? ep->rx_channel : ep->tx_channel; | ||
1933 | if (dma) { | ||
1934 | status = ep->musb->dma_controller->channel_abort(dma); | ||
1935 | DBG(status ? 1 : 3, | ||
1936 | "abort %cX%d DMA for urb %p --> %d\n", | ||
1937 | is_in ? 'R' : 'T', ep->epnum, | ||
1938 | urb, status); | ||
1939 | urb->actual_length += dma->actual_len; | ||
1940 | } | ||
1941 | } | ||
1942 | |||
1943 | /* turn off DMA requests, discard state, stop polling ... */ | ||
1944 | if (is_in) { | ||
1945 | /* giveback saves bulk toggle */ | ||
1946 | csr = musb_h_flush_rxfifo(ep, 0); | ||
1947 | |||
1948 | /* REVISIT we still get an irq; should likely clear the | ||
1949 | * endpoint's irq status here to avoid bogus irqs. | ||
1950 | * clearing that status is platform-specific... | ||
1951 | */ | ||
1952 | } else { | ||
1953 | musb_h_tx_flush_fifo(ep); | ||
1954 | csr = musb_readw(epio, MUSB_TXCSR); | ||
1955 | csr &= ~(MUSB_TXCSR_AUTOSET | ||
1956 | | MUSB_TXCSR_DMAENAB | ||
1957 | | MUSB_TXCSR_H_RXSTALL | ||
1958 | | MUSB_TXCSR_H_NAKTIMEOUT | ||
1959 | | MUSB_TXCSR_H_ERROR | ||
1960 | | MUSB_TXCSR_TXPKTRDY); | ||
1961 | musb_writew(epio, MUSB_TXCSR, csr); | ||
1962 | /* REVISIT may need to clear FLUSHFIFO ... */ | ||
1963 | musb_writew(epio, MUSB_TXCSR, csr); | ||
1964 | /* flush cpu writebuffer */ | ||
1965 | csr = musb_readw(epio, MUSB_TXCSR); | ||
1966 | } | ||
1967 | if (status == 0) | ||
1968 | musb_advance_schedule(ep->musb, urb, ep, is_in); | ||
1969 | return status; | ||
1970 | } | ||
1971 | |||
1972 | static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | ||
1973 | { | ||
1974 | struct musb *musb = hcd_to_musb(hcd); | ||
1975 | struct musb_qh *qh; | ||
1976 | struct list_head *sched; | ||
1977 | unsigned long flags; | ||
1978 | int ret; | ||
1979 | |||
1980 | DBG(4, "urb=%p, dev%d ep%d%s\n", urb, | ||
1981 | usb_pipedevice(urb->pipe), | ||
1982 | usb_pipeendpoint(urb->pipe), | ||
1983 | usb_pipein(urb->pipe) ? "in" : "out"); | ||
1984 | |||
1985 | spin_lock_irqsave(&musb->lock, flags); | ||
1986 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | ||
1987 | if (ret) | ||
1988 | goto done; | ||
1989 | |||
1990 | qh = urb->hcpriv; | ||
1991 | if (!qh) | ||
1992 | goto done; | ||
1993 | |||
1994 | /* Any URB not actively programmed into endpoint hardware can be | ||
1995 | * immediately given back. Such an URB must be at the head of its | ||
1996 | * endpoint queue, unless someday we get real DMA queues. And even | ||
1997 | * then, it might not be known to the hardware... | ||
1998 | * | ||
1999 | * Otherwise abort current transfer, pending dma, etc.; urb->status | ||
2000 | * has already been updated. This is a synchronous abort; it'd be | ||
2001 | * OK to hold off until after some IRQ, though. | ||
2002 | */ | ||
2003 | if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list) | ||
2004 | ret = -EINPROGRESS; | ||
2005 | else { | ||
2006 | switch (qh->type) { | ||
2007 | case USB_ENDPOINT_XFER_CONTROL: | ||
2008 | sched = &musb->control; | ||
2009 | break; | ||
2010 | case USB_ENDPOINT_XFER_BULK: | ||
2011 | if (usb_pipein(urb->pipe)) | ||
2012 | sched = &musb->in_bulk; | ||
2013 | else | ||
2014 | sched = &musb->out_bulk; | ||
2015 | break; | ||
2016 | default: | ||
2017 | /* REVISIT when we get a schedule tree, periodic | ||
2018 | * transfers won't always be at the head of a | ||
2019 | * singleton queue... | ||
2020 | */ | ||
2021 | sched = NULL; | ||
2022 | break; | ||
2023 | } | ||
2024 | } | ||
2025 | |||
2026 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | ||
2027 | if (ret < 0 || (sched && qh != first_qh(sched))) { | ||
2028 | int ready = qh->is_ready; | ||
2029 | |||
2030 | ret = 0; | ||
2031 | qh->is_ready = 0; | ||
2032 | __musb_giveback(musb, urb, 0); | ||
2033 | qh->is_ready = ready; | ||
2034 | } else | ||
2035 | ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | ||
2036 | done: | ||
2037 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2038 | return ret; | ||
2039 | } | ||
2040 | |||
2041 | /* disable an endpoint */ | ||
2042 | static void | ||
2043 | musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) | ||
2044 | { | ||
2045 | u8 epnum = hep->desc.bEndpointAddress; | ||
2046 | unsigned long flags; | ||
2047 | struct musb *musb = hcd_to_musb(hcd); | ||
2048 | u8 is_in = epnum & USB_DIR_IN; | ||
2049 | struct musb_qh *qh = hep->hcpriv; | ||
2050 | struct urb *urb, *tmp; | ||
2051 | struct list_head *sched; | ||
2052 | |||
2053 | if (!qh) | ||
2054 | return; | ||
2055 | |||
2056 | spin_lock_irqsave(&musb->lock, flags); | ||
2057 | |||
2058 | switch (qh->type) { | ||
2059 | case USB_ENDPOINT_XFER_CONTROL: | ||
2060 | sched = &musb->control; | ||
2061 | break; | ||
2062 | case USB_ENDPOINT_XFER_BULK: | ||
2063 | if (is_in) | ||
2064 | sched = &musb->in_bulk; | ||
2065 | else | ||
2066 | sched = &musb->out_bulk; | ||
2067 | break; | ||
2068 | default: | ||
2069 | /* REVISIT when we get a schedule tree, periodic transfers | ||
2070 | * won't always be at the head of a singleton queue... | ||
2071 | */ | ||
2072 | sched = NULL; | ||
2073 | break; | ||
2074 | } | ||
2075 | |||
2076 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | ||
2077 | |||
2078 | /* kick first urb off the hardware, if needed */ | ||
2079 | qh->is_ready = 0; | ||
2080 | if (!sched || qh == first_qh(sched)) { | ||
2081 | urb = next_urb(qh); | ||
2082 | |||
2083 | /* make software (then hardware) stop ASAP */ | ||
2084 | if (!urb->unlinked) | ||
2085 | urb->status = -ESHUTDOWN; | ||
2086 | |||
2087 | /* cleanup */ | ||
2088 | musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); | ||
2089 | } else | ||
2090 | urb = NULL; | ||
2091 | |||
2092 | /* then just nuke all the others */ | ||
2093 | list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) | ||
2094 | musb_giveback(qh, urb, -ESHUTDOWN); | ||
2095 | |||
2096 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2097 | } | ||
2098 | |||
2099 | static int musb_h_get_frame_number(struct usb_hcd *hcd) | ||
2100 | { | ||
2101 | struct musb *musb = hcd_to_musb(hcd); | ||
2102 | |||
2103 | return musb_readw(musb->mregs, MUSB_FRAME); | ||
2104 | } | ||
2105 | |||
2106 | static int musb_h_start(struct usb_hcd *hcd) | ||
2107 | { | ||
2108 | struct musb *musb = hcd_to_musb(hcd); | ||
2109 | |||
2110 | /* NOTE: musb_start() is called when the hub driver turns | ||
2111 | * on port power, or when (OTG) peripheral starts. | ||
2112 | */ | ||
2113 | hcd->state = HC_STATE_RUNNING; | ||
2114 | musb->port1_status = 0; | ||
2115 | return 0; | ||
2116 | } | ||
2117 | |||
2118 | static void musb_h_stop(struct usb_hcd *hcd) | ||
2119 | { | ||
2120 | musb_stop(hcd_to_musb(hcd)); | ||
2121 | hcd->state = HC_STATE_HALT; | ||
2122 | } | ||
2123 | |||
2124 | static int musb_bus_suspend(struct usb_hcd *hcd) | ||
2125 | { | ||
2126 | struct musb *musb = hcd_to_musb(hcd); | ||
2127 | |||
2128 | if (musb->xceiv.state == OTG_STATE_A_SUSPEND) | ||
2129 | return 0; | ||
2130 | |||
2131 | if (is_host_active(musb) && musb->is_active) { | ||
2132 | WARNING("trying to suspend as %s is_active=%i\n", | ||
2133 | otg_state_string(musb), musb->is_active); | ||
2134 | return -EBUSY; | ||
2135 | } else | ||
2136 | return 0; | ||
2137 | } | ||
2138 | |||
2139 | static int musb_bus_resume(struct usb_hcd *hcd) | ||
2140 | { | ||
2141 | /* resuming child port does the work */ | ||
2142 | return 0; | ||
2143 | } | ||
2144 | |||
2145 | const struct hc_driver musb_hc_driver = { | ||
2146 | .description = "musb-hcd", | ||
2147 | .product_desc = "MUSB HDRC host driver", | ||
2148 | .hcd_priv_size = sizeof(struct musb), | ||
2149 | .flags = HCD_USB2 | HCD_MEMORY, | ||
2150 | |||
2151 | /* not using irq handler or reset hooks from usbcore, since | ||
2152 | * those must be shared with peripheral code for OTG configs | ||
2153 | */ | ||
2154 | |||
2155 | .start = musb_h_start, | ||
2156 | .stop = musb_h_stop, | ||
2157 | |||
2158 | .get_frame_number = musb_h_get_frame_number, | ||
2159 | |||
2160 | .urb_enqueue = musb_urb_enqueue, | ||
2161 | .urb_dequeue = musb_urb_dequeue, | ||
2162 | .endpoint_disable = musb_h_disable, | ||
2163 | |||
2164 | .hub_status_data = musb_hub_status_data, | ||
2165 | .hub_control = musb_hub_control, | ||
2166 | .bus_suspend = musb_bus_suspend, | ||
2167 | .bus_resume = musb_bus_resume, | ||
2168 | /* .start_port_reset = NULL, */ | ||
2169 | /* .hub_irq_enable = NULL, */ | ||
2170 | }; | ||
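
/* Editorial usage sketch (not part of this driver): platform glue code
 * would typically hand musb_hc_driver to usbcore roughly along these
 * lines; the variable names here are illustrative only.
 *
 *	struct usb_hcd *hcd;
 *
 *	hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
 *	if (!hcd)
 *		return -ENOMEM;
 *	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
 *	if (ret < 0)
 *		usb_put_hcd(hcd);
 */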