author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/usb/host/ehci-q.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/usb/host/ehci-q.c')
-rw-r--r--	drivers/usb/host/ehci-q.c	1090
1 file changed, 1090 insertions, 0 deletions
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
new file mode 100644
index 000000000000..7df9b9af54f6
--- /dev/null
+++ b/drivers/usb/host/ehci-q.c
@@ -0,0 +1,1090 @@
1 | /* | ||
2 | * Copyright (c) 2001-2002 by David Brownell | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the | ||
6 | * Free Software Foundation; either version 2 of the License, or (at your | ||
7 | * option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
11 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
12 | * for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software Foundation, | ||
16 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | */ | ||
18 | |||
19 | /* this file is part of ehci-hcd.c */ | ||
20 | |||
21 | /*-------------------------------------------------------------------------*/ | ||
22 | |||
23 | /* | ||
24 | * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. | ||
25 | * | ||
26 | * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" | ||
27 | * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned | ||
28 | * buffers needed for the larger number). We use one QH per endpoint, queue | ||
29 | * multiple urbs (all three types) per endpoint. URBs may need several qtds. | ||
30 | * | ||
31 | * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with | ||
32 | * interrupts) needs careful scheduling. Performance improvements can be | ||
33 | * an ongoing challenge. That's in "ehci-sched.c". | ||
34 | * | ||
35 | * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, | ||
36 | * or otherwise through transaction translators (TTs) in USB 2.0 hubs using | ||
37 | * (b) special fields in qh entries or (c) split iso entries. TTs will | ||
38 | * buffer low/full speed data so the host collects it at high speed. | ||
39 | */ | ||
40 | |||
41 | /*-------------------------------------------------------------------------*/ | ||
42 | |||
43 | /* fill a qtd, returning how much of the buffer we were able to queue up */ | ||
44 | |||
45 | static int | ||
46 | qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, | ||
47 | int token, int maxpacket) | ||
48 | { | ||
49 | int i, count; | ||
50 | u64 addr = buf; | ||
51 | |||
52 | /* one buffer entry per 4K ... first might be short or unaligned */ | ||
53 | qtd->hw_buf [0] = cpu_to_le32 ((u32)addr); | ||
54 | qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32)); | ||
55 | count = 0x1000 - (buf & 0x0fff); /* rest of that page */ | ||
56 | if (likely (len < count)) /* ... iff needed */ | ||
57 | count = len; | ||
58 | else { | ||
59 | buf += 0x1000; | ||
60 | buf &= ~0x0fff; | ||
61 | |||
62 | /* per-qtd limit: from 16K to 20K (best alignment) */ | ||
63 | for (i = 1; count < len && i < 5; i++) { | ||
64 | addr = buf; | ||
65 | qtd->hw_buf [i] = cpu_to_le32 ((u32)addr); | ||
66 | qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32)); | ||
67 | buf += 0x1000; | ||
68 | if ((count + 0x1000) < len) | ||
69 | count += 0x1000; | ||
70 | else | ||
71 | count = len; | ||
72 | } | ||
73 | |||
74 | /* short packets may only terminate transfers */ | ||
75 | if (count != len) | ||
76 | count -= (count % maxpacket); | ||
77 | } | ||
78 | qtd->hw_token = cpu_to_le32 ((count << 16) | token); | ||
79 | qtd->length = count; | ||
80 | |||
81 | return count; | ||
82 | } | ||
83 | |||
84 | /*-------------------------------------------------------------------------*/ | ||
85 | |||
86 | static inline void | ||
87 | qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) | ||
88 | { | ||
89 | /* writes to an active overlay are unsafe */ | ||
90 | BUG_ON(qh->qh_state != QH_STATE_IDLE); | ||
91 | |||
92 | qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma); | ||
93 | qh->hw_alt_next = EHCI_LIST_END; | ||
94 | |||
95 | /* Except for control endpoints, we make hardware maintain data | ||
96 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, | ||
97 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will | ||
98 | * ever clear it. | ||
99 | */ | ||
100 | if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) { | ||
101 | unsigned is_out, epnum; | ||
102 | |||
103 | is_out = !(qtd->hw_token & cpu_to_le32(1 << 8)); | ||
104 | epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f; | ||
105 | if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { | ||
106 | qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE); | ||
107 | usb_settoggle (qh->dev, epnum, is_out, 1); | ||
108 | } | ||
109 | } | ||
110 | |||
111 | /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ | ||
112 | wmb (); | ||
113 | qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING); | ||
114 | } | ||
115 | |||
116 | /* if it weren't for a common silicon quirk (writing the dummy into the qh | ||
117 | * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault | ||
118 | * recovery (including urb dequeue) would need software changes to a QH... | ||
119 | */ | ||
120 | static void | ||
121 | qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) | ||
122 | { | ||
123 | struct ehci_qtd *qtd; | ||
124 | |||
125 | if (list_empty (&qh->qtd_list)) | ||
126 | qtd = qh->dummy; | ||
127 | else { | ||
128 | qtd = list_entry (qh->qtd_list.next, | ||
129 | struct ehci_qtd, qtd_list); | ||
130 | /* first qtd may already be partially processed */ | ||
131 | if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current) | ||
132 | qtd = NULL; | ||
133 | } | ||
134 | |||
135 | if (qtd) | ||
136 | qh_update (ehci, qh, qtd); | ||
137 | } | ||
138 | |||
139 | /*-------------------------------------------------------------------------*/ | ||
140 | |||
141 | static void qtd_copy_status ( | ||
142 | struct ehci_hcd *ehci, | ||
143 | struct urb *urb, | ||
144 | size_t length, | ||
145 | u32 token | ||
146 | ) | ||
147 | { | ||
148 | /* count IN/OUT bytes, not SETUP (even short packets) */ | ||
149 | if (likely (QTD_PID (token) != 2)) | ||
150 | urb->actual_length += length - QTD_LENGTH (token); | ||
151 | |||
152 | /* don't modify error codes */ | ||
153 | if (unlikely (urb->status != -EINPROGRESS)) | ||
154 | return; | ||
155 | |||
156 | /* force cleanup after short read; not always an error */ | ||
157 | if (unlikely (IS_SHORT_READ (token))) | ||
158 | urb->status = -EREMOTEIO; | ||
159 | |||
160 | /* serious "can't proceed" faults reported by the hardware */ | ||
161 | if (token & QTD_STS_HALT) { | ||
162 | if (token & QTD_STS_BABBLE) { | ||
163 | /* FIXME "must" disable babbling device's port too */ | ||
164 | urb->status = -EOVERFLOW; | ||
165 | } else if (token & QTD_STS_MMF) { | ||
166 | /* fs/ls interrupt xfer missed the complete-split */ | ||
167 | urb->status = -EPROTO; | ||
168 | } else if (token & QTD_STS_DBE) { | ||
169 | urb->status = (QTD_PID (token) == 1) /* IN ? */ | ||
170 | ? -ENOSR /* hc couldn't read data */ | ||
171 | : -ECOMM; /* hc couldn't write data */ | ||
172 | } else if (token & QTD_STS_XACT) { | ||
173 | /* timeout, bad crc, wrong PID, etc; retried */ | ||
174 | if (QTD_CERR (token)) | ||
175 | urb->status = -EPIPE; | ||
176 | else { | ||
177 | ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n", | ||
178 | urb->dev->devpath, | ||
179 | usb_pipeendpoint (urb->pipe), | ||
180 | usb_pipein (urb->pipe) ? "in" : "out"); | ||
181 | urb->status = -EPROTO; | ||
182 | } | ||
183 | /* CERR nonzero + no errors + halt --> stall */ | ||
184 | } else if (QTD_CERR (token)) | ||
185 | urb->status = -EPIPE; | ||
186 | else /* unknown */ | ||
187 | urb->status = -EPROTO; | ||
188 | |||
189 | ehci_vdbg (ehci, | ||
190 | "dev%d ep%d%s qtd token %08x --> status %d\n", | ||
191 | usb_pipedevice (urb->pipe), | ||
192 | usb_pipeendpoint (urb->pipe), | ||
193 | usb_pipein (urb->pipe) ? "in" : "out", | ||
194 | token, urb->status); | ||
195 | |||
196 | /* if async CSPLIT failed, try cleaning out the TT buffer */ | ||
197 | if (urb->status != -EPIPE | ||
198 | && urb->dev->tt && !usb_pipeint (urb->pipe) | ||
199 | && ((token & QTD_STS_MMF) != 0 | ||
200 | || QTD_CERR(token) == 0) | ||
201 | && (!ehci_is_TDI(ehci) | ||
202 | || urb->dev->tt->hub != | ||
203 | ehci_to_hcd(ehci)->self.root_hub)) { | ||
204 | #ifdef DEBUG | ||
205 | struct usb_device *tt = urb->dev->tt->hub; | ||
206 | dev_dbg (&tt->dev, | ||
207 | "clear tt buffer port %d, a%d ep%d t%08x\n", | ||
208 | urb->dev->ttport, urb->dev->devnum, | ||
209 | usb_pipeendpoint (urb->pipe), token); | ||
210 | #endif /* DEBUG */ | ||
211 | usb_hub_tt_clear_buffer (urb->dev, urb->pipe); | ||
212 | } | ||
213 | } | ||
214 | } | ||
215 | |||
216 | static void | ||
217 | ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb, struct pt_regs *regs) | ||
218 | __releases(ehci->lock) | ||
219 | __acquires(ehci->lock) | ||
220 | { | ||
221 | if (likely (urb->hcpriv != NULL)) { | ||
222 | struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; | ||
223 | |||
224 | /* S-mask in a QH means it's an interrupt urb */ | ||
225 | if ((qh->hw_info2 & __constant_cpu_to_le32 (0x00ff)) != 0) { | ||
226 | |||
227 | /* ... update hc-wide periodic stats (for usbfs) */ | ||
228 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; | ||
229 | } | ||
230 | qh_put (qh); | ||
231 | } | ||
232 | |||
233 | spin_lock (&urb->lock); | ||
234 | urb->hcpriv = NULL; | ||
235 | switch (urb->status) { | ||
236 | case -EINPROGRESS: /* success */ | ||
237 | urb->status = 0; | ||
238 | default: /* fault */ | ||
239 | COUNT (ehci->stats.complete); | ||
240 | break; | ||
241 | case -EREMOTEIO: /* fault or normal */ | ||
242 | if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) | ||
243 | urb->status = 0; | ||
244 | COUNT (ehci->stats.complete); | ||
245 | break; | ||
246 | case -ECONNRESET: /* canceled */ | ||
247 | case -ENOENT: | ||
248 | COUNT (ehci->stats.unlink); | ||
249 | break; | ||
250 | } | ||
251 | spin_unlock (&urb->lock); | ||
252 | |||
253 | #ifdef EHCI_URB_TRACE | ||
254 | ehci_dbg (ehci, | ||
255 | "%s %s urb %p ep%d%s status %d len %d/%d\n", | ||
256 | __FUNCTION__, urb->dev->devpath, urb, | ||
257 | usb_pipeendpoint (urb->pipe), | ||
258 | usb_pipein (urb->pipe) ? "in" : "out", | ||
259 | urb->status, | ||
260 | urb->actual_length, urb->transfer_buffer_length); | ||
261 | #endif | ||
262 | |||
263 | /* complete() can reenter this HCD */ | ||
264 | spin_unlock (&ehci->lock); | ||
265 | usb_hcd_giveback_urb (ehci_to_hcd(ehci), urb, regs); | ||
266 | spin_lock (&ehci->lock); | ||
267 | } | ||
268 | |||
269 | static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); | ||
270 | static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); | ||
271 | |||
272 | static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh); | ||
273 | static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); | ||
274 | |||
275 | /* | ||
276 | * Process and free completed qtds for a qh, returning URBs to drivers. | ||
277 | * Chases up to qh->hw_current. Returns number of completions called, | ||
278 | * indicating how much "real" work we did. | ||
279 | */ | ||
280 | #define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT) | ||
281 | static unsigned | ||
282 | qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs) | ||
283 | { | ||
284 | struct ehci_qtd *last = NULL, *end = qh->dummy; | ||
285 | struct list_head *entry, *tmp; | ||
286 | int stopped; | ||
287 | unsigned count = 0; | ||
288 | int do_status = 0; | ||
289 | u8 state; | ||
290 | |||
291 | if (unlikely (list_empty (&qh->qtd_list))) | ||
292 | return count; | ||
293 | |||
294 | /* completions (or tasks on other cpus) must never clobber HALT | ||
295 | * till we've gone through and cleaned everything up, even when | ||
296 | * they add urbs to this qh's queue or mark them for unlinking. | ||
297 | * | ||
298 | * NOTE: unlinking expects to be done in queue order. | ||
299 | */ | ||
300 | state = qh->qh_state; | ||
301 | qh->qh_state = QH_STATE_COMPLETING; | ||
302 | stopped = (state == QH_STATE_IDLE); | ||
303 | |||
304 | /* remove de-activated QTDs from front of queue. | ||
305 | * after faults (including short reads), cleanup this urb | ||
306 | * then let the queue advance. | ||
307 | * if queue is stopped, handles unlinks. | ||
308 | */ | ||
309 | list_for_each_safe (entry, tmp, &qh->qtd_list) { | ||
310 | struct ehci_qtd *qtd; | ||
311 | struct urb *urb; | ||
312 | u32 token = 0; | ||
313 | |||
314 | qtd = list_entry (entry, struct ehci_qtd, qtd_list); | ||
315 | urb = qtd->urb; | ||
316 | |||
317 | /* clean up any state from previous QTD ...*/ | ||
318 | if (last) { | ||
319 | if (likely (last->urb != urb)) { | ||
320 | ehci_urb_done (ehci, last->urb, regs); | ||
321 | count++; | ||
322 | } | ||
323 | ehci_qtd_free (ehci, last); | ||
324 | last = NULL; | ||
325 | } | ||
326 | |||
327 | /* ignore urbs submitted during completions we reported */ | ||
328 | if (qtd == end) | ||
329 | break; | ||
330 | |||
331 | /* hardware copies qtd out of qh overlay */ | ||
332 | rmb (); | ||
333 | token = le32_to_cpu (qtd->hw_token); | ||
334 | |||
335 | /* always clean up qtds the hc de-activated */ | ||
336 | if ((token & QTD_STS_ACTIVE) == 0) { | ||
337 | |||
338 | if ((token & QTD_STS_HALT) != 0) { | ||
339 | stopped = 1; | ||
340 | |||
341 | /* magic dummy for some short reads; qh won't advance. | ||
342 | * that silicon quirk can kick in with this dummy too. | ||
343 | */ | ||
344 | } else if (IS_SHORT_READ (token) | ||
345 | && !(qtd->hw_alt_next & EHCI_LIST_END)) { | ||
346 | stopped = 1; | ||
347 | goto halt; | ||
348 | } | ||
349 | |||
350 | /* stop scanning when we reach qtds the hc is using */ | ||
351 | } else if (likely (!stopped | ||
352 | && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) { | ||
353 | break; | ||
354 | |||
355 | } else { | ||
356 | stopped = 1; | ||
357 | |||
358 | if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) | ||
359 | urb->status = -ESHUTDOWN; | ||
360 | |||
361 | /* ignore active urbs unless some previous qtd | ||
362 | * for the urb faulted (including short read) or | ||
363 | * its urb was canceled. we may patch qh or qtds. | ||
364 | */ | ||
365 | if (likely (urb->status == -EINPROGRESS)) | ||
366 | continue; | ||
367 | |||
368 | /* issue status after short control reads */ | ||
369 | if (unlikely (do_status != 0) | ||
370 | && QTD_PID (token) == 0 /* OUT */) { | ||
371 | do_status = 0; | ||
372 | continue; | ||
373 | } | ||
374 | |||
375 | /* token in overlay may be most current */ | ||
376 | if (state == QH_STATE_IDLE | ||
377 | && cpu_to_le32 (qtd->qtd_dma) | ||
378 | == qh->hw_current) | ||
379 | token = le32_to_cpu (qh->hw_token); | ||
380 | |||
381 | /* force halt for unlinked or blocked qh, so we'll | ||
382 | * patch the qh later and so that completions can't | ||
383 | * activate it while we "know" it's stopped. | ||
384 | */ | ||
385 | if ((HALT_BIT & qh->hw_token) == 0) { | ||
386 | halt: | ||
387 | qh->hw_token |= HALT_BIT; | ||
388 | wmb (); | ||
389 | } | ||
390 | } | ||
391 | |||
392 | /* remove it from the queue */ | ||
393 | spin_lock (&urb->lock); | ||
394 | qtd_copy_status (ehci, urb, qtd->length, token); | ||
395 | do_status = (urb->status == -EREMOTEIO) | ||
396 | && usb_pipecontrol (urb->pipe); | ||
397 | spin_unlock (&urb->lock); | ||
398 | |||
399 | if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { | ||
400 | last = list_entry (qtd->qtd_list.prev, | ||
401 | struct ehci_qtd, qtd_list); | ||
402 | last->hw_next = qtd->hw_next; | ||
403 | } | ||
404 | list_del (&qtd->qtd_list); | ||
405 | last = qtd; | ||
406 | } | ||
407 | |||
408 | /* last urb's completion might still need calling */ | ||
409 | if (likely (last != NULL)) { | ||
410 | ehci_urb_done (ehci, last->urb, regs); | ||
411 | count++; | ||
412 | ehci_qtd_free (ehci, last); | ||
413 | } | ||
414 | |||
415 | /* restore original state; caller must unlink or relink */ | ||
416 | qh->qh_state = state; | ||
417 | |||
418 | /* be sure the hardware's done with the qh before refreshing | ||
419 | * it after fault cleanup, or recovering from silicon wrongly | ||
420 | * overlaying the dummy qtd (which reduces DMA chatter). | ||
421 | */ | ||
422 | if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) { | ||
423 | switch (state) { | ||
424 | case QH_STATE_IDLE: | ||
425 | qh_refresh(ehci, qh); | ||
426 | break; | ||
427 | case QH_STATE_LINKED: | ||
428 | /* should be rare for periodic transfers, | ||
429 | * except maybe high bandwidth ... | ||
430 | */ | ||
431 | if (qh->period) { | ||
432 | intr_deschedule (ehci, qh); | ||
433 | (void) qh_schedule (ehci, qh); | ||
434 | } else | ||
435 | unlink_async (ehci, qh); | ||
436 | break; | ||
437 | /* otherwise, unlink already started */ | ||
438 | } | ||
439 | } | ||
440 | |||
441 | return count; | ||
442 | } | ||
443 | |||
444 | /*-------------------------------------------------------------------------*/ | ||
445 | |||
446 | // high bandwidth multiplier, as encoded in highspeed endpoint descriptors | ||
447 | #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) | ||
448 | // ... and packet size, for any kind of endpoint descriptor | ||
449 | #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) | ||
450 | |||
451 | /* | ||
452 | * reverse of qh_urb_transaction: free a list of TDs. | ||
453 | * used for cleanup after errors, before HC sees an URB's TDs. | ||
454 | */ | ||
455 | static void qtd_list_free ( | ||
456 | struct ehci_hcd *ehci, | ||
457 | struct urb *urb, | ||
458 | struct list_head *qtd_list | ||
459 | ) { | ||
460 | struct list_head *entry, *temp; | ||
461 | |||
462 | list_for_each_safe (entry, temp, qtd_list) { | ||
463 | struct ehci_qtd *qtd; | ||
464 | |||
465 | qtd = list_entry (entry, struct ehci_qtd, qtd_list); | ||
466 | list_del (&qtd->qtd_list); | ||
467 | ehci_qtd_free (ehci, qtd); | ||
468 | } | ||
469 | } | ||
470 | |||
471 | /* | ||
472 | * create a list of filled qtds for this URB; won't link into qh. | ||
473 | */ | ||
474 | static struct list_head * | ||
475 | qh_urb_transaction ( | ||
476 | struct ehci_hcd *ehci, | ||
477 | struct urb *urb, | ||
478 | struct list_head *head, | ||
479 | int flags | ||
480 | ) { | ||
481 | struct ehci_qtd *qtd, *qtd_prev; | ||
482 | dma_addr_t buf; | ||
483 | int len, maxpacket; | ||
484 | int is_input; | ||
485 | u32 token; | ||
486 | |||
487 | /* | ||
488 | * URBs map to sequences of QTDs: one logical transaction | ||
489 | */ | ||
490 | qtd = ehci_qtd_alloc (ehci, flags); | ||
491 | if (unlikely (!qtd)) | ||
492 | return NULL; | ||
493 | list_add_tail (&qtd->qtd_list, head); | ||
494 | qtd->urb = urb; | ||
495 | |||
496 | token = QTD_STS_ACTIVE; | ||
497 | token |= (EHCI_TUNE_CERR << 10); | ||
498 | /* for split transactions, SplitXState initialized to zero */ | ||
499 | |||
500 | len = urb->transfer_buffer_length; | ||
501 | is_input = usb_pipein (urb->pipe); | ||
502 | if (usb_pipecontrol (urb->pipe)) { | ||
503 | /* SETUP pid */ | ||
504 | qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest), | ||
505 | token | (2 /* "setup" */ << 8), 8); | ||
506 | |||
507 | /* ... and always at least one more pid */ | ||
508 | token ^= QTD_TOGGLE; | ||
509 | qtd_prev = qtd; | ||
510 | qtd = ehci_qtd_alloc (ehci, flags); | ||
511 | if (unlikely (!qtd)) | ||
512 | goto cleanup; | ||
513 | qtd->urb = urb; | ||
514 | qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma); | ||
515 | list_add_tail (&qtd->qtd_list, head); | ||
516 | } | ||
517 | |||
518 | /* | ||
519 | * data transfer stage: buffer setup | ||
520 | */ | ||
521 | if (likely (len > 0)) | ||
522 | buf = urb->transfer_dma; | ||
523 | else | ||
524 | buf = 0; | ||
525 | |||
526 | /* for zero length DATA stages, STATUS is always IN */ | ||
527 | if (!buf || is_input) | ||
528 | token |= (1 /* "in" */ << 8); | ||
529 | /* else it's already initted to "out" pid (0 << 8) */ | ||
530 | |||
531 | maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); | ||
532 | |||
533 | /* | ||
534 | * buffer gets wrapped in one or more qtds; | ||
535 | * last one may be "short" (including zero len) | ||
536 | * and may serve as a control status ack | ||
537 | */ | ||
538 | for (;;) { | ||
539 | int this_qtd_len; | ||
540 | |||
541 | this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket); | ||
542 | len -= this_qtd_len; | ||
543 | buf += this_qtd_len; | ||
544 | if (is_input) | ||
545 | qtd->hw_alt_next = ehci->async->hw_alt_next; | ||
546 | |||
547 | /* qh makes control packets use qtd toggle; maybe switch it */ | ||
548 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) | ||
549 | token ^= QTD_TOGGLE; | ||
550 | |||
551 | if (likely (len <= 0)) | ||
552 | break; | ||
553 | |||
554 | qtd_prev = qtd; | ||
555 | qtd = ehci_qtd_alloc (ehci, flags); | ||
556 | if (unlikely (!qtd)) | ||
557 | goto cleanup; | ||
558 | qtd->urb = urb; | ||
559 | qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma); | ||
560 | list_add_tail (&qtd->qtd_list, head); | ||
561 | } | ||
562 | |||
563 | /* unless the bulk/interrupt caller wants a chance to clean | ||
564 | * up after short reads, hc should advance qh past this urb | ||
565 | */ | ||
566 | if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 | ||
567 | || usb_pipecontrol (urb->pipe))) | ||
568 | qtd->hw_alt_next = EHCI_LIST_END; | ||
569 | |||
570 | /* | ||
571 | * control requests may need a terminating data "status" ack; | ||
572 | * bulk ones may need a terminating short packet (zero length). | ||
573 | */ | ||
574 | if (likely (buf != 0)) { | ||
575 | int one_more = 0; | ||
576 | |||
577 | if (usb_pipecontrol (urb->pipe)) { | ||
578 | one_more = 1; | ||
579 | token ^= 0x0100; /* "in" <--> "out" */ | ||
580 | token |= QTD_TOGGLE; /* force DATA1 */ | ||
581 | } else if (usb_pipebulk (urb->pipe) | ||
582 | && (urb->transfer_flags & URB_ZERO_PACKET) | ||
583 | && !(urb->transfer_buffer_length % maxpacket)) { | ||
584 | one_more = 1; | ||
585 | } | ||
586 | if (one_more) { | ||
587 | qtd_prev = qtd; | ||
588 | qtd = ehci_qtd_alloc (ehci, flags); | ||
589 | if (unlikely (!qtd)) | ||
590 | goto cleanup; | ||
591 | qtd->urb = urb; | ||
592 | qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma); | ||
593 | list_add_tail (&qtd->qtd_list, head); | ||
594 | |||
595 | /* never any data in such packets */ | ||
596 | qtd_fill (qtd, 0, 0, token, 0); | ||
597 | } | ||
598 | } | ||
599 | |||
600 | /* by default, enable interrupt on urb completion */ | ||
601 | if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) | ||
602 | qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC); | ||
603 | return head; | ||
604 | |||
605 | cleanup: | ||
606 | qtd_list_free (ehci, urb, head); | ||
607 | return NULL; | ||
608 | } | ||
609 | |||
610 | /*-------------------------------------------------------------------------*/ | ||
611 | |||
612 | // Would be best to create all qh's from config descriptors, | ||
613 | // when each interface/altsetting is established. Unlink | ||
614 | // any previous qh and cancel its urbs first; endpoints are | ||
615 | // implicitly reset then (data toggle too). | ||
616 | // That'd mean updating how usbcore talks to HCDs. (2.7?) | ||
617 | |||
618 | |||
619 | /* | ||
620 | * Each QH holds a qtd list; a QH is used for everything except iso. | ||
621 | * | ||
622 | * For interrupt urbs, the scheduler must set the microframe scheduling | ||
623 | * mask(s) each time the QH gets scheduled. For highspeed, that's | ||
624 | * just one microframe in the s-mask. For split interrupt transactions | ||
625 | * there are additional complications: c-mask, maybe FSTNs. | ||
626 | */ | ||
627 | static struct ehci_qh * | ||
628 | qh_make ( | ||
629 | struct ehci_hcd *ehci, | ||
630 | struct urb *urb, | ||
631 | int flags | ||
632 | ) { | ||
633 | struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); | ||
634 | u32 info1 = 0, info2 = 0; | ||
635 | int is_input, type; | ||
636 | int maxp = 0; | ||
637 | |||
638 | if (!qh) | ||
639 | return qh; | ||
640 | |||
641 | /* | ||
642 | * init endpoint/device data for this QH | ||
643 | */ | ||
644 | info1 |= usb_pipeendpoint (urb->pipe) << 8; | ||
645 | info1 |= usb_pipedevice (urb->pipe) << 0; | ||
646 | |||
647 | is_input = usb_pipein (urb->pipe); | ||
648 | type = usb_pipetype (urb->pipe); | ||
649 | maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input); | ||
650 | |||
651 | /* Compute interrupt scheduling parameters just once, and save. | ||
652 | * - allowing for high bandwidth, how many nsec/uframe are used? | ||
653 | * - split transactions need a second CSPLIT uframe; same question | ||
654 | * - splits also need a schedule gap (for full/low speed I/O) | ||
655 | * - qh has a polling interval | ||
656 | * | ||
657 | * For control/bulk requests, the HC or TT handles these. | ||
658 | */ | ||
659 | if (type == PIPE_INTERRUPT) { | ||
660 | qh->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0, | ||
661 | hb_mult (maxp) * max_packet (maxp)); | ||
662 | qh->start = NO_FRAME; | ||
663 | |||
664 | if (urb->dev->speed == USB_SPEED_HIGH) { | ||
665 | qh->c_usecs = 0; | ||
666 | qh->gap_uf = 0; | ||
667 | |||
668 | qh->period = urb->interval >> 3; | ||
669 | if (qh->period == 0 && urb->interval != 1) { | ||
670 | /* NOTE interval 2 or 4 uframes could work. | ||
671 | * But interval 1 scheduling is simpler, and | ||
672 | * includes high bandwidth. | ||
673 | */ | ||
674 | dbg ("intr period %d uframes, NYET!", | ||
675 | urb->interval); | ||
676 | goto done; | ||
677 | } | ||
678 | } else { | ||
679 | /* gap is f(FS/LS transfer times) */ | ||
680 | qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, | ||
681 | is_input, 0, maxp) / (125 * 1000); | ||
682 | |||
683 | /* FIXME this just approximates SPLIT/CSPLIT times */ | ||
684 | if (is_input) { // SPLIT, gap, CSPLIT+DATA | ||
685 | qh->c_usecs = qh->usecs + HS_USECS (0); | ||
686 | qh->usecs = HS_USECS (1); | ||
687 | } else { // SPLIT+DATA, gap, CSPLIT | ||
688 | qh->usecs += HS_USECS (1); | ||
689 | qh->c_usecs = HS_USECS (0); | ||
690 | } | ||
691 | |||
692 | qh->period = urb->interval; | ||
693 | } | ||
694 | } | ||
695 | |||
696 | /* support for tt scheduling, and access to toggles */ | ||
697 | qh->dev = usb_get_dev (urb->dev); | ||
698 | |||
699 | /* using TT? */ | ||
700 | switch (urb->dev->speed) { | ||
701 | case USB_SPEED_LOW: | ||
702 | info1 |= (1 << 12); /* EPS "low" */ | ||
703 | /* FALL THROUGH */ | ||
704 | |||
705 | case USB_SPEED_FULL: | ||
706 | /* EPS 0 means "full" */ | ||
707 | if (type != PIPE_INTERRUPT) | ||
708 | info1 |= (EHCI_TUNE_RL_TT << 28); | ||
709 | if (type == PIPE_CONTROL) { | ||
710 | info1 |= (1 << 27); /* for TT */ | ||
711 | info1 |= 1 << 14; /* toggle from qtd */ | ||
712 | } | ||
713 | info1 |= maxp << 16; | ||
714 | |||
715 | info2 |= (EHCI_TUNE_MULT_TT << 30); | ||
716 | info2 |= urb->dev->ttport << 23; | ||
717 | |||
718 | /* set the address of the TT; for TDI's integrated | ||
719 | * root hub tt, leave it zeroed. | ||
720 | */ | ||
721 | if (!ehci_is_TDI(ehci) | ||
722 | || urb->dev->tt->hub != | ||
723 | ehci_to_hcd(ehci)->self.root_hub) | ||
724 | info2 |= urb->dev->tt->hub->devnum << 16; | ||
725 | |||
726 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ | ||
727 | |||
728 | break; | ||
729 | |||
730 | case USB_SPEED_HIGH: /* no TT involved */ | ||
731 | info1 |= (2 << 12); /* EPS "high" */ | ||
732 | if (type == PIPE_CONTROL) { | ||
733 | info1 |= (EHCI_TUNE_RL_HS << 28); | ||
734 | info1 |= 64 << 16; /* usb2 fixed maxpacket */ | ||
735 | info1 |= 1 << 14; /* toggle from qtd */ | ||
736 | info2 |= (EHCI_TUNE_MULT_HS << 30); | ||
737 | } else if (type == PIPE_BULK) { | ||
738 | info1 |= (EHCI_TUNE_RL_HS << 28); | ||
739 | info1 |= 512 << 16; /* usb2 fixed maxpacket */ | ||
740 | info2 |= (EHCI_TUNE_MULT_HS << 30); | ||
741 | } else { /* PIPE_INTERRUPT */ | ||
742 | info1 |= max_packet (maxp) << 16; | ||
743 | info2 |= hb_mult (maxp) << 30; | ||
744 | } | ||
745 | break; | ||
746 | default: | ||
747 | dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed); | ||
748 | done: | ||
749 | qh_put (qh); | ||
750 | return NULL; | ||
751 | } | ||
752 | |||
753 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ | ||
754 | |||
755 | /* init as live, toggle clear, advance to dummy */ | ||
756 | qh->qh_state = QH_STATE_IDLE; | ||
757 | qh->hw_info1 = cpu_to_le32 (info1); | ||
758 | qh->hw_info2 = cpu_to_le32 (info2); | ||
759 | usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); | ||
760 | qh_refresh (ehci, qh); | ||
761 | return qh; | ||
762 | } | ||
763 | |||
764 | /*-------------------------------------------------------------------------*/ | ||
765 | |||
766 | /* move qh (and its qtds) onto async queue; maybe enable queue. */ | ||
767 | |||
768 | static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | ||
769 | { | ||
770 | __le32 dma = QH_NEXT (qh->qh_dma); | ||
771 | struct ehci_qh *head; | ||
772 | |||
773 | /* (re)start the async schedule? */ | ||
774 | head = ehci->async; | ||
775 | timer_action_done (ehci, TIMER_ASYNC_OFF); | ||
776 | if (!head->qh_next.qh) { | ||
777 | u32 cmd = readl (&ehci->regs->command); | ||
778 | |||
779 | if (!(cmd & CMD_ASE)) { | ||
780 | /* in case a clear of CMD_ASE didn't take yet */ | ||
781 | (void) handshake (&ehci->regs->status, STS_ASS, 0, 150); | ||
782 | cmd |= CMD_ASE | CMD_RUN; | ||
783 | writel (cmd, &ehci->regs->command); | ||
784 | ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; | ||
785 | /* posted write need not be known to HC yet ... */ | ||
786 | } | ||
787 | } | ||
788 | |||
789 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ | ||
790 | if (qh->qh_state == QH_STATE_IDLE) | ||
791 | qh_refresh (ehci, qh); | ||
792 | |||
793 | /* splice right after start */ | ||
794 | qh->qh_next = head->qh_next; | ||
795 | qh->hw_next = head->hw_next; | ||
796 | wmb (); | ||
797 | |||
798 | head->qh_next.qh = qh; | ||
799 | head->hw_next = dma; | ||
800 | |||
801 | qh->qh_state = QH_STATE_LINKED; | ||
802 | /* qtd completions reported later by interrupt */ | ||
803 | } | ||
804 | |||
805 | /*-------------------------------------------------------------------------*/ | ||
806 | |||
807 | #define QH_ADDR_MASK __constant_cpu_to_le32(0x7f) | ||
808 | |||
809 | /* | ||
810 | * For control/bulk/interrupt, return QH with these TDs appended. | ||
811 | * Allocates and initializes the QH if necessary. | ||
812 | * Returns null if it can't allocate a QH it needs to. | ||
813 | * If the QH has TDs (urbs) already, that's great. | ||
814 | */ | ||
815 | static struct ehci_qh *qh_append_tds ( | ||
816 | struct ehci_hcd *ehci, | ||
817 | struct urb *urb, | ||
818 | struct list_head *qtd_list, | ||
819 | int epnum, | ||
820 | void **ptr | ||
821 | ) | ||
822 | { | ||
823 | struct ehci_qh *qh = NULL; | ||
824 | |||
825 | qh = (struct ehci_qh *) *ptr; | ||
826 | if (unlikely (qh == NULL)) { | ||
827 | /* can't sleep here, we have ehci->lock... */ | ||
828 | qh = qh_make (ehci, urb, GFP_ATOMIC); | ||
829 | *ptr = qh; | ||
830 | } | ||
831 | if (likely (qh != NULL)) { | ||
832 | struct ehci_qtd *qtd; | ||
833 | |||
834 | if (unlikely (list_empty (qtd_list))) | ||
835 | qtd = NULL; | ||
836 | else | ||
837 | qtd = list_entry (qtd_list->next, struct ehci_qtd, | ||
838 | qtd_list); | ||
839 | |||
840 | /* control qh may need patching ... */ | ||
841 | if (unlikely (epnum == 0)) { | ||
842 | |||
843 | /* usb_reset_device() briefly reverts to address 0 */ | ||
844 | if (usb_pipedevice (urb->pipe) == 0) | ||
845 | qh->hw_info1 &= ~QH_ADDR_MASK; | ||
846 | } | ||
847 | |||
848 | /* just one way to queue requests: swap with the dummy qtd. | ||
849 | * only hc or qh_refresh() ever modify the overlay. | ||
850 | */ | ||
851 | if (likely (qtd != NULL)) { | ||
852 | struct ehci_qtd *dummy; | ||
853 | dma_addr_t dma; | ||
854 | __le32 token; | ||
855 | |||
856 | /* to avoid racing the HC, use the dummy td instead of | ||
857 | * the first td of our list (becomes new dummy). both | ||
858 | * tds stay deactivated until we're done, when the | ||
859 | * HC is allowed to fetch the old dummy (4.10.2). | ||
860 | */ | ||
861 | token = qtd->hw_token; | ||
862 | qtd->hw_token = HALT_BIT; | ||
863 | wmb (); | ||
864 | dummy = qh->dummy; | ||
865 | |||
866 | dma = dummy->qtd_dma; | ||
867 | *dummy = *qtd; | ||
868 | dummy->qtd_dma = dma; | ||
869 | |||
870 | list_del (&qtd->qtd_list); | ||
871 | list_add (&dummy->qtd_list, qtd_list); | ||
872 | __list_splice (qtd_list, qh->qtd_list.prev); | ||
873 | |||
874 | ehci_qtd_init (qtd, qtd->qtd_dma); | ||
875 | qh->dummy = qtd; | ||
876 | |||
877 | /* hc must see the new dummy at list end */ | ||
878 | dma = qtd->qtd_dma; | ||
879 | qtd = list_entry (qh->qtd_list.prev, | ||
880 | struct ehci_qtd, qtd_list); | ||
881 | qtd->hw_next = QTD_NEXT (dma); | ||
882 | |||
883 | /* let the hc process these next qtds */ | ||
884 | wmb (); | ||
885 | dummy->hw_token = token; | ||
886 | |||
887 | urb->hcpriv = qh_get (qh); | ||
888 | } | ||
889 | } | ||
890 | return qh; | ||
891 | } | ||
892 | |||
893 | /*-------------------------------------------------------------------------*/ | ||
894 | |||
895 | static int | ||
896 | submit_async ( | ||
897 | struct ehci_hcd *ehci, | ||
898 | struct usb_host_endpoint *ep, | ||
899 | struct urb *urb, | ||
900 | struct list_head *qtd_list, | ||
901 | int mem_flags | ||
902 | ) { | ||
903 | struct ehci_qtd *qtd; | ||
904 | int epnum; | ||
905 | unsigned long flags; | ||
906 | struct ehci_qh *qh = NULL; | ||
907 | |||
908 | qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list); | ||
909 | epnum = ep->desc.bEndpointAddress; | ||
910 | |||
911 | #ifdef EHCI_URB_TRACE | ||
912 | ehci_dbg (ehci, | ||
913 | "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", | ||
914 | __FUNCTION__, urb->dev->devpath, urb, | ||
915 | epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", | ||
916 | urb->transfer_buffer_length, | ||
917 | qtd, ep->hcpriv); | ||
918 | #endif | ||
919 | |||
920 | spin_lock_irqsave (&ehci->lock, flags); | ||
921 | qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv); | ||
922 | |||
923 | /* Control/bulk operations through TTs don't need scheduling, | ||
924 | * the HC and TT handle it when the TT has a buffer ready. | ||
925 | */ | ||
926 | if (likely (qh != NULL)) { | ||
927 | if (likely (qh->qh_state == QH_STATE_IDLE)) | ||
928 | qh_link_async (ehci, qh_get (qh)); | ||
929 | } | ||
930 | spin_unlock_irqrestore (&ehci->lock, flags); | ||
931 | if (unlikely (qh == NULL)) { | ||
932 | qtd_list_free (ehci, urb, qtd_list); | ||
933 | return -ENOMEM; | ||
934 | } | ||
935 | return 0; | ||
936 | } | ||
937 | |||
938 | /*-------------------------------------------------------------------------*/ | ||
939 | |||
940 | /* the async qh for the qtds being reclaimed are now unlinked from the HC */ | ||
941 | |||
942 | static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs) | ||
943 | { | ||
944 | struct ehci_qh *qh = ehci->reclaim; | ||
945 | struct ehci_qh *next; | ||
946 | |||
947 | timer_action_done (ehci, TIMER_IAA_WATCHDOG); | ||
948 | |||
949 | // qh->hw_next = cpu_to_le32 (qh->qh_dma); | ||
950 | qh->qh_state = QH_STATE_IDLE; | ||
951 | qh->qh_next.qh = NULL; | ||
952 | qh_put (qh); // refcount from reclaim | ||
953 | |||
954 | /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */ | ||
955 | next = qh->reclaim; | ||
956 | ehci->reclaim = next; | ||
957 | ehci->reclaim_ready = 0; | ||
958 | qh->reclaim = NULL; | ||
959 | |||
960 | qh_completions (ehci, qh, regs); | ||
961 | |||
962 | if (!list_empty (&qh->qtd_list) | ||
963 | && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) | ||
964 | qh_link_async (ehci, qh); | ||
965 | else { | ||
966 | qh_put (qh); // refcount from async list | ||
967 | |||
968 | /* it's not free to turn the async schedule on/off; leave it | ||
969 | * active but idle for a while once it empties. | ||
970 | */ | ||
971 | if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) | ||
972 | && ehci->async->qh_next.qh == NULL) | ||
973 | timer_action (ehci, TIMER_ASYNC_OFF); | ||
974 | } | ||
975 | |||
976 | if (next) { | ||
977 | ehci->reclaim = NULL; | ||
978 | start_unlink_async (ehci, next); | ||
979 | } | ||
980 | } | ||
981 | |||
982 | /* makes sure the async qh will become idle */ | ||
983 | /* caller must own ehci->lock */ | ||
984 | |||
985 | static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | ||
986 | { | ||
987 | int cmd = readl (&ehci->regs->command); | ||
988 | struct ehci_qh *prev; | ||
989 | |||
990 | #ifdef DEBUG | ||
991 | assert_spin_locked(&ehci->lock); | ||
992 | if (ehci->reclaim | ||
993 | || (qh->qh_state != QH_STATE_LINKED | ||
994 | && qh->qh_state != QH_STATE_UNLINK_WAIT) | ||
995 | ) | ||
996 | BUG (); | ||
997 | #endif | ||
998 | |||
999 | /* stop async schedule right now? */ | ||
1000 | if (unlikely (qh == ehci->async)) { | ||
1001 | /* can't get here without STS_ASS set */ | ||
1002 | if (ehci_to_hcd(ehci)->state != HC_STATE_HALT) { | ||
1003 | writel (cmd & ~CMD_ASE, &ehci->regs->command); | ||
1004 | wmb (); | ||
1005 | // handshake later, if we need to | ||
1006 | } | ||
1007 | timer_action_done (ehci, TIMER_ASYNC_OFF); | ||
1008 | return; | ||
1009 | } | ||
1010 | |||
1011 | qh->qh_state = QH_STATE_UNLINK; | ||
1012 | ehci->reclaim = qh = qh_get (qh); | ||
1013 | |||
1014 | prev = ehci->async; | ||
1015 | while (prev->qh_next.qh != qh) | ||
1016 | prev = prev->qh_next.qh; | ||
1017 | |||
1018 | prev->hw_next = qh->hw_next; | ||
1019 | prev->qh_next = qh->qh_next; | ||
1020 | wmb (); | ||
1021 | |||
1022 | if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) { | ||
1023 | /* if (unlikely (qh->reclaim != 0)) | ||
1024 | * this will recurse, probably not much | ||
1025 | */ | ||
1026 | end_unlink_async (ehci, NULL); | ||
1027 | return; | ||
1028 | } | ||
1029 | |||
1030 | ehci->reclaim_ready = 0; | ||
1031 | cmd |= CMD_IAAD; | ||
1032 | writel (cmd, &ehci->regs->command); | ||
1033 | (void) readl (&ehci->regs->command); | ||
1034 | timer_action (ehci, TIMER_IAA_WATCHDOG); | ||
1035 | } | ||
1036 | |||
1037 | /*-------------------------------------------------------------------------*/ | ||
1038 | |||
1039 | static void | ||
1040 | scan_async (struct ehci_hcd *ehci, struct pt_regs *regs) | ||
1041 | { | ||
1042 | struct ehci_qh *qh; | ||
1043 | enum ehci_timer_action action = TIMER_IO_WATCHDOG; | ||
1044 | |||
1045 | if (!++(ehci->stamp)) | ||
1046 | ehci->stamp++; | ||
1047 | timer_action_done (ehci, TIMER_ASYNC_SHRINK); | ||
1048 | rescan: | ||
1049 | qh = ehci->async->qh_next.qh; | ||
1050 | if (likely (qh != NULL)) { | ||
1051 | do { | ||
1052 | /* clean any finished work for this qh */ | ||
1053 | if (!list_empty (&qh->qtd_list) | ||
1054 | && qh->stamp != ehci->stamp) { | ||
1055 | int temp; | ||
1056 | |||
1057 | /* unlinks could happen here; completion | ||
1058 | * reporting drops the lock. rescan using | ||
1059 | * the latest schedule, but don't rescan | ||
1060 | * qhs we already finished (no looping). | ||
1061 | */ | ||
1062 | qh = qh_get (qh); | ||
1063 | qh->stamp = ehci->stamp; | ||
1064 | temp = qh_completions (ehci, qh, regs); | ||
1065 | qh_put (qh); | ||
1066 | if (temp != 0) { | ||
1067 | goto rescan; | ||
1068 | } | ||
1069 | } | ||
1070 | |||
1071 | /* unlink idle entries, reducing HC PCI usage as well | ||
1072 | * as HCD schedule-scanning costs. delay for any qh | ||
1073 | * we just scanned, there's a not-unusual case that it | ||
1074 | * doesn't stay idle for long. | ||
1075 | * (plus, avoids some kind of re-activation race.) | ||
1076 | */ | ||
1077 | if (list_empty (&qh->qtd_list)) { | ||
1078 | if (qh->stamp == ehci->stamp) | ||
1079 | action = TIMER_ASYNC_SHRINK; | ||
1080 | else if (!ehci->reclaim | ||
1081 | && qh->qh_state == QH_STATE_LINKED) | ||
1082 | start_unlink_async (ehci, qh); | ||
1083 | } | ||
1084 | |||
1085 | qh = qh->qh_next.qh; | ||
1086 | } while (qh); | ||
1087 | } | ||
1088 | if (action == TIMER_ASYNC_SHRINK) | ||
1089 | timer_action (ehci, TIMER_ASYNC_SHRINK); | ||
1090 | } | ||