author      Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer   Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit      1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree        0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/usb/host/ohci-q.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/usb/host/ohci-q.c')
-rw-r--r--   drivers/usb/host/ohci-q.c   1107
1 file changed, 1107 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
new file mode 100644
index 000000000000..c90114a77277
--- /dev/null
+++ b/drivers/usb/host/ohci-q.c
@@ -0,0 +1,1107 @@
/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
        int last = urb_priv->length - 1;

        if (last >= 0) {
                int i;
                struct td *td;

                for (i = 0; i <= last; i++) {
                        td = urb_priv->td [i];
                        if (td)
                                td_free (hc, td);
                }
        }

        list_del (&urb_priv->pending);
        kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ohci lock held, irqs blocked.
 */
static void
finish_urb (struct ohci_hcd *ohci, struct urb *urb, struct pt_regs *regs)
__releases(ohci->lock)
__acquires(ohci->lock)
{
        // ASSERT (urb->hcpriv != 0);

        urb_free_priv (ohci, urb->hcpriv);
        urb->hcpriv = NULL;

        spin_lock (&urb->lock);
        if (likely (urb->status == -EINPROGRESS))
                urb->status = 0;
        /* report short control reads right even though the data TD always
         * has TD_R set.  (much simpler, but creates the 1-td limit.)
         */
        if (unlikely (urb->transfer_flags & URB_SHORT_NOT_OK)
                        && unlikely (usb_pipecontrol (urb->pipe))
                        && urb->actual_length < urb->transfer_buffer_length
                        && usb_pipein (urb->pipe)
                        && urb->status == 0) {
                urb->status = -EREMOTEIO;
        }
        spin_unlock (&urb->lock);

        switch (usb_pipetype (urb->pipe)) {
        case PIPE_ISOCHRONOUS:
                ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
                break;
        case PIPE_INTERRUPT:
                ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
                break;
        }

#ifdef OHCI_VERBOSE_DEBUG
        urb_print (urb, "RET", usb_pipeout (urb->pipe));
#endif

        /* urb->complete() can reenter this HCD */
        spin_unlock (&ohci->lock);
        usb_hcd_giveback_urb (ohci_to_hcd(ohci), urb, regs);
        spin_lock (&ohci->lock);

        /* stop periodic dma if it's not needed */
        if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
                        && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
                ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
                ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
        }
}
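
/* (Completion-callback pattern note: urb->complete() may resubmit URBs and
 * reenter this HCD, so the ohci lock is dropped around
 * usb_hcd_giveback_urb() above; the __releases/__acquires annotations
 * document that unlock/relock for sparse.)
 */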

/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
        int i, branch = -ENOSPC;

        /* iso periods can be huge; iso tds specify frame numbers */
        if (interval > NUM_INTS)
                interval = NUM_INTS;

        /* search for the least loaded schedule branch of that period
         * that has enough bandwidth left unreserved.
         */
        for (i = 0; i < interval ; i++) {
                if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
#if 1   /* CONFIG_USB_BANDWIDTH */
                        int j;

                        /* usb 1.1 says 90% of one frame */
                        for (j = i; j < NUM_INTS; j += interval) {
                                if ((ohci->load [j] + load) > 900)
                                        break;
                        }
                        if (j < NUM_INTS)
                                continue;
#endif
                        branch = i;
                }
        }
        return branch;
}
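
/* Worked example (illustrative): with interval 8 and NUM_INTS 32, branch i
 * reserves slots i, i+8, i+16, i+24.  A 100us load fits branch i only if
 * each of those four slots stays at or below 900us -- the usb 1.1 limit of
 * 90% of a 1ms frame checked above -- and among the branches that fit,
 * the least-loaded one wins.
 */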

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
        unsigned i;

        ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
                (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);

        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed **prev = &ohci->periodic [i];
                __hc32 *prev_p = &ohci->hcca->int_table [i];
                struct ed *here = *prev;

                /* sorting each branch by period (slow before fast)
                 * lets us share the faster parts of the tree.
                 * (plus maybe: put interrupt eds before iso)
                 */
                while (here && ed != here) {
                        if (ed->interval > here->interval)
                                break;
                        prev = &here->ed_next;
                        prev_p = &here->hwNextED;
                        here = *prev;
                }
                if (ed != here) {
                        ed->ed_next = here;
                        if (here)
                                ed->hwNextED = *prev_p;
                        wmb ();
                        *prev = ed;
                        *prev_p = cpu_to_hc32(ohci, ed->dma);
                        wmb();
                }
                ohci->load [i] += ed->load;
        }
        ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}
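
/* Illustrative consequence of the slow-before-fast sort: an interval-1 ED
 * appears in every one of the NUM_INTS slots, so every slower branch ends
 * by pointing at it; branches share the fast EDs as common tails, forming
 * the tree of figure 3-5 in the OHCI spec that ed_schedule()'s comment
 * mentions.
 */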

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
        int branch;

        if (ohci_to_hcd(ohci)->state == HC_STATE_QUIESCING)
                return -EAGAIN;

        ed->state = ED_OPER;
        ed->ed_prev = NULL;
        ed->ed_next = NULL;
        ed->hwNextED = 0;
        wmb ();

        /* we care about rm_list when setting CLE/BLE in case the HC was at
         * work on some TD when CLE/BLE was turned off, and isn't quiesced
         * yet.  finish_unlinks() restarts as needed, at some upcoming INTR_SF.
         *
         * control and bulk EDs are doubly linked (ed_next, ed_prev), but
         * periodic ones are singly linked (ed_next).  that's because the
         * periodic schedule encodes a tree like figure 3-5 in the ohci
         * spec:  each qh can have several "previous" nodes, and the tree
         * doesn't have unused/idle descriptors.
         */
        switch (ed->type) {
        case PIPE_CONTROL:
                if (ohci->ed_controltail == NULL) {
                        WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
                        ohci_writel (ohci, ed->dma,
                                        &ohci->regs->ed_controlhead);
                } else {
                        ohci->ed_controltail->ed_next = ed;
                        ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
                                        ed->dma);
                }
                ed->ed_prev = ohci->ed_controltail;
                if (!ohci->ed_controltail && !ohci->ed_rm_list) {
                        wmb();
                        ohci->hc_control |= OHCI_CTRL_CLE;
                        ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
                        ohci_writel (ohci, ohci->hc_control,
                                        &ohci->regs->control);
                }
                ohci->ed_controltail = ed;
                break;

        case PIPE_BULK:
                if (ohci->ed_bulktail == NULL) {
                        WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
                        ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
                } else {
                        ohci->ed_bulktail->ed_next = ed;
                        ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
                                        ed->dma);
                }
                ed->ed_prev = ohci->ed_bulktail;
                if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
                        wmb();
                        ohci->hc_control |= OHCI_CTRL_BLE;
                        ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
                        ohci_writel (ohci, ohci->hc_control,
                                        &ohci->regs->control);
                }
                ohci->ed_bulktail = ed;
                break;

        // case PIPE_INTERRUPT:
        // case PIPE_ISOCHRONOUS:
        default:
                branch = balance (ohci, ed->interval, ed->load);
                if (branch < 0) {
                        ohci_dbg (ohci,
                                "ERR %d, interval %d msecs, load %d\n",
                                branch, ed->interval, ed->load);
                        // FIXME if there are TDs queued, fail them!
                        return branch;
                }
                ed->branch = branch;
                periodic_link (ohci, ed);
        }

        /* the HC may not see the schedule updates yet, but if it does
         * then they'll be properly ordered.
         */
        return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
        int i;

        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed *temp;
                struct ed **prev = &ohci->periodic [i];
                __hc32 *prev_p = &ohci->hcca->int_table [i];

                while (*prev && (temp = *prev) != ed) {
                        prev_p = &temp->hwNextED;
                        prev = &temp->ed_next;
                }
                if (*prev) {
                        *prev_p = ed->hwNextED;
                        *prev = ed->ed_next;
                }
                ohci->load [i] -= ed->load;
        }
        ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;

        ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
                (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue.  there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
        ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
        wmb ();
        ed->state = ED_UNLINK;

        /* To deschedule something from the control or bulk list, just
         * clear CLE/BLE and wait.  There's no safe way to scrub out list
         * head/current registers until later, and "later" isn't very
         * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
         * the HC is reading the ED queues (while we modify them).
         *
         * For now, ed_schedule() is "later".  It might be good paranoia
         * to scrub those registers in finish_unlinks(), in case of bugs
         * that make the HC try to use them.
         */
        switch (ed->type) {
        case PIPE_CONTROL:
                /* remove ED from the HC's list: */
                if (ed->ed_prev == NULL) {
                        if (!ed->hwNextED) {
                                ohci->hc_control &= ~OHCI_CTRL_CLE;
                                ohci_writel (ohci, ohci->hc_control,
                                                &ohci->regs->control);
                                // an ohci_readl() later syncs CLE with the HC
                        } else
                                ohci_writel (ohci,
                                        hc32_to_cpup (ohci, &ed->hwNextED),
                                        &ohci->regs->ed_controlhead);
                } else {
                        ed->ed_prev->ed_next = ed->ed_next;
                        ed->ed_prev->hwNextED = ed->hwNextED;
                }
                /* remove ED from the HCD's list: */
                if (ohci->ed_controltail == ed) {
                        ohci->ed_controltail = ed->ed_prev;
                        if (ohci->ed_controltail)
                                ohci->ed_controltail->ed_next = NULL;
                } else if (ed->ed_next) {
                        ed->ed_next->ed_prev = ed->ed_prev;
                }
                break;

        case PIPE_BULK:
                /* remove ED from the HC's list: */
                if (ed->ed_prev == NULL) {
                        if (!ed->hwNextED) {
                                ohci->hc_control &= ~OHCI_CTRL_BLE;
                                ohci_writel (ohci, ohci->hc_control,
                                                &ohci->regs->control);
                                // an ohci_readl() later syncs BLE with the HC
                        } else
                                ohci_writel (ohci,
                                        hc32_to_cpup (ohci, &ed->hwNextED),
                                        &ohci->regs->ed_bulkhead);
                } else {
                        ed->ed_prev->ed_next = ed->ed_next;
                        ed->ed_prev->hwNextED = ed->hwNextED;
                }
                /* remove ED from the HCD's list: */
                if (ohci->ed_bulktail == ed) {
                        ohci->ed_bulktail = ed->ed_prev;
                        if (ohci->ed_bulktail)
                                ohci->ed_bulktail->ed_next = NULL;
                } else if (ed->ed_next) {
                        ed->ed_next->ed_prev = ed->ed_prev;
                }
                break;

        // case PIPE_INTERRUPT:
        // case PIPE_ISOCHRONOUS:
        default:
                periodic_unlink (ohci, ed);
                break;
        }
}


/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint.  init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get (
        struct ohci_hcd *ohci,
        struct usb_host_endpoint *ep,
        struct usb_device *udev,
        unsigned int pipe,
        int interval
) {
        struct ed *ed;
        unsigned long flags;

        spin_lock_irqsave (&ohci->lock, flags);

        if (!(ed = ep->hcpriv)) {
                struct td *td;
                int is_out;
                u32 info;

                ed = ed_alloc (ohci, GFP_ATOMIC);
                if (!ed) {
                        /* out of memory */
                        goto done;
                }

                /* dummy td; end of td list for ed */
                td = td_alloc (ohci, GFP_ATOMIC);
                if (!td) {
                        /* out of memory */
                        ed_free (ohci, ed);
                        ed = NULL;
                        goto done;
                }
                ed->dummy = td;
                ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
                ed->hwHeadP = ed->hwTailP;      /* ED_C, ED_H zeroed */
                ed->state = ED_IDLE;

                is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);

                /* FIXME usbcore changes dev->devnum before SET_ADDRESS
                 * succeeds ... otherwise we wouldn't need "pipe".
                 */
                info = usb_pipedevice (pipe);
                ed->type = usb_pipetype(pipe);

                info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
                info |= le16_to_cpu(ep->desc.wMaxPacketSize) << 16;
                if (udev->speed == USB_SPEED_LOW)
                        info |= ED_LOWSPEED;
                /* only control transfers store pids in tds */
                if (ed->type != PIPE_CONTROL) {
                        info |= is_out ? ED_OUT : ED_IN;
                        if (ed->type != PIPE_BULK) {
                                /* periodic transfers... */
                                if (ed->type == PIPE_ISOCHRONOUS)
                                        info |= ED_ISO;
                                else if (interval > 32) /* iso can be bigger */
                                        interval = 32;
                                ed->interval = interval;
                                ed->load = usb_calc_bus_time (
                                        udev->speed, !is_out,
                                        ed->type == PIPE_ISOCHRONOUS,
                                        le16_to_cpu(ep->desc.wMaxPacketSize))
                                                / 1000;
                        }
                }
                ed->hwINFO = cpu_to_hc32(ohci, info);

                ep->hcpriv = ed;
        }

done:
        spin_unlock_irqrestore (&ohci->lock, flags);
        return ed;
}
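
/* ED dword0 layout recap (per the OHCI spec, for the packing above): the
 * function address lives in bits 0-6, the endpoint number in bits 7-10
 * (hence the << 7), and MaximumPacketSize in bits 16-26 (hence the << 16).
 * E.g. (illustrative) device 3, endpoint 0x81 (IN), maxpacket 64 gives
 * 3 | (0x01 << 7) | (64 << 16) = 0x00400083, plus ED_IN since it is not
 * a control pipe.
 */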

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
        ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
        ed_deschedule (ohci, ed);

        /* rm_list is just singly linked, for simplicity */
        ed->ed_next = ohci->ed_rm_list;
        ed->ed_prev = NULL;
        ohci->ed_rm_list = ed;

        /* enable SOF interrupt */
        ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
        ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
        // flush those writes, and get latest HCCA contents
        (void) ohci_readl (ohci, &ohci->regs->control);

        /* SF interrupt might get delayed; record the frame counter value that
         * indicates when the HC isn't looking at it, so concurrent unlinks
         * behave.  frame_no wraps every 2^16 msec, and changes right before
         * SF is triggered.
         */
        ed->tick = ohci_frame_no(ohci) + 1;
}

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
        dma_addr_t data, int len,
        struct urb *urb, int index)
{
        struct td *td, *td_pt;
        struct urb_priv *urb_priv = urb->hcpriv;
        int is_iso = info & TD_ISO;
        int hash;

        // ASSERT (index < urb_priv->length);

        /* aim for only one interrupt per urb.  mostly applies to control
         * and iso; other urbs rarely need more than one TD per urb.
         * this way, only final tds (or ones with an error) cause IRQs.
         * at least immediately; use DI=6 in case any control request is
         * tempted to die part way through.  (and to force the hc to flush
         * its donelist soonish, even on unlink paths.)
         *
         * NOTE: could delay interrupts even for the last TD, and get fewer
         * interrupts ... increasing per-urb latency by sharing interrupts.
         * Drivers that queue bulk urbs may request that behavior.
         */
        if (index != (urb_priv->length - 1)
                        || (urb->transfer_flags & URB_NO_INTERRUPT))
                info |= TD_DI_SET (6);

        /* use this td as the next dummy */
        td_pt = urb_priv->td [index];

        /* fill the old dummy TD */
        td = urb_priv->td [index] = urb_priv->ed->dummy;
        urb_priv->ed->dummy = td_pt;

        td->ed = urb_priv->ed;
        td->next_dl_td = NULL;
        td->index = index;
        td->urb = urb;
        td->data_dma = data;
        if (!len)
                data = 0;

        td->hwINFO = cpu_to_hc32 (ohci, info);
        if (is_iso) {
                td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
                *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
                                (data & 0x0FFF) | 0xE000);
                td->ed->last_iso = info & 0xffff;
        } else {
                td->hwCBP = cpu_to_hc32 (ohci, data);
        }
        if (data)
                td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
        else
                td->hwBE = 0;
        td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);

        /* append to queue */
        list_add_tail (&td->td_list, &td->ed->td_list);

        /* hash it for later reverse mapping */
        hash = TD_HASH_FUNC (td->td_dma);
        td->td_hash = ohci->td_hash [hash];
        ohci->td_hash [hash] = td;

        /* HC might read the TD (or cachelines) right away ... */
        wmb ();
        td->ed->hwTailP = td->hwNextTD;
}
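
/* The dummy-TD shuffle above is the standard OHCI enqueue trick (spec
 * 5.2.8.2): the TD at hwTailP is never valid work, so the old dummy can be
 * filled in place, chained to a fresh dummy, and only then exposed by
 * advancing hwTailP -- each completed TD appears to the HC atomically,
 * with no need to stop the endpoint while enqueuing.
 */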

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
        struct ohci_hcd *ohci,
        struct urb *urb
) {
        struct urb_priv *urb_priv = urb->hcpriv;
        dma_addr_t data;
        int data_len = urb->transfer_buffer_length;
        int cnt = 0;
        u32 info = 0;
        int is_out = usb_pipeout (urb->pipe);
        int periodic = 0;

        /* OHCI handles the bulk/interrupt data toggles itself.  We just
         * use the device toggle bits for resetting, and rely on the fact
         * that resetting toggle is meaningless if the endpoint is active.
         */
        if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
                usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
                        is_out, 1);
                urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
        }

        urb_priv->td_cnt = 0;
        list_add (&urb_priv->pending, &ohci->pending);

        if (data_len)
                data = urb->transfer_dma;
        else
                data = 0;

        /* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
         * using TD_CC_GET, as well as by seeing them on the done list.
         * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
         */
        switch (urb_priv->ed->type) {

        /* Bulk and interrupt are identical except for where in the schedule
         * their EDs live.
         */
        case PIPE_INTERRUPT:
                /* ... and periodic urbs have extra accounting */
                periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
                        && ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
                /* FALLTHROUGH */
        case PIPE_BULK:
                info = is_out
                        ? TD_T_TOGGLE | TD_CC | TD_DP_OUT
                        : TD_T_TOGGLE | TD_CC | TD_DP_IN;
                /* TDs _could_ transfer up to 8K each */
                while (data_len > 4096) {
                        td_fill (ohci, info, data, 4096, urb, cnt);
                        data += 4096;
                        data_len -= 4096;
                        cnt++;
                }
                /* maybe avoid ED halt on final TD short read */
                if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                        info |= TD_R;
                td_fill (ohci, info, data, data_len, urb, cnt);
                cnt++;
                if ((urb->transfer_flags & URB_ZERO_PACKET)
                                && cnt < urb_priv->length) {
                        td_fill (ohci, info, 0, 0, urb, cnt);
                        cnt++;
                }
                /* maybe kickstart bulk list */
                if (urb_priv->ed->type == PIPE_BULK) {
                        wmb ();
                        ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
                }
                break;

        /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
         * any DATA phase works normally, and the STATUS ack is special.
         */
        case PIPE_CONTROL:
                info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
                td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
                if (data_len > 0) {
                        info = TD_CC | TD_R | TD_T_DATA1;
                        info |= is_out ? TD_DP_OUT : TD_DP_IN;
                        /* NOTE:  mishandles transfers >8K, some >4K */
                        td_fill (ohci, info, data, data_len, urb, cnt++);
                }
                info = (is_out || data_len == 0)
                        ? TD_CC | TD_DP_IN | TD_T_DATA1
                        : TD_CC | TD_DP_OUT | TD_T_DATA1;
                td_fill (ohci, info, data, 0, urb, cnt++);
                /* maybe kickstart control list */
                wmb ();
                ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
                break;

        /* ISO has no retransmit, so no toggle; and it uses special TDs.
         * Each TD could handle multiple consecutive frames (interval 1);
         * we could often reduce the number of TDs here.
         */
        case PIPE_ISOCHRONOUS:
                for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
                        int frame = urb->start_frame;

                        // FIXME scheduling should handle frame counter
                        // roll-around ... exotic case (and OHCI has
                        // a 2^16 iso range, vs other HCs max of 2^10)
                        frame += cnt * urb->interval;
                        frame &= 0xffff;
                        td_fill (ohci, TD_CC | TD_ISO | frame,
                                data + urb->iso_frame_desc [cnt].offset,
                                urb->iso_frame_desc [cnt].length, urb, cnt);
                }
                periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
                        && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
                break;
        }

        /* start periodic dma if needed */
        if (periodic) {
                wmb ();
                ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
                ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
        }

        // ASSERT (urb_priv->length == cnt);
}
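
/* TD-count example (illustrative): an 8192-byte bulk OUT urb becomes two
 * 4096-byte TDs; with URB_ZERO_PACKET set (and 8192 a multiple of the
 * endpoint's maxpacket) a third, zero-length TD follows.  The enqueue path
 * must have sized urb_priv->length for the same split, which the final
 * ASSERT above is checking.
 */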

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION:  irqsafe (only for urb->status locking)
 */
static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
        u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
        int cc = 0;

        list_del (&td->td_list);

        /* ISO ... drivers see per-TD length/status */
        if (tdINFO & TD_ISO) {
                u16 tdPSW = ohci_hwPSW (ohci, td, 0);
                int dlen = 0;

                /* NOTE:  assumes FC in tdINFO == 0, and that
                 * only the first of 0..MAXPSW psws is used.
                 */

                cc = (tdPSW >> 12) & 0xF;
                if (tdINFO & TD_CC)     /* hc didn't touch? */
                        return;

                if (usb_pipeout (urb->pipe))
                        dlen = urb->iso_frame_desc [td->index].length;
                else {
                        /* short reads are always OK for ISO */
                        if (cc == TD_DATAUNDERRUN)
                                cc = TD_CC_NOERROR;
                        dlen = tdPSW & 0x3ff;
                }
                urb->actual_length += dlen;
                urb->iso_frame_desc [td->index].actual_length = dlen;
                urb->iso_frame_desc [td->index].status = cc_to_error [cc];

                if (cc != TD_CC_NOERROR)
                        ohci_vdbg (ohci,
                                "urb %p iso td %p (%d) len %d cc %d\n",
                                urb, td, 1 + td->index, dlen, cc);

        /* BULK, INT, CONTROL ... drivers see aggregate length/status,
         * except that "setup" bytes aren't counted and "short" transfers
         * might not be reported as errors.
         */
        } else {
                int type = usb_pipetype (urb->pipe);
                u32 tdBE = hc32_to_cpup (ohci, &td->hwBE);

                cc = TD_CC_GET (tdINFO);

                /* update packet status if needed (short is normally ok) */
                if (cc == TD_DATAUNDERRUN
                                && !(urb->transfer_flags & URB_SHORT_NOT_OK))
                        cc = TD_CC_NOERROR;
                if (cc != TD_CC_NOERROR && cc < 0x0E) {
                        spin_lock (&urb->lock);
                        if (urb->status == -EINPROGRESS)
                                urb->status = cc_to_error [cc];
                        spin_unlock (&urb->lock);
                }

                /* count all non-empty packets except control SETUP packet */
                if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
                        if (td->hwCBP == 0)
                                urb->actual_length += tdBE - td->data_dma + 1;
                        else
                                urb->actual_length +=
                                        hc32_to_cpup (ohci, &td->hwCBP)
                                        - td->data_dma;
                }

                if (cc != TD_CC_NOERROR && cc < 0x0E)
                        ohci_vdbg (ohci,
                                "urb %p td %p (%d) cc %d, len=%d/%d\n",
                                urb, td, 1 + td->index, cc,
                                urb->actual_length,
                                urb->transfer_buffer_length);
        }
}
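
/* Length math recap: the HC zeroes hwCBP once a TD's whole buffer has
 * moved, so the count is BufferEnd - start + 1; on a short transfer hwCBP
 * is left at the first untransferred byte, so the count is hwCBP - start.
 * E.g. (illustrative) data_dma 0x1000 and hwBE 0x13ff count 0x400 bytes
 * when complete, but only 0x200 if hwCBP stopped at 0x1200.
 */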

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
{
        struct urb *urb = td->urb;
        struct ed *ed = td->ed;
        struct list_head *tmp = td->td_list.next;
        __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

        /* clear ed halt; this is the td that caused it, but keep it inactive
         * until its urb->complete() has a chance to clean up.
         */
        ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
        wmb ();
        ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

        /* put any later tds from this urb onto the donelist, after 'td',
         * order won't matter here: no errors, and nothing was transferred.
         * also patch the ed so it looks as if those tds completed normally.
         */
        while (tmp != &ed->td_list) {
                struct td *next;
                __hc32 info;

                next = list_entry (tmp, struct td, td_list);
                tmp = next->td_list.next;

                if (next->urb != urb)
                        break;

                /* NOTE: if multi-td control DATA segments get supported,
                 * this urb had one of them, this td wasn't the last td
                 * in that segment (TD_R clear), this ed halted because
                 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
                 * then we need to leave the control STATUS packet queued
                 * and clear ED_SKIP.
                 */
                info = next->hwINFO;
                info |= cpu_to_hc32 (ohci, TD_DONE);
                info &= ~cpu_to_hc32 (ohci, TD_CC);
                next->hwINFO = info;

                next->next_dl_td = rev;
                rev = next;

                ed->hwHeadP = next->hwNextTD | toggle;
        }

        /* help for troubleshooting:  report anything that
         * looks odd ... that doesn't include protocol stalls
         * (or maybe some other things)
         */
        switch (cc) {
        case TD_DATAUNDERRUN:
                if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
                        break;
                /* fallthrough */
        case TD_CC_STALL:
                if (usb_pipecontrol (urb->pipe))
                        break;
                /* fallthrough */
        default:
                ohci_dbg (ohci,
                        "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
                        urb, urb->dev->devpath,
                        usb_pipeendpoint (urb->pipe),
                        usb_pipein (urb->pipe) ? "in" : "out",
                        hc32_to_cpu (ohci, td->hwINFO),
                        cc, cc_to_error [cc]);
        }

        return rev;
}

/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
        u32 td_dma;
        struct td *td_rev = NULL;
        struct td *td = NULL;

        td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
        ohci->hcca->done_head = 0;
        wmb();

        /* get TD from hc's singly linked list, and
         * prepend to ours.  ed->td_list changes later.
         */
        while (td_dma) {
                int cc;

                td = dma_to_td (ohci, td_dma);
                if (!td) {
                        ohci_err (ohci, "bad entry %8x\n", td_dma);
                        break;
                }

                td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
                cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

                /* Non-iso endpoints can halt on error; un-halt,
                 * and dequeue any other TDs from this urb.
                 * No other TD could have caused the halt.
                 */
                if (cc != TD_CC_NOERROR
                                && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
                        td_rev = ed_halted (ohci, td, cc, td_rev);

                td->next_dl_td = td_rev;
                td_rev = td;
                td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
        }
        return td_rev;
}
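
/* A minimal standalone sketch of the same prepend-to-reverse idiom, on a
 * hypothetical node type with only a next pointer (the HC hands TDs back
 * newest-first; prepending each onto the accumulator restores FIFO order):
 */
#if 0   /* illustrative only, not part of this driver */
struct node { struct node *next; };

static struct node *reverse (struct node *head)
{
        struct node *rev = NULL;

        while (head) {
                struct node *next = head->next;

                head->next = rev;       /* point this node at the ones seen so far */
                rev = head;             /* it becomes the new front */
                head = next;
        }
        return rev;
}
#endif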

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
{
        struct ed *ed, **last;

rescan_all:
        for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
                struct list_head *entry, *tmp;
                int completed, modified;
                __hc32 *prev;

                /* only take off EDs that the HC isn't using, accounting for
                 * frame counter wraps and EDs with partially retired TDs
                 */
                if (likely (regs && HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
                        if (tick_before (tick, ed->tick)) {
skip_ed:
                                last = &ed->ed_next;
                                continue;
                        }

                        if (!list_empty (&ed->td_list)) {
                                struct td *td;
                                u32 head;

                                td = list_entry (ed->td_list.next, struct td,
                                                td_list);
                                head = hc32_to_cpu (ohci, ed->hwHeadP) &
                                                TD_MASK;

                                /* INTR_WDH may need to clean up first */
                                if (td->td_dma != head)
                                        goto skip_ed;
                        }
                }

                /* reentrancy:  if we drop the schedule lock, someone might
                 * have modified this list.  normally it's just prepending
                 * entries (which we'd ignore), but paranoia won't hurt.
                 */
                *last = ed->ed_next;
                ed->ed_next = NULL;
                modified = 0;

                /* unlink urbs as requested, but rescan the list after
                 * we call a completion since it might have unlinked
                 * another (earlier) urb
                 *
                 * When we get here, the HC doesn't see this ed.  But it
                 * must not be rescheduled until all completed URBs have
                 * been given back to the driver.
                 */
rescan_this:
                completed = 0;
                prev = &ed->hwHeadP;
                list_for_each_safe (entry, tmp, &ed->td_list) {
                        struct td *td;
                        struct urb *urb;
                        urb_priv_t *urb_priv;
                        __hc32 savebits;

                        td = list_entry (entry, struct td, td_list);
                        urb = td->urb;
                        urb_priv = td->urb->hcpriv;

                        if (urb->status == -EINPROGRESS) {
                                prev = &td->hwNextTD;
                                continue;
                        }

                        /* patch pointer hc uses */
                        savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
                        *prev = td->hwNextTD | savebits;

                        /* HC may have partly processed this TD */
                        td_done (ohci, urb, td);
                        urb_priv->td_cnt++;

                        /* if URB is done, clean up */
                        if (urb_priv->td_cnt == urb_priv->length) {
                                modified = completed = 1;
                                finish_urb (ohci, urb, regs);
                        }
                }
                if (completed && !list_empty (&ed->td_list))
                        goto rescan_this;

                /* ED's now officially unlinked, hc doesn't see */
                ed->state = ED_IDLE;
                ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
                ed->hwNextED = 0;
                wmb ();
                ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);

                /* but if there's work queued, reschedule */
                if (!list_empty (&ed->td_list)) {
                        if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
                                ed_schedule (ohci, ed);
                }

                if (modified)
                        goto rescan_all;
        }

        /* maybe reenable control and bulk lists */
        if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
                        && ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
                        && !ohci->ed_rm_list) {
                u32 command = 0, control = 0;

                if (ohci->ed_controltail) {
                        command |= OHCI_CLF;
                        if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
                                control |= OHCI_CTRL_CLE;
                                ohci_writel (ohci, 0,
                                        &ohci->regs->ed_controlcurrent);
                        }
                }
                if (ohci->ed_bulktail) {
                        command |= OHCI_BLF;
                        if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
                                control |= OHCI_CTRL_BLE;
                                ohci_writel (ohci, 0,
                                        &ohci->regs->ed_bulkcurrent);
                        }
                }

                /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
                if (control) {
                        ohci->hc_control |= control;
                        ohci_writel (ohci, ohci->hc_control,
                                        &ohci->regs->control);
                }
                if (command)
                        ohci_writel (ohci, command, &ohci->regs->cmdstatus);
        }
}


/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */
static void
dl_done_list (struct ohci_hcd *ohci, struct pt_regs *regs)
{
        struct td *td = dl_reverse_done_list (ohci);

        while (td) {
                struct td *td_next = td->next_dl_td;
                struct urb *urb = td->urb;
                urb_priv_t *urb_priv = urb->hcpriv;
                struct ed *ed = td->ed;

                /* update URB's length and status from TD */
                td_done (ohci, urb, td);
                urb_priv->td_cnt++;

                /* If all this urb's TDs are done, call complete() */
                if (urb_priv->td_cnt == urb_priv->length)
                        finish_urb (ohci, urb, regs);

                /* clean schedule:  unlink EDs that are no longer busy */
                if (list_empty (&ed->td_list)) {
                        if (ed->state == ED_OPER)
                                start_ed_unlink (ohci, ed);

                /* ... reenabling halted EDs only after fault cleanup */
                } else if ((ed->hwINFO & cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE))
                                        == cpu_to_hc32 (ohci, ED_SKIP)) {
                        td = list_entry (ed->td_list.next, struct td, td_list);
                        if (!(td->hwINFO & cpu_to_hc32 (ohci, TD_DONE))) {
                                ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP);
                                /* ... hc may need waking-up */
                                switch (ed->type) {
                                case PIPE_CONTROL:
                                        ohci_writel (ohci, OHCI_CLF,
                                                &ohci->regs->cmdstatus);
                                        break;
                                case PIPE_BULK:
                                        ohci_writel (ohci, OHCI_BLF,
                                                &ohci->regs->cmdstatus);
                                        break;
                                }
                        }
                }

                td = td_next;
        }
}