about summary refs log tree commit diff stats
path: root/drivers/usb/host/ehci-q.c
diff options
context:
space:
mode:
author: David Brownell <david-b@pacbell.net> 2008-04-10 17:21:06 -0400
committer: Greg Kroah-Hartman <gregkh@suse.de> 2008-04-25 00:16:50 -0400
commit: a082b5c7882bdbd8a86ace8470ca2ecda796d5a7 (patch)
tree: dd5f519ad7caa417363d256f7e572c02a43927f5 /drivers/usb/host/ehci-q.c
parent: 6427f7995338387ddded92f98adec19ddbf0ae5e (diff)
USB: ehci: qh/qtd cleanup comments
Provide better comments about qh_completions() and QTD handling. That code can be *VERY* confusing, since it's evolved over a few years to cope with both hardware races and silicon quirks. Remove two unlikely() annotations that match the GCC defaults (and are thus pointless); add an "else" to highlight code flow. This patch doesn't change driver behavior. Signed-off-by: David Brownell <dbrownell@users.sourceforge.net> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/ehci-q.c')
-rw-r--r--  drivers/usb/host/ehci-q.c  50
1 file changed, 40 insertions, 10 deletions
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index c0e752cffc68..64942ec584c2 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -336,11 +336,20 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
336 /* always clean up qtds the hc de-activated */ 336 /* always clean up qtds the hc de-activated */
337 if ((token & QTD_STS_ACTIVE) == 0) { 337 if ((token & QTD_STS_ACTIVE) == 0) {
338 338
339 /* on STALL, error, and short reads this urb must
340 * complete and all its qtds must be recycled.
341 */
339 if ((token & QTD_STS_HALT) != 0) { 342 if ((token & QTD_STS_HALT) != 0) {
340 stopped = 1; 343 stopped = 1;
341 344
342 /* magic dummy for some short reads; qh won't advance. 345 /* magic dummy for some short reads; qh won't advance.
343 * that silicon quirk can kick in with this dummy too. 346 * that silicon quirk can kick in with this dummy too.
347 *
348 * other short reads won't stop the queue, including
349 * control transfers (status stage handles that) or
350 * most other single-qtd reads ... the queue stops if
351 * URB_SHORT_NOT_OK was set so the driver submitting
352 * the urbs could clean it up.
344 */ 353 */
345 } else if (IS_SHORT_READ (token) 354 } else if (IS_SHORT_READ (token)
346 && !(qtd->hw_alt_next 355 && !(qtd->hw_alt_next
@@ -354,18 +363,18 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
354 && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) { 363 && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
355 break; 364 break;
356 365
366 /* scan the whole queue for unlinks whenever it stops */
357 } else { 367 } else {
358 stopped = 1; 368 stopped = 1;
359 369
360 if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) 370 /* cancel everything if we halt, suspend, etc */
371 if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
361 last_status = -ESHUTDOWN; 372 last_status = -ESHUTDOWN;
362 373
363 /* ignore active urbs unless some previous qtd 374 /* this qtd is active; skip it unless a previous qtd
364 * for the urb faulted (including short read) or 375 * for its urb faulted, or its urb was canceled.
365 * its urb was canceled. we may patch qh or qtds.
366 */ 376 */
367 if (likely(last_status == -EINPROGRESS && 377 else if (last_status == -EINPROGRESS && !urb->unlinked)
368 !urb->unlinked))
369 continue; 378 continue;
370 379
371 /* issue status after short control reads */ 380 /* issue status after short control reads */
@@ -375,7 +384,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
375 continue; 384 continue;
376 } 385 }
377 386
378 /* token in overlay may be most current */ 387 /* qh unlinked; token in overlay may be most current */
379 if (state == QH_STATE_IDLE 388 if (state == QH_STATE_IDLE
380 && cpu_to_hc32(ehci, qtd->qtd_dma) 389 && cpu_to_hc32(ehci, qtd->qtd_dma)
381 == qh->hw_current) 390 == qh->hw_current)
@@ -402,11 +411,16 @@ halt:
402 if (likely(last_status == -EINPROGRESS)) 411 if (likely(last_status == -EINPROGRESS))
403 last_status = qtd_status; 412 last_status = qtd_status;
404 413
414 /* if we're removing something not at the queue head,
415 * patch the hardware queue pointer.
416 */
405 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { 417 if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
406 last = list_entry (qtd->qtd_list.prev, 418 last = list_entry (qtd->qtd_list.prev,
407 struct ehci_qtd, qtd_list); 419 struct ehci_qtd, qtd_list);
408 last->hw_next = qtd->hw_next; 420 last->hw_next = qtd->hw_next;
409 } 421 }
422
423 /* remove qtd; it's recycled after possible urb completion */
410 list_del (&qtd->qtd_list); 424 list_del (&qtd->qtd_list);
411 last = qtd; 425 last = qtd;
412 } 426 }
@@ -431,7 +445,15 @@ halt:
431 qh_refresh(ehci, qh); 445 qh_refresh(ehci, qh);
432 break; 446 break;
433 case QH_STATE_LINKED: 447 case QH_STATE_LINKED:
434 /* should be rare for periodic transfers, 448 /* We won't refresh a QH that's linked (after the HC
449 * stopped the queue). That avoids a race:
450 * - HC reads first part of QH;
451 * - CPU updates that first part and the token;
452 * - HC reads rest of that QH, including token
453 * Result: HC gets an inconsistent image, and then
454 * DMAs to/from the wrong memory (corrupting it).
455 *
456 * That should be rare for interrupt transfers,
435 * except maybe high bandwidth ... 457 * except maybe high bandwidth ...
436 */ 458 */
437 if ((cpu_to_hc32(ehci, QH_SMASK) 459 if ((cpu_to_hc32(ehci, QH_SMASK)
@@ -549,6 +571,12 @@ qh_urb_transaction (
549 this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket); 571 this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
550 len -= this_qtd_len; 572 len -= this_qtd_len;
551 buf += this_qtd_len; 573 buf += this_qtd_len;
574
575 /*
576 * short reads advance to a "magic" dummy instead of the next
577 * qtd ... that forces the queue to stop, for manual cleanup.
578 * (this will usually be overridden later.)
579 */
552 if (is_input) 580 if (is_input)
553 qtd->hw_alt_next = ehci->async->hw_alt_next; 581 qtd->hw_alt_next = ehci->async->hw_alt_next;
554 582
@@ -568,8 +596,10 @@ qh_urb_transaction (
568 list_add_tail (&qtd->qtd_list, head); 596 list_add_tail (&qtd->qtd_list, head);
569 } 597 }
570 598
571 /* unless the bulk/interrupt caller wants a chance to clean 599 /*
572 * up after short reads, hc should advance qh past this urb 600 * unless the caller requires manual cleanup after short reads,
601 * have the alt_next mechanism keep the queue running after the
602 * last data qtd (the only one, for control and most other cases).
573 */ 603 */
574 if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 604 if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
575 || usb_pipecontrol (urb->pipe))) 605 || usb_pipecontrol (urb->pipe)))