path: root/drivers/usb/host/ehci-sched.c
author     Alan Stern <stern@rowland.harvard.edu>  2012-07-11 11:22:26 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-07-16 19:54:25 -0400
commit     df2022553dd8d34d49e16c19d851ea619438f0ef (patch)
tree       9850c967e0fbee1cb99e900c2e60a0ad50b528da /drivers/usb/host/ehci-sched.c
parent     314466101c6ae14f6f5db8a86eda1509ba2c02a8 (diff)
USB: EHCI: use hrtimer for interrupt QH unlink
This patch (as1577) adds hrtimer support for unlinking interrupt QHs
in ehci-hcd.  The current code relies on a fixed delay of either 2 or
55 us, which is not always adequate and in any case is totally bogus.
Thanks to internal caching, the EHCI hardware may continue to access
an interrupt QH for more than a millisecond after it has been unlinked.

In fact, the EHCI spec doesn't say how long to wait before using an
unlinked interrupt QH.  The patch sets the delay to 9 microframes
minimum, which ought to be adequate.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
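For readers unfamiliar with the deferred-unlink scheme the patch introduces, the
standalone C sketch below illustrates the idea under simplified assumptions.  Every
name in it (fake_hcd, fake_qh, queue_unlink, arm_unlink_timer, unlink_timer_fired)
is invented for illustration and does not exist in the kernel; the sketch only
mirrors the cycle-tagging logic visible in start_unlink_intr() and the hrtimer
handler, with the timer replaced by a stub.

/*
 * Illustrative sketch only -- NOT the kernel code.  The names below are
 * made up; they mimic the batching scheme the patch introduces: instead
 * of busy-waiting after unlinking an interrupt QH, tag it with the
 * current "unlink cycle", append it to a list, and arm a one-shot timer
 * only when a new batch starts.  When the timer fires, every entry whose
 * cycle differs from the current one has waited at least one full timer
 * period (>= 9 uframes in the real driver), so its unlink can finish.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_qh {
	int		id;
	unsigned	unlink_cycle;		/* cycle when queued */
	struct fake_qh	*unlink_next;
};

struct fake_hcd {
	struct fake_qh	*intr_unlink;		/* list head */
	struct fake_qh	*intr_unlink_last;	/* list tail */
	unsigned	intr_unlink_cycle;	/* current cycle number */
};

/* Stand-in for arming an hrtimer at least 9 uframes in the future */
static void arm_unlink_timer(struct fake_hcd *hcd)
{
	printf("timer armed (cycle %u)\n", hcd->intr_unlink_cycle);
}

/* Roughly what start_unlink_intr() does after unlinking: no udelay() */
static void queue_unlink(struct fake_hcd *hcd, struct fake_qh *qh)
{
	qh->unlink_cycle = hcd->intr_unlink_cycle;
	qh->unlink_next = NULL;

	if (hcd->intr_unlink)
		hcd->intr_unlink_last->unlink_next = qh;
	else
		hcd->intr_unlink = qh;
	hcd->intr_unlink_last = qh;

	/* Only the first entry of a batch arms the timer and bumps the cycle */
	if (hcd->intr_unlink == qh) {
		arm_unlink_timer(hcd);
		++hcd->intr_unlink_cycle;
	}
}

/* Roughly what the timer handler does: finish entries from older cycles */
static void unlink_timer_fired(struct fake_hcd *hcd)
{
	struct fake_qh *qh;

	while ((qh = hcd->intr_unlink) != NULL &&
			qh->unlink_cycle != hcd->intr_unlink_cycle) {
		hcd->intr_unlink = qh->unlink_next;
		printf("qh %d: safe to recycle\n", qh->id);
	}
	if (!hcd->intr_unlink)
		hcd->intr_unlink_last = NULL;
	else {
		arm_unlink_timer(hcd);	/* newer batch still pending */
		++hcd->intr_unlink_cycle;
	}
}

int main(void)
{
	struct fake_hcd hcd = { 0 };
	struct fake_qh a = { .id = 1 }, b = { .id = 2 };

	queue_unlink(&hcd, &a);		/* starts a batch, arms the timer */
	queue_unlink(&hcd, &b);		/* joins the following batch */
	unlink_timer_fired(&hcd);	/* recycles qh 1, re-arms for qh 2 */
	unlink_timer_fired(&hcd);	/* recycles qh 2 */
	return 0;
}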
Diffstat (limited to 'drivers/usb/host/ehci-sched.c')
-rw-r--r--  drivers/usb/host/ehci-sched.c  71
1 file changed, 46 insertions(+), 25 deletions(-)
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 69b1861e4325..eec8446f8ded 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -578,12 +578,20 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
 	unsigned	i;
 	unsigned	period;
 
-	// FIXME:
-	// IF this isn't high speed
-	//   and this qh is active in the current uframe
-	//   (and overlay token SplitXstate is false?)
-	// THEN
-	//   qh->hw_info1 |= cpu_to_hc32(1 << 7 /* "ignore" */);
+	/*
+	 * If qh is for a low/full-speed device, simply unlinking it
+	 * could interfere with an ongoing split transaction.  To unlink
+	 * it safely would require setting the QH_INACTIVATE bit and
+	 * waiting at least one frame, as described in EHCI 4.12.2.5.
+	 *
+	 * We won't bother with any of this.  Instead, we assume that the
+	 * only reason for unlinking an interrupt QH while the current URB
+	 * is still active is to dequeue all the URBs (flush the whole
+	 * endpoint queue).
+	 *
+	 * If rebalancing the periodic schedule is ever implemented, this
+	 * approach will no longer be valid.
+	 */
 
 	/* high bandwidth, or otherwise part of every microframe */
 	if ((period = qh->period) == 0)
@@ -608,12 +616,8 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
 	qh->qh_next.ptr = NULL;
 }
 
-static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
-	unsigned	wait;
-	struct ehci_qh_hw	*hw = qh->hw;
-	int			rc;
-
 	/* If the QH isn't linked then there's nothing we can do
 	 * unless we were called during a giveback, in which case
 	 * qh_completions() has to deal with it.
@@ -626,28 +630,45 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	qh_unlink_periodic (ehci, qh);
 
-	/* simple/paranoid:  always delay, expecting the HC needs to read
-	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
-	 * expect khubd to clean up after any CSPLITs we won't issue.
-	 * active high speed queues may need bigger delays...
+	/* Make sure the unlinks are visible before starting the timer */
+	wmb();
+
+	/*
+	 * The EHCI spec doesn't say how long it takes the controller to
+	 * stop accessing an unlinked interrupt QH.  The timer delay is
+	 * 9 uframes; presumably that will be long enough.
 	 */
-	if (list_empty (&qh->qtd_list)
-			|| (cpu_to_hc32(ehci, QH_CMASK)
-					& hw->hw_info2) != 0)
-		wait = 2;
+	qh->unlink_cycle = ehci->intr_unlink_cycle;
+
+	/* New entries go at the end of the intr_unlink list */
+	if (ehci->intr_unlink)
+		ehci->intr_unlink_last->unlink_next = qh;
 	else
-		wait = 55;	/* worst case: 3 * 1024 */
+		ehci->intr_unlink = qh;
+	ehci->intr_unlink_last = qh;
+
+	if (ehci->intr_unlinking)
+		;	/* Avoid recursive calls */
+	else if (ehci->rh_state < EHCI_RH_RUNNING)
+		ehci_handle_intr_unlinks(ehci);
+	else if (ehci->intr_unlink == qh) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+		++ehci->intr_unlink_cycle;
+	}
+}
+
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	struct ehci_qh_hw	*hw = qh->hw;
+	int			rc;
 
-	udelay (wait);
 	qh->qh_state = QH_STATE_IDLE;
 	hw->hw_next = EHCI_LIST_END(ehci);
-	wmb ();
 
 	qh_completions(ehci, qh);
 
 	/* reschedule QH iff another request is queued */
-	if (!list_empty(&qh->qtd_list) &&
-			ehci->rh_state == EHCI_RH_RUNNING) {
+	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
 		rc = qh_schedule(ehci, qh);
 
 		/* An error here likely indicates handshake failure
@@ -2302,7 +2323,7 @@ restart:
 						temp.qh->stamp = ehci->periodic_stamp;
 					if (unlikely(list_empty(&temp.qh->qtd_list) ||
 							temp.qh->needs_rescan))
-						intr_deschedule(ehci, temp.qh);
+						start_unlink_intr(ehci, temp.qh);
 				}
 				break;
 			case Q_TYPE_FSTN: