author		Alan Stern <stern@rowland.harvard.edu>	2012-07-11 11:22:26 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-07-16 19:54:25 -0400
commit		df2022553dd8d34d49e16c19d851ea619438f0ef (patch)
tree		9850c967e0fbee1cb99e900c2e60a0ad50b528da /drivers/usb/host
parent		314466101c6ae14f6f5db8a86eda1509ba2c02a8 (diff)
USB: EHCI: use hrtimer for interrupt QH unlink
This patch (as1577) adds hrtimer support for unlinking interrupt QHs in ehci-hcd. The current code relies on a fixed delay of either 2 or 55 microseconds, which is not always adequate and in any case is totally bogus. Thanks to internal caching, the EHCI hardware may continue to access an interrupt QH for more than a millisecond after it has been unlinked.

In fact, the EHCI spec doesn't say how long to wait before using an unlinked interrupt QH. The patch sets the delay to a minimum of 9 microframes (9 x 125 us = 1125 us), which ought to be adequate.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
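For context, the deferral scheme the patch introduces is: each unlinked QH is tagged with the current unlink cycle and appended to a pending list; the hrtimer handler reclaims only entries tagged with an older cycle, so every entry waits at least one full timer period (9 uframes here) before the driver touches it again. Below is a minimal, runnable user-space C sketch of that cycle-tagging idea; it is not part of the patch, and the names item, pending, unlink_cycle and timer_expired are illustrative stand-ins for the ehci_hcd fields.

/*
 * Sketch only (not from the patch): models the cycle-tagged deferred
 * unlink list in plain C.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct item {
	unsigned cycle;			/* unlink_cycle value when queued */
	struct item *next;
};

static struct item *pending, *pending_last;	/* FIFO of unlinked items */
static unsigned unlink_cycle;			/* bumped whenever the timer is armed */
static bool timer_armed;

static void start_unlink(struct item *it)
{
	/* Tag with the current cycle and append to the pending list */
	it->cycle = unlink_cycle;
	it->next = NULL;
	if (pending)
		pending_last->next = it;
	else
		pending = it;
	pending_last = it;

	/* The first entry arms the (imaginary) timer and opens a new cycle */
	if (!timer_armed) {
		timer_armed = true;
		++unlink_cycle;
	}
}

/* Stands in for the timer expiration: reclaim entries that waited a full period */
static void timer_expired(void)
{
	timer_armed = false;
	while (pending) {
		struct item *it = pending;

		/* Entries tagged with the current cycle were queued after the
		 * timer was armed; they haven't waited long enough yet. */
		if (it->cycle == unlink_cycle)
			break;
		pending = it->next;
		printf("reclaimed item queued in cycle %u\n", it->cycle);
		free(it);
	}
	if (pending) {			/* re-arm for the remainder */
		timer_armed = true;
		++unlink_cycle;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		start_unlink(calloc(1, sizeof(struct item)));
	timer_expired();	/* reclaims only the item queued before the timer was armed */
	timer_expired();	/* reclaims the two items queued during the first period */
	return 0;
}

In the driver itself the role of timer_expired() is played by ehci_handle_intr_unlinks(), driven by the EHCI_HRTIMER_UNLINK_INTR event with a 1125 us delay, as shown in the diff below.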
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--	drivers/usb/host/ehci-hcd.c	6
-rw-r--r--	drivers/usb/host/ehci-hub.c	1
-rw-r--r--	drivers/usb/host/ehci-sched.c	71
-rw-r--r--	drivers/usb/host/ehci-timer.c	34
-rw-r--r--	drivers/usb/host/ehci.h	10
5 files changed, 94 insertions, 28 deletions
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 21d6fbc0a327..edcfd2c4295e 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -309,6 +309,8 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
 
 static void end_unlink_async(struct ehci_hcd *ehci);
 static void ehci_work(struct ehci_hcd *ehci);
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
 
 #include "ehci-timer.c"
 #include "ehci-hub.c"
@@ -1034,7 +1036,7 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	switch (qh->qh_state) {
 	case QH_STATE_LINKED:
 	case QH_STATE_COMPLETING:
-		intr_deschedule (ehci, qh);
+		start_unlink_intr(ehci, qh);
 		break;
 	case QH_STATE_IDLE:
 		qh_completions (ehci, qh);
@@ -1164,7 +1166,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
 			if (eptype == USB_ENDPOINT_XFER_BULK)
 				unlink_async(ehci, qh);
 			else
-				intr_deschedule(ehci, qh);
+				start_unlink_intr(ehci, qh);
 		}
 	}
 	spin_unlock_irqrestore(&ehci->lock, flags);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 25329e4b844f..8aa740dc510d 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -302,6 +302,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 
 	if (ehci->async_unlink)
 		end_unlink_async(ehci);
+	ehci_handle_intr_unlinks(ehci);
 
 	/* allow remote wakeup */
 	mask = INTR_MASK;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 69b1861e4325..eec8446f8ded 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -578,12 +578,20 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
 	unsigned	i;
 	unsigned	period;
 
-	// FIXME:
-	// IF this isn't high speed
-	//   and this qh is active in the current uframe
-	//   (and overlay token SplitXstate is false?)
-	// THEN
-	//   qh->hw_info1 |= cpu_to_hc32(1 << 7 /* "ignore" */);
+	/*
+	 * If qh is for a low/full-speed device, simply unlinking it
+	 * could interfere with an ongoing split transaction.  To unlink
+	 * it safely would require setting the QH_INACTIVATE bit and
+	 * waiting at least one frame, as described in EHCI 4.12.2.5.
+	 *
+	 * We won't bother with any of this.  Instead, we assume that the
+	 * only reason for unlinking an interrupt QH while the current URB
+	 * is still active is to dequeue all the URBs (flush the whole
+	 * endpoint queue).
+	 *
+	 * If rebalancing the periodic schedule is ever implemented, this
+	 * approach will no longer be valid.
+	 */
 
 	/* high bandwidth, or otherwise part of every microframe */
 	if ((period = qh->period) == 0)
@@ -608,12 +616,8 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
 	qh->qh_next.ptr = NULL;
 }
 
-static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
-	unsigned		wait;
-	struct ehci_qh_hw	*hw = qh->hw;
-	int			rc;
-
 	/* If the QH isn't linked then there's nothing we can do
 	 * unless we were called during a giveback, in which case
 	 * qh_completions() has to deal with it.
@@ -626,28 +630,45 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	qh_unlink_periodic (ehci, qh);
 
-	/* simple/paranoid:  always delay, expecting the HC needs to read
-	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
-	 * expect khubd to clean up after any CSPLITs we won't issue.
-	 * active high speed queues may need bigger delays...
+	/* Make sure the unlinks are visible before starting the timer */
+	wmb();
+
+	/*
+	 * The EHCI spec doesn't say how long it takes the controller to
+	 * stop accessing an unlinked interrupt QH.  The timer delay is
+	 * 9 uframes; presumably that will be long enough.
 	 */
-	if (list_empty (&qh->qtd_list)
-			|| (cpu_to_hc32(ehci, QH_CMASK)
-					& hw->hw_info2) != 0)
-		wait = 2;
+	qh->unlink_cycle = ehci->intr_unlink_cycle;
+
+	/* New entries go at the end of the intr_unlink list */
+	if (ehci->intr_unlink)
+		ehci->intr_unlink_last->unlink_next = qh;
 	else
-		wait = 55;	/* worst case: 3 * 1024 */
+		ehci->intr_unlink = qh;
+	ehci->intr_unlink_last = qh;
+
+	if (ehci->intr_unlinking)
+		;	/* Avoid recursive calls */
+	else if (ehci->rh_state < EHCI_RH_RUNNING)
+		ehci_handle_intr_unlinks(ehci);
+	else if (ehci->intr_unlink == qh) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+		++ehci->intr_unlink_cycle;
+	}
+}
+
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	struct ehci_qh_hw	*hw = qh->hw;
+	int			rc;
 
-	udelay (wait);
 	qh->qh_state = QH_STATE_IDLE;
 	hw->hw_next = EHCI_LIST_END(ehci);
-	wmb ();
 
 	qh_completions(ehci, qh);
 
 	/* reschedule QH iff another request is queued */
-	if (!list_empty(&qh->qtd_list) &&
-			ehci->rh_state == EHCI_RH_RUNNING) {
+	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
 		rc = qh_schedule(ehci, qh);
 
 		/* An error here likely indicates handshake failure
@@ -2302,7 +2323,7 @@ restart:
 			temp.qh->stamp = ehci->periodic_stamp;
 			if (unlikely(list_empty(&temp.qh->qtd_list) ||
 					temp.qh->needs_rescan))
-				intr_deschedule(ehci, temp.qh);
+				start_unlink_intr(ehci, temp.qh);
 		}
 		break;
 	case Q_TYPE_FSTN:
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 1e907dd3bb1b..bd8b591771b0 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -69,6 +69,7 @@ static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
 static unsigned event_delays_ns[] = {
 	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_ASS */
 	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_PSS */
+	1125 * NSEC_PER_USEC,	/* EHCI_HRTIMER_UNLINK_INTR */
 	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_PERIODIC */
 	15 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_ASYNC */
 };
@@ -192,6 +193,38 @@ static void ehci_disable_PSE(struct ehci_hcd *ehci)
 }
 
 
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
+{
+	bool		stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+	/*
+	 * Process all the QHs on the intr_unlink list that were added
+	 * before the current unlink cycle began.  The list is in
+	 * temporal order, so stop when we reach the first entry in the
+	 * current cycle.  But if the root hub isn't running then
+	 * process all the QHs on the list.
+	 */
+	ehci->intr_unlinking = true;
+	while (ehci->intr_unlink) {
+		struct ehci_qh	*qh = ehci->intr_unlink;
+
+		if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
+			break;
+		ehci->intr_unlink = qh->unlink_next;
+		qh->unlink_next = NULL;
+		end_unlink_intr(ehci, qh);
+	}
+
+	/* Handle remaining entries later */
+	if (ehci->intr_unlink) {
+		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+		++ehci->intr_unlink_cycle;
+	}
+	ehci->intr_unlinking = false;
+}
+
+
 /*
  * Handler functions for the hrtimer event types.
  * Keep this array in the same order as the event types indexed by
@@ -200,6 +233,7 @@ static void ehci_disable_PSE(struct ehci_hcd *ehci)
 static void (*event_handlers[])(struct ehci_hcd *) = {
 	ehci_poll_ASS,			/* EHCI_HRTIMER_POLL_ASS */
 	ehci_poll_PSS,			/* EHCI_HRTIMER_POLL_PSS */
+	ehci_handle_intr_unlinks,	/* EHCI_HRTIMER_UNLINK_INTR */
 	ehci_disable_PSE,		/* EHCI_HRTIMER_DISABLE_PERIODIC */
 	ehci_disable_ASE,		/* EHCI_HRTIMER_DISABLE_ASYNC */
 };
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index bf06bbb77ba4..f36f1f85d7fd 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -81,6 +81,7 @@ enum ehci_rh_state {
 enum ehci_hrtimer_event {
 	EHCI_HRTIMER_POLL_ASS,		/* Poll for async schedule off */
 	EHCI_HRTIMER_POLL_PSS,		/* Poll for periodic schedule off */
+	EHCI_HRTIMER_UNLINK_INTR,	/* Wait for interrupt QH unlink */
 	EHCI_HRTIMER_DISABLE_PERIODIC,	/* Wait to disable periodic sched */
 	EHCI_HRTIMER_DISABLE_ASYNC,	/* Wait to disable async sched */
 	EHCI_HRTIMER_NUM_EVENTS		/* Must come last */
@@ -106,13 +107,16 @@ struct ehci_hcd { /* one per controller */
 	spinlock_t		lock;
 	enum ehci_rh_state	rh_state;
 
+	/* general schedule support */
+	unsigned		scanning:1;
+	bool			intr_unlinking:1;
+
 	/* async schedule support */
 	struct ehci_qh		*async;
 	struct ehci_qh		*dummy;		/* For AMD quirk use */
 	struct ehci_qh		*async_unlink;
 	struct ehci_qh		*async_unlink_last;
 	struct ehci_qh		*qh_scan_next;
-	unsigned		scanning : 1;
 	unsigned		async_count;	/* async activity count */
 
 	/* periodic schedule support */
@@ -123,6 +127,9 @@ struct ehci_hcd { /* one per controller */
 	unsigned		i_thresh;	/* uframes HC might cache */
 
 	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
+	struct ehci_qh		*intr_unlink;
+	struct ehci_qh		*intr_unlink_last;
+	unsigned		intr_unlink_cycle;
 	int			next_uframe;	/* scan periodic, start here */
 	unsigned		periodic_count;	/* periodic activity count */
 	unsigned		uframe_periodic_max; /* max periodic time per uframe */
@@ -385,6 +392,7 @@ struct ehci_qh {
 	struct ehci_qh		*unlink_next;	/* next on unlink list */
 
 	unsigned long		unlink_time;
+	unsigned		unlink_cycle;
 	unsigned		stamp;
 
 	u8			needs_rescan;	/* Dequeue during giveback */