about summary refs log tree commit diff stats
path: root/drivers/usb/host/ehci-q.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/usb/host/ehci-q.c')
-rw-r--r-- drivers/usb/host/ehci-q.c | 95
1 file changed, 62 insertions(+), 33 deletions(-)
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 7673554fa64d..00ad9ce392ed 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -87,31 +87,33 @@ qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
87static inline void 87static inline void
88qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) 88qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
89{ 89{
90 struct ehci_qh_hw *hw = qh->hw;
91
90 /* writes to an active overlay are unsafe */ 92 /* writes to an active overlay are unsafe */
91 BUG_ON(qh->qh_state != QH_STATE_IDLE); 93 BUG_ON(qh->qh_state != QH_STATE_IDLE);
92 94
93 qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); 95 hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
94 qh->hw_alt_next = EHCI_LIST_END(ehci); 96 hw->hw_alt_next = EHCI_LIST_END(ehci);
95 97
96 /* Except for control endpoints, we make hardware maintain data 98 /* Except for control endpoints, we make hardware maintain data
97 * toggle (like OHCI) ... here (re)initialize the toggle in the QH, 99 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
98 * and set the pseudo-toggle in udev. Only usb_clear_halt() will 100 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
99 * ever clear it. 101 * ever clear it.
100 */ 102 */
101 if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { 103 if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
102 unsigned is_out, epnum; 104 unsigned is_out, epnum;
103 105
104 is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8)); 106 is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
105 epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f; 107 epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
106 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { 108 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
107 qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); 109 hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
108 usb_settoggle (qh->dev, epnum, is_out, 1); 110 usb_settoggle (qh->dev, epnum, is_out, 1);
109 } 111 }
110 } 112 }
111 113
112 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ 114 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
113 wmb (); 115 wmb ();
114 qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); 116 hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
115} 117}
116 118
117/* if it weren't for a common silicon quirk (writing the dummy into the qh 119/* if it weren't for a common silicon quirk (writing the dummy into the qh
@@ -129,7 +131,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
129 qtd = list_entry (qh->qtd_list.next, 131 qtd = list_entry (qh->qtd_list.next,
130 struct ehci_qtd, qtd_list); 132 struct ehci_qtd, qtd_list);
131 /* first qtd may already be partially processed */ 133 /* first qtd may already be partially processed */
132 if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current) 134 if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
133 qtd = NULL; 135 qtd = NULL;
134 } 136 }
135 137
@@ -260,7 +262,7 @@ __acquires(ehci->lock)
260 struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; 262 struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
261 263
262 /* S-mask in a QH means it's an interrupt urb */ 264 /* S-mask in a QH means it's an interrupt urb */
263 if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { 265 if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
264 266
265 /* ... update hc-wide periodic stats (for usbfs) */ 267 /* ... update hc-wide periodic stats (for usbfs) */
266 ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; 268 ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
@@ -297,7 +299,6 @@ __acquires(ehci->lock)
297static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); 299static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
298static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); 300static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
299 301
300static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
301static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); 302static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
302 303
303/* 304/*
@@ -308,13 +309,14 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
308static unsigned 309static unsigned
309qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) 310qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
310{ 311{
311 struct ehci_qtd *last = NULL, *end = qh->dummy; 312 struct ehci_qtd *last, *end = qh->dummy;
312 struct list_head *entry, *tmp; 313 struct list_head *entry, *tmp;
313 int last_status = -EINPROGRESS; 314 int last_status;
314 int stopped; 315 int stopped;
315 unsigned count = 0; 316 unsigned count = 0;
316 u8 state; 317 u8 state;
317 __le32 halt = HALT_BIT(ehci); 318 const __le32 halt = HALT_BIT(ehci);
319 struct ehci_qh_hw *hw = qh->hw;
318 320
319 if (unlikely (list_empty (&qh->qtd_list))) 321 if (unlikely (list_empty (&qh->qtd_list)))
320 return count; 322 return count;
@@ -324,11 +326,20 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
324 * they add urbs to this qh's queue or mark them for unlinking. 326 * they add urbs to this qh's queue or mark them for unlinking.
325 * 327 *
326 * NOTE: unlinking expects to be done in queue order. 328 * NOTE: unlinking expects to be done in queue order.
329 *
330 * It's a bug for qh->qh_state to be anything other than
331 * QH_STATE_IDLE, unless our caller is scan_async() or
332 * scan_periodic().
327 */ 333 */
328 state = qh->qh_state; 334 state = qh->qh_state;
329 qh->qh_state = QH_STATE_COMPLETING; 335 qh->qh_state = QH_STATE_COMPLETING;
330 stopped = (state == QH_STATE_IDLE); 336 stopped = (state == QH_STATE_IDLE);
331 337
338 rescan:
339 last = NULL;
340 last_status = -EINPROGRESS;
341 qh->needs_rescan = 0;
342
332 /* remove de-activated QTDs from front of queue. 343 /* remove de-activated QTDs from front of queue.
333 * after faults (including short reads), cleanup this urb 344 * after faults (including short reads), cleanup this urb
334 * then let the queue advance. 345 * then let the queue advance.
@@ -392,7 +403,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
392 qtd->hw_token = cpu_to_hc32(ehci, 403 qtd->hw_token = cpu_to_hc32(ehci,
393 token); 404 token);
394 wmb(); 405 wmb();
395 qh->hw_token = cpu_to_hc32(ehci, token); 406 hw->hw_token = cpu_to_hc32(ehci,
407 token);
396 goto retry_xacterr; 408 goto retry_xacterr;
397 } 409 }
398 stopped = 1; 410 stopped = 1;
@@ -435,8 +447,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
435 /* qh unlinked; token in overlay may be most current */ 447 /* qh unlinked; token in overlay may be most current */
436 if (state == QH_STATE_IDLE 448 if (state == QH_STATE_IDLE
437 && cpu_to_hc32(ehci, qtd->qtd_dma) 449 && cpu_to_hc32(ehci, qtd->qtd_dma)
438 == qh->hw_current) { 450 == hw->hw_current) {
439 token = hc32_to_cpu(ehci, qh->hw_token); 451 token = hc32_to_cpu(ehci, hw->hw_token);
440 452
441 /* An unlink may leave an incomplete 453 /* An unlink may leave an incomplete
442 * async transaction in the TT buffer. 454 * async transaction in the TT buffer.
@@ -449,9 +461,9 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
449 * patch the qh later and so that completions can't 461 * patch the qh later and so that completions can't
450 * activate it while we "know" it's stopped. 462 * activate it while we "know" it's stopped.
451 */ 463 */
452 if ((halt & qh->hw_token) == 0) { 464 if ((halt & hw->hw_token) == 0) {
453halt: 465halt:
454 qh->hw_token |= halt; 466 hw->hw_token |= halt;
455 wmb (); 467 wmb ();
456 } 468 }
457 } 469 }
@@ -503,6 +515,21 @@ halt:
503 ehci_qtd_free (ehci, last); 515 ehci_qtd_free (ehci, last);
504 } 516 }
505 517
518 /* Do we need to rescan for URBs dequeued during a giveback? */
519 if (unlikely(qh->needs_rescan)) {
520 /* If the QH is already unlinked, do the rescan now. */
521 if (state == QH_STATE_IDLE)
522 goto rescan;
523
524 /* Otherwise we have to wait until the QH is fully unlinked.
525 * Our caller will start an unlink if qh->needs_rescan is
526 * set. But if an unlink has already started, nothing needs
527 * to be done.
528 */
529 if (state != QH_STATE_LINKED)
530 qh->needs_rescan = 0;
531 }
532
506 /* restore original state; caller must unlink or relink */ 533 /* restore original state; caller must unlink or relink */
507 qh->qh_state = state; 534 qh->qh_state = state;
508 535
@@ -510,7 +537,7 @@ halt:
510 * it after fault cleanup, or recovering from silicon wrongly 537 * it after fault cleanup, or recovering from silicon wrongly
511 * overlaying the dummy qtd (which reduces DMA chatter). 538 * overlaying the dummy qtd (which reduces DMA chatter).
512 */ 539 */
513 if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) { 540 if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
514 switch (state) { 541 switch (state) {
515 case QH_STATE_IDLE: 542 case QH_STATE_IDLE:
516 qh_refresh(ehci, qh); 543 qh_refresh(ehci, qh);
@@ -527,12 +554,9 @@ halt:
527 * That should be rare for interrupt transfers, 554 * That should be rare for interrupt transfers,
528 * except maybe high bandwidth ... 555 * except maybe high bandwidth ...
529 */ 556 */
530 if ((cpu_to_hc32(ehci, QH_SMASK) 557
531 & qh->hw_info2) != 0) { 558 /* Tell the caller to start an unlink */
532 intr_deschedule (ehci, qh); 559 qh->needs_rescan = 1;
533 (void) qh_schedule (ehci, qh);
534 } else
535 unlink_async (ehci, qh);
536 break; 560 break;
537 /* otherwise, unlink already started */ 561 /* otherwise, unlink already started */
538 } 562 }
@@ -649,7 +673,7 @@ qh_urb_transaction (
649 * (this will usually be overridden later.) 673 * (this will usually be overridden later.)
650 */ 674 */
651 if (is_input) 675 if (is_input)
652 qtd->hw_alt_next = ehci->async->hw_alt_next; 676 qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
653 677
654 /* qh makes control packets use qtd toggle; maybe switch it */ 678 /* qh makes control packets use qtd toggle; maybe switch it */
655 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) 679 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
@@ -744,6 +768,7 @@ qh_make (
744 int is_input, type; 768 int is_input, type;
745 int maxp = 0; 769 int maxp = 0;
746 struct usb_tt *tt = urb->dev->tt; 770 struct usb_tt *tt = urb->dev->tt;
771 struct ehci_qh_hw *hw;
747 772
748 if (!qh) 773 if (!qh)
749 return qh; 774 return qh;
@@ -890,8 +915,9 @@ done:
890 915
891 /* init as live, toggle clear, advance to dummy */ 916 /* init as live, toggle clear, advance to dummy */
892 qh->qh_state = QH_STATE_IDLE; 917 qh->qh_state = QH_STATE_IDLE;
893 qh->hw_info1 = cpu_to_hc32(ehci, info1); 918 hw = qh->hw;
894 qh->hw_info2 = cpu_to_hc32(ehci, info2); 919 hw->hw_info1 = cpu_to_hc32(ehci, info1);
920 hw->hw_info2 = cpu_to_hc32(ehci, info2);
895 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); 921 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
896 qh_refresh (ehci, qh); 922 qh_refresh (ehci, qh);
897 return qh; 923 return qh;
@@ -910,6 +936,8 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
910 if (unlikely(qh->clearing_tt)) 936 if (unlikely(qh->clearing_tt))
911 return; 937 return;
912 938
939 WARN_ON(qh->qh_state != QH_STATE_IDLE);
940
913 /* (re)start the async schedule? */ 941 /* (re)start the async schedule? */
914 head = ehci->async; 942 head = ehci->async;
915 timer_action_done (ehci, TIMER_ASYNC_OFF); 943 timer_action_done (ehci, TIMER_ASYNC_OFF);
@@ -928,16 +956,15 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
928 } 956 }
929 957
930 /* clear halt and/or toggle; and maybe recover from silicon quirk */ 958 /* clear halt and/or toggle; and maybe recover from silicon quirk */
931 if (qh->qh_state == QH_STATE_IDLE) 959 qh_refresh(ehci, qh);
932 qh_refresh (ehci, qh);
933 960
934 /* splice right after start */ 961 /* splice right after start */
935 qh->qh_next = head->qh_next; 962 qh->qh_next = head->qh_next;
936 qh->hw_next = head->hw_next; 963 qh->hw->hw_next = head->hw->hw_next;
937 wmb (); 964 wmb ();
938 965
939 head->qh_next.qh = qh; 966 head->qh_next.qh = qh;
940 head->hw_next = dma; 967 head->hw->hw_next = dma;
941 968
942 qh_get(qh); 969 qh_get(qh);
943 qh->xacterrs = 0; 970 qh->xacterrs = 0;
@@ -984,7 +1011,7 @@ static struct ehci_qh *qh_append_tds (
984 1011
985 /* usb_reset_device() briefly reverts to address 0 */ 1012 /* usb_reset_device() briefly reverts to address 0 */
986 if (usb_pipedevice (urb->pipe) == 0) 1013 if (usb_pipedevice (urb->pipe) == 0)
987 qh->hw_info1 &= ~qh_addr_mask; 1014 qh->hw->hw_info1 &= ~qh_addr_mask;
988 } 1015 }
989 1016
990 /* just one way to queue requests: swap with the dummy qtd. 1017 /* just one way to queue requests: swap with the dummy qtd.
@@ -1169,7 +1196,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1169 while (prev->qh_next.qh != qh) 1196 while (prev->qh_next.qh != qh)
1170 prev = prev->qh_next.qh; 1197 prev = prev->qh_next.qh;
1171 1198
1172 prev->hw_next = qh->hw_next; 1199 prev->hw->hw_next = qh->hw->hw_next;
1173 prev->qh_next = qh->qh_next; 1200 prev->qh_next = qh->qh_next;
1174 wmb (); 1201 wmb ();
1175 1202
@@ -1214,6 +1241,8 @@ rescan:
1214 qh = qh_get (qh); 1241 qh = qh_get (qh);
1215 qh->stamp = ehci->stamp; 1242 qh->stamp = ehci->stamp;
1216 temp = qh_completions (ehci, qh); 1243 temp = qh_completions (ehci, qh);
1244 if (qh->needs_rescan)
1245 unlink_async(ehci, qh);
1217 qh_put (qh); 1246 qh_put (qh);
1218 if (temp != 0) { 1247 if (temp != 0) {
1219 goto rescan; 1248 goto rescan;