Diffstat (limited to 'drivers/usb/host/ehci-q.c')
-rw-r--r--	drivers/usb/host/ehci-q.c	96
1 file changed, 51 insertions, 45 deletions
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index e7fbbd00e7cd..2284028f8aa5 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -43,15 +43,15 @@
 /* fill a qtd, returning how much of the buffer we were able to queue up */
 
 static int
-qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
-		int token, int maxpacket)
+qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
+		size_t len, int token, int maxpacket)
 {
 	int	i, count;
 	u64	addr = buf;
 
 	/* one buffer entry per 4K ... first might be short or unaligned */
-	qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
-	qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
+	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
+	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
 	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
 	if (likely (len < count))		/* ... iff needed */
 		count = len;
@@ -62,8 +62,9 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
 	/* per-qtd limit: from 16K to 20K (best alignment) */
 	for (i = 1; count < len && i < 5; i++) {
 		addr = buf;
-		qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
-		qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
+		qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
+		qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
+				(u32)(addr >> 32));
 		buf += 0x1000;
 		if ((count + 0x1000) < len)
 			count += 0x1000;
@@ -75,7 +76,7 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
 		if (count != len)
 			count -= (count % maxpacket);
 	}
-	qtd->hw_token = cpu_to_le32 ((count << 16) | token);
+	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
 	qtd->length = count;
 
 	return count;
@@ -89,28 +90,28 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
 	/* writes to an active overlay are unsafe */
 	BUG_ON(qh->qh_state != QH_STATE_IDLE);
 
-	qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
-	qh->hw_alt_next = EHCI_LIST_END;
+	qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
+	qh->hw_alt_next = EHCI_LIST_END(ehci);
 
 	/* Except for control endpoints, we make hardware maintain data
 	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
 	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
 	 * ever clear it.
 	 */
-	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
+	if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
 		unsigned	is_out, epnum;
 
-		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
-		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
+		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
+		epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
 		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
-			qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE);
+			qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
 			usb_settoggle (qh->dev, epnum, is_out, 1);
 		}
 	}
 
 	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
 	wmb ();
-	qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING);
+	qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
 }
 
 /* if it weren't for a common silicon quirk (writing the dummy into the qh
@@ -128,7 +129,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
 		qtd = list_entry (qh->qtd_list.next,
 				struct ehci_qtd, qtd_list);
 		/* first qtd may already be partially processed */
-		if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current)
+		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
 			qtd = NULL;
 	}
 
@@ -222,7 +223,7 @@ __acquires(ehci->lock)
 		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
 
 		/* S-mask in a QH means it's an interrupt urb */
-		if ((qh->hw_info2 & __constant_cpu_to_le32 (QH_SMASK)) != 0) {
+		if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
 
 			/* ... update hc-wide periodic stats (for usbfs) */
 			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
@@ -277,7 +278,6 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
  * Chases up to qh->hw_current.  Returns number of completions called,
  * indicating how much "real" work we did.
  */
-#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
 static unsigned
 qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
@@ -287,6 +287,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	unsigned		count = 0;
 	int			do_status = 0;
 	u8			state;
+	u32			halt = HALT_BIT(ehci);
 
 	if (unlikely (list_empty (&qh->qtd_list)))
 		return count;
@@ -311,6 +312,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 		struct urb	*urb;
 		u32		token = 0;
 
+		/* ignore QHs that are currently inactive */
+		if (qh->hw_info1 & __constant_cpu_to_le32(QH_INACTIVATE))
+			break;
+
 		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
 		urb = qtd->urb;
 
@@ -330,7 +335,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 		/* hardware copies qtd out of qh overlay */
 		rmb ();
-		token = le32_to_cpu (qtd->hw_token);
+		token = hc32_to_cpu(ehci, qtd->hw_token);
 
 		/* always clean up qtds the hc de-activated */
 		if ((token & QTD_STS_ACTIVE) == 0) {
@@ -342,7 +347,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 			 * that silicon quirk can kick in with this dummy too.
 			 */
 			} else if (IS_SHORT_READ (token)
-					&& !(qtd->hw_alt_next & EHCI_LIST_END)) {
+					&& !(qtd->hw_alt_next
+						& EHCI_LIST_END(ehci))) {
 				stopped = 1;
 				goto halt;
 			}
@@ -374,17 +380,17 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 			/* token in overlay may be most current */
 			if (state == QH_STATE_IDLE
-					&& cpu_to_le32 (qtd->qtd_dma)
+					&& cpu_to_hc32(ehci, qtd->qtd_dma)
 						== qh->hw_current)
-				token = le32_to_cpu (qh->hw_token);
+				token = hc32_to_cpu(ehci, qh->hw_token);
 
 			/* force halt for unlinked or blocked qh, so we'll
 			 * patch the qh later and so that completions can't
 			 * activate it while we "know" it's stopped.
 			 */
-			if ((HALT_BIT & qh->hw_token) == 0) {
+			if ((halt & qh->hw_token) == 0) {
 halt:
-				qh->hw_token |= HALT_BIT;
+				qh->hw_token |= halt;
 				wmb ();
 			}
 		}
@@ -419,7 +425,7 @@ halt:
 	 * it after fault cleanup, or recovering from silicon wrongly
 	 * overlaying the dummy qtd (which reduces DMA chatter).
 	 */
-	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
+	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
 		switch (state) {
 		case QH_STATE_IDLE:
 			qh_refresh(ehci, qh);
@@ -428,7 +434,7 @@ halt:
 			/* should be rare for periodic transfers,
 			 * except maybe high bandwidth ...
 			 */
-			if ((__constant_cpu_to_le32 (QH_SMASK)
+			if ((cpu_to_hc32(ehci, QH_SMASK)
 					& qh->hw_info2) != 0) {
 				intr_deschedule (ehci, qh);
 				(void) qh_schedule (ehci, qh);
@@ -502,8 +508,9 @@ qh_urb_transaction (
 	is_input = usb_pipein (urb->pipe);
 	if (usb_pipecontrol (urb->pipe)) {
 		/* SETUP pid */
-		qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest),
-				token | (2 /* "setup" */ << 8), 8);
+		qtd_fill(ehci, qtd, urb->setup_dma,
+				sizeof (struct usb_ctrlrequest),
+				token | (2 /* "setup" */ << 8), 8);
 
 		/* ... and always at least one more pid */
 		token ^= QTD_TOGGLE;
@@ -512,7 +519,7 @@ qh_urb_transaction (
 		if (unlikely (!qtd))
 			goto cleanup;
 		qtd->urb = urb;
-		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
 		list_add_tail (&qtd->qtd_list, head);
 
 		/* for zero length DATA stages, STATUS is always IN */
@@ -539,7 +546,7 @@ qh_urb_transaction (
 	for (;;) {
 		int this_qtd_len;
 
-		this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket);
+		this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
 		len -= this_qtd_len;
 		buf += this_qtd_len;
 		if (is_input)
@@ -557,7 +564,7 @@ qh_urb_transaction (
 		if (unlikely (!qtd))
 			goto cleanup;
 		qtd->urb = urb;
-		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
 		list_add_tail (&qtd->qtd_list, head);
 	}
 
@@ -566,7 +573,7 @@ qh_urb_transaction (
 	 */
 	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
 				|| usb_pipecontrol (urb->pipe)))
-		qtd->hw_alt_next = EHCI_LIST_END;
+		qtd->hw_alt_next = EHCI_LIST_END(ehci);
 
 	/*
 	 * control requests may need a terminating data "status" ack;
@@ -590,17 +597,17 @@ qh_urb_transaction (
 			if (unlikely (!qtd))
 				goto cleanup;
 			qtd->urb = urb;
-			qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
 			list_add_tail (&qtd->qtd_list, head);
 
 			/* never any data in such packets */
-			qtd_fill (qtd, 0, 0, token, 0);
+			qtd_fill(ehci, qtd, 0, 0, token, 0);
 		}
 	}
 
 	/* by default, enable interrupt on urb completion */
 	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
-		qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);
+		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
 	return head;
 
 cleanup:
@@ -769,8 +776,8 @@ done:
 
 	/* init as live, toggle clear, advance to dummy */
 	qh->qh_state = QH_STATE_IDLE;
-	qh->hw_info1 = cpu_to_le32 (info1);
-	qh->hw_info2 = cpu_to_le32 (info2);
+	qh->hw_info1 = cpu_to_hc32(ehci, info1);
+	qh->hw_info2 = cpu_to_hc32(ehci, info2);
 	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
 	qh_refresh (ehci, qh);
 	return qh;
@@ -782,7 +789,7 @@ done:
 
 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
-	__le32		dma = QH_NEXT (qh->qh_dma);
+	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
 	struct ehci_qh	*head;
 
 	/* (re)start the async schedule? */
@@ -820,8 +827,6 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 /*-------------------------------------------------------------------------*/
 
-#define	QH_ADDR_MASK	__constant_cpu_to_le32(0x7f)
-
 /*
  * For control/bulk/interrupt, return QH with these TDs appended.
  * Allocates and initializes the QH if necessary.
@@ -837,6 +842,7 @@ static struct ehci_qh *qh_append_tds (
 )
 {
 	struct ehci_qh		*qh = NULL;
+	u32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
 
 	qh = (struct ehci_qh *) *ptr;
 	if (unlikely (qh == NULL)) {
@@ -858,7 +864,7 @@ static struct ehci_qh *qh_append_tds (
 
 			/* usb_reset_device() briefly reverts to address 0 */
 			if (usb_pipedevice (urb->pipe) == 0)
-				qh->hw_info1 &= ~QH_ADDR_MASK;
+				qh->hw_info1 &= ~qh_addr_mask;
 		}
 
 	/* just one way to queue requests: swap with the dummy qtd.
@@ -867,7 +873,7 @@ static struct ehci_qh *qh_append_tds (
 	if (likely (qtd != NULL)) {
 		struct ehci_qtd		*dummy;
 		dma_addr_t		dma;
-		__le32			token;
+		__hc32			token;
 
 		/* to avoid racing the HC, use the dummy td instead of
 		 * the first td of our list (becomes new dummy).  both
@@ -875,7 +881,7 @@ static struct ehci_qh *qh_append_tds (
 		 * HC is allowed to fetch the old dummy (4.10.2).
 		 */
 		token = qtd->hw_token;
-		qtd->hw_token = HALT_BIT;
+		qtd->hw_token = HALT_BIT(ehci);
 		wmb ();
 		dummy = qh->dummy;
 
@@ -887,14 +893,14 @@ static struct ehci_qh *qh_append_tds (
 		list_add (&dummy->qtd_list, qtd_list);
 		__list_splice (qtd_list, qh->qtd_list.prev);
 
-		ehci_qtd_init (qtd, qtd->qtd_dma);
+		ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
 		qh->dummy = qtd;
 
 		/* hc must see the new dummy at list end */
 		dma = qtd->qtd_dma;
 		qtd = list_entry (qh->qtd_list.prev,
 				struct ehci_qtd, qtd_list);
-		qtd->hw_next = QTD_NEXT (dma);
+		qtd->hw_next = QTD_NEXT(ehci, dma);
 
 		/* let the hc process these next qtds */
 		wmb ();
@@ -970,7 +976,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
 
 	timer_action_done (ehci, TIMER_IAA_WATCHDOG);
 
-	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
+	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
 	qh->qh_state = QH_STATE_IDLE;
 	qh->qh_next.qh = NULL;
 	qh_put (qh);			// refcount from reclaim
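
Note on the helpers this diff leans on: every cpu_to_le32()/le32_to_cpu() call above becomes a cpu_to_hc32()/hc32_to_cpu() call taking the ehci pointer, so descriptor byte order is chosen per controller at run time instead of being fixed at compile time. The definitions live in ehci.h, which is outside this diff; what follows is only a minimal sketch of the pattern, assuming a big_endian_desc flag in struct ehci_hcd (the real header also carries Kconfig guards so little-endian-only builds compile back down to constants):

typedef __u32 __bitwise __hc32;	/* "host controller" byte order */

static inline __hc32 cpu_to_hc32(const struct ehci_hcd *ehci, const u32 x)
{
	/* assumed flag: set at probe time for controllers whose in-memory
	 * qTD/QH descriptors are big-endian (e.g. some PPC SoC hosts)
	 */
	return ehci->big_endian_desc
		? (__force __hc32)cpu_to_be32(x)
		: (__force __hc32)cpu_to_le32(x);
}

static inline u32 hc32_to_cpu(const struct ehci_hcd *ehci, const __hc32 x)
{
	return ehci->big_endian_desc
		? be32_to_cpu((__force __be32)x)
		: le32_to_cpu((__force __le32)x);
}

static inline u32 hc32_to_cpup(const struct ehci_hcd *ehci, const __hc32 *x)
{
	return hc32_to_cpu(ehci, *x);
}

/* link-field and token macros would pick up an ehci argument the same way: */
#define QTD_NEXT(ehci, dma)	cpu_to_hc32(ehci, (u32)dma)
#define EHCI_LIST_END(ehci)	cpu_to_hc32(ehci, 1)	/* the "T" terminate bit */
#define HALT_BIT(ehci)		cpu_to_hc32(ehci, QTD_STS_HALT)

This also explains two smaller changes in the hunks above: the __constant_cpu_to_le32() forms disappear because the values are no longer compile-time constants, and qh_completions() now hoists HALT_BIT(ehci) into a local halt once per call instead of re-evaluating it inside the scan loop.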
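
Separately, the qtd_fill() hunks carry the buffer-page arithmetic along unchanged, and its "per-qtd limit: from 16K to 20K (best alignment)" comment is easy to verify: a qTD holds five page pointers, and only the first may point mid-page. A standalone sketch of that math (illustrative userspace code, not part of the patch; qtd_fill() additionally rounds a partial fill down to a multiple of maxpacket):

#include <stdio.h>

/* Capacity of one EHCI qTD: the remainder of the first 4 KiB page plus
 * four further whole pages.
 */
static unsigned long qtd_capacity(unsigned long buf)
{
	return (0x1000 - (buf & 0x0fff)) + 4 * 0x1000;
}

int main(void)
{
	printf("page-aligned buffer: %lu bytes\n", qtd_capacity(0x2000));	/* 20480 = 20K */
	printf("worst-case buffer:   %lu bytes\n", qtd_capacity(0x2fff));	/* 16385 ~ 16K */
	return 0;
}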