aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb/host/ehci-q.c
diff options
context:
space:
mode:
authorStefan Roese <ml@stefan-roese.de>2007-05-01 12:29:37 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2007-07-12 19:29:45 -0400
commit6dbd682b7c6d58916096616cdf94852641bc09d9 (patch)
tree74bc2fa9038a426ac5f81969ad85cae5e4262501 /drivers/usb/host/ehci-q.c
parent196705c9bbc03540429b0f7cf9ee35c2f928a534 (diff)
USB: EHCI support for big-endian descriptors
This patch implements support for EHCI controllers whose in-memory data structures are represented in big-endian format. This is needed (unfortunately) for the AMCC PPC440EPx SoC EHCI controller; the EHCI spec doesn't specify little-endian format, although that's what most other implementations use. The guts of the patch are to introduce the hc32 type and change all references from le32 to hc32. All access routines are converted from cpu_to_le32(...) to cpu_to_hc32(ehci, ...) and similar for the other "direction". (This is the same approach used with OHCI.) David fixed: Whitespace fixes; refresh against ehci cpufreq patch; move glue for that PPC driver to the patch adding it; fix free symbol capture bugs in modified "constant" macros; and make "hc32" etc be "le32" unless we really need the BE options, so "sparse" can do some real good. Signed-off-by: Stefan Roese <sr@denx.de> Signed-off-by: David Brownell <dbrownell@users.sourceforge.net> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/ehci-q.c')
-rw-r--r--drivers/usb/host/ehci-q.c92
1 files changed, 47 insertions, 45 deletions
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 903510beb299..2284028f8aa5 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -43,15 +43,15 @@
43/* fill a qtd, returning how much of the buffer we were able to queue up */ 43/* fill a qtd, returning how much of the buffer we were able to queue up */
44 44
45static int 45static int
46qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, 46qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
47 int token, int maxpacket) 47 size_t len, int token, int maxpacket)
48{ 48{
49 int i, count; 49 int i, count;
50 u64 addr = buf; 50 u64 addr = buf;
51 51
52 /* one buffer entry per 4K ... first might be short or unaligned */ 52 /* one buffer entry per 4K ... first might be short or unaligned */
53 qtd->hw_buf [0] = cpu_to_le32 ((u32)addr); 53 qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
54 qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32)); 54 qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
55 count = 0x1000 - (buf & 0x0fff); /* rest of that page */ 55 count = 0x1000 - (buf & 0x0fff); /* rest of that page */
56 if (likely (len < count)) /* ... iff needed */ 56 if (likely (len < count)) /* ... iff needed */
57 count = len; 57 count = len;
@@ -62,8 +62,9 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
62 /* per-qtd limit: from 16K to 20K (best alignment) */ 62 /* per-qtd limit: from 16K to 20K (best alignment) */
63 for (i = 1; count < len && i < 5; i++) { 63 for (i = 1; count < len && i < 5; i++) {
64 addr = buf; 64 addr = buf;
65 qtd->hw_buf [i] = cpu_to_le32 ((u32)addr); 65 qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
66 qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32)); 66 qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
67 (u32)(addr >> 32));
67 buf += 0x1000; 68 buf += 0x1000;
68 if ((count + 0x1000) < len) 69 if ((count + 0x1000) < len)
69 count += 0x1000; 70 count += 0x1000;
@@ -75,7 +76,7 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
75 if (count != len) 76 if (count != len)
76 count -= (count % maxpacket); 77 count -= (count % maxpacket);
77 } 78 }
78 qtd->hw_token = cpu_to_le32 ((count << 16) | token); 79 qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
79 qtd->length = count; 80 qtd->length = count;
80 81
81 return count; 82 return count;
@@ -89,28 +90,28 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
89 /* writes to an active overlay are unsafe */ 90 /* writes to an active overlay are unsafe */
90 BUG_ON(qh->qh_state != QH_STATE_IDLE); 91 BUG_ON(qh->qh_state != QH_STATE_IDLE);
91 92
92 qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma); 93 qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
93 qh->hw_alt_next = EHCI_LIST_END; 94 qh->hw_alt_next = EHCI_LIST_END(ehci);
94 95
95 /* Except for control endpoints, we make hardware maintain data 96 /* Except for control endpoints, we make hardware maintain data
96 * toggle (like OHCI) ... here (re)initialize the toggle in the QH, 97 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
97 * and set the pseudo-toggle in udev. Only usb_clear_halt() will 98 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
98 * ever clear it. 99 * ever clear it.
99 */ 100 */
100 if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) { 101 if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
101 unsigned is_out, epnum; 102 unsigned is_out, epnum;
102 103
103 is_out = !(qtd->hw_token & cpu_to_le32(1 << 8)); 104 is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
104 epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f; 105 epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
105 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { 106 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
106 qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE); 107 qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
107 usb_settoggle (qh->dev, epnum, is_out, 1); 108 usb_settoggle (qh->dev, epnum, is_out, 1);
108 } 109 }
109 } 110 }
110 111
111 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ 112 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
112 wmb (); 113 wmb ();
113 qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING); 114 qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
114} 115}
115 116
116/* if it weren't for a common silicon quirk (writing the dummy into the qh 117/* if it weren't for a common silicon quirk (writing the dummy into the qh
@@ -128,7 +129,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
128 qtd = list_entry (qh->qtd_list.next, 129 qtd = list_entry (qh->qtd_list.next,
129 struct ehci_qtd, qtd_list); 130 struct ehci_qtd, qtd_list);
130 /* first qtd may already be partially processed */ 131 /* first qtd may already be partially processed */
131 if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current) 132 if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
132 qtd = NULL; 133 qtd = NULL;
133 } 134 }
134 135
@@ -222,7 +223,7 @@ __acquires(ehci->lock)
222 struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; 223 struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
223 224
224 /* S-mask in a QH means it's an interrupt urb */ 225 /* S-mask in a QH means it's an interrupt urb */
225 if ((qh->hw_info2 & __constant_cpu_to_le32 (QH_SMASK)) != 0) { 226 if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
226 227
227 /* ... update hc-wide periodic stats (for usbfs) */ 228 /* ... update hc-wide periodic stats (for usbfs) */
228 ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; 229 ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
@@ -277,7 +278,6 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
277 * Chases up to qh->hw_current. Returns number of completions called, 278 * Chases up to qh->hw_current. Returns number of completions called,
278 * indicating how much "real" work we did. 279 * indicating how much "real" work we did.
279 */ 280 */
280#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
281static unsigned 281static unsigned
282qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) 282qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
283{ 283{
@@ -287,6 +287,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
287 unsigned count = 0; 287 unsigned count = 0;
288 int do_status = 0; 288 int do_status = 0;
289 u8 state; 289 u8 state;
290 u32 halt = HALT_BIT(ehci);
290 291
291 if (unlikely (list_empty (&qh->qtd_list))) 292 if (unlikely (list_empty (&qh->qtd_list)))
292 return count; 293 return count;
@@ -334,7 +335,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
334 335
335 /* hardware copies qtd out of qh overlay */ 336 /* hardware copies qtd out of qh overlay */
336 rmb (); 337 rmb ();
337 token = le32_to_cpu (qtd->hw_token); 338 token = hc32_to_cpu(ehci, qtd->hw_token);
338 339
339 /* always clean up qtds the hc de-activated */ 340 /* always clean up qtds the hc de-activated */
340 if ((token & QTD_STS_ACTIVE) == 0) { 341 if ((token & QTD_STS_ACTIVE) == 0) {
@@ -346,7 +347,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
346 * that silicon quirk can kick in with this dummy too. 347 * that silicon quirk can kick in with this dummy too.
347 */ 348 */
348 } else if (IS_SHORT_READ (token) 349 } else if (IS_SHORT_READ (token)
349 && !(qtd->hw_alt_next & EHCI_LIST_END)) { 350 && !(qtd->hw_alt_next
351 & EHCI_LIST_END(ehci))) {
350 stopped = 1; 352 stopped = 1;
351 goto halt; 353 goto halt;
352 } 354 }
@@ -378,17 +380,17 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
378 380
379 /* token in overlay may be most current */ 381 /* token in overlay may be most current */
380 if (state == QH_STATE_IDLE 382 if (state == QH_STATE_IDLE
381 && cpu_to_le32 (qtd->qtd_dma) 383 && cpu_to_hc32(ehci, qtd->qtd_dma)
382 == qh->hw_current) 384 == qh->hw_current)
383 token = le32_to_cpu (qh->hw_token); 385 token = hc32_to_cpu(ehci, qh->hw_token);
384 386
385 /* force halt for unlinked or blocked qh, so we'll 387 /* force halt for unlinked or blocked qh, so we'll
386 * patch the qh later and so that completions can't 388 * patch the qh later and so that completions can't
387 * activate it while we "know" it's stopped. 389 * activate it while we "know" it's stopped.
388 */ 390 */
389 if ((HALT_BIT & qh->hw_token) == 0) { 391 if ((halt & qh->hw_token) == 0) {
390halt: 392halt:
391 qh->hw_token |= HALT_BIT; 393 qh->hw_token |= halt;
392 wmb (); 394 wmb ();
393 } 395 }
394 } 396 }
@@ -423,7 +425,7 @@ halt:
423 * it after fault cleanup, or recovering from silicon wrongly 425 * it after fault cleanup, or recovering from silicon wrongly
424 * overlaying the dummy qtd (which reduces DMA chatter). 426 * overlaying the dummy qtd (which reduces DMA chatter).
425 */ 427 */
426 if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) { 428 if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
427 switch (state) { 429 switch (state) {
428 case QH_STATE_IDLE: 430 case QH_STATE_IDLE:
429 qh_refresh(ehci, qh); 431 qh_refresh(ehci, qh);
@@ -432,7 +434,7 @@ halt:
432 /* should be rare for periodic transfers, 434 /* should be rare for periodic transfers,
433 * except maybe high bandwidth ... 435 * except maybe high bandwidth ...
434 */ 436 */
435 if ((__constant_cpu_to_le32 (QH_SMASK) 437 if ((cpu_to_hc32(ehci, QH_SMASK)
436 & qh->hw_info2) != 0) { 438 & qh->hw_info2) != 0) {
437 intr_deschedule (ehci, qh); 439 intr_deschedule (ehci, qh);
438 (void) qh_schedule (ehci, qh); 440 (void) qh_schedule (ehci, qh);
@@ -506,8 +508,9 @@ qh_urb_transaction (
506 is_input = usb_pipein (urb->pipe); 508 is_input = usb_pipein (urb->pipe);
507 if (usb_pipecontrol (urb->pipe)) { 509 if (usb_pipecontrol (urb->pipe)) {
508 /* SETUP pid */ 510 /* SETUP pid */
509 qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest), 511 qtd_fill(ehci, qtd, urb->setup_dma,
510 token | (2 /* "setup" */ << 8), 8); 512 sizeof (struct usb_ctrlrequest),
513 token | (2 /* "setup" */ << 8), 8);
511 514
512 /* ... and always at least one more pid */ 515 /* ... and always at least one more pid */
513 token ^= QTD_TOGGLE; 516 token ^= QTD_TOGGLE;
@@ -516,7 +519,7 @@ qh_urb_transaction (
516 if (unlikely (!qtd)) 519 if (unlikely (!qtd))
517 goto cleanup; 520 goto cleanup;
518 qtd->urb = urb; 521 qtd->urb = urb;
519 qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma); 522 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
520 list_add_tail (&qtd->qtd_list, head); 523 list_add_tail (&qtd->qtd_list, head);
521 524
522 /* for zero length DATA stages, STATUS is always IN */ 525 /* for zero length DATA stages, STATUS is always IN */
@@ -543,7 +546,7 @@ qh_urb_transaction (
543 for (;;) { 546 for (;;) {
544 int this_qtd_len; 547 int this_qtd_len;
545 548
546 this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket); 549 this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
547 len -= this_qtd_len; 550 len -= this_qtd_len;
548 buf += this_qtd_len; 551 buf += this_qtd_len;
549 if (is_input) 552 if (is_input)
@@ -561,7 +564,7 @@ qh_urb_transaction (
561 if (unlikely (!qtd)) 564 if (unlikely (!qtd))
562 goto cleanup; 565 goto cleanup;
563 qtd->urb = urb; 566 qtd->urb = urb;
564 qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma); 567 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
565 list_add_tail (&qtd->qtd_list, head); 568 list_add_tail (&qtd->qtd_list, head);
566 } 569 }
567 570
@@ -570,7 +573,7 @@ qh_urb_transaction (
570 */ 573 */
571 if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 574 if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
572 || usb_pipecontrol (urb->pipe))) 575 || usb_pipecontrol (urb->pipe)))
573 qtd->hw_alt_next = EHCI_LIST_END; 576 qtd->hw_alt_next = EHCI_LIST_END(ehci);
574 577
575 /* 578 /*
576 * control requests may need a terminating data "status" ack; 579 * control requests may need a terminating data "status" ack;
@@ -594,17 +597,17 @@ qh_urb_transaction (
594 if (unlikely (!qtd)) 597 if (unlikely (!qtd))
595 goto cleanup; 598 goto cleanup;
596 qtd->urb = urb; 599 qtd->urb = urb;
597 qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma); 600 qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
598 list_add_tail (&qtd->qtd_list, head); 601 list_add_tail (&qtd->qtd_list, head);
599 602
600 /* never any data in such packets */ 603 /* never any data in such packets */
601 qtd_fill (qtd, 0, 0, token, 0); 604 qtd_fill(ehci, qtd, 0, 0, token, 0);
602 } 605 }
603 } 606 }
604 607
605 /* by default, enable interrupt on urb completion */ 608 /* by default, enable interrupt on urb completion */
606 if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) 609 if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
607 qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC); 610 qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
608 return head; 611 return head;
609 612
610cleanup: 613cleanup:
@@ -773,8 +776,8 @@ done:
773 776
774 /* init as live, toggle clear, advance to dummy */ 777 /* init as live, toggle clear, advance to dummy */
775 qh->qh_state = QH_STATE_IDLE; 778 qh->qh_state = QH_STATE_IDLE;
776 qh->hw_info1 = cpu_to_le32 (info1); 779 qh->hw_info1 = cpu_to_hc32(ehci, info1);
777 qh->hw_info2 = cpu_to_le32 (info2); 780 qh->hw_info2 = cpu_to_hc32(ehci, info2);
778 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); 781 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
779 qh_refresh (ehci, qh); 782 qh_refresh (ehci, qh);
780 return qh; 783 return qh;
@@ -786,7 +789,7 @@ done:
786 789
787static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) 790static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
788{ 791{
789 __le32 dma = QH_NEXT (qh->qh_dma); 792 __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
790 struct ehci_qh *head; 793 struct ehci_qh *head;
791 794
792 /* (re)start the async schedule? */ 795 /* (re)start the async schedule? */
@@ -824,8 +827,6 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
824 827
825/*-------------------------------------------------------------------------*/ 828/*-------------------------------------------------------------------------*/
826 829
827#define QH_ADDR_MASK __constant_cpu_to_le32(0x7f)
828
829/* 830/*
830 * For control/bulk/interrupt, return QH with these TDs appended. 831 * For control/bulk/interrupt, return QH with these TDs appended.
831 * Allocates and initializes the QH if necessary. 832 * Allocates and initializes the QH if necessary.
@@ -841,6 +842,7 @@ static struct ehci_qh *qh_append_tds (
841) 842)
842{ 843{
843 struct ehci_qh *qh = NULL; 844 struct ehci_qh *qh = NULL;
845 u32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
844 846
845 qh = (struct ehci_qh *) *ptr; 847 qh = (struct ehci_qh *) *ptr;
846 if (unlikely (qh == NULL)) { 848 if (unlikely (qh == NULL)) {
@@ -862,7 +864,7 @@ static struct ehci_qh *qh_append_tds (
862 864
863 /* usb_reset_device() briefly reverts to address 0 */ 865 /* usb_reset_device() briefly reverts to address 0 */
864 if (usb_pipedevice (urb->pipe) == 0) 866 if (usb_pipedevice (urb->pipe) == 0)
865 qh->hw_info1 &= ~QH_ADDR_MASK; 867 qh->hw_info1 &= ~qh_addr_mask;
866 } 868 }
867 869
868 /* just one way to queue requests: swap with the dummy qtd. 870 /* just one way to queue requests: swap with the dummy qtd.
@@ -871,7 +873,7 @@ static struct ehci_qh *qh_append_tds (
871 if (likely (qtd != NULL)) { 873 if (likely (qtd != NULL)) {
872 struct ehci_qtd *dummy; 874 struct ehci_qtd *dummy;
873 dma_addr_t dma; 875 dma_addr_t dma;
874 __le32 token; 876 __hc32 token;
875 877
876 /* to avoid racing the HC, use the dummy td instead of 878 /* to avoid racing the HC, use the dummy td instead of
877 * the first td of our list (becomes new dummy). both 879 * the first td of our list (becomes new dummy). both
@@ -879,7 +881,7 @@ static struct ehci_qh *qh_append_tds (
879 * HC is allowed to fetch the old dummy (4.10.2). 881 * HC is allowed to fetch the old dummy (4.10.2).
880 */ 882 */
881 token = qtd->hw_token; 883 token = qtd->hw_token;
882 qtd->hw_token = HALT_BIT; 884 qtd->hw_token = HALT_BIT(ehci);
883 wmb (); 885 wmb ();
884 dummy = qh->dummy; 886 dummy = qh->dummy;
885 887
@@ -891,14 +893,14 @@ static struct ehci_qh *qh_append_tds (
891 list_add (&dummy->qtd_list, qtd_list); 893 list_add (&dummy->qtd_list, qtd_list);
892 __list_splice (qtd_list, qh->qtd_list.prev); 894 __list_splice (qtd_list, qh->qtd_list.prev);
893 895
894 ehci_qtd_init (qtd, qtd->qtd_dma); 896 ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
895 qh->dummy = qtd; 897 qh->dummy = qtd;
896 898
897 /* hc must see the new dummy at list end */ 899 /* hc must see the new dummy at list end */
898 dma = qtd->qtd_dma; 900 dma = qtd->qtd_dma;
899 qtd = list_entry (qh->qtd_list.prev, 901 qtd = list_entry (qh->qtd_list.prev,
900 struct ehci_qtd, qtd_list); 902 struct ehci_qtd, qtd_list);
901 qtd->hw_next = QTD_NEXT (dma); 903 qtd->hw_next = QTD_NEXT(ehci, dma);
902 904
903 /* let the hc process these next qtds */ 905 /* let the hc process these next qtds */
904 wmb (); 906 wmb ();
@@ -974,7 +976,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
974 976
975 timer_action_done (ehci, TIMER_IAA_WATCHDOG); 977 timer_action_done (ehci, TIMER_IAA_WATCHDOG);
976 978
977 // qh->hw_next = cpu_to_le32 (qh->qh_dma); 979 // qh->hw_next = cpu_to_hc32(qh->qh_dma);
978 qh->qh_state = QH_STATE_IDLE; 980 qh->qh_state = QH_STATE_IDLE;
979 qh->qh_next.qh = NULL; 981 qh->qh_next.qh = NULL;
980 qh_put (qh); // refcount from reclaim 982 qh_put (qh); // refcount from reclaim