path: root/drivers/usb
author	Stefan Roese <ml@stefan-roese.de>	2007-05-01 12:29:37 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2007-07-12 19:29:45 -0400
commit	6dbd682b7c6d58916096616cdf94852641bc09d9 (patch)
tree	74bc2fa9038a426ac5f81969ad85cae5e4262501 /drivers/usb
parent	196705c9bbc03540429b0f7cf9ee35c2f928a534 (diff)
USB: EHCI support for big-endian descriptors
This patch implements support for EHCI controllers whose in-memory data structures are represented in big-endian format. This is needed (unfortunately) for the AMCC PPC440EPx SoC EHCI controller; the EHCI spec doesn't specify little-endian format, although that's what most other implementations use.

The guts of the patch are to introduce the hc32 type and change all references from le32 to hc32. All access routines are converted from cpu_to_le32(...) to cpu_to_hc32(ehci, ...) and similar for the other "direction". (This is the same approach used with OHCI.)

David fixed: whitespace fixes; refresh against ehci cpufreq patch; move glue for that PPC driver to the patch adding it; fix free symbol capture bugs in modified "constant" macros; and make "hc32" etc be "le32" unless we really need the BE options, so "sparse" can do some real good.

Signed-off-by: Stefan Roese <sr@denx.de>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
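[Editor's note] For readers unfamiliar with the hc32 idiom, the fragment below is a minimal sketch of the accessor pattern the message describes: a per-controller flag selects the byte order used for in-memory qTD/QH fields. It is an illustration only, not the exact code this commit adds to ehci.h; the __hc32 typedef and the big_endian_desc flag name follow the description above but should be treated as assumptions.

	/* Sketch only: per-controller byte order for in-memory qTD/QH fields.
	 * The big_endian_desc flag and the __hc32 type are illustrative. */
	typedef __u32 __bitwise __hc32;

	static inline __hc32 cpu_to_hc32(const struct ehci_hcd *ehci, const u32 x)
	{
		return ehci->big_endian_desc
			? (__force __hc32)cpu_to_be32(x)
			: (__force __hc32)cpu_to_le32(x);
	}

	static inline u32 hc32_to_cpu(const struct ehci_hcd *ehci, const __hc32 x)
	{
		return ehci->big_endian_desc
			? be32_to_cpu((__force __be32)x)
			: le32_to_cpu((__force __le32)x);
	}

	static inline u32 hc32_to_cpup(const struct ehci_hcd *ehci, const __hc32 *x)
	{
		return hc32_to_cpu(ehci, *x);
	}

When the big-endian option is compiled out, such helpers can collapse back to plain le32 conversions, which is how (per the message) sparse keeps checking byte order in the common case.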
Diffstat (limited to 'drivers/usb')
-rw-r--r--	drivers/usb/host/Kconfig	5
-rw-r--r--	drivers/usb/host/ehci-dbg.c	161
-rw-r--r--	drivers/usb/host/ehci-hcd.c	10
-rw-r--r--	drivers/usb/host/ehci-mem.c	13
-rw-r--r--	drivers/usb/host/ehci-q.c	92
-rw-r--r--	drivers/usb/host/ehci-sched.c	248
-rw-r--r--	drivers/usb/host/ehci.h	205
7 files changed, 432 insertions, 302 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 62711870f8ee..a8faff646ac7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -72,6 +72,11 @@ config USB_EHCI_BIG_ENDIAN_MMIO
 	depends on USB_EHCI_HCD
 	default n
 
+config USB_EHCI_BIG_ENDIAN_DESC
+	bool
+	depends on USB_EHCI_HCD
+	default n
+
 config USB_ISP116X_HCD
 	tristate "ISP116X HCD support"
 	depends on USB
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 43eddaecc3dd..5bb3d0962ebe 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -52,7 +52,7 @@ static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
 		HCS_INDICATOR (params) ? " ind" : "",
 		HCS_N_CC (params),
 		HCS_N_PCC (params),
 		HCS_PORTROUTED (params) ? "" : " ordered",
 		HCS_PPC (params) ? "" : " !ppc",
 		HCS_N_PORTS (params)
 		);
@@ -91,20 +91,20 @@ static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
 
 	if (HCC_ISOC_CACHE (params)) {
 		ehci_dbg (ehci,
 			"%s hcc_params %04x caching frame %s%s%s\n",
 			label, params,
-			HCC_PGM_FRAMELISTLEN (params) ? "256/512/1024" : "1024",
-			HCC_CANPARK (params) ? " park" : "",
-			HCC_64BIT_ADDR (params) ? " 64 bit addr" : "");
+			HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+			HCC_CANPARK(params) ? " park" : "",
+			HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
 	} else {
 		ehci_dbg (ehci,
 			"%s hcc_params %04x thresh %d uframes %s%s%s\n",
 			label,
 			params,
-			HCC_ISOC_THRES (params),
-			HCC_PGM_FRAMELISTLEN (params) ? "256/512/1024" : "1024",
-			HCC_CANPARK (params) ? " park" : "",
-			HCC_64BIT_ADDR (params) ? " 64 bit addr" : "");
+			HCC_ISOC_THRES(params),
+			HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+			HCC_CANPARK(params) ? " park" : "",
+			HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
 	}
 }
 #else
@@ -118,17 +118,17 @@ static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
 static void __attribute__((__unused__))
 dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
 {
-	ehci_dbg (ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
-		le32_to_cpup (&qtd->hw_next),
-		le32_to_cpup (&qtd->hw_alt_next),
-		le32_to_cpup (&qtd->hw_token),
-		le32_to_cpup (&qtd->hw_buf [0]));
+	ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+		hc32_to_cpup(ehci, &qtd->hw_next),
+		hc32_to_cpup(ehci, &qtd->hw_alt_next),
+		hc32_to_cpup(ehci, &qtd->hw_token),
+		hc32_to_cpup(ehci, &qtd->hw_buf [0]));
 	if (qtd->hw_buf [1])
-		ehci_dbg (ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
-			le32_to_cpup (&qtd->hw_buf [1]),
-			le32_to_cpup (&qtd->hw_buf [2]),
-			le32_to_cpup (&qtd->hw_buf [3]),
-			le32_to_cpup (&qtd->hw_buf [4]));
+		ehci_dbg(ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
+			hc32_to_cpup(ehci, &qtd->hw_buf[1]),
+			hc32_to_cpup(ehci, &qtd->hw_buf[2]),
+			hc32_to_cpup(ehci, &qtd->hw_buf[3]),
+			hc32_to_cpup(ehci, &qtd->hw_buf[4]));
 }
 
 static void __attribute__((__unused__))
@@ -144,26 +144,27 @@ static void __attribute__((__unused__))
 dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
 {
 	ehci_dbg (ehci, "%s [%d] itd %p, next %08x, urb %p\n",
-		label, itd->frame, itd, le32_to_cpu(itd->hw_next), itd->urb);
+		label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next),
+		itd->urb);
 	ehci_dbg (ehci,
 		" trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
-		le32_to_cpu(itd->hw_transaction[0]),
-		le32_to_cpu(itd->hw_transaction[1]),
-		le32_to_cpu(itd->hw_transaction[2]),
-		le32_to_cpu(itd->hw_transaction[3]),
-		le32_to_cpu(itd->hw_transaction[4]),
-		le32_to_cpu(itd->hw_transaction[5]),
-		le32_to_cpu(itd->hw_transaction[6]),
-		le32_to_cpu(itd->hw_transaction[7]));
+		hc32_to_cpu(ehci, itd->hw_transaction[0]),
+		hc32_to_cpu(ehci, itd->hw_transaction[1]),
+		hc32_to_cpu(ehci, itd->hw_transaction[2]),
+		hc32_to_cpu(ehci, itd->hw_transaction[3]),
+		hc32_to_cpu(ehci, itd->hw_transaction[4]),
+		hc32_to_cpu(ehci, itd->hw_transaction[5]),
+		hc32_to_cpu(ehci, itd->hw_transaction[6]),
+		hc32_to_cpu(ehci, itd->hw_transaction[7]));
 	ehci_dbg (ehci,
 		" buf: %08x %08x %08x %08x %08x %08x %08x\n",
-		le32_to_cpu(itd->hw_bufp[0]),
-		le32_to_cpu(itd->hw_bufp[1]),
-		le32_to_cpu(itd->hw_bufp[2]),
-		le32_to_cpu(itd->hw_bufp[3]),
-		le32_to_cpu(itd->hw_bufp[4]),
-		le32_to_cpu(itd->hw_bufp[5]),
-		le32_to_cpu(itd->hw_bufp[6]));
+		hc32_to_cpu(ehci, itd->hw_bufp[0]),
+		hc32_to_cpu(ehci, itd->hw_bufp[1]),
+		hc32_to_cpu(ehci, itd->hw_bufp[2]),
+		hc32_to_cpu(ehci, itd->hw_bufp[3]),
+		hc32_to_cpu(ehci, itd->hw_bufp[4]),
+		hc32_to_cpu(ehci, itd->hw_bufp[5]),
+		hc32_to_cpu(ehci, itd->hw_bufp[6]));
 	ehci_dbg (ehci, " index: %d %d %d %d %d %d %d %d\n",
 		itd->index[0], itd->index[1], itd->index[2],
 		itd->index[3], itd->index[4], itd->index[5],
@@ -174,14 +175,15 @@ static void __attribute__((__unused__))
 dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
 {
 	ehci_dbg (ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
-		label, sitd->frame, sitd, le32_to_cpu(sitd->hw_next), sitd->urb);
+		label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next),
+		sitd->urb);
 	ehci_dbg (ehci,
 		" addr %08x sched %04x result %08x buf %08x %08x\n",
-		le32_to_cpu(sitd->hw_fullspeed_ep),
-		le32_to_cpu(sitd->hw_uframe),
-		le32_to_cpu(sitd->hw_results),
-		le32_to_cpu(sitd->hw_buf [0]),
-		le32_to_cpu(sitd->hw_buf [1]));
+		hc32_to_cpu(ehci, sitd->hw_fullspeed_ep),
+		hc32_to_cpu(ehci, sitd->hw_uframe),
+		hc32_to_cpu(ehci, sitd->hw_results),
+		hc32_to_cpu(ehci, sitd->hw_buf[0]),
+		hc32_to_cpu(ehci, sitd->hw_buf[1]));
 }
 
 static int __attribute__((__unused__))
@@ -267,8 +269,7 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
 		(status & PORT_PEC) ? " PEC" : "",
 		(status & PORT_PE) ? " PE" : "",
 		(status & PORT_CSC) ? " CSC" : "",
-		(status & PORT_CONNECT) ? " CONNECT" : ""
-		);
+		(status & PORT_CONNECT) ? " CONNECT" : "");
 }
 
 #else
@@ -332,9 +333,10 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { }
 		default: tmp = '?'; break; \
 		}; tmp; })
 
-static inline char token_mark (__le32 token)
+static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
 {
-	__u32 v = le32_to_cpu (token);
+	__u32 v = hc32_to_cpu(ehci, token);
+
 	if (v & QTD_STS_ACTIVE)
 		return '*';
 	if (v & QTD_STS_HALT)
@@ -360,46 +362,48 @@ static void qh_lines (
 	unsigned size = *sizep;
 	char *next = *nextp;
 	char mark;
+	u32 list_end = EHCI_LIST_END(ehci);
 
-	if (qh->hw_qtd_next == EHCI_LIST_END)	/* NEC does this */
+	if (qh->hw_qtd_next == list_end)	/* NEC does this */
 		mark = '@';
 	else
-		mark = token_mark (qh->hw_token);
+		mark = token_mark(ehci, qh->hw_token);
 	if (mark == '/') {	/* qh_alt_next controls qh advance? */
-		if ((qh->hw_alt_next & QTD_MASK) == ehci->async->hw_alt_next)
+		if ((qh->hw_alt_next & QTD_MASK(ehci))
+				== ehci->async->hw_alt_next)
 			mark = '#';	/* blocked */
-		else if (qh->hw_alt_next == EHCI_LIST_END)
+		else if (qh->hw_alt_next == list_end)
 			mark = '.';	/* use hw_qtd_next */
 		/* else alt_next points to some other qtd */
 	}
-	scratch = le32_to_cpup (&qh->hw_info1);
-	hw_curr = (mark == '*') ? le32_to_cpup (&qh->hw_current) : 0;
+	scratch = hc32_to_cpup(ehci, &qh->hw_info1);
+	hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &qh->hw_current) : 0;
 	temp = scnprintf (next, size,
 			"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
 			qh, scratch & 0x007f,
 			speed_char (scratch),
 			(scratch >> 8) & 0x000f,
-			scratch, le32_to_cpup (&qh->hw_info2),
-			le32_to_cpup (&qh->hw_token), mark,
-			(__constant_cpu_to_le32 (QTD_TOGGLE) & qh->hw_token)
+			scratch, hc32_to_cpup(ehci, &qh->hw_info2),
+			hc32_to_cpup(ehci, &qh->hw_token), mark,
+			(cpu_to_hc32(ehci, QTD_TOGGLE) & qh->hw_token)
				? "data1" : "data0",
-			(le32_to_cpup (&qh->hw_alt_next) >> 1) & 0x0f);
+			(hc32_to_cpup(ehci, &qh->hw_alt_next) >> 1) & 0x0f);
 	size -= temp;
 	next += temp;
 
 	/* hc may be modifying the list as we read it ... */
 	list_for_each (entry, &qh->qtd_list) {
 		td = list_entry (entry, struct ehci_qtd, qtd_list);
-		scratch = le32_to_cpup (&td->hw_token);
+		scratch = hc32_to_cpup(ehci, &td->hw_token);
 		mark = ' ';
 		if (hw_curr == td->qtd_dma)
 			mark = '*';
-		else if (qh->hw_qtd_next == cpu_to_le32(td->qtd_dma))
+		else if (qh->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
 			mark = '+';
 		else if (QTD_LENGTH (scratch)) {
 			if (td->hw_alt_next == ehci->async->hw_alt_next)
 				mark = '#';
-			else if (td->hw_alt_next != EHCI_LIST_END)
+			else if (td->hw_alt_next != list_end)
 				mark = '/';
 		}
 		temp = snprintf (next, size,
@@ -490,7 +494,7 @@ show_periodic (struct class_device *class_dev, char *buf)
 	unsigned temp, size, seen_count;
 	char *next;
 	unsigned i;
-	__le32 tag;
+	__hc32 tag;
 
 	if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
 		return 0;
@@ -514,18 +518,19 @@ show_periodic (struct class_device *class_dev, char *buf)
 		p = ehci->pshadow [i];
 		if (likely (!p.ptr))
 			continue;
-		tag = Q_NEXT_TYPE (ehci->periodic [i]);
+		tag = Q_NEXT_TYPE(ehci, ehci->periodic [i]);
 
 		temp = scnprintf (next, size, "%4d: ", i);
 		size -= temp;
 		next += temp;
 
 		do {
-			switch (tag) {
+			switch (hc32_to_cpu(ehci, tag)) {
 			case Q_TYPE_QH:
 				temp = scnprintf (next, size, " qh%d-%04x/%p",
 						p.qh->period,
-						le32_to_cpup (&p.qh->hw_info2)
+						hc32_to_cpup(ehci,
							&p.qh->hw_info2)
 							/* uframe masks */
 							& (QH_CMASK | QH_SMASK),
 						p.qh);
@@ -543,7 +548,7 @@ show_periodic (struct class_device *class_dev, char *buf)
 				}
 				/* show more info the first time around */
 				if (temp == seen_count && p.ptr) {
-					u32 scratch = le32_to_cpup (
+					u32 scratch = hc32_to_cpup(ehci,
							&p.qh->hw_info1);
 					struct ehci_qtd *qtd;
 					char *type = "";
@@ -554,7 +559,8 @@ show_periodic (struct class_device *class_dev, char *buf)
 							&p.qh->qtd_list,
 							qtd_list) {
 						temp++;
-						switch (0x03 & (le32_to_cpu (
+						switch (0x03 & (hc32_to_cpu(
+							ehci,
 							qtd->hw_token) >> 8)) {
 						case 0: type = "out"; continue;
 						case 1: type = "in"; continue;
@@ -576,7 +582,7 @@ show_periodic (struct class_device *class_dev, char *buf)
 				} else
 					temp = 0;
 				if (p.qh) {
-					tag = Q_NEXT_TYPE (p.qh->hw_next);
+					tag = Q_NEXT_TYPE(ehci, p.qh->hw_next);
 					p = p.qh->qh_next;
 				}
 				break;
@@ -584,23 +590,23 @@ show_periodic (struct class_device *class_dev, char *buf)
 				temp = scnprintf (next, size,
 					" fstn-%8x/%p", p.fstn->hw_prev,
 					p.fstn);
-				tag = Q_NEXT_TYPE (p.fstn->hw_next);
+				tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next);
 				p = p.fstn->fstn_next;
 				break;
 			case Q_TYPE_ITD:
 				temp = scnprintf (next, size,
 					" itd/%p", p.itd);
-				tag = Q_NEXT_TYPE (p.itd->hw_next);
+				tag = Q_NEXT_TYPE(ehci, p.itd->hw_next);
 				p = p.itd->itd_next;
 				break;
 			case Q_TYPE_SITD:
 				temp = scnprintf (next, size,
 					" sitd%d-%04x/%p",
 					p.sitd->stream->interval,
-					le32_to_cpup (&p.sitd->hw_uframe)
+					hc32_to_cpup(ehci, &p.sitd->hw_uframe)
 						& 0x0000ffff,
 					p.sitd);
-				tag = Q_NEXT_TYPE (p.sitd->hw_next);
+				tag = Q_NEXT_TYPE(ehci, p.sitd->hw_next);
 				p = p.sitd->sitd_next;
 				break;
 			}
@@ -673,7 +679,8 @@ show_registers (struct class_device *class_dev, char *buf)
 		unsigned count = 256/4;
 
 		pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
-		offset = HCC_EXT_CAPS (ehci_readl(ehci, &ehci->caps->hcc_params));
+		offset = HCC_EXT_CAPS(ehci_readl(ehci,
				&ehci->caps->hcc_params));
 		while (offset && count--) {
 			pci_read_config_dword (pdev, offset, &cap);
 			switch (cap & 0xff) {
@@ -740,14 +747,16 @@ show_registers (struct class_device *class_dev, char *buf)
 
 	for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) {
 		temp = dbg_port_buf (scratch, sizeof scratch, label, i,
-				ehci_readl(ehci, &ehci->regs->port_status [i - 1]));
+				ehci_readl(ehci,
					&ehci->regs->port_status[i - 1]));
 		temp = scnprintf (next, size, fmt, temp, scratch);
 		size -= temp;
 		next += temp;
 		if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) {
 			temp = scnprintf (next, size,
 					" debug control %08x\n",
-					ehci_readl(ehci, &ehci->debug->control));
+					ehci_readl(ehci,
						&ehci->debug->control));
 			size -= temp;
 			next += temp;
 		}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 566badb05b34..99ab31e9778b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -526,12 +526,12 @@ static int ehci_init(struct usb_hcd *hcd)
 	 * from automatically advancing to the next td after short reads.
 	 */
 	ehci->async->qh_next.qh = NULL;
-	ehci->async->hw_next = QH_NEXT(ehci->async->qh_dma);
-	ehci->async->hw_info1 = cpu_to_le32(QH_HEAD);
-	ehci->async->hw_token = cpu_to_le32(QTD_STS_HALT);
-	ehci->async->hw_qtd_next = EHCI_LIST_END;
+	ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
+	ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+	ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
+	ehci->async->hw_qtd_next = EHCI_LIST_END(ehci);
 	ehci->async->qh_state = QH_STATE_LINKED;
-	ehci->async->hw_alt_next = QTD_NEXT(ehci->async->dummy->qtd_dma);
+	ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
 
 	/* clear interrupt enables, set irq latency */
 	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 5cff6bace5e5..bdb29e618058 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -27,7 +27,7 @@
  *	need to use dma_pool or dma_alloc_coherent
  *	- driver buffers, read/written by HC ... single shot DMA mapped
  *
- * There's also PCI "register" data, which is memory mapped.
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
  * No memory seen by this driver is pageable.
  */
 
@@ -35,13 +35,14 @@
 
 /* Allocate the key transfer structures from the previously allocated pool */
 
-static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma)
+static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
+				  dma_addr_t dma)
 {
 	memset (qtd, 0, sizeof *qtd);
 	qtd->qtd_dma = dma;
 	qtd->hw_token = cpu_to_le32 (QTD_STS_HALT);
-	qtd->hw_next = EHCI_LIST_END;
-	qtd->hw_alt_next = EHCI_LIST_END;
+	qtd->hw_next = EHCI_LIST_END(ehci);
+	qtd->hw_alt_next = EHCI_LIST_END(ehci);
 	INIT_LIST_HEAD (&qtd->qtd_list);
 }
 
@@ -52,7 +53,7 @@ static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
 
 	qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma);
 	if (qtd != NULL) {
-		ehci_qtd_init (qtd, dma);
+		ehci_qtd_init(ehci, qtd, dma);
 	}
 	return qtd;
 }
@@ -220,7 +221,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
 		goto fail;
 	}
 	for (i = 0; i < ehci->periodic_size; i++)
-		ehci->periodic [i] = EHCI_LIST_END;
+		ehci->periodic [i] = EHCI_LIST_END(ehci);
 
 	/* software shadow of hardware table */
 	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 903510beb299..2284028f8aa5 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -43,15 +43,15 @@
 /* fill a qtd, returning how much of the buffer we were able to queue up */
 
 static int
-qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
-		int token, int maxpacket)
+qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
+		size_t len, int token, int maxpacket)
 {
 	int i, count;
 	u64 addr = buf;
 
 	/* one buffer entry per 4K ... first might be short or unaligned */
-	qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
-	qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
+	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
+	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
 	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
 	if (likely (len < count))		/* ... iff needed */
 		count = len;
@@ -62,8 +62,9 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
 	/* per-qtd limit: from 16K to 20K (best alignment) */
 	for (i = 1; count < len && i < 5; i++) {
 		addr = buf;
-		qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
-		qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
+		qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
+		qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
+				(u32)(addr >> 32));
 		buf += 0x1000;
 		if ((count + 0x1000) < len)
 			count += 0x1000;
@@ -75,7 +76,7 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
 		if (count != len)
 			count -= (count % maxpacket);
 	}
-	qtd->hw_token = cpu_to_le32 ((count << 16) | token);
+	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
 	qtd->length = count;
 
 	return count;
@@ -89,28 +90,28 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
 	/* writes to an active overlay are unsafe */
 	BUG_ON(qh->qh_state != QH_STATE_IDLE);
 
-	qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
-	qh->hw_alt_next = EHCI_LIST_END;
+	qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
+	qh->hw_alt_next = EHCI_LIST_END(ehci);
 
 	/* Except for control endpoints, we make hardware maintain data
 	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
 	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
 	 * ever clear it.
 	 */
-	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
+	if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
 		unsigned is_out, epnum;
 
-		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
-		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
+		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
+		epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
 		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
-			qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE);
+			qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
 			usb_settoggle (qh->dev, epnum, is_out, 1);
 		}
 	}
 
 	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
 	wmb ();
-	qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING);
+	qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
 }
 
 /* if it weren't for a common silicon quirk (writing the dummy into the qh
@@ -128,7 +129,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
 		qtd = list_entry (qh->qtd_list.next,
 				struct ehci_qtd, qtd_list);
 		/* first qtd may already be partially processed */
-		if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current)
+		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
 			qtd = NULL;
 	}
 
@@ -222,7 +223,7 @@ __acquires(ehci->lock)
 		struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
 
 		/* S-mask in a QH means it's an interrupt urb */
-		if ((qh->hw_info2 & __constant_cpu_to_le32 (QH_SMASK)) != 0) {
+		if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
 
 			/* ... update hc-wide periodic stats (for usbfs) */
 			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
@@ -277,7 +278,6 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
  * Chases up to qh->hw_current. Returns number of completions called,
  * indicating how much "real" work we did.
  */
-#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
 static unsigned
 qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
@@ -287,6 +287,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	unsigned count = 0;
 	int do_status = 0;
 	u8 state;
+	u32 halt = HALT_BIT(ehci);
 
 	if (unlikely (list_empty (&qh->qtd_list)))
 		return count;
@@ -334,7 +335,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 		/* hardware copies qtd out of qh overlay */
 		rmb ();
-		token = le32_to_cpu (qtd->hw_token);
+		token = hc32_to_cpu(ehci, qtd->hw_token);
 
 		/* always clean up qtds the hc de-activated */
 		if ((token & QTD_STS_ACTIVE) == 0) {
@@ -346,7 +347,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 			 * that silicon quirk can kick in with this dummy too.
 			 */
 			} else if (IS_SHORT_READ (token)
-					&& !(qtd->hw_alt_next & EHCI_LIST_END)) {
+					&& !(qtd->hw_alt_next
+						& EHCI_LIST_END(ehci))) {
 				stopped = 1;
 				goto halt;
 			}
@@ -378,17 +380,17 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 			/* token in overlay may be most current */
 			if (state == QH_STATE_IDLE
-					&& cpu_to_le32 (qtd->qtd_dma)
+					&& cpu_to_hc32(ehci, qtd->qtd_dma)
 						== qh->hw_current)
-				token = le32_to_cpu (qh->hw_token);
+				token = hc32_to_cpu(ehci, qh->hw_token);
 
 			/* force halt for unlinked or blocked qh, so we'll
 			 * patch the qh later and so that completions can't
 			 * activate it while we "know" it's stopped.
 			 */
-			if ((HALT_BIT & qh->hw_token) == 0) {
+			if ((halt & qh->hw_token) == 0) {
 halt:
-				qh->hw_token |= HALT_BIT;
+				qh->hw_token |= halt;
 				wmb ();
 			}
 		}
@@ -423,7 +425,7 @@ halt:
 	 * it after fault cleanup, or recovering from silicon wrongly
 	 * overlaying the dummy qtd (which reduces DMA chatter).
 	 */
-	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
+	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
 		switch (state) {
 		case QH_STATE_IDLE:
 			qh_refresh(ehci, qh);
@@ -432,7 +434,7 @@ halt:
 			/* should be rare for periodic transfers,
 			 * except maybe high bandwidth ...
 			 */
-			if ((__constant_cpu_to_le32 (QH_SMASK)
+			if ((cpu_to_hc32(ehci, QH_SMASK)
 					& qh->hw_info2) != 0) {
 				intr_deschedule (ehci, qh);
 				(void) qh_schedule (ehci, qh);
@@ -506,8 +508,9 @@ qh_urb_transaction (
 	is_input = usb_pipein (urb->pipe);
 	if (usb_pipecontrol (urb->pipe)) {
 		/* SETUP pid */
-		qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest),
-			token | (2 /* "setup" */ << 8), 8);
+		qtd_fill(ehci, qtd, urb->setup_dma,
+				sizeof (struct usb_ctrlrequest),
+				token | (2 /* "setup" */ << 8), 8);
 
 		/* ... and always at least one more pid */
 		token ^= QTD_TOGGLE;
@@ -516,7 +519,7 @@ qh_urb_transaction (
 		if (unlikely (!qtd))
 			goto cleanup;
 		qtd->urb = urb;
-		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
 		list_add_tail (&qtd->qtd_list, head);
 
 		/* for zero length DATA stages, STATUS is always IN */
@@ -543,7 +546,7 @@ qh_urb_transaction (
 	for (;;) {
 		int this_qtd_len;
 
-		this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket);
+		this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
 		len -= this_qtd_len;
 		buf += this_qtd_len;
 		if (is_input)
@@ -561,7 +564,7 @@ qh_urb_transaction (
 		if (unlikely (!qtd))
 			goto cleanup;
 		qtd->urb = urb;
-		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
 		list_add_tail (&qtd->qtd_list, head);
 	}
 
@@ -570,7 +573,7 @@ qh_urb_transaction (
 	 */
 	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
 				|| usb_pipecontrol (urb->pipe)))
-		qtd->hw_alt_next = EHCI_LIST_END;
+		qtd->hw_alt_next = EHCI_LIST_END(ehci);
 
 	/*
 	 * control requests may need a terminating data "status" ack;
@@ -594,17 +597,17 @@ qh_urb_transaction (
 		if (unlikely (!qtd))
 			goto cleanup;
 		qtd->urb = urb;
-		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
 		list_add_tail (&qtd->qtd_list, head);
 
 		/* never any data in such packets */
-		qtd_fill (qtd, 0, 0, token, 0);
+		qtd_fill(ehci, qtd, 0, 0, token, 0);
 		}
 	}
 
 	/* by default, enable interrupt on urb completion */
 	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
-		qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);
+		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
 	return head;
 
 cleanup:
@@ -773,8 +776,8 @@ done:
 
 	/* init as live, toggle clear, advance to dummy */
 	qh->qh_state = QH_STATE_IDLE;
-	qh->hw_info1 = cpu_to_le32 (info1);
-	qh->hw_info2 = cpu_to_le32 (info2);
+	qh->hw_info1 = cpu_to_hc32(ehci, info1);
+	qh->hw_info2 = cpu_to_hc32(ehci, info2);
 	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
 	qh_refresh (ehci, qh);
 	return qh;
@@ -786,7 +789,7 @@ done:
 
 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
-	__le32 dma = QH_NEXT (qh->qh_dma);
+	__hc32 dma = QH_NEXT(ehci, qh->qh_dma);
 	struct ehci_qh *head;
 
 	/* (re)start the async schedule? */
@@ -824,8 +827,6 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 /*-------------------------------------------------------------------------*/
 
-#define QH_ADDR_MASK __constant_cpu_to_le32(0x7f)
-
 /*
  * For control/bulk/interrupt, return QH with these TDs appended.
  * Allocates and initializes the QH if necessary.
@@ -841,6 +842,7 @@ static struct ehci_qh *qh_append_tds (
 )
 {
 	struct ehci_qh *qh = NULL;
+	u32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
 
 	qh = (struct ehci_qh *) *ptr;
 	if (unlikely (qh == NULL)) {
@@ -862,7 +864,7 @@ static struct ehci_qh *qh_append_tds (
 
 			/* usb_reset_device() briefly reverts to address 0 */
 			if (usb_pipedevice (urb->pipe) == 0)
-				qh->hw_info1 &= ~QH_ADDR_MASK;
+				qh->hw_info1 &= ~qh_addr_mask;
 		}
 
 	/* just one way to queue requests: swap with the dummy qtd.
@@ -871,7 +873,7 @@ static struct ehci_qh *qh_append_tds (
 	if (likely (qtd != NULL)) {
 		struct ehci_qtd *dummy;
 		dma_addr_t dma;
-		__le32 token;
+		__hc32 token;
 
 		/* to avoid racing the HC, use the dummy td instead of
 		 * the first td of our list (becomes new dummy). both
@@ -879,7 +881,7 @@ static struct ehci_qh *qh_append_tds (
 		 * HC is allowed to fetch the old dummy (4.10.2).
 		 */
 		token = qtd->hw_token;
-		qtd->hw_token = HALT_BIT;
+		qtd->hw_token = HALT_BIT(ehci);
 		wmb ();
 		dummy = qh->dummy;
 
@@ -891,14 +893,14 @@ static struct ehci_qh *qh_append_tds (
 		list_add (&dummy->qtd_list, qtd_list);
 		__list_splice (qtd_list, qh->qtd_list.prev);
 
-		ehci_qtd_init (qtd, qtd->qtd_dma);
+		ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
 		qh->dummy = qtd;
 
 		/* hc must see the new dummy at list end */
 		dma = qtd->qtd_dma;
 		qtd = list_entry (qh->qtd_list.prev,
 				struct ehci_qtd, qtd_list);
-		qtd->hw_next = QTD_NEXT (dma);
+		qtd->hw_next = QTD_NEXT(ehci, dma);
 
 		/* let the hc process these next qtds */
 		wmb ();
@@ -974,7 +976,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
 
 	timer_action_done (ehci, TIMER_IAA_WATCHDOG);
 
-	// qh->hw_next = cpu_to_le32 (qh->qh_dma);
+	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
 	qh->qh_state = QH_STATE_IDLE;
 	qh->qh_next.qh = NULL;
 	qh_put (qh);	// refcount from reclaim
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 500aebbaa741..d4a8ace49676 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -44,9 +44,10 @@ static int ehci_get_frame (struct usb_hcd *hcd);
  * @tag: hardware tag for type of this record
  */
 static union ehci_shadow *
-periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
+periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
+		__hc32 tag)
 {
-	switch (tag) {
+	switch (hc32_to_cpu(ehci, tag)) {
 	case Q_TYPE_QH:
 		return &periodic->qh->qh_next;
 	case Q_TYPE_FSTN:
@@ -62,13 +63,14 @@ periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
 /* caller must hold ehci->lock */
 static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
 {
-	union ehci_shadow *prev_p = &ehci->pshadow [frame];
-	__le32 *hw_p = &ehci->periodic [frame];
+	union ehci_shadow *prev_p = &ehci->pshadow[frame];
+	__hc32 *hw_p = &ehci->periodic[frame];
 	union ehci_shadow here = *prev_p;
 
 	/* find predecessor of "ptr"; hw and shadow lists are in sync */
 	while (here.ptr && here.ptr != ptr) {
-		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
+		prev_p = periodic_next_shadow(ehci, prev_p,
+				Q_NEXT_TYPE(ehci, *hw_p));
 		hw_p = here.hw_next;
 		here = *prev_p;
 	}
@@ -79,7 +81,8 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
 	/* update shadow and hardware lists ... the old "next" pointers
 	 * from ptr may still be in use, the caller updates them.
 	 */
-	*prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
+	*prev_p = *periodic_next_shadow(ehci, &here,
+			Q_NEXT_TYPE(ehci, *hw_p));
 	*hw_p = *here.hw_next;
 }
 
@@ -87,18 +90,19 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
 static unsigned short
 periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
 {
-	__le32 *hw_p = &ehci->periodic [frame];
+	__hc32 *hw_p = &ehci->periodic [frame];
 	union ehci_shadow *q = &ehci->pshadow [frame];
 	unsigned usecs = 0;
 
 	while (q->ptr) {
-		switch (Q_NEXT_TYPE (*hw_p)) {
+		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
 		case Q_TYPE_QH:
 			/* is it in the S-mask? */
-			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
+			if (q->qh->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
 				usecs += q->qh->usecs;
 			/* ... or C-mask? */
-			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
+			if (q->qh->hw_info2 & cpu_to_hc32(ehci,
+					1 << (8 + uframe)))
 				usecs += q->qh->c_usecs;
 			hw_p = &q->qh->hw_next;
 			q = &q->qh->qh_next;
@@ -108,7 +112,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
 			/* for "save place" FSTNs, count the relevant INTR
 			 * bandwidth from the previous frame
 			 */
-			if (q->fstn->hw_prev != EHCI_LIST_END) {
+			if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
 				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
 			}
 			hw_p = &q->fstn->hw_next;
@@ -121,9 +125,10 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
 			break;
 		case Q_TYPE_SITD:
 			/* is it in the S-mask? (count SPLIT, DATA) */
-			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
+			if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
+					1 << uframe)) {
 				if (q->sitd->hw_fullspeed_ep &
-						__constant_cpu_to_le32 (1<<31))
+						cpu_to_hc32(ehci, 1<<31))
 					usecs += q->sitd->stream->usecs;
 				else	/* worst case for OUT start-split */
 					usecs += HS_USECS_ISO (188);
@@ -131,7 +136,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
 
 			/* ... C-mask? (count CSPLIT, DATA) */
 			if (q->sitd->hw_uframe &
-					cpu_to_le32 (1 << (8 + uframe))) {
+					cpu_to_hc32(ehci, 1 << (8 + uframe))) {
 				/* worst case for IN complete-split */
 				usecs += q->sitd->stream->c_usecs;
 			}
@@ -173,9 +178,9 @@ static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
  * will cause a transfer in "B-frame" uframe 0. "B-frames" lag
  * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7.
  */
-static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __le32 mask)
+static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
 {
-	unsigned char smask = QH_SMASK & le32_to_cpu(mask);
+	unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
 	if (!smask) {
 		ehci_err(ehci, "invalid empty smask!\n");
 		/* uframe 7 can't have bw so this will indicate failure */
@@ -217,14 +222,14 @@ periodic_tt_usecs (
 	unsigned short tt_usecs[8]
 )
 {
-	__le32 *hw_p = &ehci->periodic [frame];
+	__hc32 *hw_p = &ehci->periodic [frame];
 	union ehci_shadow *q = &ehci->pshadow [frame];
 	unsigned char uf;
 
 	memset(tt_usecs, 0, 16);
 
 	while (q->ptr) {
-		switch (Q_NEXT_TYPE(*hw_p)) {
+		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
 		case Q_TYPE_ITD:
 			hw_p = &q->itd->hw_next;
 			q = &q->itd->itd_next;
@@ -247,8 +252,8 @@ periodic_tt_usecs (
 			continue;
 		// case Q_TYPE_FSTN:
 		default:
-			ehci_dbg(ehci,
-				"ignoring periodic frame %d FSTN\n", frame);
+			ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
+					frame);
 			hw_p = &q->fstn->hw_next;
 			q = &q->fstn->fstn_next;
 		}
@@ -368,41 +373,42 @@ static int tt_no_collision (
 	 */
 	for (; frame < ehci->periodic_size; frame += period) {
 		union ehci_shadow here;
-		__le32 type;
+		__hc32 type;
 
 		here = ehci->pshadow [frame];
-		type = Q_NEXT_TYPE (ehci->periodic [frame]);
+		type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
 		while (here.ptr) {
-			switch (type) {
+			switch (hc32_to_cpu(ehci, type)) {
 			case Q_TYPE_ITD:
-				type = Q_NEXT_TYPE (here.itd->hw_next);
+				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
 				here = here.itd->itd_next;
 				continue;
 			case Q_TYPE_QH:
 				if (same_tt (dev, here.qh->dev)) {
 					u32 mask;
 
-					mask = le32_to_cpu (here.qh->hw_info2);
+					mask = hc32_to_cpu(ehci,
							here.qh->hw_info2);
 					/* "knows" no gap is needed */
 					mask |= mask >> 8;
 					if (mask & uf_mask)
 						break;
 				}
-				type = Q_NEXT_TYPE (here.qh->hw_next);
+				type = Q_NEXT_TYPE(ehci, here.qh->hw_next);
 				here = here.qh->qh_next;
 				continue;
 			case Q_TYPE_SITD:
 				if (same_tt (dev, here.sitd->urb->dev)) {
 					u16 mask;
 
-					mask = le32_to_cpu (here.sitd
+					mask = hc32_to_cpu(ehci, here.sitd
							->hw_uframe);
 					/* FIXME assumes no gap for IN! */
 					mask |= mask >> 8;
 					if (mask & uf_mask)
 						break;
 				}
-				type = Q_NEXT_TYPE (here.sitd->hw_next);
+				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
 				here = here.sitd->sitd_next;
 				continue;
 			// case Q_TYPE_FSTN:
@@ -475,13 +481,6 @@ static int disable_periodic (struct ehci_hcd *ehci)
 /*-------------------------------------------------------------------------*/
 #ifdef CONFIG_CPU_FREQ
 
-/* ignore/inactivate bit in QH hw_info1 */
-#define INACTIVATE_BIT __constant_cpu_to_le32(QH_INACTIVATE)
-
-#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
-#define ACTIVE_BIT __constant_cpu_to_le32(QTD_STS_ACTIVE)
-#define STATUS_BIT __constant_cpu_to_le32(QTD_STS_STS)
-
 static int safe_to_modify_i (struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
 	int now; /* current (frame * 8) + uframe */
@@ -492,8 +491,8 @@ static int safe_to_modify_i (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	now = readl(&ehci->regs->frame_index) % (ehci->periodic_size << 3);
 
-	next_start = ((1024 << 3) + (qh->start << 3) + start_uframe - now) %
-		(qh->period << 3);
+	next_start = ((1024 << 3) + (qh->start << 3) + start_uframe - now)
+			% (qh->period << 3);
 	prev_start = (qh->period << 3) - next_start;
 
 	/*
@@ -510,7 +509,7 @@ static int safe_to_modify_i (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	 */
 	if ((next_start > ehci->i_thresh) && (prev_start > 1))
 		/* safe to set "i" bit if split isn't in progress */
-		return (qh->hw_token & STATUS_BIT) ? 0 : 1;
+		return (qh->hw_token & STATUS_BIT(ehci)) ? 0 : 1;
 	else
 		return 0;
 }
@@ -520,12 +519,14 @@ static void qh_inactivate_split_intr_qhs (struct ehci_hcd *ehci)
 {
 	struct ehci_qh *qh;
 	int not_done, safe;
+	u32 inactivate = INACTIVATE_BIT(ehci);
+	u32 active = ACTIVE_BIT(ehci);
 
 	do {
 		not_done = 0;
 		list_for_each_entry(qh, &ehci->split_intr_qhs,
 				split_intr_qhs) {
-			if (qh->hw_info1 & INACTIVATE_BIT)
+			if (qh->hw_info1 & inactivate)
 				/* already off */
 				continue;
 			/*
@@ -539,8 +540,8 @@ static void qh_inactivate_split_intr_qhs (struct ehci_hcd *ehci)
 			if (safe == 0) {
 				not_done = 1;
 			} else if (safe > 0) {
-				qh->was_active = qh->hw_token & ACTIVE_BIT;
-				qh->hw_info1 |= INACTIVATE_BIT;
+				qh->was_active = qh->hw_token & active;
+				qh->hw_info1 |= inactivate;
 			}
 		}
 	} while (not_done);
@@ -552,11 +553,14 @@ static void qh_reactivate_split_intr_qhs (struct ehci_hcd *ehci)
 	struct ehci_qh *qh;
 	u32 token;
 	int not_done, safe;
+	u32 inactivate = INACTIVATE_BIT(ehci);
+	u32 active = ACTIVE_BIT(ehci);
+	u32 halt = HALT_BIT(ehci);
 
 	do {
 		not_done = 0;
 		list_for_each_entry(qh, &ehci->split_intr_qhs, split_intr_qhs) {
-			if (!(qh->hw_info1 & INACTIVATE_BIT)) /* already on */
+			if (!(qh->hw_info1 & inactivate)) /* already on */
 				continue;
 			/*
 			 * Don't reactivate if cached, or controller might
@@ -568,11 +572,11 @@ static void qh_reactivate_split_intr_qhs (struct ehci_hcd *ehci)
 			} else if (safe > 0) {
 				/* See EHCI 1.0 section 4.15.2.4. */
 				token = qh->hw_token;
-				qh->hw_token = (token | HALT_BIT) & ~ACTIVE_BIT;
+				qh->hw_token = (token | halt) & ~active;
 				wmb();
-				qh->hw_info1 &= ~INACTIVATE_BIT;
+				qh->hw_info1 &= ~inactivate;
 				wmb();
-				qh->hw_token = (token & ~HALT_BIT) | qh->was_active;
+				qh->hw_token = (token & ~halt) | qh->was_active;
 			}
 		}
 	} while (not_done);
@@ -592,7 +596,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	dev_dbg (&qh->dev->dev,
 		"link qh%d-%04x/%p start %d [%d/%d us]\n",
-		period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+		period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
 		qh, qh->start, qh->usecs, qh->c_usecs);
 
 #ifdef CONFIG_CPU_FREQ
@@ -603,7 +607,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	 */
 	if (ehci->cpufreq_changing)
 		if (!(qh->hw_info1 & (cpu_to_le32(1 << 13))))
-			qh->hw_info1 |= INACTIVATE_BIT;
+			qh->hw_info1 |= INACTIVATE_BIT(ehci);
 #endif
 
 	/* high bandwidth, or otherwise every microframe */
@@ -611,17 +615,17 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 		period = 1;
 
 	for (i = qh->start; i < ehci->periodic_size; i += period) {
-		union ehci_shadow *prev = &ehci->pshadow [i];
-		__le32 *hw_p = &ehci->periodic [i];
+		union ehci_shadow *prev = &ehci->pshadow[i];
+		__hc32 *hw_p = &ehci->periodic[i];
 		union ehci_shadow here = *prev;
-		__le32 type = 0;
+		__hc32 type = 0;
 
 		/* skip the iso nodes at list head */
 		while (here.ptr) {
-			type = Q_NEXT_TYPE (*hw_p);
-			if (type == Q_TYPE_QH)
+			type = Q_NEXT_TYPE(ehci, *hw_p);
+			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
 				break;
-			prev = periodic_next_shadow (prev, type);
+			prev = periodic_next_shadow(ehci, prev, type);
 			hw_p = &here.qh->hw_next;
 			here = *prev;
 		}
@@ -643,7 +647,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 			qh->hw_next = *hw_p;
 			wmb ();
 			prev->qh = qh;
-			*hw_p = QH_NEXT (qh->qh_dma);
+			*hw_p = QH_NEXT (ehci, qh->qh_dma);
 		}
 	}
 	qh->qh_state = QH_STATE_LINKED;
@@ -677,7 +681,7 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	// and this qh is active in the current uframe
 	// (and overlay token SplitXstate is false?)
 	// THEN
-	// qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);
+	// qh->hw_info1 |= __constant_cpu_to_hc32(1 << 7 /* "ignore" */);
 
 #ifdef CONFIG_CPU_FREQ
 	/* remove qh from list of low/full speed interrupt QHs */
@@ -701,7 +705,7 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	dev_dbg (&qh->dev->dev,
 		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
 		qh->period,
-		le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+		hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
 		qh, qh->start, qh->usecs, qh->c_usecs);
 
 	/* qh->qh_next still "live" to HC */
@@ -727,7 +731,7 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
 	 * active high speed queues may need bigger delays...
 	 */
 	if (list_empty (&qh->qtd_list)
-			|| (__constant_cpu_to_le32 (QH_CMASK)
+			|| (cpu_to_hc32(ehci, QH_CMASK)
 					& qh->hw_info2) != 0)
 		wait = 2;
 	else
@@ -735,7 +739,7 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	udelay (wait);
 	qh->qh_state = QH_STATE_IDLE;
-	qh->hw_next = EHCI_LIST_END;
+	qh->hw_next = EHCI_LIST_END(ehci);
 	wmb ();
 }
 
@@ -792,7 +796,7 @@ static int check_intr_schedule (
 	unsigned frame,
 	unsigned uframe,
 	const struct ehci_qh *qh,
-	__le32 *c_maskp
+	__hc32 *c_maskp
 )
 {
 	int retval = -ENOSPC;
@@ -824,7 +828,7 @@ static int check_intr_schedule (
 
 	retval = 0;
 
-	*c_maskp = cpu_to_le32 (mask << 8);
+	*c_maskp = cpu_to_hc32(ehci, mask << 8);
 	}
 #else
 	/* Make sure this tt's buffer is also available for CSPLITs.
@@ -835,7 +839,7 @@ static int check_intr_schedule (
 	 * one smart pass...
 	 */
 	mask = 0x03 << (uframe + qh->gap_uf);
-	*c_maskp = cpu_to_le32 (mask << 8);
+	*c_maskp = cpu_to_hc32(ehci, mask << 8);
 
 	mask |= 1 << uframe;
 	if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
@@ -855,20 +859,20 @@ done:
 /* "first fit" scheduling policy used the first time through,
  * or when the previous schedule slot can't be re-used.
  */
-static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
 	int status;
 	unsigned uframe;
862 __le32 c_mask; 866 __hc32 c_mask;
863 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ 867 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
864 868
865 qh_refresh(ehci, qh); 869 qh_refresh(ehci, qh);
866 qh->hw_next = EHCI_LIST_END; 870 qh->hw_next = EHCI_LIST_END(ehci);
867 frame = qh->start; 871 frame = qh->start;
868 872
869 /* reuse the previous schedule slots, if we can */ 873 /* reuse the previous schedule slots, if we can */
870 if (frame < qh->period) { 874 if (frame < qh->period) {
871 uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK); 875 uframe = ffs(hc32_to_cpup(ehci, &qh->hw_info2) & QH_SMASK);
872 status = check_intr_schedule (ehci, frame, --uframe, 876 status = check_intr_schedule (ehci, frame, --uframe,
873 qh, &c_mask); 877 qh, &c_mask);
874 } else { 878 } else {
@@ -904,10 +908,10 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
904 qh->start = frame; 908 qh->start = frame;
905 909
906 /* reset S-frame and (maybe) C-frame masks */ 910 /* reset S-frame and (maybe) C-frame masks */
907 qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK)); 911 qh->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
908 qh->hw_info2 |= qh->period 912 qh->hw_info2 |= qh->period
909 ? cpu_to_le32 (1 << uframe) 913 ? cpu_to_hc32(ehci, 1 << uframe)
910 : __constant_cpu_to_le32 (QH_SMASK); 914 : cpu_to_hc32(ehci, QH_SMASK);
911 qh->hw_info2 |= c_mask; 915 qh->hw_info2 |= c_mask;
912 } else 916 } else
913 ehci_dbg (ehci, "reused qh %p schedule\n", qh); 917 ehci_dbg (ehci, "reused qh %p schedule\n", qh);
@@ -937,7 +941,7 @@ static int intr_submit (
937 spin_lock_irqsave (&ehci->lock, flags); 941 spin_lock_irqsave (&ehci->lock, flags);
938 942
939 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, 943 if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
940 &ehci_to_hcd(ehci)->flags))) { 944 &ehci_to_hcd(ehci)->flags))) {
941 status = -ESHUTDOWN; 945 status = -ESHUTDOWN;
942 goto done; 946 goto done;
943 } 947 }
@@ -1027,9 +1031,9 @@ iso_stream_init (
1027 buf1 |= maxp; 1031 buf1 |= maxp;
1028 maxp *= multi; 1032 maxp *= multi;
1029 1033
1030 stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum); 1034 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
1031 stream->buf1 = cpu_to_le32 (buf1); 1035 stream->buf1 = cpu_to_hc32(ehci, buf1);
1032 stream->buf2 = cpu_to_le32 (multi); 1036 stream->buf2 = cpu_to_hc32(ehci, multi);
1033 1037
1034 /* usbfs wants to report the average usecs per frame tied up 1038 /* usbfs wants to report the average usecs per frame tied up
1035 * when transfers on this endpoint are scheduled ... 1039 * when transfers on this endpoint are scheduled ...
@@ -1072,7 +1076,7 @@ iso_stream_init (
1072 bandwidth /= 1 << (interval + 2); 1076 bandwidth /= 1 << (interval + 2);
1073 1077
1074 /* stream->splits gets created from raw_mask later */ 1078 /* stream->splits gets created from raw_mask later */
1075 stream->address = cpu_to_le32 (addr); 1079 stream->address = cpu_to_hc32(ehci, addr);
1076 } 1080 }
1077 stream->bandwidth = bandwidth; 1081 stream->bandwidth = bandwidth;
1078 1082
@@ -1206,7 +1210,8 @@ iso_sched_alloc (unsigned packets, gfp_t mem_flags)
1206} 1210}
1207 1211
1208static inline void 1212static inline void
1209itd_sched_init ( 1213itd_sched_init(
1214 struct ehci_hcd *ehci,
1210 struct ehci_iso_sched *iso_sched, 1215 struct ehci_iso_sched *iso_sched,
1211 struct ehci_iso_stream *stream, 1216 struct ehci_iso_stream *stream,
1212 struct urb *urb 1217 struct urb *urb
@@ -1236,7 +1241,7 @@ itd_sched_init (
1236 && !(urb->transfer_flags & URB_NO_INTERRUPT)) 1241 && !(urb->transfer_flags & URB_NO_INTERRUPT))
1237 trans |= EHCI_ITD_IOC; 1242 trans |= EHCI_ITD_IOC;
1238 trans |= length << 16; 1243 trans |= length << 16;
1239 uframe->transaction = cpu_to_le32 (trans); 1244 uframe->transaction = cpu_to_hc32(ehci, trans);
1240 1245
1241 /* might need to cross a buffer page within a uframe */ 1246 /* might need to cross a buffer page within a uframe */
1242 uframe->bufp = (buf & ~(u64)0x0fff); 1247 uframe->bufp = (buf & ~(u64)0x0fff);
@@ -1278,7 +1283,7 @@ itd_urb_transaction (
1278 if (unlikely (sched == NULL)) 1283 if (unlikely (sched == NULL))
1279 return -ENOMEM; 1284 return -ENOMEM;
1280 1285
1281 itd_sched_init (sched, stream, urb); 1286 itd_sched_init(ehci, sched, stream, urb);
1282 1287
1283 if (urb->interval < 8) 1288 if (urb->interval < 8)
1284 num_itds = 1 + (sched->span + 7) / 8; 1289 num_itds = 1 + (sched->span + 7) / 8;
@@ -1296,7 +1301,7 @@ itd_urb_transaction (
1296 /* prefer previously-allocated itds */ 1301 /* prefer previously-allocated itds */
1297 if (likely (!list_empty(&stream->free_list))) { 1302 if (likely (!list_empty(&stream->free_list))) {
1298 itd = list_entry (stream->free_list.prev, 1303 itd = list_entry (stream->free_list.prev,
1299 struct ehci_itd, itd_list); 1304 struct ehci_itd, itd_list);
1300 list_del (&itd->itd_list); 1305 list_del (&itd->itd_list);
1301 itd_dma = itd->itd_dma; 1306 itd_dma = itd->itd_dma;
1302 } else 1307 } else
@@ -1423,7 +1428,7 @@ sitd_slot_ok (
1423 uframe += period_uframes; 1428 uframe += period_uframes;
1424 } while (uframe < mod); 1429 } while (uframe < mod);
1425 1430
1426 stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7)); 1431 stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
1427 return 1; 1432 return 1;
1428} 1433}
1429 1434
@@ -1544,12 +1549,13 @@ ready:
1544/*-------------------------------------------------------------------------*/ 1549/*-------------------------------------------------------------------------*/
1545 1550
1546static inline void 1551static inline void
1547itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd) 1552itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
1553 struct ehci_itd *itd)
1548{ 1554{
1549 int i; 1555 int i;
1550 1556
1551 /* it's been recently zeroed */ 1557 /* it's been recently zeroed */
1552 itd->hw_next = EHCI_LIST_END; 1558 itd->hw_next = EHCI_LIST_END(ehci);
1553 itd->hw_bufp [0] = stream->buf0; 1559 itd->hw_bufp [0] = stream->buf0;
1554 itd->hw_bufp [1] = stream->buf1; 1560 itd->hw_bufp [1] = stream->buf1;
1555 itd->hw_bufp [2] = stream->buf2; 1561 itd->hw_bufp [2] = stream->buf2;
@@ -1561,7 +1567,8 @@ itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
1561} 1567}
1562 1568
1563static inline void 1569static inline void
1564itd_patch ( 1570itd_patch(
1571 struct ehci_hcd *ehci,
1565 struct ehci_itd *itd, 1572 struct ehci_itd *itd,
1566 struct ehci_iso_sched *iso_sched, 1573 struct ehci_iso_sched *iso_sched,
1567 unsigned index, 1574 unsigned index,
@@ -1576,17 +1583,18 @@ itd_patch (
1576 uframe &= 0x07; 1583 uframe &= 0x07;
1577 itd->index [uframe] = index; 1584 itd->index [uframe] = index;
1578 1585
1579 itd->hw_transaction [uframe] = uf->transaction; 1586 itd->hw_transaction[uframe] = uf->transaction;
1580 itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12); 1587 itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
1581 itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0); 1588 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
1582 itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32)); 1589 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
1583 1590
1584 /* iso_frame_desc[].offset must be strictly increasing */ 1591 /* iso_frame_desc[].offset must be strictly increasing */
1585 if (unlikely (uf->cross)) { 1592 if (unlikely (uf->cross)) {
1586 u64 bufp = uf->bufp + 4096; 1593 u64 bufp = uf->bufp + 4096;
1594
1587 itd->pg = ++pg; 1595 itd->pg = ++pg;
1588 itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0); 1596 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
1589 itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32)); 1597 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
1590 } 1598 }
1591} 1599}
1592 1600
@@ -1599,7 +1607,7 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1599 ehci->pshadow [frame].itd = itd; 1607 ehci->pshadow [frame].itd = itd;
1600 itd->frame = frame; 1608 itd->frame = frame;
1601 wmb (); 1609 wmb ();
1602 ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD; 1610 ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1603} 1611}
1604 1612
1605/* fit urb's itds into the selected schedule slot; activate as needed */ 1613/* fit urb's itds into the selected schedule slot; activate as needed */
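
Note how the type tag is now folded into the CPU-order value before the single cpu_to_hc32() conversion; the old code could OR in a pre-swapped __constant_cpu_to_le32 tag afterwards, but once the byte order is per-controller the plain Q_TYPE_ITD constant has to be combined first. A hedged sketch of the resulting pattern (the helper name is illustrative only):

static __hc32 periodic_itd_entry(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
	/* tag and address are combined in CPU order, then byte-swapped once */
	return cpu_to_hc32(ehci, (u32)itd->itd_dma | Q_TYPE_ITD);
}
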
@@ -1644,14 +1652,14 @@ itd_link_urb (
1644 list_move_tail (&itd->itd_list, &stream->td_list); 1652 list_move_tail (&itd->itd_list, &stream->td_list);
1645 itd->stream = iso_stream_get (stream); 1653 itd->stream = iso_stream_get (stream);
1646 itd->urb = usb_get_urb (urb); 1654 itd->urb = usb_get_urb (urb);
1647 itd_init (stream, itd); 1655 itd_init (ehci, stream, itd);
1648 } 1656 }
1649 1657
1650 uframe = next_uframe & 0x07; 1658 uframe = next_uframe & 0x07;
1651 frame = next_uframe >> 3; 1659 frame = next_uframe >> 3;
1652 1660
1653 itd->usecs [uframe] = stream->usecs; 1661 itd->usecs [uframe] = stream->usecs;
1654 itd_patch (itd, iso_sched, packet, uframe); 1662 itd_patch(ehci, itd, iso_sched, packet, uframe);
1655 1663
1656 next_uframe += stream->interval; 1664 next_uframe += stream->interval;
1657 stream->depth += stream->interval; 1665 stream->depth += stream->interval;
@@ -1699,7 +1707,7 @@ itd_complete (
1699 urb_index = itd->index[uframe]; 1707 urb_index = itd->index[uframe];
1700 desc = &urb->iso_frame_desc [urb_index]; 1708 desc = &urb->iso_frame_desc [urb_index];
1701 1709
1702 t = le32_to_cpup (&itd->hw_transaction [uframe]); 1710 t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
1703 itd->hw_transaction [uframe] = 0; 1711 itd->hw_transaction [uframe] = 0;
1704 stream->depth -= stream->interval; 1712 stream->depth -= stream->interval;
1705 1713
@@ -1829,7 +1837,8 @@ done:
1829 */ 1837 */
1830 1838
1831static inline void 1839static inline void
1832sitd_sched_init ( 1840sitd_sched_init(
1841 struct ehci_hcd *ehci,
1833 struct ehci_iso_sched *iso_sched, 1842 struct ehci_iso_sched *iso_sched,
1834 struct ehci_iso_stream *stream, 1843 struct ehci_iso_stream *stream,
1835 struct urb *urb 1844 struct urb *urb
@@ -1858,7 +1867,7 @@ sitd_sched_init (
1858 && !(urb->transfer_flags & URB_NO_INTERRUPT)) 1867 && !(urb->transfer_flags & URB_NO_INTERRUPT))
1859 trans |= SITD_IOC; 1868 trans |= SITD_IOC;
1860 trans |= length << 16; 1869 trans |= length << 16;
1861 packet->transaction = cpu_to_le32 (trans); 1870 packet->transaction = cpu_to_hc32(ehci, trans);
1862 1871
1863 /* might need to cross a buffer page within a td */ 1872 /* might need to cross a buffer page within a td */
1864 packet->bufp = buf; 1873 packet->bufp = buf;
@@ -1894,7 +1903,7 @@ sitd_urb_transaction (
1894 if (iso_sched == NULL) 1903 if (iso_sched == NULL)
1895 return -ENOMEM; 1904 return -ENOMEM;
1896 1905
1897 sitd_sched_init (iso_sched, stream, urb); 1906 sitd_sched_init(ehci, iso_sched, stream, urb);
1898 1907
1899 /* allocate/init sITDs */ 1908 /* allocate/init sITDs */
1900 spin_lock_irqsave (&ehci->lock, flags); 1909 spin_lock_irqsave (&ehci->lock, flags);
@@ -1946,7 +1955,8 @@ sitd_urb_transaction (
1946/*-------------------------------------------------------------------------*/ 1955/*-------------------------------------------------------------------------*/
1947 1956
1948static inline void 1957static inline void
1949sitd_patch ( 1958sitd_patch(
1959 struct ehci_hcd *ehci,
1950 struct ehci_iso_stream *stream, 1960 struct ehci_iso_stream *stream,
1951 struct ehci_sitd *sitd, 1961 struct ehci_sitd *sitd,
1952 struct ehci_iso_sched *iso_sched, 1962 struct ehci_iso_sched *iso_sched,
@@ -1956,20 +1966,20 @@ sitd_patch (
1956 struct ehci_iso_packet *uf = &iso_sched->packet [index]; 1966 struct ehci_iso_packet *uf = &iso_sched->packet [index];
1957 u64 bufp = uf->bufp; 1967 u64 bufp = uf->bufp;
1958 1968
1959 sitd->hw_next = EHCI_LIST_END; 1969 sitd->hw_next = EHCI_LIST_END(ehci);
1960 sitd->hw_fullspeed_ep = stream->address; 1970 sitd->hw_fullspeed_ep = stream->address;
1961 sitd->hw_uframe = stream->splits; 1971 sitd->hw_uframe = stream->splits;
1962 sitd->hw_results = uf->transaction; 1972 sitd->hw_results = uf->transaction;
1963 sitd->hw_backpointer = EHCI_LIST_END; 1973 sitd->hw_backpointer = EHCI_LIST_END(ehci);
1964 1974
1965 bufp = uf->bufp; 1975 bufp = uf->bufp;
1966 sitd->hw_buf [0] = cpu_to_le32 (bufp); 1976 sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
1967 sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32); 1977 sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
1968 1978
1969 sitd->hw_buf [1] = cpu_to_le32 (uf->buf1); 1979 sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
1970 if (uf->cross) 1980 if (uf->cross)
1971 bufp += 4096; 1981 bufp += 4096;
1972 sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32); 1982 sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
1973 sitd->index = index; 1983 sitd->index = index;
1974} 1984}
1975 1985
@@ -1982,7 +1992,7 @@ sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
1982 ehci->pshadow [frame].sitd = sitd; 1992 ehci->pshadow [frame].sitd = sitd;
1983 sitd->frame = frame; 1993 sitd->frame = frame;
1984 wmb (); 1994 wmb ();
1985 ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD; 1995 ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
1986} 1996}
1987 1997
1988/* fit urb's sitds into the selected schedule slot; activate as needed */ 1998/* fit urb's sitds into the selected schedule slot; activate as needed */
@@ -2010,7 +2020,7 @@ sitd_link_urb (
2010 urb->dev->devpath, stream->bEndpointAddress & 0x0f, 2020 urb->dev->devpath, stream->bEndpointAddress & 0x0f,
2011 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out", 2021 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
2012 (next_uframe >> 3) % ehci->periodic_size, 2022 (next_uframe >> 3) % ehci->periodic_size,
2013 stream->interval, le32_to_cpu (stream->splits)); 2023 stream->interval, hc32_to_cpu(ehci, stream->splits));
2014 stream->start = jiffies; 2024 stream->start = jiffies;
2015 } 2025 }
2016 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; 2026 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
@@ -2031,7 +2041,7 @@ sitd_link_urb (
2031 sitd->stream = iso_stream_get (stream); 2041 sitd->stream = iso_stream_get (stream);
2032 sitd->urb = usb_get_urb (urb); 2042 sitd->urb = usb_get_urb (urb);
2033 2043
2034 sitd_patch (stream, sitd, sched, packet); 2044 sitd_patch(ehci, stream, sitd, sched, packet);
2035 sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size, 2045 sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
2036 sitd); 2046 sitd);
2037 2047
@@ -2069,7 +2079,7 @@ sitd_complete (
2069 2079
2070 urb_index = sitd->index; 2080 urb_index = sitd->index;
2071 desc = &urb->iso_frame_desc [urb_index]; 2081 desc = &urb->iso_frame_desc [urb_index];
2072 t = le32_to_cpup (&sitd->hw_results); 2082 t = hc32_to_cpup(ehci, &sitd->hw_results);
2073 2083
2074 /* report transfer status */ 2084 /* report transfer status */
2075 if (t & SITD_ERRS) { 2085 if (t & SITD_ERRS) {
@@ -2224,7 +2234,7 @@ scan_periodic (struct ehci_hcd *ehci)
2224 2234
2225 for (;;) { 2235 for (;;) {
2226 union ehci_shadow q, *q_p; 2236 union ehci_shadow q, *q_p;
2227 __le32 type, *hw_p; 2237 __hc32 type, *hw_p;
2228 unsigned uframes; 2238 unsigned uframes;
2229 2239
2230 /* don't scan past the live uframe */ 2240 /* don't scan past the live uframe */
@@ -2242,7 +2252,7 @@ restart:
2242 q_p = &ehci->pshadow [frame]; 2252 q_p = &ehci->pshadow [frame];
2243 hw_p = &ehci->periodic [frame]; 2253 hw_p = &ehci->periodic [frame];
2244 q.ptr = q_p->ptr; 2254 q.ptr = q_p->ptr;
2245 type = Q_NEXT_TYPE (*hw_p); 2255 type = Q_NEXT_TYPE(ehci, *hw_p);
2246 modified = 0; 2256 modified = 0;
2247 2257
2248 while (q.ptr != NULL) { 2258 while (q.ptr != NULL) {
@@ -2251,11 +2261,11 @@ restart:
2251 int live; 2261 int live;
2252 2262
2253 live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state); 2263 live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
2254 switch (type) { 2264 switch (hc32_to_cpu(ehci, type)) {
2255 case Q_TYPE_QH: 2265 case Q_TYPE_QH:
2256 /* handle any completions */ 2266 /* handle any completions */
2257 temp.qh = qh_get (q.qh); 2267 temp.qh = qh_get (q.qh);
2258 type = Q_NEXT_TYPE (q.qh->hw_next); 2268 type = Q_NEXT_TYPE(ehci, q.qh->hw_next);
2259 q = q.qh->qh_next; 2269 q = q.qh->qh_next;
2260 modified = qh_completions (ehci, temp.qh); 2270 modified = qh_completions (ehci, temp.qh);
2261 if (unlikely (list_empty (&temp.qh->qtd_list))) 2271 if (unlikely (list_empty (&temp.qh->qtd_list)))
@@ -2266,10 +2276,10 @@ restart:
2266 /* for "save place" FSTNs, look at QH entries 2276 /* for "save place" FSTNs, look at QH entries
2267 * in the previous frame for completions. 2277 * in the previous frame for completions.
2268 */ 2278 */
2269 if (q.fstn->hw_prev != EHCI_LIST_END) { 2279 if (q.fstn->hw_prev != EHCI_LIST_END(ehci)) {
2270 dbg ("ignoring completions from FSTNs"); 2280 dbg ("ignoring completions from FSTNs");
2271 } 2281 }
2272 type = Q_NEXT_TYPE (q.fstn->hw_next); 2282 type = Q_NEXT_TYPE(ehci, q.fstn->hw_next);
2273 q = q.fstn->fstn_next; 2283 q = q.fstn->fstn_next;
2274 break; 2284 break;
2275 case Q_TYPE_ITD: 2285 case Q_TYPE_ITD:
@@ -2277,11 +2287,12 @@ restart:
2277 rmb (); 2287 rmb ();
2278 for (uf = live ? uframes : 8; uf < 8; uf++) { 2288 for (uf = live ? uframes : 8; uf < 8; uf++) {
2279 if (0 == (q.itd->hw_transaction [uf] 2289 if (0 == (q.itd->hw_transaction [uf]
2280 & ITD_ACTIVE)) 2290 & ITD_ACTIVE(ehci)))
2281 continue; 2291 continue;
2282 q_p = &q.itd->itd_next; 2292 q_p = &q.itd->itd_next;
2283 hw_p = &q.itd->hw_next; 2293 hw_p = &q.itd->hw_next;
2284 type = Q_NEXT_TYPE (q.itd->hw_next); 2294 type = Q_NEXT_TYPE(ehci,
2295 q.itd->hw_next);
2285 q = *q_p; 2296 q = *q_p;
2286 break; 2297 break;
2287 } 2298 }
@@ -2293,23 +2304,24 @@ restart:
2293 */ 2304 */
2294 *q_p = q.itd->itd_next; 2305 *q_p = q.itd->itd_next;
2295 *hw_p = q.itd->hw_next; 2306 *hw_p = q.itd->hw_next;
2296 type = Q_NEXT_TYPE (q.itd->hw_next); 2307 type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2297 wmb(); 2308 wmb();
2298 modified = itd_complete (ehci, q.itd); 2309 modified = itd_complete (ehci, q.itd);
2299 q = *q_p; 2310 q = *q_p;
2300 break; 2311 break;
2301 case Q_TYPE_SITD: 2312 case Q_TYPE_SITD:
2302 if ((q.sitd->hw_results & SITD_ACTIVE) 2313 if ((q.sitd->hw_results & SITD_ACTIVE(ehci))
2303 && live) { 2314 && live) {
2304 q_p = &q.sitd->sitd_next; 2315 q_p = &q.sitd->sitd_next;
2305 hw_p = &q.sitd->hw_next; 2316 hw_p = &q.sitd->hw_next;
2306 type = Q_NEXT_TYPE (q.sitd->hw_next); 2317 type = Q_NEXT_TYPE(ehci,
2318 q.sitd->hw_next);
2307 q = *q_p; 2319 q = *q_p;
2308 break; 2320 break;
2309 } 2321 }
2310 *q_p = q.sitd->sitd_next; 2322 *q_p = q.sitd->sitd_next;
2311 *hw_p = q.sitd->hw_next; 2323 *hw_p = q.sitd->hw_next;
2312 type = Q_NEXT_TYPE (q.sitd->hw_next); 2324 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2313 wmb(); 2325 wmb();
2314 modified = sitd_complete (ehci, q.sitd); 2326 modified = sitd_complete (ehci, q.sitd);
2315 q = *q_p; 2327 q = *q_p;
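
The scan loop above shows the other half of the convention: the tag read from the hardware word is converted back with hc32_to_cpu() exactly once, so the switch cases can stay plain Q_TYPE_* integers. As a standalone sketch (not in the patch), the dispatch reduces to:

static const char *q_type_name(struct ehci_hcd *ehci, __hc32 hw_next)
{
	switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, hw_next))) {
	case Q_TYPE_ITD:	return "itd";
	case Q_TYPE_QH:		return "qh";
	case Q_TYPE_SITD:	return "sitd";
	case Q_TYPE_FSTN:	return "fstn";
	}
	return "?";		/* unreachable: only two tag bits */
}
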
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index a9ba5d28cdc2..79ad2af5ef6a 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -21,6 +21,22 @@
21 21
22/* definitions used for the EHCI driver */ 22/* definitions used for the EHCI driver */
23 23
24/*
 25 * __hc32 and __hc16 are "Host Controller" types; they may be equivalent to
26 * __leXX (normally) or __beXX (given EHCI_BIG_ENDIAN_DESC), depending on
27 * the host controller implementation.
28 *
29 * To facilitate the strongest possible byte-order checking from "sparse"
30 * and so on, we use __leXX unless that's not practical.
31 */
32#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
33typedef __u32 __bitwise __hc32;
34typedef __u16 __bitwise __hc16;
35#else
36#define __hc32 __le32
37#define __hc16 __le16
38#endif
39
24/* statistics can be kept for for tuning/monitoring */ 40/* statistics can be kept for for tuning/monitoring */
25struct ehci_stats { 41struct ehci_stats {
26 /* irq usage */ 42 /* irq usage */
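
The reason for defaulting __hc32 to __le32 is that "sparse" can then keep doing full byte-order checking in the common little-endian-descriptor configuration. A sketch of the kind of mistake it still catches, assuming endian checking is enabled; sparse_example() is purely illustrative:

static void sparse_example(struct ehci_hcd *ehci, struct ehci_qtd *qtd, u32 raw)
{
	qtd->hw_token = raw;			/* sparse: bad assignment, plain u32 */
	qtd->hw_token = cpu_to_hc32(ehci, raw);	/* correct: converted to __hc32 */
}
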
@@ -70,7 +86,7 @@ struct ehci_hcd { /* one per controller */
70 /* periodic schedule support */ 86 /* periodic schedule support */
71#define DEFAULT_I_TDPS 1024 /* some HCs can do less */ 87#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
72 unsigned periodic_size; 88 unsigned periodic_size;
73 __le32 *periodic; /* hw periodic table */ 89 __hc32 *periodic; /* hw periodic table */
74 dma_addr_t periodic_dma; 90 dma_addr_t periodic_dma;
75 unsigned i_thresh; /* uframes HC might cache */ 91 unsigned i_thresh; /* uframes HC might cache */
76 92
@@ -103,6 +119,7 @@ struct ehci_hcd { /* one per controller */
103 unsigned no_selective_suspend:1; 119 unsigned no_selective_suspend:1;
104 unsigned has_fsl_port_bug:1; /* FreeScale */ 120 unsigned has_fsl_port_bug:1; /* FreeScale */
105 unsigned big_endian_mmio:1; 121 unsigned big_endian_mmio:1;
122 unsigned big_endian_desc:1;
106 123
107 u8 sbrn; /* packed release number */ 124 u8 sbrn; /* packed release number */
108 125
@@ -309,7 +326,7 @@ struct ehci_dbg_port {
309 326
310/*-------------------------------------------------------------------------*/ 327/*-------------------------------------------------------------------------*/
311 328
312#define QTD_NEXT(dma) cpu_to_le32((u32)dma) 329#define QTD_NEXT(ehci, dma) cpu_to_hc32(ehci, (u32)dma)
313 330
314/* 331/*
315 * EHCI Specification 0.95 Section 3.5 332 * EHCI Specification 0.95 Section 3.5
@@ -321,9 +338,9 @@ struct ehci_dbg_port {
321 */ 338 */
322struct ehci_qtd { 339struct ehci_qtd {
323 /* first part defined by EHCI spec */ 340 /* first part defined by EHCI spec */
324 __le32 hw_next; /* see EHCI 3.5.1 */ 341 __hc32 hw_next; /* see EHCI 3.5.1 */
325 __le32 hw_alt_next; /* see EHCI 3.5.2 */ 342 __hc32 hw_alt_next; /* see EHCI 3.5.2 */
326 __le32 hw_token; /* see EHCI 3.5.3 */ 343 __hc32 hw_token; /* see EHCI 3.5.3 */
327#define QTD_TOGGLE (1 << 31) /* data toggle */ 344#define QTD_TOGGLE (1 << 31) /* data toggle */
328#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff) 345#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
329#define QTD_IOC (1 << 15) /* interrupt on complete */ 346#define QTD_IOC (1 << 15) /* interrupt on complete */
@@ -337,8 +354,13 @@ struct ehci_qtd {
337#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */ 354#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
338#define QTD_STS_STS (1 << 1) /* split transaction state */ 355#define QTD_STS_STS (1 << 1) /* split transaction state */
339#define QTD_STS_PING (1 << 0) /* issue PING? */ 356#define QTD_STS_PING (1 << 0) /* issue PING? */
340 __le32 hw_buf [5]; /* see EHCI 3.5.4 */ 357
341 __le32 hw_buf_hi [5]; /* Appendix B */ 358#define ACTIVE_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_ACTIVE)
359#define HALT_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_HALT)
360#define STATUS_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_STS)
361
362 __hc32 hw_buf [5]; /* see EHCI 3.5.4 */
363 __hc32 hw_buf_hi [5]; /* Appendix B */
342 364
343 /* the rest is HCD-private */ 365 /* the rest is HCD-private */
344 dma_addr_t qtd_dma; /* qtd address */ 366 dma_addr_t qtd_dma; /* qtd address */
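
These per-controller token constants replace what used to be compile-time __constant_cpu_to_le32() values; the qh_reactivate_split_intr_qhs() hunk above caches them in locals. A minimal sketch (hypothetical helper, not in the patch) of the same halt sequence:

static void qh_halt_overlay(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32	token = qh->hw_token;

	/* set Halted and clear Active in one descriptor-order word */
	qh->hw_token = (token | HALT_BIT(ehci)) & ~ACTIVE_BIT(ehci);
	wmb();
}
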
@@ -348,26 +370,33 @@ struct ehci_qtd {
348} __attribute__ ((aligned (32))); 370} __attribute__ ((aligned (32)));
349 371
350/* mask NakCnt+T in qh->hw_alt_next */ 372/* mask NakCnt+T in qh->hw_alt_next */
351#define QTD_MASK __constant_cpu_to_le32 (~0x1f) 373#define QTD_MASK(ehci) cpu_to_hc32 (ehci, ~0x1f)
352 374
353#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1) 375#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
354 376
355/*-------------------------------------------------------------------------*/ 377/*-------------------------------------------------------------------------*/
356 378
357/* type tag from {qh,itd,sitd,fstn}->hw_next */ 379/* type tag from {qh,itd,sitd,fstn}->hw_next */
358#define Q_NEXT_TYPE(dma) ((dma) & __constant_cpu_to_le32 (3 << 1)) 380#define Q_NEXT_TYPE(ehci,dma) ((dma) & cpu_to_hc32(ehci, 3 << 1))
359 381
382/*
 383 * The following type-tag values are no longer wrapped in the
 384 * __constant_cpu_to_le32() macro, because the driver now has to switch
 385 * "dynamically" between big-endian and little-endian descriptors, so
 386 * that the same code can drive both an SoC EHCI controller using
 387 * big-endian descriptors and a normal little-endian PCI EHCI controller.
388 */
360/* values for that type tag */ 389/* values for that type tag */
361#define Q_TYPE_ITD __constant_cpu_to_le32 (0 << 1) 390#define Q_TYPE_ITD (0 << 1)
362#define Q_TYPE_QH __constant_cpu_to_le32 (1 << 1) 391#define Q_TYPE_QH (1 << 1)
363#define Q_TYPE_SITD __constant_cpu_to_le32 (2 << 1) 392#define Q_TYPE_SITD (2 << 1)
364#define Q_TYPE_FSTN __constant_cpu_to_le32 (3 << 1) 393#define Q_TYPE_FSTN (3 << 1)
365 394
366/* next async queue entry, or pointer to interrupt/periodic QH */ 395/* next async queue entry, or pointer to interrupt/periodic QH */
367#define QH_NEXT(dma) (cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH) 396#define QH_NEXT(ehci,dma) (cpu_to_hc32(ehci, (((u32)dma)&~0x01f)|Q_TYPE_QH))
368 397
369/* for periodic/async schedules and qtd lists, mark end of list */ 398/* for periodic/async schedules and qtd lists, mark end of list */
370#define EHCI_LIST_END __constant_cpu_to_le32(1) /* "null pointer" to hw */ 399#define EHCI_LIST_END(ehci) cpu_to_hc32(ehci, 1) /* "null pointer" to hw */
371 400
372/* 401/*
373 * Entries in periodic shadow table are pointers to one of four kinds 402 * Entries in periodic shadow table are pointers to one of four kinds
@@ -382,7 +411,7 @@ union ehci_shadow {
382 struct ehci_itd *itd; /* Q_TYPE_ITD */ 411 struct ehci_itd *itd; /* Q_TYPE_ITD */
383 struct ehci_sitd *sitd; /* Q_TYPE_SITD */ 412 struct ehci_sitd *sitd; /* Q_TYPE_SITD */
384 struct ehci_fstn *fstn; /* Q_TYPE_FSTN */ 413 struct ehci_fstn *fstn; /* Q_TYPE_FSTN */
385 __le32 *hw_next; /* (all types) */ 414 __hc32 *hw_next; /* (all types) */
386 void *ptr; 415 void *ptr;
387}; 416};
388 417
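
Taken together with the shadow union above, the parameterized macros compose the same way the old constants did. A sketch (not part of the patch) of pointing one periodic slot at a QH; periodic_slot_set_qh() is a made-up helper name:

static void periodic_slot_set_qh(struct ehci_hcd *ehci, unsigned frame,
		struct ehci_qh *qh)
{
	if (!qh) {
		/* mark the slot empty: the hc32 "null pointer" */
		ehci->pshadow[frame].ptr = NULL;
		ehci->periodic[frame] = EHCI_LIST_END(ehci);
		return;
	}
	qh->hw_next = EHCI_LIST_END(ehci);
	ehci->pshadow[frame].qh = qh;
	wmb();
	/* QH_NEXT() masks the low dma bits and ORs in Q_TYPE_QH */
	ehci->periodic[frame] = QH_NEXT(ehci, qh->qh_dma);
}
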
@@ -398,24 +427,27 @@ union ehci_shadow {
398 427
399struct ehci_qh { 428struct ehci_qh {
400 /* first part defined by EHCI spec */ 429 /* first part defined by EHCI spec */
401 __le32 hw_next; /* see EHCI 3.6.1 */ 430 __hc32 hw_next; /* see EHCI 3.6.1 */
402 __le32 hw_info1; /* see EHCI 3.6.2 */ 431 __hc32 hw_info1; /* see EHCI 3.6.2 */
403#define QH_HEAD 0x00008000 432#define QH_HEAD 0x00008000
404#define QH_INACTIVATE 0x00000080 433#define QH_INACTIVATE 0x00000080
405 __le32 hw_info2; /* see EHCI 3.6.2 */ 434
435#define INACTIVATE_BIT(ehci) cpu_to_hc32(ehci, QH_INACTIVATE)
436
437 __hc32 hw_info2; /* see EHCI 3.6.2 */
406#define QH_SMASK 0x000000ff 438#define QH_SMASK 0x000000ff
407#define QH_CMASK 0x0000ff00 439#define QH_CMASK 0x0000ff00
408#define QH_HUBADDR 0x007f0000 440#define QH_HUBADDR 0x007f0000
409#define QH_HUBPORT 0x3f800000 441#define QH_HUBPORT 0x3f800000
410#define QH_MULT 0xc0000000 442#define QH_MULT 0xc0000000
411 __le32 hw_current; /* qtd list - see EHCI 3.6.4 */ 443 __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
412 444
413 /* qtd overlay (hardware parts of a struct ehci_qtd) */ 445 /* qtd overlay (hardware parts of a struct ehci_qtd) */
414 __le32 hw_qtd_next; 446 __hc32 hw_qtd_next;
415 __le32 hw_alt_next; 447 __hc32 hw_alt_next;
416 __le32 hw_token; 448 __hc32 hw_token;
417 __le32 hw_buf [5]; 449 __hc32 hw_buf [5];
418 __le32 hw_buf_hi [5]; 450 __hc32 hw_buf_hi [5];
419 451
420 /* the rest is HCD-private */ 452 /* the rest is HCD-private */
421 dma_addr_t qh_dma; /* address of qh */ 453 dma_addr_t qh_dma; /* address of qh */
@@ -456,7 +488,7 @@ struct ehci_qh {
456struct ehci_iso_packet { 488struct ehci_iso_packet {
457 /* These will be copied to iTD when scheduling */ 489 /* These will be copied to iTD when scheduling */
458 u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */ 490 u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
459 __le32 transaction; /* itd->hw_transaction[i] |= */ 491 __hc32 transaction; /* itd->hw_transaction[i] |= */
460 u8 cross; /* buf crosses pages */ 492 u8 cross; /* buf crosses pages */
461 /* for full speed OUT splits */ 493 /* for full speed OUT splits */
462 u32 buf1; 494 u32 buf1;
@@ -478,8 +510,8 @@ struct ehci_iso_sched {
478 */ 510 */
479struct ehci_iso_stream { 511struct ehci_iso_stream {
480 /* first two fields match QH, but info1 == 0 */ 512 /* first two fields match QH, but info1 == 0 */
481 __le32 hw_next; 513 __hc32 hw_next;
482 __le32 hw_info1; 514 __hc32 hw_info1;
483 515
484 u32 refcount; 516 u32 refcount;
485 u8 bEndpointAddress; 517 u8 bEndpointAddress;
@@ -494,7 +526,7 @@ struct ehci_iso_stream {
494 unsigned long start; /* jiffies */ 526 unsigned long start; /* jiffies */
495 unsigned long rescheduled; 527 unsigned long rescheduled;
496 int next_uframe; 528 int next_uframe;
497 __le32 splits; 529 __hc32 splits;
498 530
499 /* the rest is derived from the endpoint descriptor, 531 /* the rest is derived from the endpoint descriptor,
500 * trusting urb->interval == f(epdesc->bInterval) and 532 * trusting urb->interval == f(epdesc->bInterval) and
@@ -508,12 +540,12 @@ struct ehci_iso_stream {
508 unsigned bandwidth; 540 unsigned bandwidth;
509 541
510 /* This is used to initialize iTD's hw_bufp fields */ 542 /* This is used to initialize iTD's hw_bufp fields */
511 __le32 buf0; 543 __hc32 buf0;
512 __le32 buf1; 544 __hc32 buf1;
513 __le32 buf2; 545 __hc32 buf2;
514 546
515 /* this is used to initialize sITD's tt info */ 547 /* this is used to initialize sITD's tt info */
516 __le32 address; 548 __hc32 address;
517}; 549};
518 550
519/*-------------------------------------------------------------------------*/ 551/*-------------------------------------------------------------------------*/
@@ -526,8 +558,8 @@ struct ehci_iso_stream {
526 */ 558 */
527struct ehci_itd { 559struct ehci_itd {
528 /* first part defined by EHCI spec */ 560 /* first part defined by EHCI spec */
529 __le32 hw_next; /* see EHCI 3.3.1 */ 561 __hc32 hw_next; /* see EHCI 3.3.1 */
530 __le32 hw_transaction [8]; /* see EHCI 3.3.2 */ 562 __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */
531#define EHCI_ISOC_ACTIVE (1<<31) /* activate transfer this slot */ 563#define EHCI_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
532#define EHCI_ISOC_BUF_ERR (1<<30) /* Data buffer error */ 564#define EHCI_ISOC_BUF_ERR (1<<30) /* Data buffer error */
533#define EHCI_ISOC_BABBLE (1<<29) /* babble detected */ 565#define EHCI_ISOC_BABBLE (1<<29) /* babble detected */
@@ -535,10 +567,10 @@ struct ehci_itd {
535#define EHCI_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff) 567#define EHCI_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
536#define EHCI_ITD_IOC (1 << 15) /* interrupt on complete */ 568#define EHCI_ITD_IOC (1 << 15) /* interrupt on complete */
537 569
538#define ITD_ACTIVE __constant_cpu_to_le32(EHCI_ISOC_ACTIVE) 570#define ITD_ACTIVE(ehci) cpu_to_hc32(ehci, EHCI_ISOC_ACTIVE)
539 571
540 __le32 hw_bufp [7]; /* see EHCI 3.3.3 */ 572 __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */
541 __le32 hw_bufp_hi [7]; /* Appendix B */ 573 __hc32 hw_bufp_hi [7]; /* Appendix B */
542 574
543 /* the rest is HCD-private */ 575 /* the rest is HCD-private */
544 dma_addr_t itd_dma; /* for this itd */ 576 dma_addr_t itd_dma; /* for this itd */
@@ -565,11 +597,11 @@ struct ehci_itd {
565 */ 597 */
566struct ehci_sitd { 598struct ehci_sitd {
567 /* first part defined by EHCI spec */ 599 /* first part defined by EHCI spec */
568 __le32 hw_next; 600 __hc32 hw_next;
569/* uses bit field macros above - see EHCI 0.95 Table 3-8 */ 601/* uses bit field macros above - see EHCI 0.95 Table 3-8 */
570 __le32 hw_fullspeed_ep; /* EHCI table 3-9 */ 602 __hc32 hw_fullspeed_ep; /* EHCI table 3-9 */
571 __le32 hw_uframe; /* EHCI table 3-10 */ 603 __hc32 hw_uframe; /* EHCI table 3-10 */
572 __le32 hw_results; /* EHCI table 3-11 */ 604 __hc32 hw_results; /* EHCI table 3-11 */
573#define SITD_IOC (1 << 31) /* interrupt on completion */ 605#define SITD_IOC (1 << 31) /* interrupt on completion */
574#define SITD_PAGE (1 << 30) /* buffer 0/1 */ 606#define SITD_PAGE (1 << 30) /* buffer 0/1 */
575#define SITD_LENGTH(x) (0x3ff & ((x)>>16)) 607#define SITD_LENGTH(x) (0x3ff & ((x)>>16))
@@ -581,11 +613,11 @@ struct ehci_sitd {
581#define SITD_STS_MMF (1 << 2) /* incomplete split transaction */ 613#define SITD_STS_MMF (1 << 2) /* incomplete split transaction */
582#define SITD_STS_STS (1 << 1) /* split transaction state */ 614#define SITD_STS_STS (1 << 1) /* split transaction state */
583 615
584#define SITD_ACTIVE __constant_cpu_to_le32(SITD_STS_ACTIVE) 616#define SITD_ACTIVE(ehci) cpu_to_hc32(ehci, SITD_STS_ACTIVE)
585 617
586 __le32 hw_buf [2]; /* EHCI table 3-12 */ 618 __hc32 hw_buf [2]; /* EHCI table 3-12 */
587 __le32 hw_backpointer; /* EHCI table 3-13 */ 619 __hc32 hw_backpointer; /* EHCI table 3-13 */
588 __le32 hw_buf_hi [2]; /* Appendix B */ 620 __hc32 hw_buf_hi [2]; /* Appendix B */
589 621
590 /* the rest is HCD-private */ 622 /* the rest is HCD-private */
591 dma_addr_t sitd_dma; 623 dma_addr_t sitd_dma;
@@ -610,8 +642,8 @@ struct ehci_sitd {
610 * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work. 642 * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
611 */ 643 */
612struct ehci_fstn { 644struct ehci_fstn {
613 __le32 hw_next; /* any periodic q entry */ 645 __hc32 hw_next; /* any periodic q entry */
614 __le32 hw_prev; /* qh or EHCI_LIST_END */ 646 __hc32 hw_prev; /* qh or EHCI_LIST_END */
615 647
616 /* the rest is HCD-private */ 648 /* the rest is HCD-private */
617 dma_addr_t fstn_dma; 649 dma_addr_t fstn_dma;
@@ -683,8 +715,21 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
683#define ehci_big_endian_mmio(e) 0 715#define ehci_big_endian_mmio(e) 0
684#endif 716#endif
685 717
686static inline unsigned int ehci_readl (const struct ehci_hcd *ehci, 718/*
687 __u32 __iomem * regs) 719 * Big-endian read/write functions are arch-specific.
720 * Other arches can be added if/when they're needed.
721 *
722 * REVISIT: arch/powerpc now has readl/writel_be, so the
723 * definition below can die once the 4xx support is
724 * finally ported over.
725 */
726#if defined(CONFIG_PPC)
727#define readl_be(addr) in_be32((__force unsigned *)addr)
728#define writel_be(val, addr) out_be32((__force unsigned *)addr, val)
729#endif
730
731static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
732 __u32 __iomem * regs)
688{ 733{
689#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO 734#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
690 return ehci_big_endian_mmio(ehci) ? 735 return ehci_big_endian_mmio(ehci) ?
@@ -695,8 +740,8 @@ static inline unsigned int ehci_readl (const struct ehci_hcd *ehci,
695#endif 740#endif
696} 741}
697 742
698static inline void ehci_writel (const struct ehci_hcd *ehci, 743static inline void ehci_writel(const struct ehci_hcd *ehci,
699 const unsigned int val, __u32 __iomem *regs) 744 const unsigned int val, __u32 __iomem *regs)
700{ 745{
701#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO 746#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
702 ehci_big_endian_mmio(ehci) ? 747 ehci_big_endian_mmio(ehci) ?
@@ -709,6 +754,62 @@ static inline void ehci_writel (const struct ehci_hcd *ehci,
709 754
710/*-------------------------------------------------------------------------*/ 755/*-------------------------------------------------------------------------*/
711 756
757/*
758 * The AMCC 440EPx not only implements its EHCI registers in big-endian
759 * format, but also its DMA data structures (descriptors).
760 *
761 * EHCI controllers accessed through PCI work normally (little-endian
762 * everywhere), so we won't bother supporting a BE-only mode for now.
763 */
764#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
765#define ehci_big_endian_desc(e) ((e)->big_endian_desc)
766
767/* cpu to ehci */
768static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
769{
770 return ehci_big_endian_desc(ehci)
771 ? (__force __hc32)cpu_to_be32(x)
772 : (__force __hc32)cpu_to_le32(x);
773}
774
775/* ehci to cpu */
776static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
777{
778 return ehci_big_endian_desc(ehci)
779 ? be32_to_cpu((__force __be32)x)
780 : le32_to_cpu((__force __le32)x);
781}
782
783static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
784{
785 return ehci_big_endian_desc(ehci)
786 ? be32_to_cpup((__force __be32 *)x)
787 : le32_to_cpup((__force __le32 *)x);
788}
789
790#else
791
792/* cpu to ehci */
793static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
794{
795 return cpu_to_le32(x);
796}
797
798/* ehci to cpu */
799static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
800{
801 return le32_to_cpu(x);
802}
803
804static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
805{
806 return le32_to_cpup(x);
807}
808
809#endif
810
811/*-------------------------------------------------------------------------*/
812
712#ifndef DEBUG 813#ifndef DEBUG
713#define STUB_DEBUG_FILES 814#define STUB_DEBUG_FILES
714#endif /* DEBUG */ 815#endif /* DEBUG */
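
A final usage sketch (not part of the patch): once every descriptor field is typed __hc32, every store goes through cpu_to_hc32() and every load through hc32_to_cpu()/hc32_to_cpup(), and in the default little-endian configuration these collapse back to the old cpu_to_le32()/le32_to_cpu() calls. The helper names below are illustrative only:

static void qtd_fill_token(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
		u32 total_bytes, int toggle)
{
	u32	token = QTD_STS_ACTIVE | (total_bytes << 16);

	if (toggle)
		token |= QTD_TOGGLE;
	/* CPU byte order -> descriptor byte order, exactly once */
	qtd->hw_token = cpu_to_hc32(ehci, token);
}

static int qtd_still_active(struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
	/* descriptor byte order -> CPU byte order for the status test */
	return (hc32_to_cpup(ehci, &qtd->hw_token) & QTD_STS_ACTIVE) != 0;
}
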