author	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 00:26:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 00:26:12 -0400
commit	7a9b149212f3716c598afe973b6261fd58453b7a (patch)
tree	477716d84c71da124448b72278e98da28aadbd3d /drivers/usb/host
parent	3d62e3fdce8ef265a3706c52ae1ca6ab84e30f0e (diff)
parent	e26bcf37234c67624f62d9fc95f922b8dbda1363 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (229 commits)
  USB: remove unused usb_buffer_alloc and usb_buffer_free macros
  usb: musb: update gfp/slab.h includes
  USB: ftdi_sio: fix legacy SIO-device header
  USB: kl5usb105: reimplement using generic framework
  USB: kl5usb105: minor clean ups
  USB: kl5usb105: fix memory leak
  USB: io_ti: use kfifo to implement write buffering
  USB: io_ti: remove unsused private counter
  USB: ti_usb: use kfifo to implement write buffering
  USB: ir-usb: fix incorrect write-buffer length
  USB: aircable: fix incorrect write-buffer length
  USB: safe_serial: straighten out read processing
  USB: safe_serial: reimplement read using generic framework
  USB: safe_serial: reimplement write using generic framework
  usb-storage: always print quirks
  USB: usb-storage: trivial debug improvements
  USB: oti6858: use port write fifo
  USB: oti6858: use kfifo to implement write buffering
  USB: cypress_m8: use kfifo to implement write buffering
  USB: cypress_m8: remove unused drain define
  ...

Fix up conflicts (due to usb_buffer_alloc/free renaming) in
	drivers/input/tablet/acecad.c
	drivers/input/tablet/kbtab.c
	drivers/input/tablet/wacom_sys.c
	drivers/media/video/gspca/gspca.c
	sound/usb/usbaudio.c
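As context for the conflict fix-ups listed above: the series renames usbcore's DMA-buffer helpers while keeping their arguments and semantics. A minimal sketch of the substitution follows; the device pointer, buffer length and error handling are illustrative only, not taken from any of the conflicting files.

#include <linux/usb.h>

/* Old names, removed by this series:
 *	buf = usb_buffer_alloc(udev, len, GFP_KERNEL, &dma);
 *	usb_buffer_free(udev, len, buf, dma);
 * New names, identical signatures:
 */
static int example_coherent_buffer(struct usb_device *udev, size_t len)
{
	dma_addr_t dma;
	void *buf;

	buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &dma);
	if (!buf)
		return -ENOMEM;

	/* ... use buf as a DMA-coherent transfer buffer ... */

	usb_free_coherent(udev, len, buf, dma);
	return 0;
}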
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--	drivers/usb/host/Kconfig	15
-rw-r--r--	drivers/usb/host/ehci-au1xxx.c	27
-rw-r--r--	drivers/usb/host/ehci-fsl.c	2
-rw-r--r--	drivers/usb/host/ehci-hcd.c	3
-rw-r--r--	drivers/usb/host/ehci-hub.c	182
-rw-r--r--	drivers/usb/host/ehci-omap.c	21
-rw-r--r--	drivers/usb/host/ehci-pci.c	18
-rw-r--r--	drivers/usb/host/ehci-q.c	2
-rw-r--r--	drivers/usb/host/ehci.h	18
-rw-r--r--	drivers/usb/host/fhci-dbg.c	2
-rw-r--r--	drivers/usb/host/fhci-hcd.c	2
-rw-r--r--	drivers/usb/host/fhci-hub.c	2
-rw-r--r--	drivers/usb/host/fhci-mem.c	2
-rw-r--r--	drivers/usb/host/fhci-q.c	2
-rw-r--r--	drivers/usb/host/fhci-sched.c	2
-rw-r--r--	drivers/usb/host/fhci-tds.c	2
-rw-r--r--	drivers/usb/host/fhci.h	11
-rw-r--r--	drivers/usb/host/imx21-hcd.c	2
-rw-r--r--	drivers/usb/host/isp116x-hcd.c	2
-rw-r--r--	drivers/usb/host/isp1362-hcd.c	6
-rw-r--r--	drivers/usb/host/isp1760-hcd.c	29
-rw-r--r--	drivers/usb/host/isp1760-if.c	13
-rw-r--r--	drivers/usb/host/ohci-hcd.c	33
-rw-r--r--	drivers/usb/host/ohci-omap3.c	735
-rw-r--r--	drivers/usb/host/oxu210hp-hcd.c	31
-rw-r--r--	drivers/usb/host/r8a66597-hcd.c	39
-rw-r--r--	drivers/usb/host/sl811-hcd.c	60
-rw-r--r--	drivers/usb/host/u132-hcd.c	6
-rw-r--r--	drivers/usb/host/uhci-hcd.c	2
-rw-r--r--	drivers/usb/host/whci/debug.c	2
-rw-r--r--	drivers/usb/host/whci/qset.c	6
-rw-r--r--	drivers/usb/host/xhci-dbg.c	24
-rw-r--r--	drivers/usb/host/xhci-hub.c	39
-rw-r--r--	drivers/usb/host/xhci-mem.c	489
-rw-r--r--	drivers/usb/host/xhci-pci.c	8
-rw-r--r--	drivers/usb/host/xhci-ring.c	329
-rw-r--r--	drivers/usb/host/xhci.c	416
-rw-r--r--	drivers/usb/host/xhci.h	112
38 files changed, 2358 insertions, 338 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8d3df0397de3..f865be2276d4 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -207,6 +207,21 @@ config USB_OHCI_HCD
207 To compile this driver as a module, choose M here: the 207 To compile this driver as a module, choose M here: the
208 module will be called ohci-hcd. 208 module will be called ohci-hcd.
209 209
210config USB_OHCI_HCD_OMAP1
211 bool "OHCI support for OMAP1/2 chips"
212 depends on USB_OHCI_HCD && (ARCH_OMAP1 || ARCH_OMAP2)
213 default y
214 ---help---
215 Enables support for the OHCI controller on OMAP1/2 chips.
216
217config USB_OHCI_HCD_OMAP3
218 bool "OHCI support for OMAP3 and later chips"
219 depends on USB_OHCI_HCD && (ARCH_OMAP3 || ARCH_OMAP4)
220 default y
221 ---help---
222 Enables support for the on-chip OHCI controller on
223 OMAP3 and later chips.
224
210config USB_OHCI_HCD_PPC_SOC 225config USB_OHCI_HCD_PPC_SOC
211 bool "OHCI support for on-chip PPC USB controller" 226 bool "OHCI support for on-chip PPC USB controller"
212 depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx) 227 depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx)
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index e3a74e75e822..faa61748db70 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -69,6 +69,15 @@ static void au1xxx_stop_ehc(void)
69 au_sync(); 69 au_sync();
70} 70}
71 71
72static int au1xxx_ehci_setup(struct usb_hcd *hcd)
73{
74 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
75 int ret = ehci_init(hcd);
76
77 ehci->need_io_watchdog = 0;
78 return ret;
79}
80
72static const struct hc_driver ehci_au1xxx_hc_driver = { 81static const struct hc_driver ehci_au1xxx_hc_driver = {
73 .description = hcd_name, 82 .description = hcd_name,
74 .product_desc = "Au1xxx EHCI", 83 .product_desc = "Au1xxx EHCI",
@@ -86,7 +95,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
86 * FIXME -- ehci_init() doesn't do enough here. 95 * FIXME -- ehci_init() doesn't do enough here.
87 * See ehci-ppc-soc for a complete implementation. 96 * See ehci-ppc-soc for a complete implementation.
88 */ 97 */
89 .reset = ehci_init, 98 .reset = au1xxx_ehci_setup,
90 .start = ehci_run, 99 .start = ehci_run,
91 .stop = ehci_stop, 100 .stop = ehci_stop,
92 .shutdown = ehci_shutdown, 101 .shutdown = ehci_shutdown,
@@ -215,26 +224,17 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
215 msleep(10); 224 msleep(10);
216 225
217 /* Root hub was already suspended. Disable irq emission and 226 /* Root hub was already suspended. Disable irq emission and
218 * mark HW unaccessible, bail out if RH has been resumed. Use 227 * mark HW unaccessible. The PM and USB cores make sure that
219 * the spinlock to properly synchronize with possible pending 228 * the root hub is either suspended or stopped.
220 * RH suspend or resume activity.
221 *
222 * This is still racy as hcd->state is manipulated outside of
223 * any locks =P But that will be a different fix.
224 */ 229 */
225 spin_lock_irqsave(&ehci->lock, flags); 230 spin_lock_irqsave(&ehci->lock, flags);
226 if (hcd->state != HC_STATE_SUSPENDED) { 231 ehci_prepare_ports_for_controller_suspend(ehci);
227 rc = -EINVAL;
228 goto bail;
229 }
230 ehci_writel(ehci, 0, &ehci->regs->intr_enable); 232 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
231 (void)ehci_readl(ehci, &ehci->regs->intr_enable); 233 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
232 234
233 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 235 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
234 236
235 au1xxx_stop_ehc(); 237 au1xxx_stop_ehc();
236
237bail:
238 spin_unlock_irqrestore(&ehci->lock, flags); 238 spin_unlock_irqrestore(&ehci->lock, flags);
239 239
240 // could save FLADJ in case of Vaux power loss 240 // could save FLADJ in case of Vaux power loss
@@ -264,6 +264,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
264 if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) { 264 if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
265 int mask = INTR_MASK; 265 int mask = INTR_MASK;
266 266
267 ehci_prepare_ports_for_controller_resume(ehci);
267 if (!hcd->self.root_hub->do_remote_wakeup) 268 if (!hcd->self.root_hub->do_remote_wakeup)
268 mask &= ~STS_PCD; 269 mask &= ~STS_PCD;
269 ehci_writel(ehci, mask, &ehci->regs->intr_enable); 270 ehci_writel(ehci, mask, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 0e26aa13f158..5cd967d28938 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -313,6 +313,7 @@ static int ehci_fsl_drv_suspend(struct device *dev)
313 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); 313 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
314 void __iomem *non_ehci = hcd->regs; 314 void __iomem *non_ehci = hcd->regs;
315 315
316 ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd));
316 if (!fsl_deep_sleep()) 317 if (!fsl_deep_sleep())
317 return 0; 318 return 0;
318 319
@@ -327,6 +328,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
327 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 328 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
328 void __iomem *non_ehci = hcd->regs; 329 void __iomem *non_ehci = hcd->regs;
329 330
331 ehci_prepare_ports_for_controller_resume(ehci);
330 if (!fsl_deep_sleep()) 332 if (!fsl_deep_sleep())
331 return 0; 333 return 0;
332 334
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 13ead00aecd5..ef3e88f0b3c3 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -31,13 +31,12 @@
31#include <linux/list.h> 31#include <linux/list.h>
32#include <linux/interrupt.h> 32#include <linux/interrupt.h>
33#include <linux/usb.h> 33#include <linux/usb.h>
34#include <linux/usb/hcd.h>
34#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
36#include <linux/debugfs.h> 37#include <linux/debugfs.h>
37#include <linux/slab.h> 38#include <linux/slab.h>
38 39
39#include "../core/hcd.h"
40
41#include <asm/byteorder.h> 40#include <asm/byteorder.h>
42#include <asm/io.h> 41#include <asm/io.h>
43#include <asm/irq.h> 42#include <asm/irq.h>
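The include change in the hunk above repeats across most files in this diffstat (the fhci, imx21, isp116x, isp1362, isp1760 and ohci glue below): host-controller drivers stop reaching into usbcore's private header and use the now-public one instead. A sketch of the pattern, for reference only:

	#include <linux/usb.h>
	#include <linux/usb/hcd.h>	/* replaces the old #include "../core/hcd.h" */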
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c7178bcde67a..e7d3d8def282 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -106,12 +106,75 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
106 ehci->owned_ports = 0; 106 ehci->owned_ports = 0;
107} 107}
108 108
109static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
110 bool suspending)
111{
112 int port;
113 u32 temp;
114
115 /* If remote wakeup is enabled for the root hub but disabled
116 * for the controller, we must adjust all the port wakeup flags
117 * when the controller is suspended or resumed. In all other
118 * cases they don't need to be changed.
119 */
120 if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup ||
121 device_may_wakeup(ehci_to_hcd(ehci)->self.controller))
122 return;
123
124 /* clear phy low-power mode before changing wakeup flags */
125 if (ehci->has_hostpc) {
126 port = HCS_N_PORTS(ehci->hcs_params);
127 while (port--) {
128 u32 __iomem *hostpc_reg;
129
130 hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
131 + HOSTPC0 + 4 * port);
132 temp = ehci_readl(ehci, hostpc_reg);
133 ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
134 }
135 msleep(5);
136 }
137
138 port = HCS_N_PORTS(ehci->hcs_params);
139 while (port--) {
140 u32 __iomem *reg = &ehci->regs->port_status[port];
141 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
142 u32 t2 = t1 & ~PORT_WAKE_BITS;
143
144 /* If we are suspending the controller, clear the flags.
145 * If we are resuming the controller, set the wakeup flags.
146 */
147 if (!suspending) {
148 if (t1 & PORT_CONNECT)
149 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
150 else
151 t2 |= PORT_WKOC_E | PORT_WKCONN_E;
152 }
153 ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
154 port + 1, t1, t2);
155 ehci_writel(ehci, t2, reg);
156 }
157
158 /* enter phy low-power mode again */
159 if (ehci->has_hostpc) {
160 port = HCS_N_PORTS(ehci->hcs_params);
161 while (port--) {
162 u32 __iomem *hostpc_reg;
163
164 hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
165 + HOSTPC0 + 4 * port);
166 temp = ehci_readl(ehci, hostpc_reg);
167 ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
168 }
169 }
170}
171
109static int ehci_bus_suspend (struct usb_hcd *hcd) 172static int ehci_bus_suspend (struct usb_hcd *hcd)
110{ 173{
111 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 174 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
112 int port; 175 int port;
113 int mask; 176 int mask;
114 u32 __iomem *hostpc_reg = NULL; 177 int changed;
115 178
116 ehci_dbg(ehci, "suspend root hub\n"); 179 ehci_dbg(ehci, "suspend root hub\n");
117 180
@@ -155,15 +218,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
155 */ 218 */
156 ehci->bus_suspended = 0; 219 ehci->bus_suspended = 0;
157 ehci->owned_ports = 0; 220 ehci->owned_ports = 0;
221 changed = 0;
158 port = HCS_N_PORTS(ehci->hcs_params); 222 port = HCS_N_PORTS(ehci->hcs_params);
159 while (port--) { 223 while (port--) {
160 u32 __iomem *reg = &ehci->regs->port_status [port]; 224 u32 __iomem *reg = &ehci->regs->port_status [port];
161 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; 225 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
162 u32 t2 = t1; 226 u32 t2 = t1 & ~PORT_WAKE_BITS;
163 227
164 if (ehci->has_hostpc)
165 hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
166 + HOSTPC0 + 4 * (port & 0xff));
167 /* keep track of which ports we suspend */ 228 /* keep track of which ports we suspend */
168 if (t1 & PORT_OWNER) 229 if (t1 & PORT_OWNER)
169 set_bit(port, &ehci->owned_ports); 230 set_bit(port, &ehci->owned_ports);
@@ -172,40 +233,45 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
172 set_bit(port, &ehci->bus_suspended); 233 set_bit(port, &ehci->bus_suspended);
173 } 234 }
174 235
175 /* enable remote wakeup on all ports */ 236 /* enable remote wakeup on all ports, if told to do so */
176 if (hcd->self.root_hub->do_remote_wakeup) { 237 if (hcd->self.root_hub->do_remote_wakeup) {
177 /* only enable appropriate wake bits, otherwise the 238 /* only enable appropriate wake bits, otherwise the
178 * hardware can not go phy low power mode. If a race 239 * hardware can not go phy low power mode. If a race
179 * condition happens here(connection change during bits 240 * condition happens here(connection change during bits
180 * set), the port change detection will finally fix it. 241 * set), the port change detection will finally fix it.
181 */ 242 */
182 if (t1 & PORT_CONNECT) { 243 if (t1 & PORT_CONNECT)
183 t2 |= PORT_WKOC_E | PORT_WKDISC_E; 244 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
184 t2 &= ~PORT_WKCONN_E; 245 else
185 } else {
186 t2 |= PORT_WKOC_E | PORT_WKCONN_E; 246 t2 |= PORT_WKOC_E | PORT_WKCONN_E;
187 t2 &= ~PORT_WKDISC_E; 247 }
188 }
189 } else
190 t2 &= ~PORT_WAKE_BITS;
191 248
192 if (t1 != t2) { 249 if (t1 != t2) {
193 ehci_vdbg (ehci, "port %d, %08x -> %08x\n", 250 ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
194 port + 1, t1, t2); 251 port + 1, t1, t2);
195 ehci_writel(ehci, t2, reg); 252 ehci_writel(ehci, t2, reg);
196 if (hostpc_reg) { 253 changed = 1;
197 u32 t3; 254 }
255 }
198 256
199 spin_unlock_irq(&ehci->lock); 257 if (changed && ehci->has_hostpc) {
200 msleep(5);/* 5ms for HCD enter low pwr mode */ 258 spin_unlock_irq(&ehci->lock);
201 spin_lock_irq(&ehci->lock); 259 msleep(5); /* 5 ms for HCD to enter low-power mode */
202 t3 = ehci_readl(ehci, hostpc_reg); 260 spin_lock_irq(&ehci->lock);
203 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); 261
204 t3 = ehci_readl(ehci, hostpc_reg); 262 port = HCS_N_PORTS(ehci->hcs_params);
205 ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", 263 while (port--) {
264 u32 __iomem *hostpc_reg;
265 u32 t3;
266
267 hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
268 + HOSTPC0 + 4 * port);
269 t3 = ehci_readl(ehci, hostpc_reg);
270 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
271 t3 = ehci_readl(ehci, hostpc_reg);
272 ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
206 port, (t3 & HOSTPC_PHCD) ? 273 port, (t3 & HOSTPC_PHCD) ?
207 "succeeded" : "failed"); 274 "succeeded" : "failed");
208 }
209 } 275 }
210 } 276 }
211 277
@@ -291,6 +357,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
291 msleep(8); 357 msleep(8);
292 spin_lock_irq(&ehci->lock); 358 spin_lock_irq(&ehci->lock);
293 359
360 /* clear phy low-power mode before resume */
361 if (ehci->bus_suspended && ehci->has_hostpc) {
362 i = HCS_N_PORTS(ehci->hcs_params);
363 while (i--) {
364 if (test_bit(i, &ehci->bus_suspended)) {
365 u32 __iomem *hostpc_reg;
366
367 hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
368 + HOSTPC0 + 4 * i);
369 temp = ehci_readl(ehci, hostpc_reg);
370 ehci_writel(ehci, temp & ~HOSTPC_PHCD,
371 hostpc_reg);
372 }
373 }
374 spin_unlock_irq(&ehci->lock);
375 msleep(5);
376 spin_lock_irq(&ehci->lock);
377 }
378
294 /* manually resume the ports we suspended during bus_suspend() */ 379 /* manually resume the ports we suspended during bus_suspend() */
295 i = HCS_N_PORTS (ehci->hcs_params); 380 i = HCS_N_PORTS (ehci->hcs_params);
296 while (i--) { 381 while (i--) {
@@ -659,7 +744,7 @@ static int ehci_hub_control (
659 * Even if OWNER is set, so the port is owned by the 744 * Even if OWNER is set, so the port is owned by the
660 * companion controller, khubd needs to be able to clear 745 * companion controller, khubd needs to be able to clear
661 * the port-change status bits (especially 746 * the port-change status bits (especially
662 * USB_PORT_FEAT_C_CONNECTION). 747 * USB_PORT_STAT_C_CONNECTION).
663 */ 748 */
664 749
665 switch (wValue) { 750 switch (wValue) {
@@ -675,16 +760,25 @@ static int ehci_hub_control (
675 goto error; 760 goto error;
676 if (ehci->no_selective_suspend) 761 if (ehci->no_selective_suspend)
677 break; 762 break;
678 if (temp & PORT_SUSPEND) { 763 if (!(temp & PORT_SUSPEND))
679 if ((temp & PORT_PE) == 0) 764 break;
680 goto error; 765 if ((temp & PORT_PE) == 0)
681 /* resume signaling for 20 msec */ 766 goto error;
682 temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); 767
683 ehci_writel(ehci, temp | PORT_RESUME, 768 /* clear phy low-power mode before resume */
684 status_reg); 769 if (hostpc_reg) {
685 ehci->reset_done [wIndex] = jiffies 770 temp1 = ehci_readl(ehci, hostpc_reg);
686 + msecs_to_jiffies (20); 771 ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
772 hostpc_reg);
773 spin_unlock_irqrestore(&ehci->lock, flags);
774 msleep(5);/* wait to leave low-power mode */
775 spin_lock_irqsave(&ehci->lock, flags);
687 } 776 }
777 /* resume signaling for 20 msec */
778 temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
779 ehci_writel(ehci, temp | PORT_RESUME, status_reg);
780 ehci->reset_done[wIndex] = jiffies
781 + msecs_to_jiffies(20);
688 break; 782 break;
689 case USB_PORT_FEAT_C_SUSPEND: 783 case USB_PORT_FEAT_C_SUSPEND:
690 clear_bit(wIndex, &ehci->port_c_suspend); 784 clear_bit(wIndex, &ehci->port_c_suspend);
@@ -729,12 +823,12 @@ static int ehci_hub_control (
729 823
730 // wPortChange bits 824 // wPortChange bits
731 if (temp & PORT_CSC) 825 if (temp & PORT_CSC)
732 status |= 1 << USB_PORT_FEAT_C_CONNECTION; 826 status |= USB_PORT_STAT_C_CONNECTION << 16;
733 if (temp & PORT_PEC) 827 if (temp & PORT_PEC)
734 status |= 1 << USB_PORT_FEAT_C_ENABLE; 828 status |= USB_PORT_STAT_C_ENABLE << 16;
735 829
736 if ((temp & PORT_OCC) && !ignore_oc){ 830 if ((temp & PORT_OCC) && !ignore_oc){
737 status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; 831 status |= USB_PORT_STAT_C_OVERCURRENT << 16;
738 832
739 /* 833 /*
740 * Hubs should disable port power on over-current. 834 * Hubs should disable port power on over-current.
@@ -791,7 +885,7 @@ static int ehci_hub_control (
791 if ((temp & PORT_RESET) 885 if ((temp & PORT_RESET)
792 && time_after_eq(jiffies, 886 && time_after_eq(jiffies,
793 ehci->reset_done[wIndex])) { 887 ehci->reset_done[wIndex])) {
794 status |= 1 << USB_PORT_FEAT_C_RESET; 888 status |= USB_PORT_STAT_C_RESET << 16;
795 ehci->reset_done [wIndex] = 0; 889 ehci->reset_done [wIndex] = 0;
796 890
797 /* force reset to complete */ 891 /* force reset to complete */
@@ -833,7 +927,7 @@ static int ehci_hub_control (
833 */ 927 */
834 928
835 if (temp & PORT_CONNECT) { 929 if (temp & PORT_CONNECT) {
836 status |= 1 << USB_PORT_FEAT_CONNECTION; 930 status |= USB_PORT_STAT_CONNECTION;
837 // status may be from integrated TT 931 // status may be from integrated TT
838 if (ehci->has_hostpc) { 932 if (ehci->has_hostpc) {
839 temp1 = ehci_readl(ehci, hostpc_reg); 933 temp1 = ehci_readl(ehci, hostpc_reg);
@@ -842,11 +936,11 @@ static int ehci_hub_control (
842 status |= ehci_port_speed(ehci, temp); 936 status |= ehci_port_speed(ehci, temp);
843 } 937 }
844 if (temp & PORT_PE) 938 if (temp & PORT_PE)
845 status |= 1 << USB_PORT_FEAT_ENABLE; 939 status |= USB_PORT_STAT_ENABLE;
846 940
847 /* maybe the port was unsuspended without our knowledge */ 941 /* maybe the port was unsuspended without our knowledge */
848 if (temp & (PORT_SUSPEND|PORT_RESUME)) { 942 if (temp & (PORT_SUSPEND|PORT_RESUME)) {
849 status |= 1 << USB_PORT_FEAT_SUSPEND; 943 status |= USB_PORT_STAT_SUSPEND;
850 } else if (test_bit(wIndex, &ehci->suspended_ports)) { 944 } else if (test_bit(wIndex, &ehci->suspended_ports)) {
851 clear_bit(wIndex, &ehci->suspended_ports); 945 clear_bit(wIndex, &ehci->suspended_ports);
852 ehci->reset_done[wIndex] = 0; 946 ehci->reset_done[wIndex] = 0;
@@ -855,13 +949,13 @@ static int ehci_hub_control (
855 } 949 }
856 950
857 if (temp & PORT_OC) 951 if (temp & PORT_OC)
858 status |= 1 << USB_PORT_FEAT_OVER_CURRENT; 952 status |= USB_PORT_STAT_OVERCURRENT;
859 if (temp & PORT_RESET) 953 if (temp & PORT_RESET)
860 status |= 1 << USB_PORT_FEAT_RESET; 954 status |= USB_PORT_STAT_RESET;
861 if (temp & PORT_POWER) 955 if (temp & PORT_POWER)
862 status |= 1 << USB_PORT_FEAT_POWER; 956 status |= USB_PORT_STAT_POWER;
863 if (test_bit(wIndex, &ehci->port_c_suspend)) 957 if (test_bit(wIndex, &ehci->port_c_suspend))
864 status |= 1 << USB_PORT_FEAT_C_SUSPEND; 958 status |= USB_PORT_STAT_C_SUSPEND << 16;
865 959
866#ifndef VERBOSE_DEBUG 960#ifndef VERBOSE_DEBUG
867 if (status & ~0xffff) /* only if wPortChange is interesting */ 961 if (status & ~0xffff) /* only if wPortChange is interesting */
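The wPortStatus/wPortChange conversions in the hub-control hunks above (and the matching ones in isp1760-hcd.c further down) are mechanical: each USB_PORT_STAT_* mask from <linux/usb/ch11.h> has the same value as the old shifted USB_PORT_FEAT_* feature number, with the change masks landing in the upper 16 bits of the word GetPortStatus returns. A small illustration, assuming the usual EHCI PORTSC bit names; this is not code from the patch itself:

static u32 example_port_status(u32 portsc)
{
	u32 status = 0;

	if (portsc & PORT_CONNECT)			/* wPortStatus bit */
		status |= USB_PORT_STAT_CONNECTION;	/* == 1 << USB_PORT_FEAT_CONNECTION */
	if (portsc & PORT_CSC)				/* wPortChange bit */
		status |= USB_PORT_STAT_C_CONNECTION << 16;
							/* == 1 << USB_PORT_FEAT_C_CONNECTION */
	return status;
}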
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 0cd6c7795d90..5450e628157f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -116,6 +116,8 @@
116#define OMAP_UHH_DEBUG_CSR (0x44) 116#define OMAP_UHH_DEBUG_CSR (0x44)
117 117
118/* EHCI Register Set */ 118/* EHCI Register Set */
119#define EHCI_INSNREG04 (0xA0)
120#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
119#define EHCI_INSNREG05_ULPI (0xA4) 121#define EHCI_INSNREG05_ULPI (0xA4)
120#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31 122#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
121#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24 123#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
@@ -352,8 +354,8 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
352 reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS; 354 reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
353 355
354 /* Bypass the TLL module for PHY mode operation */ 356 /* Bypass the TLL module for PHY mode operation */
355 if (omap_rev() <= OMAP3430_REV_ES2_1) { 357 if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
356 dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1 \n"); 358 dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
357 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) || 359 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
358 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) || 360 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
359 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)) 361 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
@@ -382,6 +384,18 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
382 dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg); 384 dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
383 385
384 386
387 /*
388 * An undocumented "feature" in the OMAP3 EHCI controller,
389 * causes suspended ports to be taken out of suspend when
390 * the USBCMD.Run/Stop bit is cleared (for example when
391 * we do ehci_bus_suspend).
392 * This breaks suspend-resume if the root-hub is allowed
393 * to suspend. Writing 1 to this undocumented register bit
394 * disables this feature and restores normal behavior.
395 */
396 ehci_omap_writel(omap->ehci_base, EHCI_INSNREG04,
397 EHCI_INSNREG04_DISABLE_UNSUSPEND);
398
385 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) || 399 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
386 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) || 400 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
387 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) { 401 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
@@ -659,6 +673,9 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
659 goto err_add_hcd; 673 goto err_add_hcd;
660 } 674 }
661 675
676 /* root ports should always stay powered */
677 ehci_port_power(omap->ehci, 1);
678
662 return 0; 679 return 0;
663 680
664err_add_hcd: 681err_add_hcd:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index ead5f4f2aa5a..d43d176161aa 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -109,6 +109,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
109 return retval; 109 return retval;
110 110
111 switch (pdev->vendor) { 111 switch (pdev->vendor) {
112 case PCI_VENDOR_ID_NEC:
113 ehci->need_io_watchdog = 0;
114 break;
112 case PCI_VENDOR_ID_INTEL: 115 case PCI_VENDOR_ID_INTEL:
113 ehci->need_io_watchdog = 0; 116 ehci->need_io_watchdog = 0;
114 if (pdev->device == 0x27cc) { 117 if (pdev->device == 0x27cc) {
@@ -284,23 +287,15 @@ static int ehci_pci_suspend(struct usb_hcd *hcd)
284 msleep(10); 287 msleep(10);
285 288
286 /* Root hub was already suspended. Disable irq emission and 289 /* Root hub was already suspended. Disable irq emission and
287 * mark HW unaccessible, bail out if RH has been resumed. Use 290 * mark HW unaccessible. The PM and USB cores make sure that
288 * the spinlock to properly synchronize with possible pending 291 * the root hub is either suspended or stopped.
289 * RH suspend or resume activity.
290 *
291 * This is still racy as hcd->state is manipulated outside of
292 * any locks =P But that will be a different fix.
293 */ 292 */
294 spin_lock_irqsave (&ehci->lock, flags); 293 spin_lock_irqsave (&ehci->lock, flags);
295 if (hcd->state != HC_STATE_SUSPENDED) { 294 ehci_prepare_ports_for_controller_suspend(ehci);
296 rc = -EINVAL;
297 goto bail;
298 }
299 ehci_writel(ehci, 0, &ehci->regs->intr_enable); 295 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
300 (void)ehci_readl(ehci, &ehci->regs->intr_enable); 296 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
301 297
302 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 298 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
303 bail:
304 spin_unlock_irqrestore (&ehci->lock, flags); 299 spin_unlock_irqrestore (&ehci->lock, flags);
305 300
306 // could save FLADJ in case of Vaux power loss 301 // could save FLADJ in case of Vaux power loss
@@ -330,6 +325,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
330 !hibernated) { 325 !hibernated) {
331 int mask = INTR_MASK; 326 int mask = INTR_MASK;
332 327
328 ehci_prepare_ports_for_controller_resume(ehci);
333 if (!hcd->self.root_hub->do_remote_wakeup) 329 if (!hcd->self.root_hub->do_remote_wakeup)
334 mask &= ~STS_PCD; 330 mask &= ~STS_PCD;
335 ehci_writel(ehci, mask, &ehci->regs->intr_enable); 331 ehci_writel(ehci, mask, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 89521775c567..11a79c4f4a9d 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -663,7 +663,7 @@ qh_urb_transaction (
663 */ 663 */
664 i = urb->num_sgs; 664 i = urb->num_sgs;
665 if (len > 0 && i > 0) { 665 if (len > 0 && i > 0) {
666 sg = urb->sg->sg; 666 sg = urb->sg;
667 buf = sg_dma_address(sg); 667 buf = sg_dma_address(sg);
668 668
669 /* urb->transfer_buffer_length may be smaller than the 669 /* urb->transfer_buffer_length may be smaller than the
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 556c0b48f3ab..650a687f2854 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -536,6 +536,16 @@ struct ehci_fstn {
536 536
537/*-------------------------------------------------------------------------*/ 537/*-------------------------------------------------------------------------*/
538 538
539/* Prepare the PORTSC wakeup flags during controller suspend/resume */
540
541#define ehci_prepare_ports_for_controller_suspend(ehci) \
542 ehci_adjust_port_wakeup_flags(ehci, true);
543
544#define ehci_prepare_ports_for_controller_resume(ehci) \
545 ehci_adjust_port_wakeup_flags(ehci, false);
546
547/*-------------------------------------------------------------------------*/
548
539#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT 549#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT
540 550
541/* 551/*
@@ -556,20 +566,20 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
556 case 0: 566 case 0:
557 return 0; 567 return 0;
558 case 1: 568 case 1:
559 return (1<<USB_PORT_FEAT_LOWSPEED); 569 return USB_PORT_STAT_LOW_SPEED;
560 case 2: 570 case 2:
561 default: 571 default:
562 return (1<<USB_PORT_FEAT_HIGHSPEED); 572 return USB_PORT_STAT_HIGH_SPEED;
563 } 573 }
564 } 574 }
565 return (1<<USB_PORT_FEAT_HIGHSPEED); 575 return USB_PORT_STAT_HIGH_SPEED;
566} 576}
567 577
568#else 578#else
569 579
570#define ehci_is_TDI(e) (0) 580#define ehci_is_TDI(e) (0)
571 581
572#define ehci_port_speed(ehci, portsc) (1<<USB_PORT_FEAT_HIGHSPEED) 582#define ehci_port_speed(ehci, portsc) USB_PORT_STAT_HIGH_SPEED
573#endif 583#endif
574 584
575/*-------------------------------------------------------------------------*/ 585/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index e799f86dab11..6fe550049119 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -20,7 +20,7 @@
20#include <linux/debugfs.h> 20#include <linux/debugfs.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/usb.h> 22#include <linux/usb.h>
23#include "../core/hcd.h" 23#include <linux/usb/hcd.h>
24#include "fhci.h" 24#include "fhci.h"
25 25
26void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er) 26void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 15379c636143..90453379a434 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -25,12 +25,12 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/usb.h> 27#include <linux/usb.h>
28#include <linux/usb/hcd.h>
28#include <linux/of_platform.h> 29#include <linux/of_platform.h>
29#include <linux/of_gpio.h> 30#include <linux/of_gpio.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31#include <asm/qe.h> 32#include <asm/qe.h>
32#include <asm/fsl_gtm.h> 33#include <asm/fsl_gtm.h>
33#include "../core/hcd.h"
34#include "fhci.h" 34#include "fhci.h"
35 35
36void fhci_start_sof_timer(struct fhci_hcd *fhci) 36void fhci_start_sof_timer(struct fhci_hcd *fhci)
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index 0cfaedc3e124..348fe62e94f7 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -22,9 +22,9 @@
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/usb.h> 24#include <linux/usb.h>
25#include <linux/usb/hcd.h>
25#include <linux/gpio.h> 26#include <linux/gpio.h>
26#include <asm/qe.h> 27#include <asm/qe.h>
27#include "../core/hcd.h"
28#include "fhci.h" 28#include "fhci.h"
29 29
30/* virtual root hub specific descriptor */ 30/* virtual root hub specific descriptor */
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
index 5591bfb499d1..b0b88f57a5ac 100644
--- a/drivers/usb/host/fhci-mem.c
+++ b/drivers/usb/host/fhci-mem.c
@@ -21,7 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/list.h> 22#include <linux/list.h>
23#include <linux/usb.h> 23#include <linux/usb.h>
24#include "../core/hcd.h" 24#include <linux/usb/hcd.h>
25#include "fhci.h" 25#include "fhci.h"
26 26
27static void init_td(struct td *td) 27static void init_td(struct td *td)
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
index f73c92359beb..03be7494a476 100644
--- a/drivers/usb/host/fhci-q.c
+++ b/drivers/usb/host/fhci-q.c
@@ -22,7 +22,7 @@
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/usb.h> 24#include <linux/usb.h>
25#include "../core/hcd.h" 25#include <linux/usb/hcd.h>
26#include "fhci.h" 26#include "fhci.h"
27 27
28/* maps the hardware error code to the USB error code */ 28/* maps the hardware error code to the USB error code */
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index ff43747a614f..4f2cbdcc0273 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -24,9 +24,9 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/usb.h> 26#include <linux/usb.h>
27#include <linux/usb/hcd.h>
27#include <asm/qe.h> 28#include <asm/qe.h>
28#include <asm/fsl_gtm.h> 29#include <asm/fsl_gtm.h>
29#include "../core/hcd.h"
30#include "fhci.h" 30#include "fhci.h"
31 31
32static void recycle_frame(struct fhci_usb *usb, struct packet *pkt) 32static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index 57013479d7f7..7be548ca2183 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -22,7 +22,7 @@
22#include <linux/list.h> 22#include <linux/list.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/usb.h> 24#include <linux/usb.h>
25#include "../core/hcd.h" 25#include <linux/usb/hcd.h>
26#include "fhci.h" 26#include "fhci.h"
27 27
28#define DUMMY_BD_BUFFER 0xdeadbeef 28#define DUMMY_BD_BUFFER 0xdeadbeef
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 72dae1c5ab38..71c3caaea4c1 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -20,13 +20,14 @@
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/bug.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25#include <linux/kfifo.h> 26#include <linux/kfifo.h>
26#include <linux/io.h> 27#include <linux/io.h>
27#include <linux/usb.h> 28#include <linux/usb.h>
29#include <linux/usb/hcd.h>
28#include <asm/qe.h> 30#include <asm/qe.h>
29#include "../core/hcd.h"
30 31
31#define USB_CLOCK 48000000 32#define USB_CLOCK 48000000
32 33
@@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p)
515 516
516static inline void *cq_get(struct kfifo *kfifo) 517static inline void *cq_get(struct kfifo *kfifo)
517{ 518{
518 void *p = NULL; 519 unsigned int sz;
520 void *p;
521
522 sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
523 if (sz != sizeof(p))
524 return NULL;
519 525
520 kfifo_out(kfifo, (void *)&p, sizeof(p));
521 return p; 526 return p;
522} 527}
523 528
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 8a12f297645f..ca0e98d8e1f4 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -56,8 +56,8 @@
56#include <linux/platform_device.h> 56#include <linux/platform_device.h>
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/usb.h> 58#include <linux/usb.h>
59#include <linux/usb/hcd.h>
59 60
60#include "../core/hcd.h"
61#include "imx21-hcd.h" 61#include "imx21-hcd.h"
62 62
63#ifdef DEBUG 63#ifdef DEBUG
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 92de71dc7729..d9e82123de2a 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -65,6 +65,7 @@
65#include <linux/slab.h> 65#include <linux/slab.h>
66#include <linux/usb.h> 66#include <linux/usb.h>
67#include <linux/usb/isp116x.h> 67#include <linux/usb/isp116x.h>
68#include <linux/usb/hcd.h>
68#include <linux/platform_device.h> 69#include <linux/platform_device.h>
69 70
70#include <asm/io.h> 71#include <asm/io.h>
@@ -72,7 +73,6 @@
72#include <asm/system.h> 73#include <asm/system.h>
73#include <asm/byteorder.h> 74#include <asm/byteorder.h>
74 75
75#include "../core/hcd.h"
76#include "isp116x.h" 76#include "isp116x.h"
77 77
78#define DRIVER_VERSION "03 Nov 2005" 78#define DRIVER_VERSION "03 Nov 2005"
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 217fb5170200..20a0dfe0fe36 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -77,6 +77,7 @@
77#include <linux/interrupt.h> 77#include <linux/interrupt.h>
78#include <linux/usb.h> 78#include <linux/usb.h>
79#include <linux/usb/isp1362.h> 79#include <linux/usb/isp1362.h>
80#include <linux/usb/hcd.h>
80#include <linux/platform_device.h> 81#include <linux/platform_device.h>
81#include <linux/pm.h> 82#include <linux/pm.h>
82#include <linux/io.h> 83#include <linux/io.h>
@@ -95,7 +96,6 @@ module_param(dbg_level, int, 0);
95#define STUB_DEBUG_FILE 96#define STUB_DEBUG_FILE
96#endif 97#endif
97 98
98#include "../core/hcd.h"
99#include "../core/usb.h" 99#include "../core/usb.h"
100#include "isp1362.h" 100#include "isp1362.h"
101 101
@@ -1265,7 +1265,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1265 1265
1266 /* don't submit to a dead or disabled port */ 1266 /* don't submit to a dead or disabled port */
1267 if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) & 1267 if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1268 (1 << USB_PORT_FEAT_ENABLE)) || 1268 USB_PORT_STAT_ENABLE) ||
1269 !HC_IS_RUNNING(hcd->state)) { 1269 !HC_IS_RUNNING(hcd->state)) {
1270 kfree(ep); 1270 kfree(ep);
1271 retval = -ENODEV; 1271 retval = -ENODEV;
@@ -2217,7 +2217,7 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2217static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) 2217static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2218{ 2218{
2219 if (isp1362_hcd->pde) 2219 if (isp1362_hcd->pde)
2220 remove_proc_entry(proc_filename, 0); 2220 remove_proc_entry(proc_filename, NULL);
2221} 2221}
2222 2222
2223#endif 2223#endif
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 9f01293600b0..dbcafa29c775 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/usb.h> 16#include <linux/usb.h>
17#include <linux/usb/hcd.h>
17#include <linux/debugfs.h> 18#include <linux/debugfs.h>
18#include <linux/uaccess.h> 19#include <linux/uaccess.h>
19#include <linux/io.h> 20#include <linux/io.h>
@@ -21,7 +22,6 @@
21#include <asm/unaligned.h> 22#include <asm/unaligned.h>
22#include <asm/cacheflush.h> 23#include <asm/cacheflush.h>
23 24
24#include "../core/hcd.h"
25#include "isp1760-hcd.h" 25#include "isp1760-hcd.h"
26 26
27static struct kmem_cache *qtd_cachep; 27static struct kmem_cache *qtd_cachep;
@@ -111,7 +111,7 @@ struct isp1760_qh {
111 u32 ping; 111 u32 ping;
112}; 112};
113 113
114#define ehci_port_speed(priv, portsc) (1 << USB_PORT_FEAT_HIGHSPEED) 114#define ehci_port_speed(priv, portsc) USB_PORT_STAT_HIGH_SPEED
115 115
116static unsigned int isp1760_readl(__u32 __iomem *regs) 116static unsigned int isp1760_readl(__u32 __iomem *regs)
117{ 117{
@@ -713,12 +713,11 @@ static int check_error(struct ptd *ptd)
713 u32 dw3; 713 u32 dw3;
714 714
715 dw3 = le32_to_cpu(ptd->dw3); 715 dw3 = le32_to_cpu(ptd->dw3);
716 if (dw3 & DW3_HALT_BIT) 716 if (dw3 & DW3_HALT_BIT) {
717 error = -EPIPE; 717 error = -EPIPE;
718 718
719 if (dw3 & DW3_ERROR_BIT) { 719 if (dw3 & DW3_ERROR_BIT)
720 printk(KERN_ERR "error bit is set in DW3\n"); 720 pr_err("error bit is set in DW3\n");
721 error = -EPIPE;
722 } 721 }
723 722
724 if (dw3 & DW3_QTD_ACTIVE) { 723 if (dw3 & DW3_QTD_ACTIVE) {
@@ -1923,7 +1922,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
1923 * Even if OWNER is set, so the port is owned by the 1922 * Even if OWNER is set, so the port is owned by the
1924 * companion controller, khubd needs to be able to clear 1923 * companion controller, khubd needs to be able to clear
1925 * the port-change status bits (especially 1924 * the port-change status bits (especially
1926 * USB_PORT_FEAT_C_CONNECTION). 1925 * USB_PORT_STAT_C_CONNECTION).
1927 */ 1926 */
1928 1927
1929 switch (wValue) { 1928 switch (wValue) {
@@ -1987,7 +1986,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
1987 1986
1988 /* wPortChange bits */ 1987 /* wPortChange bits */
1989 if (temp & PORT_CSC) 1988 if (temp & PORT_CSC)
1990 status |= 1 << USB_PORT_FEAT_C_CONNECTION; 1989 status |= USB_PORT_STAT_C_CONNECTION << 16;
1991 1990
1992 1991
1993 /* whoever resumes must GetPortStatus to complete it!! */ 1992 /* whoever resumes must GetPortStatus to complete it!! */
@@ -2007,7 +2006,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
2007 /* resume completed? */ 2006 /* resume completed? */
2008 else if (time_after_eq(jiffies, 2007 else if (time_after_eq(jiffies,
2009 priv->reset_done)) { 2008 priv->reset_done)) {
2010 status |= 1 << USB_PORT_FEAT_C_SUSPEND; 2009 status |= USB_PORT_STAT_C_SUSPEND << 16;
2011 priv->reset_done = 0; 2010 priv->reset_done = 0;
2012 2011
2013 /* stop resume signaling */ 2012 /* stop resume signaling */
@@ -2031,7 +2030,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
2031 if ((temp & PORT_RESET) 2030 if ((temp & PORT_RESET)
2032 && time_after_eq(jiffies, 2031 && time_after_eq(jiffies,
2033 priv->reset_done)) { 2032 priv->reset_done)) {
2034 status |= 1 << USB_PORT_FEAT_C_RESET; 2033 status |= USB_PORT_STAT_C_RESET << 16;
2035 priv->reset_done = 0; 2034 priv->reset_done = 0;
2036 2035
2037 /* force reset to complete */ 2036 /* force reset to complete */
@@ -2062,18 +2061,18 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
2062 printk(KERN_ERR "Warning: PORT_OWNER is set\n"); 2061 printk(KERN_ERR "Warning: PORT_OWNER is set\n");
2063 2062
2064 if (temp & PORT_CONNECT) { 2063 if (temp & PORT_CONNECT) {
2065 status |= 1 << USB_PORT_FEAT_CONNECTION; 2064 status |= USB_PORT_STAT_CONNECTION;
2066 /* status may be from integrated TT */ 2065 /* status may be from integrated TT */
2067 status |= ehci_port_speed(priv, temp); 2066 status |= ehci_port_speed(priv, temp);
2068 } 2067 }
2069 if (temp & PORT_PE) 2068 if (temp & PORT_PE)
2070 status |= 1 << USB_PORT_FEAT_ENABLE; 2069 status |= USB_PORT_STAT_ENABLE;
2071 if (temp & (PORT_SUSPEND|PORT_RESUME)) 2070 if (temp & (PORT_SUSPEND|PORT_RESUME))
2072 status |= 1 << USB_PORT_FEAT_SUSPEND; 2071 status |= USB_PORT_STAT_SUSPEND;
2073 if (temp & PORT_RESET) 2072 if (temp & PORT_RESET)
2074 status |= 1 << USB_PORT_FEAT_RESET; 2073 status |= USB_PORT_STAT_RESET;
2075 if (temp & PORT_POWER) 2074 if (temp & PORT_POWER)
2076 status |= 1 << USB_PORT_FEAT_POWER; 2075 status |= USB_PORT_STAT_POWER;
2077 2076
2078 put_unaligned(cpu_to_le32(status), (__le32 *) buf); 2077 put_unaligned(cpu_to_le32(status), (__le32 *) buf);
2079 break; 2078 break;
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 4293cfd28d61..8f0259eaa2c7 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -13,8 +13,8 @@
13#include <linux/io.h> 13#include <linux/io.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/usb/isp1760.h> 15#include <linux/usb/isp1760.h>
16#include <linux/usb/hcd.h>
16 17
17#include "../core/hcd.h"
18#include "isp1760-hcd.h" 18#include "isp1760-hcd.h"
19 19
20#ifdef CONFIG_PPC_OF 20#ifdef CONFIG_PPC_OF
@@ -36,7 +36,7 @@ static int of_isp1760_probe(struct of_device *dev,
36 struct resource memory; 36 struct resource memory;
37 struct of_irq oirq; 37 struct of_irq oirq;
38 int virq; 38 int virq;
39 u64 res_len; 39 resource_size_t res_len;
40 int ret; 40 int ret;
41 const unsigned int *prop; 41 const unsigned int *prop;
42 unsigned int devflags = 0; 42 unsigned int devflags = 0;
@@ -45,13 +45,12 @@ static int of_isp1760_probe(struct of_device *dev,
45 if (ret) 45 if (ret)
46 return -ENXIO; 46 return -ENXIO;
47 47
48 res = request_mem_region(memory.start, memory.end - memory.start + 1, 48 res_len = resource_size(&memory);
49 dev_name(&dev->dev)); 49
50 res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
50 if (!res) 51 if (!res)
51 return -EBUSY; 52 return -EBUSY;
52 53
53 res_len = memory.end - memory.start + 1;
54
55 if (of_irq_map_one(dp, 0, &oirq)) { 54 if (of_irq_map_one(dp, 0, &oirq)) {
56 ret = -ENODEV; 55 ret = -ENODEV;
57 goto release_reg; 56 goto release_reg;
@@ -92,7 +91,7 @@ static int of_isp1760_probe(struct of_device *dev,
92 return ret; 91 return ret;
93 92
94release_reg: 93release_reg:
95 release_mem_region(memory.start, memory.end - memory.start + 1); 94 release_mem_region(memory.start, res_len);
96 return ret; 95 return ret;
97} 96}
98 97
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index afe59be23645..fc576557d8a5 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -32,6 +32,7 @@
32#include <linux/list.h> 32#include <linux/list.h>
33#include <linux/usb.h> 33#include <linux/usb.h>
34#include <linux/usb/otg.h> 34#include <linux/usb/otg.h>
35#include <linux/usb/hcd.h>
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
36#include <linux/dmapool.h> 37#include <linux/dmapool.h>
37#include <linux/workqueue.h> 38#include <linux/workqueue.h>
@@ -43,7 +44,6 @@
43#include <asm/unaligned.h> 44#include <asm/unaligned.h>
44#include <asm/byteorder.h> 45#include <asm/byteorder.h>
45 46
46#include "../core/hcd.h"
47 47
48#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell" 48#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
49#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver" 49#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
@@ -1006,9 +1006,14 @@ MODULE_LICENSE ("GPL");
1006#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver 1006#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
1007#endif 1007#endif
1008 1008
1009#ifdef CONFIG_ARCH_OMAP 1009#ifdef CONFIG_USB_OHCI_HCD_OMAP1
1010#include "ohci-omap.c" 1010#include "ohci-omap.c"
1011#define PLATFORM_DRIVER ohci_hcd_omap_driver 1011#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
1012#endif
1013
1014#ifdef CONFIG_USB_OHCI_HCD_OMAP3
1015#include "ohci-omap3.c"
1016#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver
1012#endif 1017#endif
1013 1018
1014#ifdef CONFIG_ARCH_LH7A404 1019#ifdef CONFIG_ARCH_LH7A404
@@ -1092,6 +1097,8 @@ MODULE_LICENSE ("GPL");
1092 1097
1093#if !defined(PCI_DRIVER) && \ 1098#if !defined(PCI_DRIVER) && \
1094 !defined(PLATFORM_DRIVER) && \ 1099 !defined(PLATFORM_DRIVER) && \
1100 !defined(OMAP1_PLATFORM_DRIVER) && \
1101 !defined(OMAP3_PLATFORM_DRIVER) && \
1095 !defined(OF_PLATFORM_DRIVER) && \ 1102 !defined(OF_PLATFORM_DRIVER) && \
1096 !defined(SA1111_DRIVER) && \ 1103 !defined(SA1111_DRIVER) && \
1097 !defined(PS3_SYSTEM_BUS_DRIVER) && \ 1104 !defined(PS3_SYSTEM_BUS_DRIVER) && \
@@ -1133,6 +1140,18 @@ static int __init ohci_hcd_mod_init(void)
1133 goto error_platform; 1140 goto error_platform;
1134#endif 1141#endif
1135 1142
1143#ifdef OMAP1_PLATFORM_DRIVER
1144 retval = platform_driver_register(&OMAP1_PLATFORM_DRIVER);
1145 if (retval < 0)
1146 goto error_omap1_platform;
1147#endif
1148
1149#ifdef OMAP3_PLATFORM_DRIVER
1150 retval = platform_driver_register(&OMAP3_PLATFORM_DRIVER);
1151 if (retval < 0)
1152 goto error_omap3_platform;
1153#endif
1154
1136#ifdef OF_PLATFORM_DRIVER 1155#ifdef OF_PLATFORM_DRIVER
1137 retval = of_register_platform_driver(&OF_PLATFORM_DRIVER); 1156 retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
1138 if (retval < 0) 1157 if (retval < 0)
@@ -1200,6 +1219,14 @@ static int __init ohci_hcd_mod_init(void)
1200 platform_driver_unregister(&PLATFORM_DRIVER); 1219 platform_driver_unregister(&PLATFORM_DRIVER);
1201 error_platform: 1220 error_platform:
1202#endif 1221#endif
1222#ifdef OMAP1_PLATFORM_DRIVER
1223 platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
1224 error_omap1_platform:
1225#endif
1226#ifdef OMAP3_PLATFORM_DRIVER
1227 platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
1228 error_omap3_platform:
1229#endif
1203#ifdef PS3_SYSTEM_BUS_DRIVER 1230#ifdef PS3_SYSTEM_BUS_DRIVER
1204 ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); 1231 ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1205 error_ps3: 1232 error_ps3:
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
new file mode 100644
index 000000000000..2cc8a504b18c
--- /dev/null
+++ b/drivers/usb/host/ohci-omap3.c
@@ -0,0 +1,735 @@
1/*
2 * ohci-omap3.c - driver for OHCI on OMAP3 and later processors
3 *
4 * Bus Glue for OMAP3 USBHOST 3 port OHCI controller
5 * This controller is also used in later OMAPs and AM35x chips
6 *
7 * Copyright (C) 2007-2010 Texas Instruments, Inc.
8 * Author: Vikram Pandita <vikram.pandita@ti.com>
9 * Author: Anand Gadiyar <gadiyar@ti.com>
10 *
11 * Based on ehci-omap.c and some other ohci glue layers
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * TODO (last updated Mar 10th, 2010):
28 * - add kernel-doc
29 * - Factor out code common to EHCI to a separate file
30 * - Make EHCI and OHCI coexist together
31 * - needs newer silicon versions to actually work
32 * - the last one to be loaded currently steps on the other's toes
33 * - Add hooks for configuring transceivers, etc. at init/exit
34 * - Add aggressive clock-management code
35 */
36
37#include <linux/platform_device.h>
38#include <linux/clk.h>
39
40#include <plat/usb.h>
41
42/*
43 * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
44 * Use ohci_omap_readl()/ohci_omap_writel() functions
45 */
46
47/* TLL Register Set */
48#define OMAP_USBTLL_REVISION (0x00)
49#define OMAP_USBTLL_SYSCONFIG (0x10)
50#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
51#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
52#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
53#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
54#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
55
56#define OMAP_USBTLL_SYSSTATUS (0x14)
57#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
58
59#define OMAP_USBTLL_IRQSTATUS (0x18)
60#define OMAP_USBTLL_IRQENABLE (0x1C)
61
62#define OMAP_TLL_SHARED_CONF (0x30)
63#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
64#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
65#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
66#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
67#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
68
69#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
70#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24
71#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
72#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
73#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
74#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
75#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1)
76#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
77
78#define OMAP_TLL_CHANNEL_COUNT 3
79
80/* UHH Register Set */
81#define OMAP_UHH_REVISION (0x00)
82#define OMAP_UHH_SYSCONFIG (0x10)
83#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
84#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
85#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
86#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
87#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
88#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
89
90#define OMAP_UHH_SYSSTATUS (0x14)
91#define OMAP_UHH_SYSSTATUS_UHHRESETDONE (1 << 0)
92#define OMAP_UHH_SYSSTATUS_OHCIRESETDONE (1 << 1)
93#define OMAP_UHH_SYSSTATUS_EHCIRESETDONE (1 << 2)
94#define OMAP_UHH_HOSTCONFIG (0x40)
95#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
96#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
97#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
98#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
99#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
100#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
101#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
102#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
103#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
104#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
105#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
106
107#define OMAP_UHH_DEBUG_CSR (0x44)
108
109/*-------------------------------------------------------------------------*/
110
111static inline void ohci_omap_writel(void __iomem *base, u32 reg, u32 val)
112{
113 __raw_writel(val, base + reg);
114}
115
116static inline u32 ohci_omap_readl(void __iomem *base, u32 reg)
117{
118 return __raw_readl(base + reg);
119}
120
121static inline void ohci_omap_writeb(void __iomem *base, u8 reg, u8 val)
122{
123 __raw_writeb(val, base + reg);
124}
125
126static inline u8 ohci_omap_readb(void __iomem *base, u8 reg)
127{
128 return __raw_readb(base + reg);
129}
130
131/*-------------------------------------------------------------------------*/
132
133struct ohci_hcd_omap3 {
134 struct ohci_hcd *ohci;
135 struct device *dev;
136
137 struct clk *usbhost_ick;
138 struct clk *usbhost2_120m_fck;
139 struct clk *usbhost1_48m_fck;
140 struct clk *usbtll_fck;
141 struct clk *usbtll_ick;
142
143 /* port_mode: TLL/PHY, 2/3/4/6-PIN, DP-DM/DAT-SE0 */
144 enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS];
145 void __iomem *uhh_base;
146 void __iomem *tll_base;
147 void __iomem *ohci_base;
148
149 unsigned es2_compatibility:1;
150};
151
152/*-------------------------------------------------------------------------*/
153
154static void ohci_omap3_clock_power(struct ohci_hcd_omap3 *omap, int on)
155{
156 if (on) {
157 clk_enable(omap->usbtll_ick);
158 clk_enable(omap->usbtll_fck);
159 clk_enable(omap->usbhost_ick);
160 clk_enable(omap->usbhost1_48m_fck);
161 clk_enable(omap->usbhost2_120m_fck);
162 } else {
163 clk_disable(omap->usbhost2_120m_fck);
164 clk_disable(omap->usbhost1_48m_fck);
165 clk_disable(omap->usbhost_ick);
166 clk_disable(omap->usbtll_fck);
167 clk_disable(omap->usbtll_ick);
168 }
169}
170
171static int ohci_omap3_init(struct usb_hcd *hcd)
172{
173 dev_dbg(hcd->self.controller, "starting OHCI controller\n");
174
175 return ohci_init(hcd_to_ohci(hcd));
176}
177
178
179/*-------------------------------------------------------------------------*/
180
181static int ohci_omap3_start(struct usb_hcd *hcd)
182{
183 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
184 int ret;
185
186 /*
187 * RemoteWakeupConnected has to be set explicitly before
188 * calling ohci_run. The reset value of RWC is 0.
189 */
190 ohci->hc_control = OHCI_CTRL_RWC;
191 writel(OHCI_CTRL_RWC, &ohci->regs->control);
192
193 ret = ohci_run(ohci);
194
195 if (ret < 0) {
196 dev_err(hcd->self.controller, "can't start\n");
197 ohci_stop(hcd);
198 }
199
200 return ret;
201}
202
203/*-------------------------------------------------------------------------*/
204
205/*
206 * convert the port-mode enum to a value we can use in the FSLSMODE
207 * field of USBTLL_CHANNEL_CONF
208 */
209static unsigned ohci_omap3_fslsmode(enum ohci_omap3_port_mode mode)
210{
211 switch (mode) {
212 case OMAP_OHCI_PORT_MODE_UNUSED:
213 case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
214 return 0x0;
215
216 case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
217 return 0x1;
218
219 case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
220 return 0x2;
221
222 case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
223 return 0x3;
224
225 case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
226 return 0x4;
227
228 case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
229 return 0x5;
230
231 case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
232 return 0x6;
233
234 case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
235 return 0x7;
236
237 case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
238 return 0xA;
239
240 case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
241 return 0xB;
242 default:
243 pr_warning("Invalid port mode, using default\n");
244 return 0x0;
245 }
246}
247
248static void ohci_omap3_tll_config(struct ohci_hcd_omap3 *omap)
249{
250 u32 reg;
251 int i;
252
253 /* Program TLL SHARED CONF */
254 reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
255 reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
256 reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN;
257 reg |= OMAP_TLL_SHARED_CONF_USB_DIVRATION;
258 reg |= OMAP_TLL_SHARED_CONF_FCLK_IS_ON;
259 ohci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
260
261 /* Program each TLL channel */
262 /*
263 * REVISIT: Only the 3-pin and 4-pin PHY modes have
264 * actually been tested.
265 */
266 for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
267
268 /* Enable only those channels that are actually used */
269 if (omap->port_mode[i] == OMAP_OHCI_PORT_MODE_UNUSED)
270 continue;
271
272 reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
273 reg |= ohci_omap3_fslsmode(omap->port_mode[i])
274 << OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT;
275 reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS;
276 reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
277 ohci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
278 }
279}
280
281/* omap3_start_ohci
282 * - Start the TI USBHOST controller
283 */
284static int omap3_start_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
285{
286 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
287 u32 reg = 0;
288 int ret = 0;
289
290 dev_dbg(omap->dev, "starting TI OHCI USB Controller\n");
291
292 /* Get all the clock handles we need */
293 omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
294 if (IS_ERR(omap->usbhost_ick)) {
295 dev_err(omap->dev, "could not get usbhost_ick\n");
296 ret = PTR_ERR(omap->usbhost_ick);
297 goto err_host_ick;
298 }
299
300 omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
301 if (IS_ERR(omap->usbhost2_120m_fck)) {
302 dev_err(omap->dev, "could not get usbhost_120m_fck\n");
303 ret = PTR_ERR(omap->usbhost2_120m_fck);
304 goto err_host_120m_fck;
305 }
306
307 omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
308 if (IS_ERR(omap->usbhost1_48m_fck)) {
309 dev_err(omap->dev, "could not get usbhost_48m_fck\n");
310 ret = PTR_ERR(omap->usbhost1_48m_fck);
311 goto err_host_48m_fck;
312 }
313
314 omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
315 if (IS_ERR(omap->usbtll_fck)) {
316 dev_err(omap->dev, "could not get usbtll_fck\n");
317 ret = PTR_ERR(omap->usbtll_fck);
318 goto err_tll_fck;
319 }
320
321 omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
322 if (IS_ERR(omap->usbtll_ick)) {
323 dev_err(omap->dev, "could not get usbtll_ick\n");
324 ret = PTR_ERR(omap->usbtll_ick);
325 goto err_tll_ick;
326 }
327
328 /* Now enable all the clocks in the correct order */
329 ohci_omap3_clock_power(omap, 1);
330
331 /* perform TLL soft reset, and wait until reset is complete */
332 ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
333 OMAP_USBTLL_SYSCONFIG_SOFTRESET);
334
335 /* Wait for TLL reset to complete */
336 while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
337 & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
338 cpu_relax();
339
340 if (time_after(jiffies, timeout)) {
341 dev_dbg(omap->dev, "operation timed out\n");
342 ret = -EINVAL;
343 goto err_sys_status;
344 }
345 }
346
347 dev_dbg(omap->dev, "TLL reset done\n");
348
349 /* (1<<3) = no idle mode only for initial debugging */
350 ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
351 OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
352 OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
353 OMAP_USBTLL_SYSCONFIG_CACTIVITY);
354
355
356 /* Put UHH in NoIdle/NoStandby mode */
357 reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
358 reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
359 | OMAP_UHH_SYSCONFIG_SIDLEMODE
360 | OMAP_UHH_SYSCONFIG_CACTIVITY
361 | OMAP_UHH_SYSCONFIG_MIDLEMODE);
362 reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
363 reg &= ~OMAP_UHH_SYSCONFIG_SOFTRESET;
364
365 ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
366
367 reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
368
369 /* setup ULPI bypass and burst configurations */
370 reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
371 | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
372 | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
373 reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
374
375 /*
376 * REVISIT: Pi_CONNECT_STATUS controls MStandby
377 * assertion and Swakeup generation - let us not
378 * worry about this for now. OMAP HWMOD framework
379 * might take care of this later. If not, we can
380 * update these registers when adding aggressive
381 * clock management code.
382 *
383 * For now, turn off all the Pi_CONNECT_STATUS bits
384 *
385 if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
386 reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
387 if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
388 reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
389 if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
390 reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
391 */
392 reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
393 reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
394 reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
395
396 if (omap->es2_compatibility) {
397 /*
398 * All OHCI modes need to go through the TLL,
399 * unlike in the EHCI case. So use UTMI mode
400 * for all ports for OHCI, on ES2.x silicon
401 */
402 dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
403 reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
404 } else {
405 dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
406 if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
407 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
408 else
409 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
410
411 if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
412 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
413 else
414 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
415
416 if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
417 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
418 else
419 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
420
421 }
422 ohci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
423 dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
424
425 ohci_omap3_tll_config(omap);
426
427 return 0;
428
429err_sys_status:
430 ohci_omap3_clock_power(omap, 0);
431 clk_put(omap->usbtll_ick);
432
433err_tll_ick:
434 clk_put(omap->usbtll_fck);
435
436err_tll_fck:
437 clk_put(omap->usbhost1_48m_fck);
438
439err_host_48m_fck:
440 clk_put(omap->usbhost2_120m_fck);
441
442err_host_120m_fck:
443 clk_put(omap->usbhost_ick);
444
445err_host_ick:
446 return ret;
447}
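/*
 * The SYSSTATUS poll above follows the usual jiffies-based busy-wait
 * idiom.  A hypothetical helper capturing that idiom (sketch only, not
 * part of this driver) could look like:
 */
static int ohci_omap3_wait_bit(void __iomem *base, u32 reg, u32 mask,
		unsigned long timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (!(ohci_omap_readl(base, reg) & mask)) {
		cpu_relax();
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
	}

	return 0;
}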
448
449static void omap3_stop_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
450{
451 unsigned long timeout = jiffies + msecs_to_jiffies(100);
452
453	dev_dbg(omap->dev, "stopping TI OHCI USB Controller\n");
454
455 /* Reset USBHOST for insmod/rmmod to work */
456 ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
457 OMAP_UHH_SYSCONFIG_SOFTRESET);
458 while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
459 & OMAP_UHH_SYSSTATUS_UHHRESETDONE)) {
460 cpu_relax();
461
462 if (time_after(jiffies, timeout))
463 dev_dbg(omap->dev, "operation timed out\n");
464 }
465
466 while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
467 & OMAP_UHH_SYSSTATUS_OHCIRESETDONE)) {
468 cpu_relax();
469
470 if (time_after(jiffies, timeout))
471 dev_dbg(omap->dev, "operation timed out\n");
472 }
473
474 while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
475 & OMAP_UHH_SYSSTATUS_EHCIRESETDONE)) {
476 cpu_relax();
477
478 if (time_after(jiffies, timeout))
479 dev_dbg(omap->dev, "operation timed out\n");
480 }
481
482 ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
483
484 while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
485 & (1 << 0))) {
486 cpu_relax();
487
488 if (time_after(jiffies, timeout))
489 dev_dbg(omap->dev, "operation timed out\n");
490 }
491
492 ohci_omap3_clock_power(omap, 0);
493
494 if (omap->usbtll_fck != NULL) {
495 clk_put(omap->usbtll_fck);
496 omap->usbtll_fck = NULL;
497 }
498
499 if (omap->usbhost_ick != NULL) {
500 clk_put(omap->usbhost_ick);
501 omap->usbhost_ick = NULL;
502 }
503
504 if (omap->usbhost1_48m_fck != NULL) {
505 clk_put(omap->usbhost1_48m_fck);
506 omap->usbhost1_48m_fck = NULL;
507 }
508
509 if (omap->usbhost2_120m_fck != NULL) {
510 clk_put(omap->usbhost2_120m_fck);
511 omap->usbhost2_120m_fck = NULL;
512 }
513
514 if (omap->usbtll_ick != NULL) {
515 clk_put(omap->usbtll_ick);
516 omap->usbtll_ick = NULL;
517 }
518
519 dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
520}
521
522/*-------------------------------------------------------------------------*/
523
524static const struct hc_driver ohci_omap3_hc_driver = {
525 .description = hcd_name,
526 .product_desc = "OMAP3 OHCI Host Controller",
527 .hcd_priv_size = sizeof(struct ohci_hcd),
528
529 /*
530 * generic hardware linkage
531 */
532 .irq = ohci_irq,
533 .flags = HCD_USB11 | HCD_MEMORY,
534
535 /*
536 * basic lifecycle operations
537 */
538 .reset = ohci_omap3_init,
539 .start = ohci_omap3_start,
540 .stop = ohci_stop,
541 .shutdown = ohci_shutdown,
542
543 /*
544 * managing i/o requests and associated device resources
545 */
546 .urb_enqueue = ohci_urb_enqueue,
547 .urb_dequeue = ohci_urb_dequeue,
548 .endpoint_disable = ohci_endpoint_disable,
549
550 /*
551 * scheduling support
552 */
553 .get_frame_number = ohci_get_frame,
554
555 /*
556 * root hub support
557 */
558 .hub_status_data = ohci_hub_status_data,
559 .hub_control = ohci_hub_control,
560#ifdef CONFIG_PM
561 .bus_suspend = ohci_bus_suspend,
562 .bus_resume = ohci_bus_resume,
563#endif
564 .start_port_reset = ohci_start_port_reset,
565};
566
567/*-------------------------------------------------------------------------*/
568
569/*
570 * configure so an HC device and id are always provided
571 * always called with process context; sleeping is OK
572 */
573
574/**
575 * ohci_hcd_omap3_probe - initialize OMAP-based HCDs
576 *
577 * Allocates basic resources for this USB host controller, and
578 * then invokes the start() method for the HCD associated with it
579 * through the hotplug entry's driver_data.
580 */
581static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
582{
583 struct ohci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
584 struct ohci_hcd_omap3 *omap;
585 struct resource *res;
586 struct usb_hcd *hcd;
587 int ret = -ENODEV;
588 int irq;
589
590 if (usb_disabled())
591 goto err_disabled;
592
593 if (!pdata) {
594 dev_dbg(&pdev->dev, "missing platform_data\n");
595 goto err_pdata;
596 }
597
598 irq = platform_get_irq(pdev, 0);
599
600 omap = kzalloc(sizeof(*omap), GFP_KERNEL);
601 if (!omap) {
602 ret = -ENOMEM;
603 goto err_disabled;
604 }
605
606 hcd = usb_create_hcd(&ohci_omap3_hc_driver, &pdev->dev,
607 dev_name(&pdev->dev));
608 if (!hcd) {
609 ret = -ENOMEM;
610 goto err_create_hcd;
611 }
612
613 platform_set_drvdata(pdev, omap);
614 omap->dev = &pdev->dev;
615 omap->port_mode[0] = pdata->port_mode[0];
616 omap->port_mode[1] = pdata->port_mode[1];
617 omap->port_mode[2] = pdata->port_mode[2];
618 omap->es2_compatibility = pdata->es2_compatibility;
619 omap->ohci = hcd_to_ohci(hcd);
620
621 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
622
623 hcd->rsrc_start = res->start;
624 hcd->rsrc_len = resource_size(res);
625
626 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
627 if (!hcd->regs) {
628 dev_err(&pdev->dev, "OHCI ioremap failed\n");
629 ret = -ENOMEM;
630 goto err_ioremap;
631 }
632
633 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
634 omap->uhh_base = ioremap(res->start, resource_size(res));
635 if (!omap->uhh_base) {
636 dev_err(&pdev->dev, "UHH ioremap failed\n");
637 ret = -ENOMEM;
638 goto err_uhh_ioremap;
639 }
640
641 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
642 omap->tll_base = ioremap(res->start, resource_size(res));
643 if (!omap->tll_base) {
644 dev_err(&pdev->dev, "TLL ioremap failed\n");
645 ret = -ENOMEM;
646 goto err_tll_ioremap;
647 }
648
649 ret = omap3_start_ohci(omap, hcd);
650 if (ret) {
651		dev_dbg(&pdev->dev, "failed to start ohci\n");
652 goto err_start;
653 }
654
655 ohci_hcd_init(omap->ohci);
656
657 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
658 if (ret) {
659 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
660 goto err_add_hcd;
661 }
662
663 return 0;
664
665err_add_hcd:
666 omap3_stop_ohci(omap, hcd);
667
668err_start:
669 iounmap(omap->tll_base);
670
671err_tll_ioremap:
672 iounmap(omap->uhh_base);
673
674err_uhh_ioremap:
675 iounmap(hcd->regs);
676
677err_ioremap:
678 usb_put_hcd(hcd);
679
680err_create_hcd:
681 kfree(omap);
682err_pdata:
683err_disabled:
684 return ret;
685}
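/*
 * For reference, a board file hands this driver its configuration through
 * platform_data; a minimal sketch (field values and device wiring are
 * illustrative, not taken from any particular board) might look like:
 *
 *	static struct ohci_hcd_omap_platform_data board_ohci_pdata = {
 *		.port_mode[0]		= OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM,
 *		.port_mode[1]		= OMAP_OHCI_PORT_MODE_UNUSED,
 *		.port_mode[2]		= OMAP_OHCI_PORT_MODE_UNUSED,
 *		.es2_compatibility	= 0,
 *	};
 *
 * registered on a platform_device named "ohci-omap3" that supplies the
 * OHCI, UHH and TLL register ranges as IORESOURCE_MEM 0..2 plus the IRQ
 * consumed by the probe above.
 */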
686
687/*
688 * may be called without controller electrically present
689 * may be called with controller, bus, and devices active
690 */
691
692/**
693 * ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs
694 * @pdev: USB Host Controller being removed
695 *
696 * Reverses the effect of ohci_hcd_omap3_probe(), first invoking
697 * the HCD's stop() method. It is always called from a thread
698 * context, normally "rmmod", "apmd", or something similar.
699 */
700static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
701{
702 struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
703 struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
704
705 usb_remove_hcd(hcd);
706 omap3_stop_ohci(omap, hcd);
707 iounmap(hcd->regs);
708 iounmap(omap->tll_base);
709 iounmap(omap->uhh_base);
710 usb_put_hcd(hcd);
711 kfree(omap);
712
713 return 0;
714}
715
716static void ohci_hcd_omap3_shutdown(struct platform_device *pdev)
717{
718 struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
719 struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
720
721 if (hcd->driver->shutdown)
722 hcd->driver->shutdown(hcd);
723}
724
725static struct platform_driver ohci_hcd_omap3_driver = {
726 .probe = ohci_hcd_omap3_probe,
727 .remove = __devexit_p(ohci_hcd_omap3_remove),
728 .shutdown = ohci_hcd_omap3_shutdown,
729 .driver = {
730 .name = "ohci-omap3",
731 },
732};
733
734MODULE_ALIAS("platform:ohci-omap3");
735MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e62b30b3e429..f608dfd09a8a 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -34,12 +34,11 @@
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/usb.h> 36#include <linux/usb.h>
37#include <linux/usb/hcd.h>
37#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
38#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
39#include <linux/io.h> 40#include <linux/io.h>
40 41
41#include "../core/hcd.h"
42
43#include <asm/irq.h> 42#include <asm/irq.h>
44#include <asm/system.h> 43#include <asm/system.h>
45#include <asm/unaligned.h> 44#include <asm/unaligned.h>
@@ -3154,10 +3153,10 @@ static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
3154 case 0: 3153 case 0:
3155 return 0; 3154 return 0;
3156 case 1: 3155 case 1:
3157 return 1 << USB_PORT_FEAT_LOWSPEED; 3156 return USB_PORT_STAT_LOW_SPEED;
3158 case 2: 3157 case 2:
3159 default: 3158 default:
3160 return 1 << USB_PORT_FEAT_HIGHSPEED; 3159 return USB_PORT_STAT_HIGH_SPEED;
3161 } 3160 }
3162} 3161}
3163 3162
@@ -3202,7 +3201,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
3202 * Even if OWNER is set, so the port is owned by the 3201 * Even if OWNER is set, so the port is owned by the
3203 * companion controller, khubd needs to be able to clear 3202 * companion controller, khubd needs to be able to clear
3204 * the port-change status bits (especially 3203 * the port-change status bits (especially
3205 * USB_PORT_FEAT_C_CONNECTION). 3204 * USB_PORT_STAT_C_CONNECTION).
3206 */ 3205 */
3207 3206
3208 switch (wValue) { 3207 switch (wValue) {
@@ -3264,11 +3263,11 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
3264 3263
3265 /* wPortChange bits */ 3264 /* wPortChange bits */
3266 if (temp & PORT_CSC) 3265 if (temp & PORT_CSC)
3267 status |= 1 << USB_PORT_FEAT_C_CONNECTION; 3266 status |= USB_PORT_STAT_C_CONNECTION << 16;
3268 if (temp & PORT_PEC) 3267 if (temp & PORT_PEC)
3269 status |= 1 << USB_PORT_FEAT_C_ENABLE; 3268 status |= USB_PORT_STAT_C_ENABLE << 16;
3270 if ((temp & PORT_OCC) && !ignore_oc) 3269 if ((temp & PORT_OCC) && !ignore_oc)
3271 status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; 3270 status |= USB_PORT_STAT_C_OVERCURRENT << 16;
3272 3271
3273 /* whoever resumes must GetPortStatus to complete it!! */ 3272 /* whoever resumes must GetPortStatus to complete it!! */
3274 if (temp & PORT_RESUME) { 3273 if (temp & PORT_RESUME) {
@@ -3286,7 +3285,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
3286 /* resume completed? */ 3285 /* resume completed? */
3287 else if (time_after_eq(jiffies, 3286 else if (time_after_eq(jiffies,
3288 oxu->reset_done[wIndex])) { 3287 oxu->reset_done[wIndex])) {
3289 status |= 1 << USB_PORT_FEAT_C_SUSPEND; 3288 status |= USB_PORT_STAT_C_SUSPEND << 16;
3290 oxu->reset_done[wIndex] = 0; 3289 oxu->reset_done[wIndex] = 0;
3291 3290
3292 /* stop resume signaling */ 3291 /* stop resume signaling */
@@ -3309,7 +3308,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
3309 if ((temp & PORT_RESET) 3308 if ((temp & PORT_RESET)
3310 && time_after_eq(jiffies, 3309 && time_after_eq(jiffies,
3311 oxu->reset_done[wIndex])) { 3310 oxu->reset_done[wIndex])) {
3312 status |= 1 << USB_PORT_FEAT_C_RESET; 3311 status |= USB_PORT_STAT_C_RESET << 16;
3313 oxu->reset_done[wIndex] = 0; 3312 oxu->reset_done[wIndex] = 0;
3314 3313
3315 /* force reset to complete */ 3314 /* force reset to complete */
@@ -3348,20 +3347,20 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
3348 */ 3347 */
3349 3348
3350 if (temp & PORT_CONNECT) { 3349 if (temp & PORT_CONNECT) {
3351 status |= 1 << USB_PORT_FEAT_CONNECTION; 3350 status |= USB_PORT_STAT_CONNECTION;
3352 /* status may be from integrated TT */ 3351 /* status may be from integrated TT */
3353 status |= oxu_port_speed(oxu, temp); 3352 status |= oxu_port_speed(oxu, temp);
3354 } 3353 }
3355 if (temp & PORT_PE) 3354 if (temp & PORT_PE)
3356 status |= 1 << USB_PORT_FEAT_ENABLE; 3355 status |= USB_PORT_STAT_ENABLE;
3357 if (temp & (PORT_SUSPEND|PORT_RESUME)) 3356 if (temp & (PORT_SUSPEND|PORT_RESUME))
3358 status |= 1 << USB_PORT_FEAT_SUSPEND; 3357 status |= USB_PORT_STAT_SUSPEND;
3359 if (temp & PORT_OC) 3358 if (temp & PORT_OC)
3360 status |= 1 << USB_PORT_FEAT_OVER_CURRENT; 3359 status |= USB_PORT_STAT_OVERCURRENT;
3361 if (temp & PORT_RESET) 3360 if (temp & PORT_RESET)
3362 status |= 1 << USB_PORT_FEAT_RESET; 3361 status |= USB_PORT_STAT_RESET;
3363 if (temp & PORT_POWER) 3362 if (temp & PORT_POWER)
3364 status |= 1 << USB_PORT_FEAT_POWER; 3363 status |= USB_PORT_STAT_POWER;
3365 3364
3366#ifndef OXU_VERBOSE_DEBUG 3365#ifndef OXU_VERBOSE_DEBUG
3367 if (status & ~0xffff) /* only if wPortChange is interesting */ 3366 if (status & ~0xffff) /* only if wPortChange is interesting */
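The port-status conversions in the hunks above (and in the r8a66597, sl811 and xHCI hunks below) rely on the wPortStatus masks from <linux/usb/ch11.h> having the same numeric values as the old 1 << USB_PORT_FEAT_* encodings, with the wPortChange bits kept in the upper 16 bits of the combined status word these HCDs maintain. A minimal sanity check of that assumption (an illustrative sketch, to be dropped into any function) would be:

	/* Sketch: the STAT masks equal the old (1 << FEAT) encodings. */
	BUILD_BUG_ON(USB_PORT_STAT_LOW_SPEED != (1 << USB_PORT_FEAT_LOWSPEED));
	BUILD_BUG_ON(USB_PORT_STAT_ENABLE    != (1 << USB_PORT_FEAT_ENABLE));
	BUILD_BUG_ON((USB_PORT_STAT_C_CONNECTION << 16) !=
		     (1 << USB_PORT_FEAT_C_CONNECTION));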
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d478ffad59b4..6db57ab6079d 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -33,6 +33,7 @@
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/usb.h> 35#include <linux/usb.h>
36#include <linux/usb/hcd.h>
36#include <linux/platform_device.h> 37#include <linux/platform_device.h>
37#include <linux/io.h> 38#include <linux/io.h>
38#include <linux/mm.h> 39#include <linux/mm.h>
@@ -40,7 +41,6 @@
40#include <linux/slab.h> 41#include <linux/slab.h>
41#include <asm/cacheflush.h> 42#include <asm/cacheflush.h>
42 43
43#include "../core/hcd.h"
44#include "r8a66597.h" 44#include "r8a66597.h"
45 45
46MODULE_DESCRIPTION("R8A66597 USB Host Controller Driver"); 46MODULE_DESCRIPTION("R8A66597 USB Host Controller Driver");
@@ -1018,10 +1018,10 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
1018 rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST; 1018 rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
1019 rh->scount = R8A66597_MAX_SAMPLING; 1019 rh->scount = R8A66597_MAX_SAMPLING;
1020 if (connect) 1020 if (connect)
1021 rh->port |= 1 << USB_PORT_FEAT_CONNECTION; 1021 rh->port |= USB_PORT_STAT_CONNECTION;
1022 else 1022 else
1023 rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION); 1023 rh->port &= ~USB_PORT_STAT_CONNECTION;
1024 rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION; 1024 rh->port |= USB_PORT_STAT_C_CONNECTION << 16;
1025 1025
1026 r8a66597_root_hub_start_polling(r8a66597); 1026 r8a66597_root_hub_start_polling(r8a66597);
1027} 1027}
@@ -1059,15 +1059,14 @@ static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port)
1059 u16 speed = get_rh_usb_speed(r8a66597, port); 1059 u16 speed = get_rh_usb_speed(r8a66597, port);
1060 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; 1060 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
1061 1061
1062 rh->port &= ~((1 << USB_PORT_FEAT_HIGHSPEED) | 1062 rh->port &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED);
1063 (1 << USB_PORT_FEAT_LOWSPEED));
1064 if (speed == HSMODE) 1063 if (speed == HSMODE)
1065 rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED); 1064 rh->port |= USB_PORT_STAT_HIGH_SPEED;
1066 else if (speed == LSMODE) 1065 else if (speed == LSMODE)
1067 rh->port |= (1 << USB_PORT_FEAT_LOWSPEED); 1066 rh->port |= USB_PORT_STAT_LOW_SPEED;
1068 1067
1069	rh->port &= ~(1 << USB_PORT_FEAT_RESET); 1068	rh->port &= ~USB_PORT_STAT_RESET;
1070 rh->port |= 1 << USB_PORT_FEAT_ENABLE; 1069 rh->port |= USB_PORT_STAT_ENABLE;
1071} 1070}
1072 1071
1073/* this function must be called with interrupt disabled */ 1072/* this function must be called with interrupt disabled */
@@ -1706,7 +1705,7 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
1706 u16 tmp; 1705 u16 tmp;
1707 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; 1706 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
1708 1707
1709 if (rh->port & (1 << USB_PORT_FEAT_RESET)) { 1708 if (rh->port & USB_PORT_STAT_RESET) {
1710 unsigned long dvstctr_reg = get_dvstctr_reg(port); 1709 unsigned long dvstctr_reg = get_dvstctr_reg(port);
1711 1710
1712 tmp = r8a66597_read(r8a66597, dvstctr_reg); 1711 tmp = r8a66597_read(r8a66597, dvstctr_reg);
@@ -1718,7 +1717,7 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
1718 r8a66597_usb_connect(r8a66597, port); 1717 r8a66597_usb_connect(r8a66597, port);
1719 } 1718 }
1720 1719
1721 if (!(rh->port & (1 << USB_PORT_FEAT_CONNECTION))) { 1720 if (!(rh->port & USB_PORT_STAT_CONNECTION)) {
1722 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); 1721 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
1723 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); 1722 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
1724 } 1723 }
@@ -2186,7 +2185,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2186 2185
2187 switch (wValue) { 2186 switch (wValue) {
2188 case USB_PORT_FEAT_ENABLE: 2187 case USB_PORT_FEAT_ENABLE:
2189 rh->port &= ~(1 << USB_PORT_FEAT_POWER); 2188 rh->port &= ~USB_PORT_STAT_POWER;
2190 break; 2189 break;
2191 case USB_PORT_FEAT_SUSPEND: 2190 case USB_PORT_FEAT_SUSPEND:
2192 break; 2191 break;
@@ -2227,12 +2226,12 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2227 break; 2226 break;
2228 case USB_PORT_FEAT_POWER: 2227 case USB_PORT_FEAT_POWER:
2229 r8a66597_port_power(r8a66597, port, 1); 2228 r8a66597_port_power(r8a66597, port, 1);
2230 rh->port |= (1 << USB_PORT_FEAT_POWER); 2229 rh->port |= USB_PORT_STAT_POWER;
2231 break; 2230 break;
2232 case USB_PORT_FEAT_RESET: { 2231 case USB_PORT_FEAT_RESET: {
2233 struct r8a66597_device *dev = rh->dev; 2232 struct r8a66597_device *dev = rh->dev;
2234 2233
2235 rh->port |= (1 << USB_PORT_FEAT_RESET); 2234 rh->port |= USB_PORT_STAT_RESET;
2236 2235
2237 disable_r8a66597_pipe_all(r8a66597, dev); 2236 disable_r8a66597_pipe_all(r8a66597, dev);
2238 free_usb_address(r8a66597, dev, 1); 2237 free_usb_address(r8a66597, dev, 1);
@@ -2270,12 +2269,12 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd)
2270 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; 2269 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
2271 unsigned long dvstctr_reg = get_dvstctr_reg(port); 2270 unsigned long dvstctr_reg = get_dvstctr_reg(port);
2272 2271
2273 if (!(rh->port & (1 << USB_PORT_FEAT_ENABLE))) 2272 if (!(rh->port & USB_PORT_STAT_ENABLE))
2274 continue; 2273 continue;
2275 2274
2276 dbg("suspend port = %d", port); 2275 dbg("suspend port = %d", port);
2277 r8a66597_bclr(r8a66597, UACT, dvstctr_reg); /* suspend */ 2276 r8a66597_bclr(r8a66597, UACT, dvstctr_reg); /* suspend */
2278 rh->port |= 1 << USB_PORT_FEAT_SUSPEND; 2277 rh->port |= USB_PORT_STAT_SUSPEND;
2279 2278
2280 if (rh->dev->udev->do_remote_wakeup) { 2279 if (rh->dev->udev->do_remote_wakeup) {
2281 msleep(3); /* waiting last SOF */ 2280 msleep(3); /* waiting last SOF */
@@ -2301,12 +2300,12 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
2301 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; 2300 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
2302 unsigned long dvstctr_reg = get_dvstctr_reg(port); 2301 unsigned long dvstctr_reg = get_dvstctr_reg(port);
2303 2302
2304 if (!(rh->port & (1 << USB_PORT_FEAT_SUSPEND))) 2303 if (!(rh->port & USB_PORT_STAT_SUSPEND))
2305 continue; 2304 continue;
2306 2305
2307 dbg("resume port = %d", port); 2306 dbg("resume port = %d", port);
2308 rh->port &= ~(1 << USB_PORT_FEAT_SUSPEND); 2307 rh->port &= ~USB_PORT_STAT_SUSPEND;
2309		rh->port |= 1 << USB_PORT_FEAT_C_SUSPEND; 2308		rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
2310 r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg); 2309 r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
2311 msleep(50); 2310 msleep(50);
2312 r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg); 2311 r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 3b867a8af7b2..bcf9f0e809de 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -45,6 +45,7 @@
45#include <linux/interrupt.h> 45#include <linux/interrupt.h>
46#include <linux/usb.h> 46#include <linux/usb.h>
47#include <linux/usb/sl811.h> 47#include <linux/usb/sl811.h>
48#include <linux/usb/hcd.h>
48#include <linux/platform_device.h> 49#include <linux/platform_device.h>
49 50
50#include <asm/io.h> 51#include <asm/io.h>
@@ -53,7 +54,6 @@
53#include <asm/byteorder.h> 54#include <asm/byteorder.h>
54#include <asm/unaligned.h> 55#include <asm/unaligned.h>
55 56
56#include "../core/hcd.h"
57#include "sl811.h" 57#include "sl811.h"
58 58
59 59
@@ -90,10 +90,10 @@ static void port_power(struct sl811 *sl811, int is_on)
90 90
91 /* hub is inactive unless the port is powered */ 91 /* hub is inactive unless the port is powered */
92 if (is_on) { 92 if (is_on) {
93 if (sl811->port1 & (1 << USB_PORT_FEAT_POWER)) 93 if (sl811->port1 & USB_PORT_STAT_POWER)
94 return; 94 return;
95 95
96 sl811->port1 = (1 << USB_PORT_FEAT_POWER); 96 sl811->port1 = USB_PORT_STAT_POWER;
97 sl811->irq_enable = SL11H_INTMASK_INSRMV; 97 sl811->irq_enable = SL11H_INTMASK_INSRMV;
98 } else { 98 } else {
99 sl811->port1 = 0; 99 sl811->port1 = 0;
@@ -407,7 +407,7 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank)
407 407
408static inline void start_transfer(struct sl811 *sl811) 408static inline void start_transfer(struct sl811 *sl811)
409{ 409{
410 if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) 410 if (sl811->port1 & USB_PORT_STAT_SUSPEND)
411 return; 411 return;
412 if (sl811->active_a == NULL) { 412 if (sl811->active_a == NULL) {
413 sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF)); 413 sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF));
@@ -721,23 +721,23 @@ retry:
721 * force the reset and make khubd clean up later. 721 * force the reset and make khubd clean up later.
722 */ 722 */
723 if (irqstat & SL11H_INTMASK_RD) 723 if (irqstat & SL11H_INTMASK_RD)
724 sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION); 724 sl811->port1 &= ~USB_PORT_STAT_CONNECTION;
725 else 725 else
726 sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION; 726 sl811->port1 |= USB_PORT_STAT_CONNECTION;
727 727
728 sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION; 728 sl811->port1 |= USB_PORT_STAT_C_CONNECTION << 16;
729 729
730 } else if (irqstat & SL11H_INTMASK_RD) { 730 } else if (irqstat & SL11H_INTMASK_RD) {
731 if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) { 731 if (sl811->port1 & USB_PORT_STAT_SUSPEND) {
732 DBG("wakeup\n"); 732 DBG("wakeup\n");
733 sl811->port1 |= 1 << USB_PORT_FEAT_C_SUSPEND; 733 sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16;
734 sl811->stat_wake++; 734 sl811->stat_wake++;
735 } else 735 } else
736 irqstat &= ~SL11H_INTMASK_RD; 736 irqstat &= ~SL11H_INTMASK_RD;
737 } 737 }
738 738
739 if (irqstat) { 739 if (irqstat) {
740 if (sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)) 740 if (sl811->port1 & USB_PORT_STAT_ENABLE)
741 start_transfer(sl811); 741 start_transfer(sl811);
742 ret = IRQ_HANDLED; 742 ret = IRQ_HANDLED;
743 if (retries--) 743 if (retries--)
@@ -819,7 +819,7 @@ static int sl811h_urb_enqueue(
819 spin_lock_irqsave(&sl811->lock, flags); 819 spin_lock_irqsave(&sl811->lock, flags);
820 820
821 /* don't submit to a dead or disabled port */ 821 /* don't submit to a dead or disabled port */
822 if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)) 822 if (!(sl811->port1 & USB_PORT_STAT_ENABLE)
823 || !HC_IS_RUNNING(hcd->state)) { 823 || !HC_IS_RUNNING(hcd->state)) {
824 retval = -ENODEV; 824 retval = -ENODEV;
825 kfree(ep); 825 kfree(ep);
@@ -1119,9 +1119,9 @@ sl811h_timer(unsigned long _sl811)
1119 unsigned long flags; 1119 unsigned long flags;
1120 u8 irqstat; 1120 u8 irqstat;
1121 u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE; 1121 u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
1122 const u32 mask = (1 << USB_PORT_FEAT_CONNECTION) 1122 const u32 mask = USB_PORT_STAT_CONNECTION
1123 | (1 << USB_PORT_FEAT_ENABLE) 1123 | USB_PORT_STAT_ENABLE
1124 | (1 << USB_PORT_FEAT_LOWSPEED); 1124 | USB_PORT_STAT_LOW_SPEED;
1125 1125
1126 spin_lock_irqsave(&sl811->lock, flags); 1126 spin_lock_irqsave(&sl811->lock, flags);
1127 1127
@@ -1135,8 +1135,8 @@ sl811h_timer(unsigned long _sl811)
1135 switch (signaling) { 1135 switch (signaling) {
1136 case SL11H_CTL1MASK_SE0: 1136 case SL11H_CTL1MASK_SE0:
1137 DBG("end reset\n"); 1137 DBG("end reset\n");
1138 sl811->port1 = (1 << USB_PORT_FEAT_C_RESET) 1138 sl811->port1 = (USB_PORT_STAT_C_RESET << 16)
1139 | (1 << USB_PORT_FEAT_POWER); 1139 | USB_PORT_STAT_POWER;
1140 sl811->ctrl1 = 0; 1140 sl811->ctrl1 = 0;
1141 /* don't wrongly ack RD */ 1141 /* don't wrongly ack RD */
1142 if (irqstat & SL11H_INTMASK_INSRMV) 1142 if (irqstat & SL11H_INTMASK_INSRMV)
@@ -1144,7 +1144,7 @@ sl811h_timer(unsigned long _sl811)
1144 break; 1144 break;
1145 case SL11H_CTL1MASK_K: 1145 case SL11H_CTL1MASK_K:
1146 DBG("end resume\n"); 1146 DBG("end resume\n");
1147 sl811->port1 &= ~(1 << USB_PORT_FEAT_SUSPEND); 1147 sl811->port1 &= ~USB_PORT_STAT_SUSPEND;
1148 break; 1148 break;
1149 default: 1149 default:
1150 DBG("odd timer signaling: %02x\n", signaling); 1150 DBG("odd timer signaling: %02x\n", signaling);
@@ -1154,26 +1154,26 @@ sl811h_timer(unsigned long _sl811)
1154 1154
1155 if (irqstat & SL11H_INTMASK_RD) { 1155 if (irqstat & SL11H_INTMASK_RD) {
1156 /* usbcore nukes all pending transactions on disconnect */ 1156 /* usbcore nukes all pending transactions on disconnect */
1157 if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION)) 1157 if (sl811->port1 & USB_PORT_STAT_CONNECTION)
1158 sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION) 1158 sl811->port1 |= (USB_PORT_STAT_C_CONNECTION << 16)
1159 | (1 << USB_PORT_FEAT_C_ENABLE); 1159 | (USB_PORT_STAT_C_ENABLE << 16);
1160 sl811->port1 &= ~mask; 1160 sl811->port1 &= ~mask;
1161 sl811->irq_enable = SL11H_INTMASK_INSRMV; 1161 sl811->irq_enable = SL11H_INTMASK_INSRMV;
1162 } else { 1162 } else {
1163 sl811->port1 |= mask; 1163 sl811->port1 |= mask;
1164 if (irqstat & SL11H_INTMASK_DP) 1164 if (irqstat & SL11H_INTMASK_DP)
1165 sl811->port1 &= ~(1 << USB_PORT_FEAT_LOWSPEED); 1165 sl811->port1 &= ~USB_PORT_STAT_LOW_SPEED;
1166 sl811->irq_enable = SL11H_INTMASK_INSRMV | SL11H_INTMASK_RD; 1166 sl811->irq_enable = SL11H_INTMASK_INSRMV | SL11H_INTMASK_RD;
1167 } 1167 }
1168 1168
1169 if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION)) { 1169 if (sl811->port1 & USB_PORT_STAT_CONNECTION) {
1170 u8 ctrl2 = SL811HS_CTL2_INIT; 1170 u8 ctrl2 = SL811HS_CTL2_INIT;
1171 1171
1172 sl811->irq_enable |= SL11H_INTMASK_DONE_A; 1172 sl811->irq_enable |= SL11H_INTMASK_DONE_A;
1173#ifdef USE_B 1173#ifdef USE_B
1174 sl811->irq_enable |= SL11H_INTMASK_DONE_B; 1174 sl811->irq_enable |= SL11H_INTMASK_DONE_B;
1175#endif 1175#endif
1176 if (sl811->port1 & (1 << USB_PORT_FEAT_LOWSPEED)) { 1176 if (sl811->port1 & USB_PORT_STAT_LOW_SPEED) {
1177 sl811->ctrl1 |= SL11H_CTL1MASK_LSPD; 1177 sl811->ctrl1 |= SL11H_CTL1MASK_LSPD;
1178 ctrl2 |= SL811HS_CTL2MASK_DSWAP; 1178 ctrl2 |= SL811HS_CTL2MASK_DSWAP;
1179 } 1179 }
@@ -1233,7 +1233,7 @@ sl811h_hub_control(
1233 1233
1234 switch (wValue) { 1234 switch (wValue) {
1235 case USB_PORT_FEAT_ENABLE: 1235 case USB_PORT_FEAT_ENABLE:
1236 sl811->port1 &= (1 << USB_PORT_FEAT_POWER); 1236 sl811->port1 &= USB_PORT_STAT_POWER;
1237 sl811->ctrl1 = 0; 1237 sl811->ctrl1 = 0;
1238 sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1); 1238 sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
1239 sl811->irq_enable = SL11H_INTMASK_INSRMV; 1239 sl811->irq_enable = SL11H_INTMASK_INSRMV;
@@ -1241,7 +1241,7 @@ sl811h_hub_control(
1241 sl811->irq_enable); 1241 sl811->irq_enable);
1242 break; 1242 break;
1243 case USB_PORT_FEAT_SUSPEND: 1243 case USB_PORT_FEAT_SUSPEND:
1244 if (!(sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND))) 1244 if (!(sl811->port1 & USB_PORT_STAT_SUSPEND))
1245 break; 1245 break;
1246 1246
1247 /* 20 msec of resume/K signaling, other irqs blocked */ 1247 /* 20 msec of resume/K signaling, other irqs blocked */
@@ -1290,9 +1290,9 @@ sl811h_hub_control(
1290 goto error; 1290 goto error;
1291 switch (wValue) { 1291 switch (wValue) {
1292 case USB_PORT_FEAT_SUSPEND: 1292 case USB_PORT_FEAT_SUSPEND:
1293 if (sl811->port1 & (1 << USB_PORT_FEAT_RESET)) 1293 if (sl811->port1 & USB_PORT_STAT_RESET)
1294 goto error; 1294 goto error;
1295 if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))) 1295 if (!(sl811->port1 & USB_PORT_STAT_ENABLE))
1296 goto error; 1296 goto error;
1297 1297
1298 DBG("suspend...\n"); 1298 DBG("suspend...\n");
@@ -1303,9 +1303,9 @@ sl811h_hub_control(
1303 port_power(sl811, 1); 1303 port_power(sl811, 1);
1304 break; 1304 break;
1305 case USB_PORT_FEAT_RESET: 1305 case USB_PORT_FEAT_RESET:
1306 if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) 1306 if (sl811->port1 & USB_PORT_STAT_SUSPEND)
1307 goto error; 1307 goto error;
1308 if (!(sl811->port1 & (1 << USB_PORT_FEAT_POWER))) 1308 if (!(sl811->port1 & USB_PORT_STAT_POWER))
1309 break; 1309 break;
1310 1310
1311 /* 50 msec of reset/SE0 signaling, irqs blocked */ 1311 /* 50 msec of reset/SE0 signaling, irqs blocked */
@@ -1314,7 +1314,7 @@ sl811h_hub_control(
1314 sl811->irq_enable); 1314 sl811->irq_enable);
1315 sl811->ctrl1 = SL11H_CTL1MASK_SE0; 1315 sl811->ctrl1 = SL11H_CTL1MASK_SE0;
1316 sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1); 1316 sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
1317 sl811->port1 |= (1 << USB_PORT_FEAT_RESET); 1317 sl811->port1 |= USB_PORT_STAT_RESET;
1318 mod_timer(&sl811->timer, jiffies 1318 mod_timer(&sl811->timer, jiffies
1319 + msecs_to_jiffies(50)); 1319 + msecs_to_jiffies(50));
1320 break; 1320 break;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 228f2b070f2b..5b31bae92dbc 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -49,6 +49,7 @@
49#include <linux/list.h> 49#include <linux/list.h>
50#include <linux/interrupt.h> 50#include <linux/interrupt.h>
51#include <linux/usb.h> 51#include <linux/usb.h>
52#include <linux/usb/hcd.h>
52#include <linux/workqueue.h> 53#include <linux/workqueue.h>
53#include <linux/platform_device.h> 54#include <linux/platform_device.h>
54#include <linux/mutex.h> 55#include <linux/mutex.h>
@@ -56,7 +57,6 @@
56#include <asm/irq.h> 57#include <asm/irq.h>
57#include <asm/system.h> 58#include <asm/system.h>
58#include <asm/byteorder.h> 59#include <asm/byteorder.h>
59#include "../core/hcd.h"
60 60
61 /* FIXME ohci.h is ONLY for internal use by the OHCI driver. 61 /* FIXME ohci.h is ONLY for internal use by the OHCI driver.
62 * If you're going to try stuff like this, you need to split 62 * If you're going to try stuff like this, you need to split
@@ -1446,9 +1446,9 @@ static void u132_hcd_endp_work_scheduler(struct work_struct *work)
1446 return; 1446 return;
1447 } else { 1447 } else {
1448 int retval; 1448 int retval;
1449 u8 address = u132->addr[endp->usb_addr].address;
1450 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & 1449 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
1451 endp->queue_next]; 1450 endp->queue_next];
1451 address = u132->addr[endp->usb_addr].address;
1452 endp->active = 1; 1452 endp->active = 1;
1453 ring->curr_endp = endp; 1453 ring->curr_endp = endp;
1454 ring->in_use = 1; 1454 ring->in_use = 1;
@@ -3120,8 +3120,8 @@ static int __devinit u132_probe(struct platform_device *pdev)
3120 ftdi_elan_gone_away(pdev); 3120 ftdi_elan_gone_away(pdev);
3121 return -ENOMEM; 3121 return -ENOMEM;
3122 } else { 3122 } else {
3123 int retval = 0;
3124 struct u132 *u132 = hcd_to_u132(hcd); 3123 struct u132 *u132 = hcd_to_u132(hcd);
3124 retval = 0;
3125 hcd->rsrc_start = 0; 3125 hcd->rsrc_start = 0;
3126 mutex_lock(&u132_module_lock); 3126 mutex_lock(&u132_module_lock);
3127 list_add_tail(&u132->u132_list, &u132_static_list); 3127 list_add_tail(&u132->u132_list, &u132_static_list);
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 09197067fe6b..6637e52736dd 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -38,6 +38,7 @@
38#include <linux/dmapool.h> 38#include <linux/dmapool.h>
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40#include <linux/usb.h> 40#include <linux/usb.h>
41#include <linux/usb/hcd.h>
41#include <linux/bitops.h> 42#include <linux/bitops.h>
42#include <linux/dmi.h> 43#include <linux/dmi.h>
43 44
@@ -46,7 +47,6 @@
46#include <asm/irq.h> 47#include <asm/irq.h>
47#include <asm/system.h> 48#include <asm/system.h>
48 49
49#include "../core/hcd.h"
50#include "uhci-hcd.h" 50#include "uhci-hcd.h"
51#include "pci-quirks.h" 51#include "pci-quirks.h"
52 52
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index c5305b599ca0..767af265e002 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -30,7 +30,7 @@ struct whc_dbg {
30 struct dentry *pzl_f; 30 struct dentry *pzl_f;
31}; 31};
32 32
33void qset_print(struct seq_file *s, struct whc_qset *qset) 33static void qset_print(struct seq_file *s, struct whc_qset *qset)
34{ 34{
35 static const char *qh_type[] = { 35 static const char *qh_type[] = {
36 "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", }; 36 "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 141d049beb3e..ab5a14fbfeeb 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
443 443
444 remaining = urb->transfer_buffer_length; 444 remaining = urb->transfer_buffer_length;
445 445
446 for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) { 446 for_each_sg(urb->sg, sg, urb->num_sgs, i) {
447 dma_addr_t dma_addr; 447 dma_addr_t dma_addr;
448 size_t dma_remaining; 448 size_t dma_remaining;
449 dma_addr_t sp, ep; 449 dma_addr_t sp, ep;
@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
561 561
562 remaining = urb->transfer_buffer_length; 562 remaining = urb->transfer_buffer_length;
563 563
564 for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) { 564 for_each_sg(urb->sg, sg, urb->num_sgs, i) {
565 size_t len; 565 size_t len;
566 size_t sg_remaining; 566 size_t sg_remaining;
567 void *orig; 567 void *orig;
@@ -646,7 +646,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
646 wurb->urb = urb; 646 wurb->urb = urb;
647 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); 647 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
648 648
649 if (urb->sg) { 649 if (urb->num_sgs) {
650 ret = qset_add_urb_sg(whc, qset, urb, mem_flags); 650 ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
651 if (ret == -EINVAL) { 651 if (ret == -EINVAL) {
652 qset_free_stds(qset, urb); 652 qset_free_stds(qset, urb);
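These qset.c hunks track the core change in this series that turned urb->sg into a direct pointer to the scatterlist array (with urb->num_sgs entries) rather than to a struct usb_sg_request, so for_each_sg() now walks urb->sg itself and scatter-gather URBs are detected via urb->num_sgs.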
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 105fa8b025bb..fcbf4abbf381 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -364,6 +364,30 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
364 xhci_debug_segment(xhci, seg); 364 xhci_debug_segment(xhci, seg);
365} 365}
366 366
367void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
368 unsigned int slot_id, unsigned int ep_index,
369 struct xhci_virt_ep *ep)
370{
371 int i;
372 struct xhci_ring *ring;
373
374 if (ep->ep_state & EP_HAS_STREAMS) {
375 for (i = 1; i < ep->stream_info->num_streams; i++) {
376 ring = ep->stream_info->stream_rings[i];
377 xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
378 slot_id, ep_index, i);
379 xhci_debug_segment(xhci, ring->deq_seg);
380 }
381 } else {
382 ring = ep->ring;
383 if (!ring)
384 return;
385 xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
386 slot_id, ep_index);
387 xhci_debug_segment(xhci, ring->deq_seg);
388 }
389}
390
367void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) 391void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
368{ 392{
369 u32 addr = (u32) erst->erst_dma_addr; 393 u32 addr = (u32) erst->erst_dma_addr;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 208b805b80eb..a1a7a9795536 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -64,15 +64,15 @@ static void xhci_hub_descriptor(struct xhci_hcd *xhci,
64static unsigned int xhci_port_speed(unsigned int port_status) 64static unsigned int xhci_port_speed(unsigned int port_status)
65{ 65{
66 if (DEV_LOWSPEED(port_status)) 66 if (DEV_LOWSPEED(port_status))
67 return 1 << USB_PORT_FEAT_LOWSPEED; 67 return USB_PORT_STAT_LOW_SPEED;
68 if (DEV_HIGHSPEED(port_status)) 68 if (DEV_HIGHSPEED(port_status))
69 return 1 << USB_PORT_FEAT_HIGHSPEED; 69 return USB_PORT_STAT_HIGH_SPEED;
70 if (DEV_SUPERSPEED(port_status)) 70 if (DEV_SUPERSPEED(port_status))
71 return 1 << USB_PORT_FEAT_SUPERSPEED; 71 return USB_PORT_STAT_SUPER_SPEED;
72 /* 72 /*
73 * FIXME: Yes, we should check for full speed, but the core uses that as 73 * FIXME: Yes, we should check for full speed, but the core uses that as
74 * a default in portspeed() in usb/core/hub.c (which is the only place 74 * a default in portspeed() in usb/core/hub.c (which is the only place
75 * USB_PORT_FEAT_*SPEED is used). 75 * USB_PORT_STAT_*_SPEED is used).
76 */ 76 */
77 return 0; 77 return 0;
78} 78}
@@ -205,27 +205,27 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
205 205
206 /* wPortChange bits */ 206 /* wPortChange bits */
207 if (temp & PORT_CSC) 207 if (temp & PORT_CSC)
208 status |= 1 << USB_PORT_FEAT_C_CONNECTION; 208 status |= USB_PORT_STAT_C_CONNECTION << 16;
209 if (temp & PORT_PEC) 209 if (temp & PORT_PEC)
210 status |= 1 << USB_PORT_FEAT_C_ENABLE; 210 status |= USB_PORT_STAT_C_ENABLE << 16;
211 if ((temp & PORT_OCC)) 211 if ((temp & PORT_OCC))
212 status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; 212 status |= USB_PORT_STAT_C_OVERCURRENT << 16;
213 /* 213 /*
214 * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific 214 * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
215 * changes 215 * changes
216 */ 216 */
217 if (temp & PORT_CONNECT) { 217 if (temp & PORT_CONNECT) {
218 status |= 1 << USB_PORT_FEAT_CONNECTION; 218 status |= USB_PORT_STAT_CONNECTION;
219 status |= xhci_port_speed(temp); 219 status |= xhci_port_speed(temp);
220 } 220 }
221 if (temp & PORT_PE) 221 if (temp & PORT_PE)
222 status |= 1 << USB_PORT_FEAT_ENABLE; 222 status |= USB_PORT_STAT_ENABLE;
223 if (temp & PORT_OC) 223 if (temp & PORT_OC)
224 status |= 1 << USB_PORT_FEAT_OVER_CURRENT; 224 status |= USB_PORT_STAT_OVERCURRENT;
225 if (temp & PORT_RESET) 225 if (temp & PORT_RESET)
226 status |= 1 << USB_PORT_FEAT_RESET; 226 status |= USB_PORT_STAT_RESET;
227 if (temp & PORT_POWER) 227 if (temp & PORT_POWER)
228 status |= 1 << USB_PORT_FEAT_POWER; 228 status |= USB_PORT_STAT_POWER;
229 xhci_dbg(xhci, "Get port status returned 0x%x\n", status); 229 xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
230 put_unaligned(cpu_to_le32(status), (__le32 *) buf); 230 put_unaligned(cpu_to_le32(status), (__le32 *) buf);
231 break; 231 break;
@@ -298,7 +298,6 @@ error:
298 * Returns 0 if the status hasn't changed, or the number of bytes in buf. 298 * Returns 0 if the status hasn't changed, or the number of bytes in buf.
299 * Ports are 0-indexed from the HCD point of view, 299 * Ports are 0-indexed from the HCD point of view,
300 * and 1-indexed from the USB core point of view. 300 * and 1-indexed from the USB core point of view.
301 * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
302 * 301 *
303 * Note that the status change bits will be cleared as soon as a port status 302 * Note that the status change bits will be cleared as soon as a port status
304 * change event is generated, so we use the saved status from that event. 303 * change event is generated, so we use the saved status from that event.
@@ -315,14 +314,9 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
315 ports = HCS_MAX_PORTS(xhci->hcs_params1); 314 ports = HCS_MAX_PORTS(xhci->hcs_params1);
316 315
317 /* Initial status is no changes */ 316 /* Initial status is no changes */
318 buf[0] = 0; 317 retval = (ports + 8) / 8;
318 memset(buf, 0, retval);
319 status = 0; 319 status = 0;
320 if (ports > 7) {
321 buf[1] = 0;
322 retval = 2;
323 } else {
324 retval = 1;
325 }
326 320
327 spin_lock_irqsave(&xhci->lock, flags); 321 spin_lock_irqsave(&xhci->lock, flags);
328 /* For each port, did anything change? If so, set that bit in buf. */ 322 /* For each port, did anything change? If so, set that bit in buf. */
@@ -331,10 +325,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
331 NUM_PORT_REGS*i; 325 NUM_PORT_REGS*i;
332 temp = xhci_readl(xhci, addr); 326 temp = xhci_readl(xhci, addr);
333 if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) { 327 if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
334 if (i < 7) 328 buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
335 buf[0] |= 1 << (i + 1);
336 else
337 buf[1] |= 1 << (i - 7);
338 status = 1; 329 status = 1;
339 } 330 }
340 } 331 }
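Worked example for the generic bitmap handling above: port N (1-based) sets bit N of buf, so port 7 lands in buf[0] bit 7 and port 8 in buf[1] bit 0; the returned length (ports + 8) / 8 covers all port bits plus the reserved hub-change bit 0, e.g. two bytes for a 15-port root hub.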
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d64f5724bfc4..fd9e03afd91c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -41,13 +41,13 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
41 41
42 seg = kzalloc(sizeof *seg, flags); 42 seg = kzalloc(sizeof *seg, flags);
43 if (!seg) 43 if (!seg)
44 return 0; 44 return NULL;
45 xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg); 45 xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
46 46
47 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); 47 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
48 if (!seg->trbs) { 48 if (!seg->trbs) {
49 kfree(seg); 49 kfree(seg);
50 return 0; 50 return NULL;
51 } 51 }
52 xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n", 52 xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
53 seg->trbs, (unsigned long long)dma); 53 seg->trbs, (unsigned long long)dma);
@@ -159,7 +159,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
159 ring = kzalloc(sizeof *(ring), flags); 159 ring = kzalloc(sizeof *(ring), flags);
160 xhci_dbg(xhci, "Allocating ring at %p\n", ring); 160 xhci_dbg(xhci, "Allocating ring at %p\n", ring);
161 if (!ring) 161 if (!ring)
162 return 0; 162 return NULL;
163 163
164 INIT_LIST_HEAD(&ring->td_list); 164 INIT_LIST_HEAD(&ring->td_list);
165 if (num_segs == 0) 165 if (num_segs == 0)
@@ -196,7 +196,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
196 196
197fail: 197fail:
198 xhci_ring_free(xhci, ring); 198 xhci_ring_free(xhci, ring);
199 return 0; 199 return NULL;
200} 200}
201 201
202void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, 202void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
@@ -247,7 +247,7 @@ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
247 247
248#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 248#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
249 249
250struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 250static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
251 int type, gfp_t flags) 251 int type, gfp_t flags)
252{ 252{
253 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); 253 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
@@ -265,7 +265,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
265 return ctx; 265 return ctx;
266} 266}
267 267
268void xhci_free_container_ctx(struct xhci_hcd *xhci, 268static void xhci_free_container_ctx(struct xhci_hcd *xhci,
269 struct xhci_container_ctx *ctx) 269 struct xhci_container_ctx *ctx)
270{ 270{
271 if (!ctx) 271 if (!ctx)
@@ -304,6 +304,422 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
304 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 304 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
305} 305}
306 306
307
308/***************** Streams structures manipulation *************************/
309
310void xhci_free_stream_ctx(struct xhci_hcd *xhci,
311 unsigned int num_stream_ctxs,
312 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
313{
314 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
315
316 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
317 pci_free_consistent(pdev,
318 sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
319 stream_ctx, dma);
320 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
321 return dma_pool_free(xhci->small_streams_pool,
322 stream_ctx, dma);
323 else
324 return dma_pool_free(xhci->medium_streams_pool,
325 stream_ctx, dma);
326}
327
328/*
329 * The stream context array for each endpoint with bulk streams enabled can
330 * vary in size, based on:
331 * - how many streams the endpoint supports,
332 * - the maximum primary stream array size the host controller supports,
333 * - and how many streams the device driver asks for.
334 *
335 * The stream context array must be a power of 2, and can be as small as
336 * 64 bytes or as large as 1MB.
337 */
338struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
339 unsigned int num_stream_ctxs, dma_addr_t *dma,
340 gfp_t mem_flags)
341{
342 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
343
344 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
345 return pci_alloc_consistent(pdev,
346 sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
347 dma);
348 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
349 return dma_pool_alloc(xhci->small_streams_pool,
350 mem_flags, dma);
351 else
352 return dma_pool_alloc(xhci->medium_streams_pool,
353 mem_flags, dma);
354}
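/*
 * Sizing example (illustrative): a driver asking for 100 streams needs a
 * 128-entry primary stream array; at the 16 bytes per stream context
 * defined by the xHCI specification that is a 2KB allocation, comfortably
 * inside the 64-byte to 1MB range noted above.
 */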
355
356struct xhci_ring *xhci_dma_to_transfer_ring(
357 struct xhci_virt_ep *ep,
358 u64 address)
359{
360 if (ep->ep_state & EP_HAS_STREAMS)
361 return radix_tree_lookup(&ep->stream_info->trb_address_map,
362 address >> SEGMENT_SHIFT);
363 return ep->ring;
364}
365
366/* Only use this when you know stream_info is valid */
367#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
368static struct xhci_ring *dma_to_stream_ring(
369 struct xhci_stream_info *stream_info,
370 u64 address)
371{
372 return radix_tree_lookup(&stream_info->trb_address_map,
373 address >> SEGMENT_SHIFT);
374}
375#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
376
377struct xhci_ring *xhci_stream_id_to_ring(
378 struct xhci_virt_device *dev,
379 unsigned int ep_index,
380 unsigned int stream_id)
381{
382 struct xhci_virt_ep *ep = &dev->eps[ep_index];
383
384 if (stream_id == 0)
385 return ep->ring;
386 if (!ep->stream_info)
387 return NULL;
388
389 if (stream_id > ep->stream_info->num_streams)
390 return NULL;
391 return ep->stream_info->stream_rings[stream_id];
392}
393
394struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
395 unsigned int slot_id, unsigned int ep_index,
396 unsigned int stream_id)
397{
398 struct xhci_virt_ep *ep;
399
400 ep = &xhci->devs[slot_id]->eps[ep_index];
401 /* Common case: no streams */
402 if (!(ep->ep_state & EP_HAS_STREAMS))
403 return ep->ring;
404
405 if (stream_id == 0) {
406 xhci_warn(xhci,
407 "WARN: Slot ID %u, ep index %u has streams, "
408 "but URB has no stream ID.\n",
409 slot_id, ep_index);
410 return NULL;
411 }
412
413 if (stream_id < ep->stream_info->num_streams)
414 return ep->stream_info->stream_rings[stream_id];
415
416 xhci_warn(xhci,
417 "WARN: Slot ID %u, ep index %u has "
418 "stream IDs 1 to %u allocated, "
419 "but stream ID %u is requested.\n",
420 slot_id, ep_index,
421 ep->stream_info->num_streams - 1,
422 stream_id);
423 return NULL;
424}
425
426/* Get the right ring for the given URB.
427 * If the endpoint supports streams, boundary check the URB's stream ID.
428 * If the endpoint doesn't support streams, return the singular endpoint ring.
429 */
430struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
431 struct urb *urb)
432{
433 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
434 xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
435}
436
437#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
438static int xhci_test_radix_tree(struct xhci_hcd *xhci,
439 unsigned int num_streams,
440 struct xhci_stream_info *stream_info)
441{
442 u32 cur_stream;
443 struct xhci_ring *cur_ring;
444 u64 addr;
445
446 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
447 struct xhci_ring *mapped_ring;
448 int trb_size = sizeof(union xhci_trb);
449
450 cur_ring = stream_info->stream_rings[cur_stream];
451 for (addr = cur_ring->first_seg->dma;
452 addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
453 addr += trb_size) {
454 mapped_ring = dma_to_stream_ring(stream_info, addr);
455 if (cur_ring != mapped_ring) {
456 xhci_warn(xhci, "WARN: DMA address 0x%08llx "
457 "didn't map to stream ID %u; "
458 "mapped to ring %p\n",
459 (unsigned long long) addr,
460 cur_stream,
461 mapped_ring);
462 return -EINVAL;
463 }
464 }
465 /* One TRB after the end of the ring segment shouldn't return a
466 * pointer to the current ring (although it may be a part of a
467 * different ring).
468 */
469 mapped_ring = dma_to_stream_ring(stream_info, addr);
470 if (mapped_ring != cur_ring) {
471 /* One TRB before should also fail */
472 addr = cur_ring->first_seg->dma - trb_size;
473 mapped_ring = dma_to_stream_ring(stream_info, addr);
474 }
475 if (mapped_ring == cur_ring) {
476 xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
477 "mapped to valid stream ID %u; "
478 "mapped ring = %p\n",
479 (unsigned long long) addr,
480 cur_stream,
481 mapped_ring);
482 return -EINVAL;
483 }
484 }
485 return 0;
486}
487#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
488
489/*
490 * Change an endpoint's internal structure so it supports stream IDs. The
491 * number of requested streams includes stream 0, which cannot be used by device
492 * drivers.
493 *
494 * The number of stream contexts in the stream context array may be bigger than
495 * the number of streams the driver wants to use. This is because the number of
496 * stream context array entries must be a power of two.
497 *
498 * We need a radix tree for mapping physical addresses of TRBs to which stream
499 * ID they belong to. We need to do this because the host controller won't tell
500 * us which stream ring the TRB came from. We could store the stream ID in an
501 * event data TRB, but that doesn't help us for the cancellation case, since the
502 * endpoint may stop before it reaches that event data TRB.
503 *
504 * The radix tree maps the upper portion of the TRB DMA address to a ring
505 * segment that has the same upper portion of DMA addresses. For example, say I
 506 * have segments of size 1KB that are always 64-byte aligned. A segment may
 507 * start at 0x10c91000 and end at 0x10c913f0. If I drop the lower 10 bits, the
 508 * key for that segment is 0x43244. I can use the DMA address of the TRB to
509 * pass the radix tree a key to get the right stream ID:
510 *
511 * 0x10c90fff >> 10 = 0x43243
512 * 0x10c912c0 >> 10 = 0x43244
513 * 0x10c91400 >> 10 = 0x43245
514 *
515 * Obviously, only those TRBs with DMA addresses that are within the segment
516 * will make the radix tree return the stream ID for that ring.
517 *
518 * Caveats for the radix tree:
519 *
 520 * The radix tree uses an unsigned long as its key. On 32-bit systems, an
521 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
522 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
523 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
524 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
525 * extended systems (where the DMA address can be bigger than 32-bits),
526 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
527 */
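
To make the shift arithmetic above concrete, here is a small userspace sketch of the key computation described in that comment; the allocation routine below inserts each stream ring into the radix tree under exactly this kind of key. SEGMENT_SHIFT is assumed to be 10 here to match the 1KB segments in the example; the driver's real constant comes from xhci.h.

	/* Editorial sketch, not part of the patch: reproduce the radix tree
	 * keys from the comment above by dropping the low SEGMENT_SHIFT bits
	 * of each DMA address.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define SEGMENT_SHIFT 10	/* assumed: log2 of the 1KB segment size */

	int main(void)
	{
		uint64_t seg_start = 0x10c91000ULL;
		uint64_t trbs[] = { 0x10c90fffULL, 0x10c912c0ULL, 0x10c91400ULL };
		unsigned int i;

		printf("segment key: 0x%llx\n",
		       (unsigned long long)(seg_start >> SEGMENT_SHIFT));
		for (i = 0; i < 3; i++) {
			uint64_t key = trbs[i] >> SEGMENT_SHIFT;

			printf("TRB 0x%llx -> key 0x%llx (%s)\n",
			       (unsigned long long)trbs[i],
			       (unsigned long long)key,
			       key == (seg_start >> SEGMENT_SHIFT) ?
					"in this segment" : "outside");
		}
		return 0;
	}

Running it prints keys 0x43243, 0x43244, and 0x43245 for the three addresses, matching the table in the comment: only addresses inside the segment share the segment's key.
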
528struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
529 unsigned int num_stream_ctxs,
530 unsigned int num_streams, gfp_t mem_flags)
531{
532 struct xhci_stream_info *stream_info;
533 u32 cur_stream;
534 struct xhci_ring *cur_ring;
535 unsigned long key;
536 u64 addr;
537 int ret;
538
539 xhci_dbg(xhci, "Allocating %u streams and %u "
540 "stream context array entries.\n",
541 num_streams, num_stream_ctxs);
542 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
543 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
544 return NULL;
545 }
546 xhci->cmd_ring_reserved_trbs++;
547
548 stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
549 if (!stream_info)
550 goto cleanup_trbs;
551
552 stream_info->num_streams = num_streams;
553 stream_info->num_stream_ctxs = num_stream_ctxs;
554
555 /* Initialize the array of virtual pointers to stream rings. */
556 stream_info->stream_rings = kzalloc(
557 sizeof(struct xhci_ring *)*num_streams,
558 mem_flags);
559 if (!stream_info->stream_rings)
560 goto cleanup_info;
561
562 /* Initialize the array of DMA addresses for stream rings for the HW. */
563 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
564 num_stream_ctxs, &stream_info->ctx_array_dma,
565 mem_flags);
566 if (!stream_info->stream_ctx_array)
567 goto cleanup_ctx;
568 memset(stream_info->stream_ctx_array, 0,
569 sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
570
571 /* Allocate everything needed to free the stream rings later */
572 stream_info->free_streams_command =
573 xhci_alloc_command(xhci, true, true, mem_flags);
574 if (!stream_info->free_streams_command)
575 goto cleanup_ctx;
576
577 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
578
579 /* Allocate rings for all the streams that the driver will use,
580 * and add their segment DMA addresses to the radix tree.
581 * Stream 0 is reserved.
582 */
583 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
584 stream_info->stream_rings[cur_stream] =
585 xhci_ring_alloc(xhci, 1, true, mem_flags);
586 cur_ring = stream_info->stream_rings[cur_stream];
587 if (!cur_ring)
588 goto cleanup_rings;
589 cur_ring->stream_id = cur_stream;
590 /* Set deq ptr, cycle bit, and stream context type */
591 addr = cur_ring->first_seg->dma |
592 SCT_FOR_CTX(SCT_PRI_TR) |
593 cur_ring->cycle_state;
594 stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
595 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
596 cur_stream, (unsigned long long) addr);
597
598 key = (unsigned long)
599 (cur_ring->first_seg->dma >> SEGMENT_SHIFT);
600 ret = radix_tree_insert(&stream_info->trb_address_map,
601 key, cur_ring);
602 if (ret) {
603 xhci_ring_free(xhci, cur_ring);
604 stream_info->stream_rings[cur_stream] = NULL;
605 goto cleanup_rings;
606 }
607 }
608 /* Leave the other unused stream ring pointers in the stream context
609 * array initialized to zero. This will cause the xHC to give us an
 610 * error if the device asks for a stream ID we haven't set up (if it
611 * was any other way, the host controller would assume the ring is
612 * "empty" and wait forever for data to be queued to that stream ID).
613 */
614#if XHCI_DEBUG
615 /* Do a little test on the radix tree to make sure it returns the
616 * correct values.
617 */
618 if (xhci_test_radix_tree(xhci, num_streams, stream_info))
619 goto cleanup_rings;
620#endif
621
622 return stream_info;
623
624cleanup_rings:
625 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
626 cur_ring = stream_info->stream_rings[cur_stream];
627 if (cur_ring) {
628 addr = cur_ring->first_seg->dma;
629 radix_tree_delete(&stream_info->trb_address_map,
630 addr >> SEGMENT_SHIFT);
631 xhci_ring_free(xhci, cur_ring);
632 stream_info->stream_rings[cur_stream] = NULL;
633 }
634 }
635 xhci_free_command(xhci, stream_info->free_streams_command);
636cleanup_ctx:
637 kfree(stream_info->stream_rings);
638cleanup_info:
639 kfree(stream_info);
640cleanup_trbs:
641 xhci->cmd_ring_reserved_trbs--;
642 return NULL;
643}
644/*
645 * Sets the MaxPStreams field and the Linear Stream Array field.
646 * Sets the dequeue pointer to the stream context array.
647 */
648void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
649 struct xhci_ep_ctx *ep_ctx,
650 struct xhci_stream_info *stream_info)
651{
652 u32 max_primary_streams;
653 /* MaxPStreams is the number of stream context array entries, not the
654 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
 655 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
656 */
657 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
658 xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
659 1 << (max_primary_streams + 1));
660 ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
661 ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
662 ep_ctx->ep_info |= EP_HAS_LSA;
663 ep_ctx->deq = stream_info->ctx_array_dma;
664}
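
A quick userspace check of the 2^(MaxPStreams + 1) encoding used above; fls() is reimplemented here as a stand-in for the kernel helper (1-based index of the most significant set bit), and the array sizes are only illustrative.

	#include <stdio.h>

	/* Stand-in for the kernel's fls(): returns the 1-based position of
	 * the most significant set bit, or 0 if no bit is set.
	 */
	static int fls(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned int sizes[] = { 4, 16, 256, 1024 };
		unsigned int i;

		for (i = 0; i < 4; i++) {
			unsigned int max_pstreams = fls(sizes[i]) - 2;

			/* 2^(MaxPStreams + 1) must give back the array size */
			printf("num_stream_ctxs %4u -> MaxPStreams %u -> %u entries\n",
			       sizes[i], max_pstreams, 1u << (max_pstreams + 1));
		}
		return 0;
	}

For a 256-entry stream context array, for example, fls(256) - 2 = 7 and 1 << (7 + 1) = 256, which is what the xhci_dbg() line above reports.
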
665
666/*
667 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
668 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
669 * not at the beginning of the ring).
670 */
671void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
672 struct xhci_ep_ctx *ep_ctx,
673 struct xhci_virt_ep *ep)
674{
675 dma_addr_t addr;
676 ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
677 ep_ctx->ep_info &= ~EP_HAS_LSA;
678 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
679 ep_ctx->deq = addr | ep->ring->cycle_state;
680}
681
 682/* Frees all stream contexts associated with the endpoint.
683 *
684 * Caller should fix the endpoint context streams fields.
685 */
686void xhci_free_stream_info(struct xhci_hcd *xhci,
687 struct xhci_stream_info *stream_info)
688{
689 int cur_stream;
690 struct xhci_ring *cur_ring;
691 dma_addr_t addr;
692
693 if (!stream_info)
694 return;
695
696 for (cur_stream = 1; cur_stream < stream_info->num_streams;
697 cur_stream++) {
698 cur_ring = stream_info->stream_rings[cur_stream];
699 if (cur_ring) {
700 addr = cur_ring->first_seg->dma;
701 radix_tree_delete(&stream_info->trb_address_map,
702 addr >> SEGMENT_SHIFT);
703 xhci_ring_free(xhci, cur_ring);
704 stream_info->stream_rings[cur_stream] = NULL;
705 }
706 }
707 xhci_free_command(xhci, stream_info->free_streams_command);
708 xhci->cmd_ring_reserved_trbs--;
709 if (stream_info->stream_ctx_array)
710 xhci_free_stream_ctx(xhci,
711 stream_info->num_stream_ctxs,
712 stream_info->stream_ctx_array,
713 stream_info->ctx_array_dma);
714
715 if (stream_info)
716 kfree(stream_info->stream_rings);
717 kfree(stream_info);
718}
719
720
721/***************** Device context manipulation *************************/
722
307static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, 723static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
308 struct xhci_virt_ep *ep) 724 struct xhci_virt_ep *ep)
309{ 725{
@@ -328,9 +744,13 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
328 if (!dev) 744 if (!dev)
329 return; 745 return;
330 746
331 for (i = 0; i < 31; ++i) 747 for (i = 0; i < 31; ++i) {
332 if (dev->eps[i].ring) 748 if (dev->eps[i].ring)
333 xhci_ring_free(xhci, dev->eps[i].ring); 749 xhci_ring_free(xhci, dev->eps[i].ring);
750 if (dev->eps[i].stream_info)
751 xhci_free_stream_info(xhci,
752 dev->eps[i].stream_info);
753 }
334 754
335 if (dev->ring_cache) { 755 if (dev->ring_cache) {
336 for (i = 0; i < dev->num_rings_cached; i++) 756 for (i = 0; i < dev->num_rings_cached; i++)
@@ -344,7 +764,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
344 xhci_free_container_ctx(xhci, dev->out_ctx); 764 xhci_free_container_ctx(xhci, dev->out_ctx);
345 765
346 kfree(xhci->devs[slot_id]); 766 kfree(xhci->devs[slot_id]);
347 xhci->devs[slot_id] = 0; 767 xhci->devs[slot_id] = NULL;
348} 768}
349 769
350int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 770int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
@@ -590,9 +1010,9 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
590static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, 1010static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
591 struct usb_host_endpoint *ep) 1011 struct usb_host_endpoint *ep)
592{ 1012{
593 if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp) 1013 if (udev->speed != USB_SPEED_SUPER)
594 return 0; 1014 return 0;
595 return ep->ss_ep_comp->desc.bmAttributes; 1015 return ep->ss_ep_comp.bmAttributes;
596} 1016}
597 1017
598static inline u32 xhci_get_endpoint_type(struct usb_device *udev, 1018static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
@@ -641,13 +1061,8 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
641 usb_endpoint_xfer_bulk(&ep->desc)) 1061 usb_endpoint_xfer_bulk(&ep->desc))
642 return 0; 1062 return 0;
643 1063
644 if (udev->speed == USB_SPEED_SUPER) { 1064 if (udev->speed == USB_SPEED_SUPER)
645 if (ep->ss_ep_comp) 1065 return ep->ss_ep_comp.wBytesPerInterval;
646 return ep->ss_ep_comp->desc.wBytesPerInterval;
647 xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
648 /* Assume no bursts, no multiple opportunities to send. */
649 return ep->desc.wMaxPacketSize;
650 }
651 1066
652 max_packet = ep->desc.wMaxPacketSize & 0x3ff; 1067 max_packet = ep->desc.wMaxPacketSize & 0x3ff;
653 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; 1068 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
@@ -655,6 +1070,9 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
655 return max_packet * (max_burst + 1); 1070 return max_packet * (max_burst + 1);
656} 1071}
657 1072
1073/* Set up an endpoint with one ring segment. Do not allocate stream rings.
1074 * Drivers will have to call usb_alloc_streams() to do that.
1075 */
658int xhci_endpoint_init(struct xhci_hcd *xhci, 1076int xhci_endpoint_init(struct xhci_hcd *xhci,
659 struct xhci_virt_device *virt_dev, 1077 struct xhci_virt_device *virt_dev,
660 struct usb_device *udev, 1078 struct usb_device *udev,
@@ -708,12 +1126,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
708 max_packet = ep->desc.wMaxPacketSize; 1126 max_packet = ep->desc.wMaxPacketSize;
709 ep_ctx->ep_info2 |= MAX_PACKET(max_packet); 1127 ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
710 /* dig out max burst from ep companion desc */ 1128 /* dig out max burst from ep companion desc */
711 if (!ep->ss_ep_comp) { 1129 max_packet = ep->ss_ep_comp.bMaxBurst;
712 xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); 1130 if (!max_packet)
713 max_packet = 0; 1131 xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
714 } else {
715 max_packet = ep->ss_ep_comp->desc.bMaxBurst;
716 }
717 ep_ctx->ep_info2 |= MAX_BURST(max_packet); 1132 ep_ctx->ep_info2 |= MAX_BURST(max_packet);
718 break; 1133 break;
719 case USB_SPEED_HIGH: 1134 case USB_SPEED_HIGH:
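
The new comment before xhci_endpoint_init() above notes that stream rings are only created when a class driver asks for them through usb_alloc_streams(). A hedged sketch of such a request follows; usb_alloc_streams() is the helper named in that comment, while the surrounding driver fragment, its parameters, and the error handling are illustrative assumptions rather than code from this series.

	#include <linux/slab.h>
	#include <linux/usb.h>

	/*
	 * Hypothetical class-driver fragment (not part of this patch set):
	 * request stream IDs on a group of SuperSpeed bulk endpoints before
	 * submitting URBs with urb->stream_id set.
	 */
	static int example_enable_streams(struct usb_interface *intf,
					  struct usb_host_endpoint **eps,
					  unsigned int num_eps,
					  unsigned int num_streams)
	{
		int ret;

		ret = usb_alloc_streams(intf, eps, num_eps, num_streams,
					GFP_KERNEL);
		if (ret < 0)
			return ret;	/* no stream support, or allocation failed */

		/* URBs queued to these endpoints now carry a non-zero
		 * urb->stream_id; stream 0 stays reserved, as the
		 * xhci_alloc_stream_info() comment explains.
		 */
		return 0;
	}
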
@@ -1003,6 +1418,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1003 xhci->device_pool = NULL; 1418 xhci->device_pool = NULL;
1004 xhci_dbg(xhci, "Freed device context pool\n"); 1419 xhci_dbg(xhci, "Freed device context pool\n");
1005 1420
1421 if (xhci->small_streams_pool)
1422 dma_pool_destroy(xhci->small_streams_pool);
1423 xhci->small_streams_pool = NULL;
1424 xhci_dbg(xhci, "Freed small stream array pool\n");
1425
1426 if (xhci->medium_streams_pool)
1427 dma_pool_destroy(xhci->medium_streams_pool);
1428 xhci->medium_streams_pool = NULL;
1429 xhci_dbg(xhci, "Freed medium stream array pool\n");
1430
1006 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); 1431 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
1007 if (xhci->dcbaa) 1432 if (xhci->dcbaa)
1008 pci_free_consistent(pdev, sizeof(*xhci->dcbaa), 1433 pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
@@ -1239,6 +1664,22 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1239 if (!xhci->segment_pool || !xhci->device_pool) 1664 if (!xhci->segment_pool || !xhci->device_pool)
1240 goto fail; 1665 goto fail;
1241 1666
1667 /* Linear stream context arrays don't have any boundary restrictions,
1668 * and only need to be 16-byte aligned.
1669 */
1670 xhci->small_streams_pool =
1671 dma_pool_create("xHCI 256 byte stream ctx arrays",
1672 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
1673 xhci->medium_streams_pool =
1674 dma_pool_create("xHCI 1KB stream ctx arrays",
1675 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
1676 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
1677 * will be allocated with pci_alloc_consistent()
1678 */
1679
1680 if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
1681 goto fail;
1682
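
The two pools created above cover stream context arrays of up to 256 bytes and 1KB (stream contexts are 16 bytes each, so 16 and 64 entries respectively); as the comment says, anything larger falls back to a coherent allocation. A sketch of how the xhci_alloc_stream_ctx() helper called earlier in this patch might choose between them; the dispatch below is an editorial assumption for illustration, not the patch's actual implementation.

	/* Sketch only: pick an allocator based on the array size in bytes.
	 * Note that pci_alloc_consistent() ignores mem_flags and allocates
	 * with GFP_ATOMIC.
	 */
	static struct xhci_stream_ctx *sketch_alloc_stream_ctx(
			struct xhci_hcd *xhci, unsigned int num_stream_ctxs,
			dma_addr_t *dma, gfp_t mem_flags)
	{
		struct pci_dev *pdev =
			to_pci_dev(xhci_to_hcd(xhci)->self.controller);
		size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

		if (size > MEDIUM_STREAM_ARRAY_SIZE)
			return pci_alloc_consistent(pdev, size, dma);
		if (size <= SMALL_STREAM_ARRAY_SIZE)
			return dma_pool_alloc(xhci->small_streams_pool,
					      mem_flags, dma);
		return dma_pool_alloc(xhci->medium_streams_pool,
				      mem_flags, dma);
	}
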
 1242 /* Set up the command ring to have one segment for now. */ 1683 /* Set up the command ring to have one segment for now. */
1243 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags); 1684 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
1244 if (!xhci->cmd_ring) 1685 if (!xhci->cmd_ring)
@@ -1330,7 +1771,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1330 */ 1771 */
1331 init_completion(&xhci->addr_dev); 1772 init_completion(&xhci->addr_dev);
1332 for (i = 0; i < MAX_HC_SLOTS; ++i) 1773 for (i = 0; i < MAX_HC_SLOTS; ++i)
1333 xhci->devs[i] = 0; 1774 xhci->devs[i] = NULL;
1334 1775
1335 if (scratchpad_alloc(xhci, flags)) 1776 if (scratchpad_alloc(xhci, flags))
1336 goto fail; 1777 goto fail;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 417d37aff8d7..edffd81fc253 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
54 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 54 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
55 int retval; 55 int retval;
56 56
57 hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1; 57 hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
58 58
59 xhci->cap_regs = hcd->regs; 59 xhci->cap_regs = hcd->regs;
60 xhci->op_regs = hcd->regs + 60 xhci->op_regs = hcd->regs +
@@ -132,6 +132,8 @@ static const struct hc_driver xhci_pci_hc_driver = {
132 .urb_dequeue = xhci_urb_dequeue, 132 .urb_dequeue = xhci_urb_dequeue,
133 .alloc_dev = xhci_alloc_dev, 133 .alloc_dev = xhci_alloc_dev,
134 .free_dev = xhci_free_dev, 134 .free_dev = xhci_free_dev,
135 .alloc_streams = xhci_alloc_streams,
136 .free_streams = xhci_free_streams,
135 .add_endpoint = xhci_add_endpoint, 137 .add_endpoint = xhci_add_endpoint,
136 .drop_endpoint = xhci_drop_endpoint, 138 .drop_endpoint = xhci_drop_endpoint,
137 .endpoint_reset = xhci_endpoint_reset, 139 .endpoint_reset = xhci_endpoint_reset,
@@ -175,12 +177,12 @@ static struct pci_driver xhci_pci_driver = {
175 .shutdown = usb_hcd_pci_shutdown, 177 .shutdown = usb_hcd_pci_shutdown,
176}; 178};
177 179
178int xhci_register_pci() 180int xhci_register_pci(void)
179{ 181{
180 return pci_register_driver(&xhci_pci_driver); 182 return pci_register_driver(&xhci_pci_driver);
181} 183}
182 184
183void xhci_unregister_pci() 185void xhci_unregister_pci(void)
184{ 186{
185 pci_unregister_driver(&xhci_pci_driver); 187 pci_unregister_driver(&xhci_pci_driver);
186} 188}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 85d7e8f2085e..36c858e5b529 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -112,6 +112,12 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
112 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); 112 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
113} 113}
114 114
115static inline int enqueue_is_link_trb(struct xhci_ring *ring)
116{
117 struct xhci_link_trb *link = &ring->enqueue->link;
118 return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
119}
120
115/* Updates trb to point to the next TRB in the ring, and updates seg if the next 121/* Updates trb to point to the next TRB in the ring, and updates seg if the next
116 * TRB is in a new segment. This does not skip over link TRBs, and it does not 122 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 117 * affect the ring dequeue or enqueue pointers. 123 * affect the ring dequeue or enqueue pointers.
@@ -193,20 +199,15 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
193 while (last_trb(xhci, ring, ring->enq_seg, next)) { 199 while (last_trb(xhci, ring, ring->enq_seg, next)) {
194 if (!consumer) { 200 if (!consumer) {
195 if (ring != xhci->event_ring) { 201 if (ring != xhci->event_ring) {
196 /* If we're not dealing with 0.95 hardware, 202 if (chain) {
197 * carry over the chain bit of the previous TRB 203 next->link.control |= TRB_CHAIN;
198 * (which may mean the chain bit is cleared). 204
199 */ 205 /* Give this link TRB to the hardware */
200 if (!xhci_link_trb_quirk(xhci)) { 206 wmb();
201 next->link.control &= ~TRB_CHAIN; 207 next->link.control ^= TRB_CYCLE;
202 next->link.control |= chain; 208 } else {
209 break;
203 } 210 }
204 /* Give this link TRB to the hardware */
205 wmb();
206 if (next->link.control & TRB_CYCLE)
207 next->link.control &= (u32) ~TRB_CYCLE;
208 else
209 next->link.control |= (u32) TRB_CYCLE;
210 } 211 }
211 /* Toggle the cycle bit after the last ring segment. */ 212 /* Toggle the cycle bit after the last ring segment. */
212 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { 213 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -242,10 +243,34 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
242 int i; 243 int i;
243 union xhci_trb *enq = ring->enqueue; 244 union xhci_trb *enq = ring->enqueue;
244 struct xhci_segment *enq_seg = ring->enq_seg; 245 struct xhci_segment *enq_seg = ring->enq_seg;
246 struct xhci_segment *cur_seg;
247 unsigned int left_on_ring;
248
249 /* If we are currently pointing to a link TRB, advance the
250 * enqueue pointer before checking for space */
251 while (last_trb(xhci, ring, enq_seg, enq)) {
252 enq_seg = enq_seg->next;
253 enq = enq_seg->trbs;
254 }
245 255
246 /* Check if ring is empty */ 256 /* Check if ring is empty */
247 if (enq == ring->dequeue) 257 if (enq == ring->dequeue) {
258 /* Can't use link trbs */
259 left_on_ring = TRBS_PER_SEGMENT - 1;
260 for (cur_seg = enq_seg->next; cur_seg != enq_seg;
261 cur_seg = cur_seg->next)
262 left_on_ring += TRBS_PER_SEGMENT - 1;
263
264 /* Always need one TRB free in the ring. */
265 left_on_ring -= 1;
266 if (num_trbs > left_on_ring) {
267 xhci_warn(xhci, "Not enough room on ring; "
268 "need %u TRBs, %u TRBs left\n",
269 num_trbs, left_on_ring);
270 return 0;
271 }
248 return 1; 272 return 1;
273 }
249 /* Make sure there's an extra empty TRB available */ 274 /* Make sure there's an extra empty TRB available */
250 for (i = 0; i <= num_trbs; ++i) { 275 for (i = 0; i <= num_trbs; ++i) {
251 if (enq == ring->dequeue) 276 if (enq == ring->dequeue)
@@ -295,7 +320,8 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
295 320
296static void ring_ep_doorbell(struct xhci_hcd *xhci, 321static void ring_ep_doorbell(struct xhci_hcd *xhci,
297 unsigned int slot_id, 322 unsigned int slot_id,
298 unsigned int ep_index) 323 unsigned int ep_index,
324 unsigned int stream_id)
299{ 325{
300 struct xhci_virt_ep *ep; 326 struct xhci_virt_ep *ep;
301 unsigned int ep_state; 327 unsigned int ep_state;
@@ -306,11 +332,16 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
306 ep_state = ep->ep_state; 332 ep_state = ep->ep_state;
307 /* Don't ring the doorbell for this endpoint if there are pending 333 /* Don't ring the doorbell for this endpoint if there are pending
 308 * cancellations because we don't want to interrupt processing. 334 * cancellations because we don't want to interrupt processing.
335 * We don't want to restart any stream rings if there's a set dequeue
336 * pointer command pending because the device can choose to start any
337 * stream once the endpoint is on the HW schedule.
338 * FIXME - check all the stream rings for pending cancellations.
309 */ 339 */
310 if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING) 340 if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
311 && !(ep_state & EP_HALTED)) { 341 && !(ep_state & EP_HALTED)) {
312 field = xhci_readl(xhci, db_addr) & DB_MASK; 342 field = xhci_readl(xhci, db_addr) & DB_MASK;
313 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); 343 field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
344 xhci_writel(xhci, field, db_addr);
314 /* Flush PCI posted writes - FIXME Matthew Wilcox says this 345 /* Flush PCI posted writes - FIXME Matthew Wilcox says this
315 * isn't time-critical and we shouldn't make the CPU wait for 346 * isn't time-critical and we shouldn't make the CPU wait for
316 * the flush. 347 * the flush.
@@ -319,6 +350,31 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
319 } 350 }
320} 351}
321 352
353/* Ring the doorbell for any rings with pending URBs */
354static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
355 unsigned int slot_id,
356 unsigned int ep_index)
357{
358 unsigned int stream_id;
359 struct xhci_virt_ep *ep;
360
361 ep = &xhci->devs[slot_id]->eps[ep_index];
362
363 /* A ring has pending URBs if its TD list is not empty */
364 if (!(ep->ep_state & EP_HAS_STREAMS)) {
365 if (!(list_empty(&ep->ring->td_list)))
366 ring_ep_doorbell(xhci, slot_id, ep_index, 0);
367 return;
368 }
369
370 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
371 stream_id++) {
372 struct xhci_stream_info *stream_info = ep->stream_info;
373 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
374 ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
375 }
376}
377
322/* 378/*
323 * Find the segment that trb is in. Start searching in start_seg. 379 * Find the segment that trb is in. Start searching in start_seg.
324 * If we must move past a segment that has a link TRB with a toggle cycle state 380 * If we must move past a segment that has a link TRB with a toggle cycle state
@@ -334,13 +390,14 @@ static struct xhci_segment *find_trb_seg(
334 while (cur_seg->trbs > trb || 390 while (cur_seg->trbs > trb ||
335 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { 391 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
336 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; 392 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
337 if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK && 393 if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
394 TRB_TYPE(TRB_LINK) &&
338 (generic_trb->field[3] & LINK_TOGGLE)) 395 (generic_trb->field[3] & LINK_TOGGLE))
339 *cycle_state = ~(*cycle_state) & 0x1; 396 *cycle_state = ~(*cycle_state) & 0x1;
340 cur_seg = cur_seg->next; 397 cur_seg = cur_seg->next;
341 if (cur_seg == start_seg) 398 if (cur_seg == start_seg)
342 /* Looped over the entire list. Oops! */ 399 /* Looped over the entire list. Oops! */
343 return 0; 400 return NULL;
344 } 401 }
345 return cur_seg; 402 return cur_seg;
346} 403}
@@ -361,14 +418,23 @@ static struct xhci_segment *find_trb_seg(
361 */ 418 */
362void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, 419void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
363 unsigned int slot_id, unsigned int ep_index, 420 unsigned int slot_id, unsigned int ep_index,
364 struct xhci_td *cur_td, struct xhci_dequeue_state *state) 421 unsigned int stream_id, struct xhci_td *cur_td,
422 struct xhci_dequeue_state *state)
365{ 423{
366 struct xhci_virt_device *dev = xhci->devs[slot_id]; 424 struct xhci_virt_device *dev = xhci->devs[slot_id];
367 struct xhci_ring *ep_ring = dev->eps[ep_index].ring; 425 struct xhci_ring *ep_ring;
368 struct xhci_generic_trb *trb; 426 struct xhci_generic_trb *trb;
369 struct xhci_ep_ctx *ep_ctx; 427 struct xhci_ep_ctx *ep_ctx;
370 dma_addr_t addr; 428 dma_addr_t addr;
371 429
430 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
431 ep_index, stream_id);
432 if (!ep_ring) {
433 xhci_warn(xhci, "WARN can't find new dequeue state "
434 "for invalid stream ID %u.\n",
435 stream_id);
436 return;
437 }
372 state->new_cycle_state = 0; 438 state->new_cycle_state = 0;
373 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); 439 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
374 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 440 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
@@ -390,7 +456,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
390 BUG(); 456 BUG();
391 457
392 trb = &state->new_deq_ptr->generic; 458 trb = &state->new_deq_ptr->generic;
393 if (TRB_TYPE(trb->field[3]) == TRB_LINK && 459 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
394 (trb->field[3] & LINK_TOGGLE)) 460 (trb->field[3] & LINK_TOGGLE))
395 state->new_cycle_state = ~(state->new_cycle_state) & 0x1; 461 state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
396 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); 462 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -448,11 +514,13 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
448} 514}
449 515
450static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 516static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
451 unsigned int ep_index, struct xhci_segment *deq_seg, 517 unsigned int ep_index, unsigned int stream_id,
518 struct xhci_segment *deq_seg,
452 union xhci_trb *deq_ptr, u32 cycle_state); 519 union xhci_trb *deq_ptr, u32 cycle_state);
453 520
454void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, 521void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
455 unsigned int slot_id, unsigned int ep_index, 522 unsigned int slot_id, unsigned int ep_index,
523 unsigned int stream_id,
456 struct xhci_dequeue_state *deq_state) 524 struct xhci_dequeue_state *deq_state)
457{ 525{
458 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 526 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
@@ -464,7 +532,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
464 deq_state->new_deq_ptr, 532 deq_state->new_deq_ptr,
465 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), 533 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
466 deq_state->new_cycle_state); 534 deq_state->new_cycle_state);
467 queue_set_tr_deq(xhci, slot_id, ep_index, 535 queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
468 deq_state->new_deq_seg, 536 deq_state->new_deq_seg,
469 deq_state->new_deq_ptr, 537 deq_state->new_deq_ptr,
470 (u32) deq_state->new_cycle_state); 538 (u32) deq_state->new_cycle_state);
@@ -523,7 +591,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
523 struct xhci_ring *ep_ring; 591 struct xhci_ring *ep_ring;
524 struct xhci_virt_ep *ep; 592 struct xhci_virt_ep *ep;
525 struct list_head *entry; 593 struct list_head *entry;
526 struct xhci_td *cur_td = 0; 594 struct xhci_td *cur_td = NULL;
527 struct xhci_td *last_unlinked_td; 595 struct xhci_td *last_unlinked_td;
528 596
529 struct xhci_dequeue_state deq_state; 597 struct xhci_dequeue_state deq_state;
@@ -532,11 +600,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
532 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 600 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
533 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 601 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
534 ep = &xhci->devs[slot_id]->eps[ep_index]; 602 ep = &xhci->devs[slot_id]->eps[ep_index];
535 ep_ring = ep->ring;
536 603
537 if (list_empty(&ep->cancelled_td_list)) { 604 if (list_empty(&ep->cancelled_td_list)) {
538 xhci_stop_watchdog_timer_in_irq(xhci, ep); 605 xhci_stop_watchdog_timer_in_irq(xhci, ep);
539 ring_ep_doorbell(xhci, slot_id, ep_index); 606 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
540 return; 607 return;
541 } 608 }
542 609
@@ -550,15 +617,36 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
550 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", 617 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
551 cur_td->first_trb, 618 cur_td->first_trb,
552 (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb)); 619 (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
620 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
621 if (!ep_ring) {
622 /* This shouldn't happen unless a driver is mucking
623 * with the stream ID after submission. This will
624 * leave the TD on the hardware ring, and the hardware
625 * will try to execute it, and may access a buffer
626 * that has already been freed. In the best case, the
627 * hardware will execute it, and the event handler will
628 * ignore the completion event for that TD, since it was
629 * removed from the td_list for that endpoint. In
630 * short, don't muck with the stream ID after
631 * submission.
632 */
633 xhci_warn(xhci, "WARN Cancelled URB %p "
634 "has invalid stream ID %u.\n",
635 cur_td->urb,
636 cur_td->urb->stream_id);
637 goto remove_finished_td;
638 }
553 /* 639 /*
554 * If we stopped on the TD we need to cancel, then we have to 640 * If we stopped on the TD we need to cancel, then we have to
555 * move the xHC endpoint ring dequeue pointer past this TD. 641 * move the xHC endpoint ring dequeue pointer past this TD.
556 */ 642 */
557 if (cur_td == ep->stopped_td) 643 if (cur_td == ep->stopped_td)
558 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, 644 xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
559 &deq_state); 645 cur_td->urb->stream_id,
646 cur_td, &deq_state);
560 else 647 else
561 td_to_noop(xhci, ep_ring, cur_td); 648 td_to_noop(xhci, ep_ring, cur_td);
649remove_finished_td:
562 /* 650 /*
563 * The event handler won't see a completion for this TD anymore, 651 * The event handler won't see a completion for this TD anymore,
564 * so remove it from the endpoint ring's TD list. Keep it in 652 * so remove it from the endpoint ring's TD list. Keep it in
@@ -572,12 +660,16 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
572 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ 660 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
573 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { 661 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
574 xhci_queue_new_dequeue_state(xhci, 662 xhci_queue_new_dequeue_state(xhci,
575 slot_id, ep_index, &deq_state); 663 slot_id, ep_index,
664 ep->stopped_td->urb->stream_id,
665 &deq_state);
576 xhci_ring_cmd_db(xhci); 666 xhci_ring_cmd_db(xhci);
577 } else { 667 } else {
578 /* Otherwise just ring the doorbell to restart the ring */ 668 /* Otherwise ring the doorbell(s) to restart queued transfers */
579 ring_ep_doorbell(xhci, slot_id, ep_index); 669 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
580 } 670 }
671 ep->stopped_td = NULL;
672 ep->stopped_trb = NULL;
581 673
582 /* 674 /*
583 * Drop the lock and complete the URBs in the cancelled TD list. 675 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -734,6 +826,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
734{ 826{
735 unsigned int slot_id; 827 unsigned int slot_id;
736 unsigned int ep_index; 828 unsigned int ep_index;
829 unsigned int stream_id;
737 struct xhci_ring *ep_ring; 830 struct xhci_ring *ep_ring;
738 struct xhci_virt_device *dev; 831 struct xhci_virt_device *dev;
739 struct xhci_ep_ctx *ep_ctx; 832 struct xhci_ep_ctx *ep_ctx;
@@ -741,8 +834,19 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
741 834
742 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 835 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
743 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 836 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
837 stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
744 dev = xhci->devs[slot_id]; 838 dev = xhci->devs[slot_id];
745 ep_ring = dev->eps[ep_index].ring; 839
840 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
841 if (!ep_ring) {
842 xhci_warn(xhci, "WARN Set TR deq ptr command for "
843 "freed stream ID %u\n",
844 stream_id);
845 /* XXX: Harmless??? */
846 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
847 return;
848 }
849
746 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 850 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
747 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); 851 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
748 852
@@ -787,7 +891,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
787 } 891 }
788 892
789 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 893 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
790 ring_ep_doorbell(xhci, slot_id, ep_index); 894 /* Restart any rings with pending URBs */
895 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
791} 896}
792 897
793static void handle_reset_ep_completion(struct xhci_hcd *xhci, 898static void handle_reset_ep_completion(struct xhci_hcd *xhci,
@@ -796,11 +901,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
796{ 901{
797 int slot_id; 902 int slot_id;
798 unsigned int ep_index; 903 unsigned int ep_index;
799 struct xhci_ring *ep_ring;
800 904
801 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 905 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
802 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 906 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
803 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
804 /* This command will only fail if the endpoint wasn't halted, 907 /* This command will only fail if the endpoint wasn't halted,
805 * but we don't care. 908 * but we don't care.
806 */ 909 */
@@ -818,9 +921,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
818 false); 921 false);
819 xhci_ring_cmd_db(xhci); 922 xhci_ring_cmd_db(xhci);
820 } else { 923 } else {
821 /* Clear our internal halted state and restart the ring */ 924 /* Clear our internal halted state and restart the ring(s) */
822 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; 925 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
823 ring_ep_doorbell(xhci, slot_id, ep_index); 926 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
824 } 927 }
825} 928}
826 929
@@ -897,16 +1000,19 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
897 * Configure endpoint commands can come from the USB core 1000 * Configure endpoint commands can come from the USB core
898 * configuration or alt setting changes, or because the HW 1001 * configuration or alt setting changes, or because the HW
899 * needed an extra configure endpoint command after a reset 1002 * needed an extra configure endpoint command after a reset
900 * endpoint command. In the latter case, the xHCI driver is 1003 * endpoint command or streams were being configured.
901 * not waiting on the configure endpoint command. 1004 * If the command was for a halted endpoint, the xHCI driver
1005 * is not waiting on the configure endpoint command.
902 */ 1006 */
903 ctrl_ctx = xhci_get_input_control_ctx(xhci, 1007 ctrl_ctx = xhci_get_input_control_ctx(xhci,
904 virt_dev->in_ctx); 1008 virt_dev->in_ctx);
905 /* Input ctx add_flags are the endpoint index plus one */ 1009 /* Input ctx add_flags are the endpoint index plus one */
906 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; 1010 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
907 /* A usb_set_interface() call directly after clearing a halted 1011 /* A usb_set_interface() call directly after clearing a halted
908 * condition may race on this quirky hardware. 1012 * condition may race on this quirky hardware. Not worth
909 * Not worth worrying about, since this is prototype hardware. 1013 * worrying about, since this is prototype hardware. Not sure
1014 * if this will work for streams, but streams support was
1015 * untested on this prototype.
910 */ 1016 */
911 if (xhci->quirks & XHCI_RESET_EP_QUIRK && 1017 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
912 ep_index != (unsigned int) -1 && 1018 ep_index != (unsigned int) -1 &&
@@ -919,10 +1025,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
919 xhci_dbg(xhci, "Completed config ep cmd - " 1025 xhci_dbg(xhci, "Completed config ep cmd - "
920 "last ep index = %d, state = %d\n", 1026 "last ep index = %d, state = %d\n",
921 ep_index, ep_state); 1027 ep_index, ep_state);
922 /* Clear our internal halted state and restart ring */ 1028 /* Clear internal halted state and restart ring(s) */
923 xhci->devs[slot_id]->eps[ep_index].ep_state &= 1029 xhci->devs[slot_id]->eps[ep_index].ep_state &=
924 ~EP_HALTED; 1030 ~EP_HALTED;
925 ring_ep_doorbell(xhci, slot_id, ep_index); 1031 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
926 break; 1032 break;
927 } 1033 }
928bandwidth_change: 1034bandwidth_change:
@@ -1018,7 +1124,7 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1018 1124
1019 do { 1125 do {
1020 if (start_dma == 0) 1126 if (start_dma == 0)
1021 return 0; 1127 return NULL;
1022 /* We may get an event for a Link TRB in the middle of a TD */ 1128 /* We may get an event for a Link TRB in the middle of a TD */
1023 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 1129 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1024 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]); 1130 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
@@ -1040,7 +1146,7 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1040 suspect_dma <= end_trb_dma)) 1146 suspect_dma <= end_trb_dma))
1041 return cur_seg; 1147 return cur_seg;
1042 } 1148 }
1043 return 0; 1149 return NULL;
1044 } else { 1150 } else {
1045 /* Might still be somewhere in this segment */ 1151 /* Might still be somewhere in this segment */
1046 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) 1152 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
@@ -1050,19 +1156,27 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1050 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 1156 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1051 } while (cur_seg != start_seg); 1157 } while (cur_seg != start_seg);
1052 1158
1053 return 0; 1159 return NULL;
1054} 1160}
1055 1161
1056static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, 1162static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1057 unsigned int slot_id, unsigned int ep_index, 1163 unsigned int slot_id, unsigned int ep_index,
1164 unsigned int stream_id,
1058 struct xhci_td *td, union xhci_trb *event_trb) 1165 struct xhci_td *td, union xhci_trb *event_trb)
1059{ 1166{
1060 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 1167 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1061 ep->ep_state |= EP_HALTED; 1168 ep->ep_state |= EP_HALTED;
1062 ep->stopped_td = td; 1169 ep->stopped_td = td;
1063 ep->stopped_trb = event_trb; 1170 ep->stopped_trb = event_trb;
1171 ep->stopped_stream = stream_id;
1172
1064 xhci_queue_reset_ep(xhci, slot_id, ep_index); 1173 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1065 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); 1174 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1175
1176 ep->stopped_td = NULL;
1177 ep->stopped_trb = NULL;
1178 ep->stopped_stream = 0;
1179
1066 xhci_ring_cmd_db(xhci); 1180 xhci_ring_cmd_db(xhci);
1067} 1181}
1068 1182
@@ -1119,11 +1233,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1119 struct xhci_ring *ep_ring; 1233 struct xhci_ring *ep_ring;
1120 unsigned int slot_id; 1234 unsigned int slot_id;
1121 int ep_index; 1235 int ep_index;
1122 struct xhci_td *td = 0; 1236 struct xhci_td *td = NULL;
1123 dma_addr_t event_dma; 1237 dma_addr_t event_dma;
1124 struct xhci_segment *event_seg; 1238 struct xhci_segment *event_seg;
1125 union xhci_trb *event_trb; 1239 union xhci_trb *event_trb;
1126 struct urb *urb = 0; 1240 struct urb *urb = NULL;
1127 int status = -EINPROGRESS; 1241 int status = -EINPROGRESS;
1128 struct xhci_ep_ctx *ep_ctx; 1242 struct xhci_ep_ctx *ep_ctx;
1129 u32 trb_comp_code; 1243 u32 trb_comp_code;
@@ -1140,10 +1254,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1140 ep_index = TRB_TO_EP_ID(event->flags) - 1; 1254 ep_index = TRB_TO_EP_ID(event->flags) - 1;
1141 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); 1255 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
1142 ep = &xdev->eps[ep_index]; 1256 ep = &xdev->eps[ep_index];
1143 ep_ring = ep->ring; 1257 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
1144 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1258 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1145 if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 1259 if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
1146 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); 1260 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
1261 "or incorrect stream ring\n");
1147 return -ENODEV; 1262 return -ENODEV;
1148 } 1263 }
1149 1264
@@ -1274,7 +1389,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1274 td->urb->actual_length = 0; 1389 td->urb->actual_length = 0;
1275 1390
1276 xhci_cleanup_halted_endpoint(xhci, 1391 xhci_cleanup_halted_endpoint(xhci,
1277 slot_id, ep_index, td, event_trb); 1392 slot_id, ep_index, 0, td, event_trb);
1278 goto td_cleanup; 1393 goto td_cleanup;
1279 } 1394 }
1280 /* 1395 /*
@@ -1390,8 +1505,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1390 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; 1505 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
1391 cur_trb != event_trb; 1506 cur_trb != event_trb;
1392 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { 1507 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1393 if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP && 1508 if ((cur_trb->generic.field[3] &
1394 TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK) 1509 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
1510 (cur_trb->generic.field[3] &
1511 TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
1395 td->urb->actual_length += 1512 td->urb->actual_length +=
1396 TRB_LEN(cur_trb->generic.field[2]); 1513 TRB_LEN(cur_trb->generic.field[2]);
1397 } 1514 }
@@ -1423,6 +1540,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1423 */ 1540 */
1424 ep->stopped_td = td; 1541 ep->stopped_td = td;
1425 ep->stopped_trb = event_trb; 1542 ep->stopped_trb = event_trb;
1543 ep->stopped_stream = ep_ring->stream_id;
1426 } else if (xhci_requires_manual_halt_cleanup(xhci, 1544 } else if (xhci_requires_manual_halt_cleanup(xhci,
1427 ep_ctx, trb_comp_code)) { 1545 ep_ctx, trb_comp_code)) {
1428 /* Other types of errors halt the endpoint, but the 1546 /* Other types of errors halt the endpoint, but the
@@ -1431,7 +1549,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1431 * xHCI hardware manually. 1549 * xHCI hardware manually.
1432 */ 1550 */
1433 xhci_cleanup_halted_endpoint(xhci, 1551 xhci_cleanup_halted_endpoint(xhci,
1434 slot_id, ep_index, td, event_trb); 1552 slot_id, ep_index, ep_ring->stream_id, td, event_trb);
1435 } else { 1553 } else {
1436 /* Update ring dequeue pointer */ 1554 /* Update ring dequeue pointer */
1437 while (ep_ring->dequeue != td->last_trb) 1555 while (ep_ring->dequeue != td->last_trb)
@@ -1621,20 +1739,66 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
1621 xhci_err(xhci, "ERROR no room on ep ring\n"); 1739 xhci_err(xhci, "ERROR no room on ep ring\n");
1622 return -ENOMEM; 1740 return -ENOMEM;
1623 } 1741 }
1742
1743 if (enqueue_is_link_trb(ep_ring)) {
1744 struct xhci_ring *ring = ep_ring;
1745 union xhci_trb *next;
1746
1747 xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
1748 next = ring->enqueue;
1749
1750 while (last_trb(xhci, ring, ring->enq_seg, next)) {
1751
1752 /* If we're not dealing with 0.95 hardware,
1753 * clear the chain bit.
1754 */
1755 if (!xhci_link_trb_quirk(xhci))
1756 next->link.control &= ~TRB_CHAIN;
1757 else
1758 next->link.control |= TRB_CHAIN;
1759
1760 wmb();
1761 next->link.control ^= (u32) TRB_CYCLE;
1762
1763 /* Toggle the cycle bit after the last ring segment. */
1764 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
1765 ring->cycle_state = (ring->cycle_state ? 0 : 1);
1766 if (!in_interrupt()) {
1767 xhci_dbg(xhci, "queue_trb: Toggle cycle "
1768 "state for ring %p = %i\n",
1769 ring, (unsigned int)ring->cycle_state);
1770 }
1771 }
1772 ring->enq_seg = ring->enq_seg->next;
1773 ring->enqueue = ring->enq_seg->trbs;
1774 next = ring->enqueue;
1775 }
1776 }
1777
1624 return 0; 1778 return 0;
1625} 1779}
1626 1780
1627static int prepare_transfer(struct xhci_hcd *xhci, 1781static int prepare_transfer(struct xhci_hcd *xhci,
1628 struct xhci_virt_device *xdev, 1782 struct xhci_virt_device *xdev,
1629 unsigned int ep_index, 1783 unsigned int ep_index,
1784 unsigned int stream_id,
1630 unsigned int num_trbs, 1785 unsigned int num_trbs,
1631 struct urb *urb, 1786 struct urb *urb,
1632 struct xhci_td **td, 1787 struct xhci_td **td,
1633 gfp_t mem_flags) 1788 gfp_t mem_flags)
1634{ 1789{
1635 int ret; 1790 int ret;
1791 struct xhci_ring *ep_ring;
1636 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1792 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1637 ret = prepare_ring(xhci, xdev->eps[ep_index].ring, 1793
1794 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
1795 if (!ep_ring) {
1796 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
1797 stream_id);
1798 return -EINVAL;
1799 }
1800
1801 ret = prepare_ring(xhci, ep_ring,
1638 ep_ctx->ep_info & EP_STATE_MASK, 1802 ep_ctx->ep_info & EP_STATE_MASK,
1639 num_trbs, mem_flags); 1803 num_trbs, mem_flags);
1640 if (ret) 1804 if (ret)
@@ -1654,9 +1818,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
1654 (*td)->urb = urb; 1818 (*td)->urb = urb;
1655 urb->hcpriv = (void *) (*td); 1819 urb->hcpriv = (void *) (*td);
1656 /* Add this TD to the tail of the endpoint ring's TD list */ 1820 /* Add this TD to the tail of the endpoint ring's TD list */
1657 list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list); 1821 list_add_tail(&(*td)->td_list, &ep_ring->td_list);
1658 (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg; 1822 (*td)->start_seg = ep_ring->enq_seg;
1659 (*td)->first_trb = xdev->eps[ep_index].ring->enqueue; 1823 (*td)->first_trb = ep_ring->enqueue;
1660 1824
1661 return 0; 1825 return 0;
1662} 1826}
@@ -1672,7 +1836,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
1672 1836
1673 xhci_dbg(xhci, "count sg list trbs: \n"); 1837 xhci_dbg(xhci, "count sg list trbs: \n");
1674 num_trbs = 0; 1838 num_trbs = 0;
1675 for_each_sg(urb->sg->sg, sg, num_sgs, i) { 1839 for_each_sg(urb->sg, sg, num_sgs, i) {
1676 unsigned int previous_total_trbs = num_trbs; 1840 unsigned int previous_total_trbs = num_trbs;
1677 unsigned int len = sg_dma_len(sg); 1841 unsigned int len = sg_dma_len(sg);
1678 1842
@@ -1722,7 +1886,7 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
1722} 1886}
1723 1887
1724static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 1888static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
1725 unsigned int ep_index, int start_cycle, 1889 unsigned int ep_index, unsigned int stream_id, int start_cycle,
1726 struct xhci_generic_trb *start_trb, struct xhci_td *td) 1890 struct xhci_generic_trb *start_trb, struct xhci_td *td)
1727{ 1891{
1728 /* 1892 /*
@@ -1731,7 +1895,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
1731 */ 1895 */
1732 wmb(); 1896 wmb();
1733 start_trb->field[3] |= start_cycle; 1897 start_trb->field[3] |= start_cycle;
1734 ring_ep_doorbell(xhci, slot_id, ep_index); 1898 ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
1735} 1899}
1736 1900
1737/* 1901/*
@@ -1805,12 +1969,16 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1805 struct xhci_generic_trb *start_trb; 1969 struct xhci_generic_trb *start_trb;
1806 int start_cycle; 1970 int start_cycle;
1807 1971
1808 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 1972 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1973 if (!ep_ring)
1974 return -EINVAL;
1975
1809 num_trbs = count_sg_trbs_needed(xhci, urb); 1976 num_trbs = count_sg_trbs_needed(xhci, urb);
1810 num_sgs = urb->num_sgs; 1977 num_sgs = urb->num_sgs;
1811 1978
1812 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 1979 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
1813 ep_index, num_trbs, urb, &td, mem_flags); 1980 ep_index, urb->stream_id,
1981 num_trbs, urb, &td, mem_flags);
1814 if (trb_buff_len < 0) 1982 if (trb_buff_len < 0)
1815 return trb_buff_len; 1983 return trb_buff_len;
1816 /* 1984 /*
@@ -1831,7 +1999,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1831 * the amount of memory allocated for this scatter-gather list. 1999 * the amount of memory allocated for this scatter-gather list.
1832 * 3. TRBs buffers can't cross 64KB boundaries. 2000 * 3. TRBs buffers can't cross 64KB boundaries.
1833 */ 2001 */
1834 sg = urb->sg->sg; 2002 sg = urb->sg;
1835 addr = (u64) sg_dma_address(sg); 2003 addr = (u64) sg_dma_address(sg);
1836 this_sg_len = sg_dma_len(sg); 2004 this_sg_len = sg_dma_len(sg);
1837 trb_buff_len = TRB_MAX_BUFF_SIZE - 2005 trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -1919,7 +2087,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1919 } while (running_total < urb->transfer_buffer_length); 2087 } while (running_total < urb->transfer_buffer_length);
1920 2088
1921 check_trb_math(urb, num_trbs, running_total); 2089 check_trb_math(urb, num_trbs, running_total);
1922 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 2090 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2091 start_cycle, start_trb, td);
1923 return 0; 2092 return 0;
1924} 2093}
1925 2094
@@ -1938,10 +2107,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1938 int running_total, trb_buff_len, ret; 2107 int running_total, trb_buff_len, ret;
1939 u64 addr; 2108 u64 addr;
1940 2109
1941 if (urb->sg) 2110 if (urb->num_sgs)
1942 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 2111 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
1943 2112
1944 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 2113 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2114 if (!ep_ring)
2115 return -EINVAL;
1945 2116
1946 num_trbs = 0; 2117 num_trbs = 0;
1947 /* How much data is (potentially) left before the 64KB boundary? */ 2118 /* How much data is (potentially) left before the 64KB boundary? */
@@ -1968,7 +2139,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1968 (unsigned long long)urb->transfer_dma, 2139 (unsigned long long)urb->transfer_dma,
1969 num_trbs); 2140 num_trbs);
1970 2141
1971 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 2142 ret = prepare_transfer(xhci, xhci->devs[slot_id],
2143 ep_index, urb->stream_id,
1972 num_trbs, urb, &td, mem_flags); 2144 num_trbs, urb, &td, mem_flags);
1973 if (ret < 0) 2145 if (ret < 0)
1974 return ret; 2146 return ret;
@@ -2038,7 +2210,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2038 } while (running_total < urb->transfer_buffer_length); 2210 } while (running_total < urb->transfer_buffer_length);
2039 2211
2040 check_trb_math(urb, num_trbs, running_total); 2212 check_trb_math(urb, num_trbs, running_total);
2041 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 2213 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2214 start_cycle, start_trb, td);
2042 return 0; 2215 return 0;
2043} 2216}
2044 2217
@@ -2055,7 +2228,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2055 u32 field, length_field; 2228 u32 field, length_field;
2056 struct xhci_td *td; 2229 struct xhci_td *td;
2057 2230
2058 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 2231 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2232 if (!ep_ring)
2233 return -EINVAL;
2059 2234
2060 /* 2235 /*
2061 * Need to copy setup packet into setup TRB, so we can't use the setup 2236 * Need to copy setup packet into setup TRB, so we can't use the setup
@@ -2076,8 +2251,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2076 */ 2251 */
2077 if (urb->transfer_buffer_length > 0) 2252 if (urb->transfer_buffer_length > 0)
2078 num_trbs++; 2253 num_trbs++;
2079 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs, 2254 ret = prepare_transfer(xhci, xhci->devs[slot_id],
2080 urb, &td, mem_flags); 2255 ep_index, urb->stream_id,
2256 num_trbs, urb, &td, mem_flags);
2081 if (ret < 0) 2257 if (ret < 0)
2082 return ret; 2258 return ret;
2083 2259
@@ -2132,7 +2308,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2132 /* Event on completion */ 2308 /* Event on completion */
2133 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 2309 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
2134 2310
2135 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 2311 giveback_first_trb(xhci, slot_id, ep_index, 0,
2312 start_cycle, start_trb, td);
2136 return 0; 2313 return 0;
2137} 2314}
2138 2315
@@ -2244,12 +2421,14 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
2244 * This should not be used for endpoints that have streams enabled. 2421 * This should not be used for endpoints that have streams enabled.
2245 */ 2422 */
2246static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 2423static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
2247 unsigned int ep_index, struct xhci_segment *deq_seg, 2424 unsigned int ep_index, unsigned int stream_id,
2425 struct xhci_segment *deq_seg,
2248 union xhci_trb *deq_ptr, u32 cycle_state) 2426 union xhci_trb *deq_ptr, u32 cycle_state)
2249{ 2427{
2250 dma_addr_t addr; 2428 dma_addr_t addr;
2251 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 2429 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
2252 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 2430 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
2431 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
2253 u32 type = TRB_TYPE(TRB_SET_DEQ); 2432 u32 type = TRB_TYPE(TRB_SET_DEQ);
2254 2433
2255 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); 2434 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
@@ -2260,7 +2439,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
2260 return 0; 2439 return 0;
2261 } 2440 }
2262 return queue_command(xhci, lower_32_bits(addr) | cycle_state, 2441 return queue_command(xhci, lower_32_bits(addr) | cycle_state,
2263 upper_32_bits(addr), 0, 2442 upper_32_bits(addr), trb_stream_id,
2264 trb_slot_id | trb_ep_index | type, false); 2443 trb_slot_id | trb_ep_index | type, false);
2265} 2444}
2266 2445
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7e4277273908..40e0a0c221b8 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/log2.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/moduleparam.h> 26#include <linux/moduleparam.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
@@ -352,11 +353,7 @@ void xhci_event_ring_work(unsigned long arg)
352 if (!xhci->devs[i]) 353 if (!xhci->devs[i])
353 continue; 354 continue;
354 for (j = 0; j < 31; ++j) { 355 for (j = 0; j < 31; ++j) {
355 struct xhci_ring *ring = xhci->devs[i]->eps[j].ring; 356 xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
356 if (!ring)
357 continue;
358 xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
359 xhci_debug_segment(xhci, ring->deq_seg);
360 } 357 }
361 } 358 }
362 359
@@ -726,8 +723,21 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
726 spin_lock_irqsave(&xhci->lock, flags); 723 spin_lock_irqsave(&xhci->lock, flags);
727 if (xhci->xhc_state & XHCI_STATE_DYING) 724 if (xhci->xhc_state & XHCI_STATE_DYING)
728 goto dying; 725 goto dying;
729 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 726 if (xhci->devs[slot_id]->eps[ep_index].ep_state &
730 slot_id, ep_index); 727 EP_GETTING_STREAMS) {
728 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
729 "is transitioning to using streams.\n");
730 ret = -EINVAL;
731 } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
732 EP_GETTING_NO_STREAMS) {
733 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
734 "is transitioning to "
735 "not having streams.\n");
736 ret = -EINVAL;
737 } else {
738 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
739 slot_id, ep_index);
740 }
731 spin_unlock_irqrestore(&xhci->lock, flags); 741 spin_unlock_irqrestore(&xhci->lock, flags);
732 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { 742 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
733 spin_lock_irqsave(&xhci->lock, flags); 743 spin_lock_irqsave(&xhci->lock, flags);
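The new checks above gate bulk submission on two transitional endpoint states. A minimal user-space sketch of that gating, using the EP_GETTING_STREAMS / EP_GETTING_NO_STREAMS bit values added to xhci.h later in this patch (the helper name is made up for illustration):

    #include <stdio.h>

    /* ep_state bits, as added to struct xhci_virt_ep in the xhci.h hunk below */
    #define EP_GETTING_STREAMS      (1 << 3)
    #define EP_HAS_STREAMS          (1 << 4)
    #define EP_GETTING_NO_STREAMS   (1 << 5)

    /* Illustrative stand-in for the check xhci_urb_enqueue() now performs */
    static int can_queue_bulk_urb(unsigned int ep_state)
    {
            if (ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS))
                    return 0;       /* rings are being set up or torn down */
            return 1;               /* safe to pick a ring and queue TRBs */
    }

    int main(void)
    {
            printf("%d %d %d\n", can_queue_bulk_urb(0),
                   can_queue_bulk_urb(EP_GETTING_STREAMS),
                   can_queue_bulk_urb(EP_HAS_STREAMS));
            return 0;
    }

An endpoint that merely has streams enabled (EP_HAS_STREAMS) still accepts URBs; only the two transitional states reject them.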
@@ -825,7 +835,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
825 xhci_debug_ring(xhci, xhci->event_ring); 835 xhci_debug_ring(xhci, xhci->event_ring);
826 ep_index = xhci_get_endpoint_index(&urb->ep->desc); 836 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
827 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; 837 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
828 ep_ring = ep->ring; 838 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
839 if (!ep_ring) {
840 ret = -EINVAL;
841 goto done;
842 }
843
829 xhci_dbg(xhci, "Endpoint ring:\n"); 844 xhci_dbg(xhci, "Endpoint ring:\n");
830 xhci_debug_ring(xhci, ep_ring); 845 xhci_debug_ring(xhci, ep_ring);
831 td = (struct xhci_td *) urb->hcpriv; 846 td = (struct xhci_td *) urb->hcpriv;
@@ -1369,7 +1384,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1369 * or it will attempt to resend it on the next doorbell ring. 1384 * or it will attempt to resend it on the next doorbell ring.
1370 */ 1385 */
1371 xhci_find_new_dequeue_state(xhci, udev->slot_id, 1386 xhci_find_new_dequeue_state(xhci, udev->slot_id,
1372 ep_index, ep->stopped_td, 1387 ep_index, ep->stopped_stream, ep->stopped_td,
1373 &deq_state); 1388 &deq_state);
1374 1389
1375 /* HW with the reset endpoint quirk will use the saved dequeue state to 1390 /* HW with the reset endpoint quirk will use the saved dequeue state to
@@ -1378,10 +1393,12 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1378 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 1393 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
1379 xhci_dbg(xhci, "Queueing new dequeue state\n"); 1394 xhci_dbg(xhci, "Queueing new dequeue state\n");
1380 xhci_queue_new_dequeue_state(xhci, udev->slot_id, 1395 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
1381 ep_index, &deq_state); 1396 ep_index, ep->stopped_stream, &deq_state);
1382 } else { 1397 } else {
1383 /* Better hope no one uses the input context between now and the 1398 /* Better hope no one uses the input context between now and the
1384 * reset endpoint completion! 1399 * reset endpoint completion!
1400 * XXX: No idea how this hardware will react when stream rings
1401 * are enabled.
1385 */ 1402 */
1386 xhci_dbg(xhci, "Setting up input context for " 1403 xhci_dbg(xhci, "Setting up input context for "
1387 "configure endpoint command\n"); 1404 "configure endpoint command\n");
@@ -1438,12 +1455,391 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1438 kfree(virt_ep->stopped_td); 1455 kfree(virt_ep->stopped_td);
1439 xhci_ring_cmd_db(xhci); 1456 xhci_ring_cmd_db(xhci);
1440 } 1457 }
1458 virt_ep->stopped_td = NULL;
1459 virt_ep->stopped_trb = NULL;
1460 virt_ep->stopped_stream = 0;
1441 spin_unlock_irqrestore(&xhci->lock, flags); 1461 spin_unlock_irqrestore(&xhci->lock, flags);
1442 1462
1443 if (ret) 1463 if (ret)
1444 xhci_warn(xhci, "FIXME allocate a new ring segment\n"); 1464 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
1445} 1465}
1446 1466
1467static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
1468 struct usb_device *udev, struct usb_host_endpoint *ep,
1469 unsigned int slot_id)
1470{
1471 int ret;
1472 unsigned int ep_index;
1473 unsigned int ep_state;
1474
1475 if (!ep)
1476 return -EINVAL;
1477 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
1478 if (ret <= 0)
1479 return -EINVAL;
1480 if (ep->ss_ep_comp.bmAttributes == 0) {
1481 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
1482 " descriptor for ep 0x%x does not support streams\n",
1483 ep->desc.bEndpointAddress);
1484 return -EINVAL;
1485 }
1486
1487 ep_index = xhci_get_endpoint_index(&ep->desc);
1488 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1489 if (ep_state & EP_HAS_STREAMS ||
1490 ep_state & EP_GETTING_STREAMS) {
1491 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
1492 "already has streams set up.\n",
1493 ep->desc.bEndpointAddress);
1494 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
1495 "dynamic stream context array reallocation.\n");
1496 return -EINVAL;
1497 }
1498 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
1499 xhci_warn(xhci, "Cannot set up streams for SuperSpeed bulk "
1500 "endpoint 0x%x; URBs are pending.\n",
1501 ep->desc.bEndpointAddress);
1502 return -EINVAL;
1503 }
1504 return 0;
1505}
1506
1507static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
1508 unsigned int *num_streams, unsigned int *num_stream_ctxs)
1509{
1510 unsigned int max_streams;
1511
1512 /* The stream context array size must be a power of two */
1513 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
1514 /*
1515 * Find out how many primary stream array entries the host controller
1516 * supports. Later we may use secondary stream arrays (similar to 2nd
1517 * level page entries), but that's an optional feature for xHCI host
1518 * controllers. xHCs must support at least 4 stream IDs.
1519 */
1520 max_streams = HCC_MAX_PSA(xhci->hcc_params);
1521 if (*num_stream_ctxs > max_streams) {
1522 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
1523 max_streams);
1524 *num_stream_ctxs = max_streams;
1525 *num_streams = max_streams;
1526 }
1527}
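A stand-alone sketch of the sizing math above: the requested count is rounded up to a power of two and then clamped to the controller's MaxPSA limit. HCC_MAX_PSA() is copied from the xhci.h hunk below; roundup_pow_of_two() is open-coded so the example builds in user space.

    #include <stdio.h>

    /* From the xhci.h hunk below: Max Primary Stream Array size, 2^(n+1) */
    #define HCC_MAX_PSA(p)  (1 << ((((p) >> 12) & 0xf) + 1))

    /* Open-coded stand-in for the kernel's roundup_pow_of_two() */
    static unsigned int roundup_pow_of_two(unsigned int n)
    {
            unsigned int r = 1;

            while (r < n)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            unsigned int hcc_params = 0x2 << 12;    /* MaxPSA field = 2 -> 8 entries */
            unsigned int num_streams = 5;           /* 4 usable IDs + stream 0 */
            unsigned int num_stream_ctxs = roundup_pow_of_two(num_streams);
            unsigned int max_streams = HCC_MAX_PSA(hcc_params);

            if (num_stream_ctxs > max_streams) {
                    num_stream_ctxs = max_streams;
                    num_streams = max_streams;
            }
            printf("%u ctx entries for %u stream IDs\n",
                   num_stream_ctxs, num_streams);
            return 0;
    }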
1528
1529/* Returns an error code if one of the endpoints already has streams.
1530 * This does not change any data structures, it only checks and gathers
1531 * information.
1532 */
1533static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
1534 struct usb_device *udev,
1535 struct usb_host_endpoint **eps, unsigned int num_eps,
1536 unsigned int *num_streams, u32 *changed_ep_bitmask)
1537{
1538 unsigned int max_streams;
1539 unsigned int endpoint_flag;
1540 int i;
1541 int ret;
1542
1543 for (i = 0; i < num_eps; i++) {
1544 ret = xhci_check_streams_endpoint(xhci, udev,
1545 eps[i], udev->slot_id);
1546 if (ret < 0)
1547 return ret;
1548
1549 max_streams = USB_SS_MAX_STREAMS(
1550 eps[i]->ss_ep_comp.bmAttributes);
1551 if (max_streams < (*num_streams - 1)) {
1552 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
1553 eps[i]->desc.bEndpointAddress,
1554 max_streams);
1555 *num_streams = max_streams+1;
1556 }
1557
1558 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
1559 if (*changed_ep_bitmask & endpoint_flag)
1560 return -EINVAL;
1561 *changed_ep_bitmask |= endpoint_flag;
1562 }
1563 return 0;
1564}
1565
1566static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
1567 struct usb_device *udev,
1568 struct usb_host_endpoint **eps, unsigned int num_eps)
1569{
1570 u32 changed_ep_bitmask = 0;
1571 unsigned int slot_id;
1572 unsigned int ep_index;
1573 unsigned int ep_state;
1574 int i;
1575
1576 slot_id = udev->slot_id;
1577 if (!xhci->devs[slot_id])
1578 return 0;
1579
1580 for (i = 0; i < num_eps; i++) {
1581 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1582 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1583 /* Are streams already being freed for the endpoint? */
1584 if (ep_state & EP_GETTING_NO_STREAMS) {
1585 xhci_warn(xhci, "WARN Can't disable streams for "
1586 "endpoint 0x%x\n, "
1587 "streams are being disabled already.",
1588 eps[i]->desc.bEndpointAddress);
1589 return 0;
1590 }
1591 /* Are there actually any streams to free? */
1592 if (!(ep_state & EP_HAS_STREAMS) &&
1593 !(ep_state & EP_GETTING_STREAMS)) {
1594 xhci_warn(xhci, "WARN Can't disable streams for "
1595 "endpoint 0x%x\n, "
1596 "streams are already disabled!",
1597 eps[i]->desc.bEndpointAddress);
1598 xhci_warn(xhci, "WARN xhci_free_streams() called "
1599 "with non-streams endpoint\n");
1600 return 0;
1601 }
1602 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
1603 }
1604 return changed_ep_bitmask;
1605}
1606
1607/*
1608 * The USB device drivers use this function (through the HCD interface in USB
1609 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
1610 * coordinate mass storage command queueing across multiple endpoints (basically
1611 * a stream ID == a task ID).
1612 *
1613 * Setting up streams involves allocating the same size stream context array
1614 * for each endpoint and issuing a configure endpoint command for all endpoints.
1615 *
1616 * Don't allow the call to succeed if one endpoint only supports one stream
1617 * (which means it doesn't support streams at all).
1618 *
1619 * Drivers may get less stream IDs than they asked for, if the host controller
1620 * hardware or endpoints claim they can't support the number of requested
1621 * stream IDs.
1622 */
1623int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
1624 struct usb_host_endpoint **eps, unsigned int num_eps,
1625 unsigned int num_streams, gfp_t mem_flags)
1626{
1627 int i, ret;
1628 struct xhci_hcd *xhci;
1629 struct xhci_virt_device *vdev;
1630 struct xhci_command *config_cmd;
1631 unsigned int ep_index;
1632 unsigned int num_stream_ctxs;
1633 unsigned long flags;
1634 u32 changed_ep_bitmask = 0;
1635
1636 if (!eps)
1637 return -EINVAL;
1638
1639 /* Add one to the number of streams requested to account for
1640 * stream 0, which is reserved for xHCI usage.
1641 */
1642 num_streams += 1;
1643 xhci = hcd_to_xhci(hcd);
1644 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
1645 num_streams);
1646
1647 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
1648 if (!config_cmd) {
1649 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
1650 return -ENOMEM;
1651 }
1652
1653 /* Check to make sure all endpoints are not already configured for
1654 * streams. While we're at it, find the maximum number of streams that
1655 * all the endpoints will support and check for duplicate endpoints.
1656 */
1657 spin_lock_irqsave(&xhci->lock, flags);
1658 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
1659 num_eps, &num_streams, &changed_ep_bitmask);
1660 if (ret < 0) {
1661 xhci_free_command(xhci, config_cmd);
1662 spin_unlock_irqrestore(&xhci->lock, flags);
1663 return ret;
1664 }
1665 if (num_streams <= 1) {
1666 xhci_warn(xhci, "WARN: endpoints can't handle "
1667 "more than one stream.\n");
1668 xhci_free_command(xhci, config_cmd);
1669 spin_unlock_irqrestore(&xhci->lock, flags);
1670 return -EINVAL;
1671 }
1672 vdev = xhci->devs[udev->slot_id];
1673 /* Mark each endpoint as being in transition, so
1674 * xhci_urb_enqueue() will reject all URBs.
1675 */
1676 for (i = 0; i < num_eps; i++) {
1677 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1678 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
1679 }
1680 spin_unlock_irqrestore(&xhci->lock, flags);
1681
1682 /* Setup internal data structures and allocate HW data structures for
1683 * streams (but don't install the HW structures in the input context
1684 * until we're sure all memory allocation succeeded).
1685 */
1686 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
1687 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
1688 num_stream_ctxs, num_streams);
1689
1690 for (i = 0; i < num_eps; i++) {
1691 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1692 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
1693 num_stream_ctxs,
1694 num_streams, mem_flags);
1695 if (!vdev->eps[ep_index].stream_info)
1696 goto cleanup;
1697 /* Set maxPstreams in endpoint context and update deq ptr to
1698 * point to stream context array. FIXME
1699 */
1700 }
1701
1702 /* Set up the input context for a configure endpoint command. */
1703 for (i = 0; i < num_eps; i++) {
1704 struct xhci_ep_ctx *ep_ctx;
1705
1706 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1707 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
1708
1709 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
1710 vdev->out_ctx, ep_index);
1711 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
1712 vdev->eps[ep_index].stream_info);
1713 }
1714 /* Tell the HW to drop its old copy of the endpoint context info
1715 * and add the updated copy from the input context.
1716 */
1717 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
1718 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
1719
1720 /* Issue and wait for the configure endpoint command */
1721 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
1722 false, false);
1723
1724 /* xHC rejected the configure endpoint command for some reason, so we
1725 * leave the old ring intact and free our internal streams data
1726 * structure.
1727 */
1728 if (ret < 0)
1729 goto cleanup;
1730
1731 spin_lock_irqsave(&xhci->lock, flags);
1732 for (i = 0; i < num_eps; i++) {
1733 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1734 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
1735 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
1736 udev->slot_id, ep_index);
1737 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
1738 }
1739 xhci_free_command(xhci, config_cmd);
1740 spin_unlock_irqrestore(&xhci->lock, flags);
1741
1742 /* Subtract 1 for stream 0, which drivers can't use */
1743 return num_streams - 1;
1744
1745cleanup:
1746 /* If it didn't work, free the streams! */
1747 for (i = 0; i < num_eps; i++) {
1748 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1749 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
1750 vdev->eps[ep_index].stream_info = NULL;
1751 /* FIXME Unset maxPstreams in endpoint context and
1752 * update deq ptr to point to the endpoint's normal ring.
1753 */
1754 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
1755 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
1756 xhci_endpoint_zero(xhci, vdev, eps[i]);
1757 }
1758 xhci_free_command(xhci, config_cmd);
1759 return -ENOMEM;
1760}
1761
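For context, a class driver would normally reach xhci_alloc_streams() through the USB core wrapper rather than the HCD op directly. A rough driver-side sketch, assuming the usb_alloc_streams() interface exported by the USB core in this same series (the function and variable names here are illustrative):

    #include <linux/usb.h>

    /*
     * Hypothetical sketch: "intf" and the bulk endpoints in eps[] would come
     * from the class driver's probe routine.
     */
    static int example_enable_streams(struct usb_interface *intf,
                    struct usb_host_endpoint **eps, unsigned int num_eps)
    {
            int streams;

            /* Ask for 16 stream IDs; the HCD may grant fewer, or fail outright */
            streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_KERNEL);
            if (streams <= 0)
                    return streams;         /* 0 or negative: streams unusable */

            /* From here on, urb->stream_id must be in the range 1..streams */
            return streams;
    }

On teardown the driver would release the rings with the matching usb_free_streams() call, which lands in xhci_free_streams() below.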
1762/* Transition the endpoint from using streams to being a "normal" endpoint
1763 * without streams.
1764 *
1765 * Modify the endpoint context state, submit a configure endpoint command,
1766 * and free all endpoint rings for streams if that completes successfully.
1767 */
1768int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
1769 struct usb_host_endpoint **eps, unsigned int num_eps,
1770 gfp_t mem_flags)
1771{
1772 int i, ret;
1773 struct xhci_hcd *xhci;
1774 struct xhci_virt_device *vdev;
1775 struct xhci_command *command;
1776 unsigned int ep_index;
1777 unsigned long flags;
1778 u32 changed_ep_bitmask;
1779
1780 xhci = hcd_to_xhci(hcd);
1781 vdev = xhci->devs[udev->slot_id];
1782
1783 /* Set up a configure endpoint command to remove the stream rings */
1784 spin_lock_irqsave(&xhci->lock, flags);
1785 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
1786 udev, eps, num_eps);
1787 if (changed_ep_bitmask == 0) {
1788 spin_unlock_irqrestore(&xhci->lock, flags);
1789 return -EINVAL;
1790 }
1791
1792 /* Use the xhci_command structure from the first endpoint. We may have
1793 * allocated too many, but the driver may call xhci_free_streams() for
1794 * each endpoint it grouped into one call to xhci_alloc_streams().
1795 */
1796 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
1797 command = vdev->eps[ep_index].stream_info->free_streams_command;
1798 for (i = 0; i < num_eps; i++) {
1799 struct xhci_ep_ctx *ep_ctx;
1800
1801 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1802 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1803 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
1804 EP_GETTING_NO_STREAMS;
1805
1806 xhci_endpoint_copy(xhci, command->in_ctx,
1807 vdev->out_ctx, ep_index);
1808 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
1809 &vdev->eps[ep_index]);
1810 }
1811 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
1812 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
1813 spin_unlock_irqrestore(&xhci->lock, flags);
1814
1815 /* Issue and wait for the configure endpoint command,
1816 * which must succeed.
1817 */
1818 ret = xhci_configure_endpoint(xhci, udev, command,
1819 false, true);
1820
1821 /* xHC rejected the configure endpoint command for some reason, so we
1822 * leave the stream rings intact.
1823 */
1824 if (ret < 0)
1825 return ret;
1826
1827 spin_lock_irqsave(&xhci->lock, flags);
1828 for (i = 0; i < num_eps; i++) {
1829 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1830 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
1831 vdev->eps[ep_index].stream_info = NULL;
1832 /* FIXME Unset maxPstreams in endpoint context and
1833 * update deq ptr to point to the endpoint's normal ring.
1834 */
1835 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
1836 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
1837 }
1838 spin_unlock_irqrestore(&xhci->lock, flags);
1839
1840 return 0;
1841}
1842
1447/* 1843/*
1448 * This submits a Reset Device Command, which will set the device state to 0, 1844 * This submits a Reset Device Command, which will set the device state to 0,
1449 * set the device address to 0, and disable all the endpoints except the default 1845 * set the device address to 0, and disable all the endpoints except the default
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ea389e9a4931..dada2fb59261 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -26,8 +26,8 @@
26#include <linux/usb.h> 26#include <linux/usb.h>
27#include <linux/timer.h> 27#include <linux/timer.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/usb/hcd.h>
29 30
30#include "../core/hcd.h"
31/* Code sharing between pci-quirks and xhci hcd */ 31/* Code sharing between pci-quirks and xhci hcd */
32#include "xhci-ext-caps.h" 32#include "xhci-ext-caps.h"
33 33
@@ -117,7 +117,7 @@ struct xhci_cap_regs {
117/* true: no secondary Stream ID Support */ 117/* true: no secondary Stream ID Support */
118#define HCC_NSS(p) ((p) & (1 << 7)) 118#define HCC_NSS(p) ((p) & (1 << 7))
119/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */ 119/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
120#define HCC_MAX_PSA (1 << ((((p) >> 12) & 0xf) + 1)) 120#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
121/* Extended Capabilities pointer from PCI base - section 5.3.6 */ 121/* Extended Capabilities pointer from PCI base - section 5.3.6 */
122#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p) 122#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
123 123
@@ -444,6 +444,7 @@ struct xhci_doorbell_array {
444 444
445/* Endpoint Target - bits 0:7 */ 445/* Endpoint Target - bits 0:7 */
446#define EPI_TO_DB(p) (((p) + 1) & 0xff) 446#define EPI_TO_DB(p) (((p) + 1) & 0xff)
447#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
447 448
448 449
449/** 450/**
@@ -585,6 +586,10 @@ struct xhci_ep_ctx {
585/* Interval - period between requests to an endpoint - 125u increments. */ 586/* Interval - period between requests to an endpoint - 125u increments. */
586#define EP_INTERVAL(p) ((p & 0xff) << 16) 587#define EP_INTERVAL(p) ((p & 0xff) << 16)
587#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) 588#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
589#define EP_MAXPSTREAMS_MASK (0x1f << 10)
590#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
591/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
592#define EP_HAS_LSA (1 << 15)
588 593
589/* ep_info2 bitmasks */ 594/* ep_info2 bitmasks */
590/* 595/*
@@ -648,8 +653,50 @@ struct xhci_command {
648/* add context bitmasks */ 653/* add context bitmasks */
649#define ADD_EP(x) (0x1 << x) 654#define ADD_EP(x) (0x1 << x)
650 655
656struct xhci_stream_ctx {
657 /* 64-bit stream ring address, cycle state, and stream type */
658 u64 stream_ring;
659 /* offset 0x14 - 0x1f reserved for HC internal use */
660 u32 reserved[2];
661};
662
663/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
664#define SCT_FOR_CTX(p) (((p) << 1) & 0x7)
665/* Secondary stream array type, dequeue pointer is to a transfer ring */
666#define SCT_SEC_TR 0
667/* Primary stream array type, dequeue pointer is to a transfer ring */
668#define SCT_PRI_TR 1
669/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
670#define SCT_SSA_8 2
671#define SCT_SSA_16 3
672#define SCT_SSA_32 4
673#define SCT_SSA_64 5
674#define SCT_SSA_128 6
675#define SCT_SSA_256 7
676
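Reading the struct and the SCT_* values together: each primary stream array entry packs the stream ring's dequeue address, its cycle bit, and a stream context type into the single 64-bit word. A sketch of how such an entry would be composed (the helper is illustrative, not a quote of the xhci-mem.c code that actually fills the array):

    #include <linux/types.h>

    /*
     * Sketch: compose a primary-stream-array entry for one stream ring.
     * TRB rings are at least 16-byte aligned, so the low four bits of the
     * dequeue address are free for the cycle bit (0 or 1) and the SCT field.
     */
    static u64 example_stream_ctx_entry(dma_addr_t deq_dma, u32 cycle_state)
    {
            return (u64)deq_dma | SCT_FOR_CTX(SCT_PRI_TR) | cycle_state;
    }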
677/* Assume no secondary streams for now */
678struct xhci_stream_info {
679 struct xhci_ring **stream_rings;
680 /* Number of streams, including stream 0 (which drivers can't use) */
681 unsigned int num_streams;
682 /* The stream context array may be bigger than
683 * the number of streams the driver asked for
684 */
685 struct xhci_stream_ctx *stream_ctx_array;
686 unsigned int num_stream_ctxs;
687 dma_addr_t ctx_array_dma;
688 /* For mapping physical TRB addresses to segments in stream rings */
689 struct radix_tree_root trb_address_map;
690 struct xhci_command *free_streams_command;
691};
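The trb_address_map radix tree exists so that event handling can take a TRB's DMA address from a transfer event and find the segment (and therefore the stream ring) it lives in, even when one endpoint owns many rings. A sketch of the lookup, assuming segments are keyed by their DMA address shifted down by SEGMENT_SHIFT (defined further down in this header):

    #include <linux/radix-tree.h>

    /*
     * Sketch: map a TRB DMA address back to the stream-ring segment it is in.
     * Assumes each segment was inserted with the key (seg->dma >> SEGMENT_SHIFT).
     */
    static struct xhci_segment *example_trb_to_segment(
                    struct xhci_stream_info *stream_info, dma_addr_t trb_dma)
    {
            return radix_tree_lookup(&stream_info->trb_address_map,
                                     (unsigned long)(trb_dma >> SEGMENT_SHIFT));
    }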
692
693#define SMALL_STREAM_ARRAY_SIZE 256
694#define MEDIUM_STREAM_ARRAY_SIZE 1024
695
651struct xhci_virt_ep { 696struct xhci_virt_ep {
652 struct xhci_ring *ring; 697 struct xhci_ring *ring;
698 /* Related to endpoints that are configured to use stream IDs only */
699 struct xhci_stream_info *stream_info;
653 /* Temporary storage in case the configure endpoint command fails and we 700 /* Temporary storage in case the configure endpoint command fails and we
654 * have to restore the device state to the previous state 701 * have to restore the device state to the previous state
655 */ 702 */
@@ -658,11 +705,17 @@ struct xhci_virt_ep {
658#define SET_DEQ_PENDING (1 << 0) 705#define SET_DEQ_PENDING (1 << 0)
659#define EP_HALTED (1 << 1) /* For stall handling */ 706#define EP_HALTED (1 << 1) /* For stall handling */
660#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */ 707#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */
708/* Transitioning the endpoint to using streams, don't enqueue URBs */
709#define EP_GETTING_STREAMS (1 << 3)
710#define EP_HAS_STREAMS (1 << 4)
711/* Transitioning the endpoint to not using streams, don't enqueue URBs */
712#define EP_GETTING_NO_STREAMS (1 << 5)
661 /* ---- Related to URB cancellation ---- */ 713 /* ---- Related to URB cancellation ---- */
662 struct list_head cancelled_td_list; 714 struct list_head cancelled_td_list;
663 /* The TRB that was last reported in a stopped endpoint ring */ 715 /* The TRB that was last reported in a stopped endpoint ring */
664 union xhci_trb *stopped_trb; 716 union xhci_trb *stopped_trb;
665 struct xhci_td *stopped_td; 717 struct xhci_td *stopped_td;
718 unsigned int stopped_stream;
666 /* Watchdog timer for stop endpoint command to cancel URBs */ 719 /* Watchdog timer for stop endpoint command to cancel URBs */
667 struct timer_list stop_cmd_timer; 720 struct timer_list stop_cmd_timer;
668 int stop_cmds_pending; 721 int stop_cmds_pending;
@@ -710,14 +763,6 @@ struct xhci_device_context_array {
710 */ 763 */
711 764
712 765
713struct xhci_stream_ctx {
714 /* 64-bit stream ring address, cycle state, and stream type */
715 u64 stream_ring;
716 /* offset 0x14 - 0x1f reserved for HC internal use */
717 u32 reserved[2];
718};
719
720
721struct xhci_transfer_event { 766struct xhci_transfer_event {
722 /* 64-bit buffer address, or immediate data */ 767 /* 64-bit buffer address, or immediate data */
723 u64 buffer; 768 u64 buffer;
@@ -828,6 +873,10 @@ struct xhci_event_cmd {
828#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1) 873#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
829#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16) 874#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
830 875
876/* Set TR Dequeue Pointer command TRB fields */
877#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
878#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
879
831 880
832/* Port Status Change Event TRB fields */ 881/* Port Status Change Event TRB fields */
833/* Port ID - bits 31:24 */ 882/* Port ID - bits 31:24 */
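STREAM_ID_FOR_TRB() and TRB_TO_STREAM_ID() are inverses over the 16-bit stream ID field of a command TRB; a quick user-space check, with the macros copied verbatim from the hunk above:

    #include <assert.h>
    #include <stdio.h>

    #define TRB_TO_STREAM_ID(p)     ((((p) & (0xffff << 16)) >> 16))
    #define STREAM_ID_FOR_TRB(p)    ((((p)) & 0xffff) << 16)

    int main(void)
    {
            unsigned int stream_id = 5;
            unsigned int trb_word = STREAM_ID_FOR_TRB(stream_id);

            /* The field round-trips: encode into the TRB word, decode back out */
            assert(TRB_TO_STREAM_ID(trb_word) == stream_id);
            printf("stream %u -> command TRB word 0x%08x\n", stream_id, trb_word);
            return 0;
    }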
@@ -952,6 +1001,10 @@ union xhci_trb {
952/* Allow two commands + a link TRB, along with any reserved command TRBs */ 1001/* Allow two commands + a link TRB, along with any reserved command TRBs */
953#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) 1002#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
954#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16) 1003#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
1004/* SEGMENT_SHIFT should be log2(SEGMENT_SIZE).
1005 * Change this if you change TRBS_PER_SEGMENT!
1006 */
1007#define SEGMENT_SHIFT 10
955/* TRB buffer pointers can't cross 64KB boundaries */ 1008/* TRB buffer pointers can't cross 64KB boundaries */
956#define TRB_MAX_BUFF_SHIFT 16 1009#define TRB_MAX_BUFF_SHIFT 16
957#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT) 1010#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
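Since the comment relies on whoever changes TRBS_PER_SEGMENT remembering to update SEGMENT_SHIFT by hand, a compile-time assertion could enforce the relationship instead; a sketch of what that might look like (not part of this patch):

    #include <linux/kernel.h>

    /* Sketch: fails the build if SEGMENT_SHIFT stops being log2(SEGMENT_SIZE). */
    static inline void example_check_segment_constants(void)
    {
            BUILD_BUG_ON(SEGMENT_SIZE != (1 << SEGMENT_SHIFT));
    }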
@@ -993,6 +1046,7 @@ struct xhci_ring {
993 * if we own the TRB (if we are the consumer). See section 4.9.1. 1046 * if we own the TRB (if we are the consumer). See section 4.9.1.
994 */ 1047 */
995 u32 cycle_state; 1048 u32 cycle_state;
1049 unsigned int stream_id;
996}; 1050};
997 1051
998struct xhci_erst_entry { 1052struct xhci_erst_entry {
@@ -1088,6 +1142,8 @@ struct xhci_hcd {
1088 /* DMA pools */ 1142 /* DMA pools */
1089 struct dma_pool *device_pool; 1143 struct dma_pool *device_pool;
1090 struct dma_pool *segment_pool; 1144 struct dma_pool *segment_pool;
1145 struct dma_pool *small_streams_pool;
1146 struct dma_pool *medium_streams_pool;
1091 1147
1092#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 1148#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1093 /* Poll the rings - for debugging */ 1149 /* Poll the rings - for debugging */
@@ -1216,6 +1272,9 @@ void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
1216void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); 1272void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
1217char *xhci_get_slot_state(struct xhci_hcd *xhci, 1273char *xhci_get_slot_state(struct xhci_hcd *xhci,
1218 struct xhci_container_ctx *ctx); 1274 struct xhci_container_ctx *ctx);
1275void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
1276 unsigned int slot_id, unsigned int ep_index,
1277 struct xhci_virt_ep *ep);
1219 1278
1220/* xHCI memory management */ 1279/* xHCI memory management */
1221void xhci_mem_cleanup(struct xhci_hcd *xhci); 1280void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1242,6 +1301,29 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
1242void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, 1301void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
1243 struct xhci_virt_device *virt_dev, 1302 struct xhci_virt_device *virt_dev,
1244 unsigned int ep_index); 1303 unsigned int ep_index);
1304struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
1305 unsigned int num_stream_ctxs,
1306 unsigned int num_streams, gfp_t flags);
1307void xhci_free_stream_info(struct xhci_hcd *xhci,
1308 struct xhci_stream_info *stream_info);
1309void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
1310 struct xhci_ep_ctx *ep_ctx,
1311 struct xhci_stream_info *stream_info);
1312void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
1313 struct xhci_ep_ctx *ep_ctx,
1314 struct xhci_virt_ep *ep);
1315struct xhci_ring *xhci_dma_to_transfer_ring(
1316 struct xhci_virt_ep *ep,
1317 u64 address);
1318struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1319 struct urb *urb);
1320struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
1321 unsigned int slot_id, unsigned int ep_index,
1322 unsigned int stream_id);
1323struct xhci_ring *xhci_stream_id_to_ring(
1324 struct xhci_virt_device *dev,
1325 unsigned int ep_index,
1326 unsigned int stream_id);
1245struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, 1327struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1246 bool allocate_in_ctx, bool allocate_completion, 1328 bool allocate_in_ctx, bool allocate_completion,
1247 gfp_t mem_flags); 1329 gfp_t mem_flags);
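The lookup helpers declared here all resolve the same triple: stream ID 0 means the endpoint's ordinary ring, and any other ID indexes the per-endpoint stream_rings[] array. A rough sketch of that resolution (the real implementations live in xhci-mem.c and carry extra validation and warnings):

    /* Sketch: resolve a stream ID to the transfer ring it addresses. */
    static struct xhci_ring *example_stream_id_to_ring(struct xhci_virt_ep *ep,
                    unsigned int stream_id)
    {
            if (stream_id == 0)
                    return ep->ring;        /* stream 0 is reserved: normal ring */
            if (!ep->stream_info || stream_id >= ep->stream_info->num_streams)
                    return NULL;            /* an ID the driver was never granted */
            return ep->stream_info->stream_rings[stream_id];
    }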
@@ -1266,6 +1348,12 @@ int xhci_get_frame(struct usb_hcd *hcd);
1266irqreturn_t xhci_irq(struct usb_hcd *hcd); 1348irqreturn_t xhci_irq(struct usb_hcd *hcd);
1267int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev); 1349int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
1268void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); 1350void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
1351int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
1352 struct usb_host_endpoint **eps, unsigned int num_eps,
1353 unsigned int num_streams, gfp_t mem_flags);
1354int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
1355 struct usb_host_endpoint **eps, unsigned int num_eps,
1356 gfp_t mem_flags);
1269int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev); 1357int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
1270int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 1358int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
1271 struct usb_tt *tt, gfp_t mem_flags); 1359 struct usb_tt *tt, gfp_t mem_flags);
@@ -1308,9 +1396,11 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
1308int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id); 1396int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
1309void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, 1397void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
1310 unsigned int slot_id, unsigned int ep_index, 1398 unsigned int slot_id, unsigned int ep_index,
1311 struct xhci_td *cur_td, struct xhci_dequeue_state *state); 1399 unsigned int stream_id, struct xhci_td *cur_td,
1400 struct xhci_dequeue_state *state);
1312void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, 1401void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
1313 unsigned int slot_id, unsigned int ep_index, 1402 unsigned int slot_id, unsigned int ep_index,
1403 unsigned int stream_id,
1314 struct xhci_dequeue_state *deq_state); 1404 struct xhci_dequeue_state *deq_state);
1315void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, 1405void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1316 struct usb_device *udev, unsigned int ep_index); 1406 struct usb_device *udev, unsigned int ep_index);