Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig | 33
-rw-r--r--  drivers/usb/host/Makefile | 6
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 2
-rw-r--r--  drivers/usb/host/ehci-au1xxx.c | 8
-rw-r--r--  drivers/usb/host/ehci-fsl.c | 97
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 31
-rw-r--r--  drivers/usb/host/ehci-hub.c | 37
-rw-r--r--  drivers/usb/host/ehci-mem.c | 2
-rw-r--r--  drivers/usb/host/ehci-mxc.c | 296
-rw-r--r--  drivers/usb/host/ehci-omap.c | 794
-rw-r--r--  drivers/usb/host/ehci-orion.c | 8
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c | 14
-rw-r--r--  drivers/usb/host/ehci-q.c | 43
-rw-r--r--  drivers/usb/host/ehci-sched.c | 114
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 300
-rw-r--r--  drivers/usb/host/ehci.h | 10
-rw-r--r--  drivers/usb/host/fhci-hcd.c | 8
-rw-r--r--  drivers/usb/host/fhci-mem.c | 1
-rw-r--r--  drivers/usb/host/fhci-q.c | 1
-rw-r--r--  drivers/usb/host/fhci-sched.c | 12
-rw-r--r--  drivers/usb/host/fhci-tds.c | 42
-rw-r--r--  drivers/usb/host/fhci.h | 16
-rw-r--r--  drivers/usb/host/hwa-hc.c | 1
-rw-r--r--  drivers/usb/host/imx21-dbg.c | 527
-rw-r--r--  drivers/usb/host/imx21-hcd.c | 1790
-rw-r--r--  drivers/usb/host/imx21-hcd.h | 436
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 1
-rw-r--r--  drivers/usb/host/isp1362-hcd.c | 66
-rw-r--r--  drivers/usb/host/isp1362.h | 4
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 16
-rw-r--r--  drivers/usb/host/isp1760-if.c | 2
-rw-r--r--  drivers/usb/host/ohci-at91.c | 12
-rw-r--r--  drivers/usb/host/ohci-au1xxx.c | 2
-rw-r--r--  drivers/usb/host/ohci-da8xx.c | 456
-rw-r--r--  drivers/usb/host/ohci-dbg.c | 4
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 5
-rw-r--r--  drivers/usb/host/ohci-hub.c | 2
-rw-r--r--  drivers/usb/host/ohci-lh7a404.c | 11
-rw-r--r--  drivers/usb/host/ohci-omap.c | 6
-rw-r--r--  drivers/usb/host/ohci-pnx4008.c | 14
-rw-r--r--  drivers/usb/host/ohci-ppc-of.c | 10
-rw-r--r--  drivers/usb/host/ohci-ppc-soc.c | 8
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 2
-rw-r--r--  drivers/usb/host/ohci-q.c | 1
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 8
-rw-r--r--  drivers/usb/host/oxu210hp-hcd.c | 6
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 81
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 11
-rw-r--r--  drivers/usb/host/sl811_cs.c | 49
-rw-r--r--  drivers/usb/host/uhci-debug.c | 1
-rw-r--r--  drivers/usb/host/uhci-hcd.c | 16
-rw-r--r--  drivers/usb/host/uhci-hub.c | 2
-rw-r--r--  drivers/usb/host/whci/asl.c | 1
-rw-r--r--  drivers/usb/host/whci/debug.c | 21
-rw-r--r--  drivers/usb/host/whci/hcd.c | 1
-rw-r--r--  drivers/usb/host/whci/init.c | 1
-rw-r--r--  drivers/usb/host/whci/pzl.c | 1
-rw-r--r--  drivers/usb/host/whci/qset.c | 351
-rw-r--r--  drivers/usb/host/whci/whcd.h | 9
-rw-r--r--  drivers/usb/host/whci/whci-hc.h | 14
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 19
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h | 7
-rw-r--r--  drivers/usb/host/xhci-hub.c | 65
-rw-r--r--  drivers/usb/host/xhci-mem.c | 373
-rw-r--r--  drivers/usb/host/xhci-pci.c | 3
-rw-r--r--  drivers/usb/host/xhci-ring.c | 371
-rw-r--r--  drivers/usb/host/xhci.c (renamed from drivers/usb/host/xhci-hcd.c) | 255
-rw-r--r--  drivers/usb/host/xhci.h | 52
68 files changed, 6486 insertions(+), 483 deletions(-)
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 9b43b226817f..8d3df0397de3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -90,14 +90,25 @@ config USB_EHCI_TT_NEWSCHED
90 90
91config USB_EHCI_BIG_ENDIAN_MMIO 91config USB_EHCI_BIG_ENDIAN_MMIO
92 bool 92 bool
93 depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX) 93 depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX)
94 default y 94 default y
95 95
96config USB_EHCI_BIG_ENDIAN_DESC 96config USB_EHCI_BIG_ENDIAN_DESC
97 bool 97 bool
98 depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX) 98 depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX)
99 default y 99 default y
100 100
101config XPS_USB_HCD_XILINX
102 bool "Use Xilinx usb host EHCI controller core"
103 depends on USB_EHCI_HCD && (PPC32 || MICROBLAZE)
104 select USB_EHCI_BIG_ENDIAN_DESC
105 select USB_EHCI_BIG_ENDIAN_MMIO
106 ---help---
 107	  Xilinx XPS USB host controller core is EHCI compliant and has a
 108	  transaction translator built in. It can be configured to either
109 support both high speed and full speed devices, or high speed
110 devices only.
111
101config USB_EHCI_FSL 112config USB_EHCI_FSL
102 bool "Support for Freescale on-chip EHCI USB controller" 113 bool "Support for Freescale on-chip EHCI USB controller"
103 depends on USB_EHCI_HCD && FSL_SOC 114 depends on USB_EHCI_HCD && FSL_SOC
@@ -105,6 +116,13 @@ config USB_EHCI_FSL
105 ---help--- 116 ---help---
106 Variation of ARC USB block used in some Freescale chips. 117 Variation of ARC USB block used in some Freescale chips.
107 118
119config USB_EHCI_MXC
120 bool "Support for Freescale on-chip EHCI USB controller"
121 depends on USB_EHCI_HCD && ARCH_MXC
122 select USB_EHCI_ROOT_HUB_TT
123 ---help---
124 Variation of ARC USB block used in some Freescale chips.
125
108config USB_EHCI_HCD_PPC_OF 126config USB_EHCI_HCD_PPC_OF
109 bool "EHCI support for PPC USB controller on OF platform bus" 127 bool "EHCI support for PPC USB controller on OF platform bus"
110 depends on USB_EHCI_HCD && PPC_OF 128 depends on USB_EHCI_HCD && PPC_OF
@@ -381,3 +399,14 @@ config USB_HWA_HCD
381 399
382 To compile this driver a module, choose M here: the module 400 To compile this driver a module, choose M here: the module
383 will be called "hwa-hc". 401 will be called "hwa-hc".
402
403config USB_IMX21_HCD
404 tristate "iMX21 HCD support"
405 depends on USB && ARM && MACH_MX21
406 help
407 This driver enables support for the on-chip USB host in the
408 iMX21 processor.
409
410 To compile this driver as a module, choose M here: the
411 module will be called "imx21-hcd".
412
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f58b2494c44a..b6315aa47f7a 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -12,7 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
12ifeq ($(CONFIG_FHCI_DEBUG),y) 12ifeq ($(CONFIG_FHCI_DEBUG),y)
13fhci-objs += fhci-dbg.o 13fhci-objs += fhci-dbg.o
14endif 14endif
15xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o 15xhci-hcd-objs := xhci.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
16 16
17obj-$(CONFIG_USB_WHCI_HCD) += whci/ 17obj-$(CONFIG_USB_WHCI_HCD) += whci/
18 18
@@ -25,10 +25,12 @@ obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
25obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o 25obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
26obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o 26obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
27obj-$(CONFIG_USB_FHCI_HCD) += fhci.o 27obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
28obj-$(CONFIG_USB_XHCI_HCD) += xhci.o 28obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
29obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o 29obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
30obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o 30obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
31obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o 31obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
32obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o 32obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
33obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o 33obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
34obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o 34obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
35obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
36
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 87c1b7c34c0e..51bd0edf544f 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -149,7 +149,7 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
149 goto fail_request_resource; 149 goto fail_request_resource;
150 } 150 }
151 hcd->rsrc_start = res->start; 151 hcd->rsrc_start = res->start;
152 hcd->rsrc_len = res->end - res->start + 1; 152 hcd->rsrc_len = resource_size(res);
153 153
154 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, 154 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
155 driver->description)) { 155 driver->description)) {
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index ed77be76d6bb..e3a74e75e822 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -121,6 +121,7 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
121{ 121{
122 struct usb_hcd *hcd; 122 struct usb_hcd *hcd;
123 struct ehci_hcd *ehci; 123 struct ehci_hcd *ehci;
124 struct resource *res;
124 int ret; 125 int ret;
125 126
126 if (usb_disabled()) 127 if (usb_disabled())
@@ -144,8 +145,9 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
144 if (!hcd) 145 if (!hcd)
145 return -ENOMEM; 146 return -ENOMEM;
146 147
147 hcd->rsrc_start = pdev->resource[0].start; 148 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
148 hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; 149 hcd->rsrc_start = res->start;
150 hcd->rsrc_len = resource_size(res);
149 151
150 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 152 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
151 pr_debug("request_mem_region failed"); 153 pr_debug("request_mem_region failed");
@@ -297,7 +299,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
297 return 0; 299 return 0;
298} 300}
299 301
300static struct dev_pm_ops au1xxx_ehci_pmops = { 302static const struct dev_pm_ops au1xxx_ehci_pmops = {
301 .suspend = ehci_hcd_au1xxx_drv_suspend, 303 .suspend = ehci_hcd_au1xxx_drv_suspend,
302 .resume = ehci_hcd_au1xxx_drv_resume, 304 .resume = ehci_hcd_au1xxx_drv_resume,
303}; 305};
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 991174937db3..0e26aa13f158 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2005 MontaVista Software 2 * Copyright 2005-2009 MontaVista Software, Inc.
3 * Copyright 2008 Freescale Semiconductor, Inc.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the 6 * under the terms of the GNU General Public License as published by the
@@ -17,17 +18,20 @@
17 * 18 *
18 * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided 19 * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
19 * by Hunter Wu. 20 * by Hunter Wu.
21 * Power Management support by Dave Liu <daveliu@freescale.com>,
22 * Jerry Huang <Chang-Ming.Huang@freescale.com> and
23 * Anton Vorontsov <avorontsov@ru.mvista.com>.
20 */ 24 */
21 25
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/delay.h>
29#include <linux/pm.h>
22#include <linux/platform_device.h> 30#include <linux/platform_device.h>
23#include <linux/fsl_devices.h> 31#include <linux/fsl_devices.h>
24 32
25#include "ehci-fsl.h" 33#include "ehci-fsl.h"
26 34
27/* FIXME: Power Management is un-ported so temporarily disable it */
28#undef CONFIG_PM
29
30
31/* configure so an HC device and id are always provided */ 35/* configure so an HC device and id are always provided */
32/* always called with process context; sleeping is OK */ 36/* always called with process context; sleeping is OK */
33 37
@@ -40,8 +44,8 @@
40 * Allocates basic resources for this USB host controller. 44 * Allocates basic resources for this USB host controller.
41 * 45 *
42 */ 46 */
43int usb_hcd_fsl_probe(const struct hc_driver *driver, 47static int usb_hcd_fsl_probe(const struct hc_driver *driver,
44 struct platform_device *pdev) 48 struct platform_device *pdev)
45{ 49{
46 struct fsl_usb2_platform_data *pdata; 50 struct fsl_usb2_platform_data *pdata;
47 struct usb_hcd *hcd; 51 struct usb_hcd *hcd;
@@ -147,7 +151,8 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver,
147 * Reverses the effect of usb_hcd_fsl_probe(). 151 * Reverses the effect of usb_hcd_fsl_probe().
148 * 152 *
149 */ 153 */
150void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev) 154static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
155 struct platform_device *pdev)
151{ 156{
152 usb_remove_hcd(hcd); 157 usb_remove_hcd(hcd);
153 iounmap(hcd->regs); 158 iounmap(hcd->regs);
@@ -284,10 +289,81 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
284 return retval; 289 return retval;
285} 290}
286 291
292struct ehci_fsl {
293 struct ehci_hcd ehci;
294
295#ifdef CONFIG_PM
296 /* Saved USB PHY settings, need to restore after deep sleep. */
297 u32 usb_ctrl;
298#endif
299};
300
301#ifdef CONFIG_PM
302
303static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
304{
305 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
306
307 return container_of(ehci, struct ehci_fsl, ehci);
308}
309
310static int ehci_fsl_drv_suspend(struct device *dev)
311{
312 struct usb_hcd *hcd = dev_get_drvdata(dev);
313 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
314 void __iomem *non_ehci = hcd->regs;
315
316 if (!fsl_deep_sleep())
317 return 0;
318
319 ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
320 return 0;
321}
322
323static int ehci_fsl_drv_resume(struct device *dev)
324{
325 struct usb_hcd *hcd = dev_get_drvdata(dev);
326 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
327 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
328 void __iomem *non_ehci = hcd->regs;
329
330 if (!fsl_deep_sleep())
331 return 0;
332
333 usb_root_hub_lost_power(hcd->self.root_hub);
334
335 /* Restore USB PHY settings and enable the controller. */
336 out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
337
338 ehci_reset(ehci);
339 ehci_fsl_reinit(ehci);
340
341 return 0;
342}
343
344static int ehci_fsl_drv_restore(struct device *dev)
345{
346 struct usb_hcd *hcd = dev_get_drvdata(dev);
347
348 usb_root_hub_lost_power(hcd->self.root_hub);
349 return 0;
350}
351
352static struct dev_pm_ops ehci_fsl_pm_ops = {
353 .suspend = ehci_fsl_drv_suspend,
354 .resume = ehci_fsl_drv_resume,
355 .restore = ehci_fsl_drv_restore,
356};
357
358#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
359#else
360#define EHCI_FSL_PM_OPS NULL
361#endif /* CONFIG_PM */
362
287static const struct hc_driver ehci_fsl_hc_driver = { 363static const struct hc_driver ehci_fsl_hc_driver = {
288 .description = hcd_name, 364 .description = hcd_name,
289 .product_desc = "Freescale On-Chip EHCI Host Controller", 365 .product_desc = "Freescale On-Chip EHCI Host Controller",
290 .hcd_priv_size = sizeof(struct ehci_hcd), 366 .hcd_priv_size = sizeof(struct ehci_fsl),
291 367
292 /* 368 /*
293 * generic hardware linkage 369 * generic hardware linkage
@@ -354,6 +430,7 @@ static struct platform_driver ehci_fsl_driver = {
354 .remove = ehci_fsl_drv_remove, 430 .remove = ehci_fsl_drv_remove,
355 .shutdown = usb_hcd_platform_shutdown, 431 .shutdown = usb_hcd_platform_shutdown,
356 .driver = { 432 .driver = {
357 .name = "fsl-ehci", 433 .name = "fsl-ehci",
434 .pm = EHCI_FSL_PM_OPS,
358 }, 435 },
359}; 436};
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index f5f5601701c9..13ead00aecd5 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -23,7 +23,6 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/slab.h>
27#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
28#include <linux/errno.h> 27#include <linux/errno.h>
29#include <linux/init.h> 28#include <linux/init.h>
@@ -35,6 +34,7 @@
35#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
36#include <linux/dma-mapping.h> 35#include <linux/dma-mapping.h>
37#include <linux/debugfs.h> 36#include <linux/debugfs.h>
37#include <linux/slab.h>
38 38
39#include "../core/hcd.h" 39#include "../core/hcd.h"
40 40
@@ -210,7 +210,7 @@ static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
210 if (error) { 210 if (error) {
211 ehci_halt(ehci); 211 ehci_halt(ehci);
212 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 212 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
213 ehci_err(ehci, "force halt; handhake %p %08x %08x -> %d\n", 213 ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
214 ptr, mask, done, error); 214 ptr, mask, done, error);
215 } 215 }
216 216
@@ -543,13 +543,14 @@ static int ehci_init(struct usb_hcd *hcd)
543 */ 543 */
544 ehci->periodic_size = DEFAULT_I_TDPS; 544 ehci->periodic_size = DEFAULT_I_TDPS;
545 INIT_LIST_HEAD(&ehci->cached_itd_list); 545 INIT_LIST_HEAD(&ehci->cached_itd_list);
546 INIT_LIST_HEAD(&ehci->cached_sitd_list);
546 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) 547 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
547 return retval; 548 return retval;
548 549
549 /* controllers may cache some of the periodic schedule ... */ 550 /* controllers may cache some of the periodic schedule ... */
550 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); 551 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
551 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache 552 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
552 ehci->i_thresh = 8; 553 ehci->i_thresh = 2 + 8;
553 else // N microframes cached 554 else // N microframes cached
554 ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params); 555 ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
555 556
@@ -605,6 +606,8 @@ static int ehci_init(struct usb_hcd *hcd)
605 } 606 }
606 ehci->command = temp; 607 ehci->command = temp;
607 608
609 /* Accept arbitrarily long scatter-gather lists */
610 hcd->self.sg_tablesize = ~0;
608 return 0; 611 return 0;
609} 612}
610 613
@@ -785,9 +788,10 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
785 788
786 /* start 20 msec resume signaling from this port, 789 /* start 20 msec resume signaling from this port,
787 * and make khubd collect PORT_STAT_C_SUSPEND to 790 * and make khubd collect PORT_STAT_C_SUSPEND to
788 * stop that signaling. 791 * stop that signaling. Use 5 ms extra for safety,
792 * like usb_port_resume() does.
789 */ 793 */
790 ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); 794 ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
791 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); 795 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
792 mod_timer(&hcd->rh_timer, ehci->reset_done[i]); 796 mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
793 } 797 }
@@ -992,7 +996,7 @@ rescan:
992 /* endpoints can be iso streams. for now, we don't 996 /* endpoints can be iso streams. for now, we don't
993 * accelerate iso completions ... so spin a while. 997 * accelerate iso completions ... so spin a while.
994 */ 998 */
995 if (qh->hw->hw_info1 == 0) { 999 if (qh->hw == NULL) {
996 ehci_vdbg (ehci, "iso delay\n"); 1000 ehci_vdbg (ehci, "iso delay\n");
997 goto idle_timeout; 1001 goto idle_timeout;
998 } 1002 }
@@ -1105,11 +1109,21 @@ MODULE_LICENSE ("GPL");
1105#define PLATFORM_DRIVER ehci_fsl_driver 1109#define PLATFORM_DRIVER ehci_fsl_driver
1106#endif 1110#endif
1107 1111
1112#ifdef CONFIG_USB_EHCI_MXC
1113#include "ehci-mxc.c"
1114#define PLATFORM_DRIVER ehci_mxc_driver
1115#endif
1116
1108#ifdef CONFIG_SOC_AU1200 1117#ifdef CONFIG_SOC_AU1200
1109#include "ehci-au1xxx.c" 1118#include "ehci-au1xxx.c"
1110#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver 1119#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
1111#endif 1120#endif
1112 1121
1122#ifdef CONFIG_ARCH_OMAP3
1123#include "ehci-omap.c"
1124#define PLATFORM_DRIVER ehci_hcd_omap_driver
1125#endif
1126
1113#ifdef CONFIG_PPC_PS3 1127#ifdef CONFIG_PPC_PS3
1114#include "ehci-ps3.c" 1128#include "ehci-ps3.c"
1115#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver 1129#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
@@ -1120,6 +1134,11 @@ MODULE_LICENSE ("GPL");
1120#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver 1134#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
1121#endif 1135#endif
1122 1136
1137#ifdef CONFIG_XPS_USB_HCD_XILINX
1138#include "ehci-xilinx-of.c"
1139#define OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
1140#endif
1141
1123#ifdef CONFIG_PLAT_ORION 1142#ifdef CONFIG_PLAT_ORION
1124#include "ehci-orion.c" 1143#include "ehci-orion.c"
1125#define PLATFORM_DRIVER ehci_orion_driver 1144#define PLATFORM_DRIVER ehci_orion_driver
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 1b6f1c0e5cee..c7178bcde67a 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -120,9 +120,26 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
120 del_timer_sync(&ehci->watchdog); 120 del_timer_sync(&ehci->watchdog);
121 del_timer_sync(&ehci->iaa_watchdog); 121 del_timer_sync(&ehci->iaa_watchdog);
122 122
123 port = HCS_N_PORTS (ehci->hcs_params);
124 spin_lock_irq (&ehci->lock); 123 spin_lock_irq (&ehci->lock);
125 124
125 /* Once the controller is stopped, port resumes that are already
126 * in progress won't complete. Hence if remote wakeup is enabled
127 * for the root hub and any ports are in the middle of a resume or
128 * remote wakeup, we must fail the suspend.
129 */
130 if (hcd->self.root_hub->do_remote_wakeup) {
131 port = HCS_N_PORTS(ehci->hcs_params);
132 while (port--) {
133 if (ehci->reset_done[port] != 0) {
134 spin_unlock_irq(&ehci->lock);
135 ehci_dbg(ehci, "suspend failed because "
136 "port %d is resuming\n",
137 port + 1);
138 return -EBUSY;
139 }
140 }
141 }
142
126 /* stop schedules, clean any completed work */ 143 /* stop schedules, clean any completed work */
127 if (HC_IS_RUNNING(hcd->state)) { 144 if (HC_IS_RUNNING(hcd->state)) {
128 ehci_quiesce (ehci); 145 ehci_quiesce (ehci);
@@ -138,6 +155,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
138 */ 155 */
139 ehci->bus_suspended = 0; 156 ehci->bus_suspended = 0;
140 ehci->owned_ports = 0; 157 ehci->owned_ports = 0;
158 port = HCS_N_PORTS(ehci->hcs_params);
141 while (port--) { 159 while (port--) {
142 u32 __iomem *reg = &ehci->regs->port_status [port]; 160 u32 __iomem *reg = &ehci->regs->port_status [port];
143 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; 161 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
@@ -178,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
178 if (hostpc_reg) { 196 if (hostpc_reg) {
179 u32 t3; 197 u32 t3;
180 198
199 spin_unlock_irq(&ehci->lock);
181 msleep(5);/* 5ms for HCD enter low pwr mode */ 200 msleep(5);/* 5ms for HCD enter low pwr mode */
201 spin_lock_irq(&ehci->lock);
182 t3 = ehci_readl(ehci, hostpc_reg); 202 t3 = ehci_readl(ehci, hostpc_reg);
183 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); 203 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
184 t3 = ehci_readl(ehci, hostpc_reg); 204 t3 = ehci_readl(ehci, hostpc_reg);
@@ -236,7 +256,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
236 } 256 }
237 257
238 if (unlikely(ehci->debug)) { 258 if (unlikely(ehci->debug)) {
239 if (ehci->debug && !dbgp_reset_prep()) 259 if (!dbgp_reset_prep())
240 ehci->debug = NULL; 260 ehci->debug = NULL;
241 else 261 else
242 dbgp_external_startup(); 262 dbgp_external_startup();
@@ -781,7 +801,7 @@ static int ehci_hub_control (
781 * this bit; seems too long to spin routinely... 801 * this bit; seems too long to spin routinely...
782 */ 802 */
783 retval = handshake(ehci, status_reg, 803 retval = handshake(ehci, status_reg,
784 PORT_RESET, 0, 750); 804 PORT_RESET, 0, 1000);
785 if (retval != 0) { 805 if (retval != 0) {
786 ehci_err (ehci, "port %d reset error %d\n", 806 ehci_err (ehci, "port %d reset error %d\n",
787 wIndex + 1, retval); 807 wIndex + 1, retval);
@@ -886,17 +906,18 @@ static int ehci_hub_control (
886 if ((temp & PORT_PE) == 0 906 if ((temp & PORT_PE) == 0
887 || (temp & PORT_RESET) != 0) 907 || (temp & PORT_RESET) != 0)
888 goto error; 908 goto error;
889 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); 909
890 /* After above check the port must be connected. 910 /* After above check the port must be connected.
891 * Set appropriate bit thus could put phy into low power 911 * Set appropriate bit thus could put phy into low power
892 * mode if we have hostpc feature 912 * mode if we have hostpc feature
893 */ 913 */
914 temp &= ~PORT_WKCONN_E;
915 temp |= PORT_WKDISC_E | PORT_WKOC_E;
916 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
894 if (hostpc_reg) { 917 if (hostpc_reg) {
895 temp &= ~PORT_WKCONN_E; 918 spin_unlock_irqrestore(&ehci->lock, flags);
896 temp |= (PORT_WKDISC_E | PORT_WKOC_E);
897 ehci_writel(ehci, temp | PORT_SUSPEND,
898 status_reg);
899 msleep(5);/* 5ms for HCD enter low pwr mode */ 919 msleep(5);/* 5ms for HCD enter low pwr mode */
920 spin_lock_irqsave(&ehci->lock, flags);
900 temp1 = ehci_readl(ehci, hostpc_reg); 921 temp1 = ehci_readl(ehci, hostpc_reg);
901 ehci_writel(ehci, temp1 | HOSTPC_PHCD, 922 ehci_writel(ehci, temp1 | HOSTPC_PHCD,
902 hostpc_reg); 923 hostpc_reg);
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index aeda96e0af67..1f3f01eacaf0 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
136 136
137static void ehci_mem_cleanup (struct ehci_hcd *ehci) 137static void ehci_mem_cleanup (struct ehci_hcd *ehci)
138{ 138{
139 free_cached_itd_list(ehci); 139 free_cached_lists(ehci);
140 if (ehci->async) 140 if (ehci->async)
141 qh_put (ehci->async); 141 qh_put (ehci->async);
142 ehci->async = NULL; 142 ehci->async = NULL;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
new file mode 100644
index 000000000000..ead59f42e69b
--- /dev/null
+++ b/drivers/usb/host/ehci-mxc.c
@@ -0,0 +1,296 @@
1/*
2 * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
3 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/platform_device.h>
21#include <linux/clk.h>
22#include <linux/delay.h>
23#include <linux/usb/otg.h>
24#include <linux/slab.h>
25
26#include <mach/mxc_ehci.h>
27
28#define ULPI_VIEWPORT_OFFSET 0x170
29#define PORTSC_OFFSET 0x184
30#define USBMODE_OFFSET 0x1a8
31#define USBMODE_CM_HOST 3
32
33struct ehci_mxc_priv {
34 struct clk *usbclk, *ahbclk;
35 struct usb_hcd *hcd;
36};
37
38/* called during probe() after chip reset completes */
39static int ehci_mxc_setup(struct usb_hcd *hcd)
40{
41 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
42 int retval;
43
44 /* EHCI registers start at offset 0x100 */
45 ehci->caps = hcd->regs + 0x100;
46 ehci->regs = hcd->regs + 0x100 +
47 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
48 dbg_hcs_params(ehci, "reset");
49 dbg_hcc_params(ehci, "reset");
50
51 /* cache this readonly data; minimize chip reads */
52 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
53
54 retval = ehci_halt(ehci);
55 if (retval)
56 return retval;
57
58 /* data structure init */
59 retval = ehci_init(hcd);
60 if (retval)
61 return retval;
62
63 hcd->has_tt = 1;
64
65 ehci->sbrn = 0x20;
66
67 ehci_reset(ehci);
68
69 ehci_port_power(ehci, 0);
70 return 0;
71}
72
73static const struct hc_driver ehci_mxc_hc_driver = {
74 .description = hcd_name,
75 .product_desc = "Freescale On-Chip EHCI Host Controller",
76 .hcd_priv_size = sizeof(struct ehci_hcd),
77
78 /*
79 * generic hardware linkage
80 */
81 .irq = ehci_irq,
82 .flags = HCD_USB2 | HCD_MEMORY,
83
84 /*
85 * basic lifecycle operations
86 */
87 .reset = ehci_mxc_setup,
88 .start = ehci_run,
89 .stop = ehci_stop,
90 .shutdown = ehci_shutdown,
91
92 /*
93 * managing i/o requests and associated device resources
94 */
95 .urb_enqueue = ehci_urb_enqueue,
96 .urb_dequeue = ehci_urb_dequeue,
97 .endpoint_disable = ehci_endpoint_disable,
98
99 /*
100 * scheduling support
101 */
102 .get_frame_number = ehci_get_frame,
103
104 /*
105 * root hub support
106 */
107 .hub_status_data = ehci_hub_status_data,
108 .hub_control = ehci_hub_control,
109 .bus_suspend = ehci_bus_suspend,
110 .bus_resume = ehci_bus_resume,
111 .relinquish_port = ehci_relinquish_port,
112 .port_handed_over = ehci_port_handed_over,
113};
114
115static int ehci_mxc_drv_probe(struct platform_device *pdev)
116{
117 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
118 struct usb_hcd *hcd;
119 struct resource *res;
120 int irq, ret, temp;
121 struct ehci_mxc_priv *priv;
122 struct device *dev = &pdev->dev;
123
124 dev_info(&pdev->dev, "initializing i.MX USB Controller\n");
125
126 if (!pdata) {
127 dev_err(dev, "No platform data given, bailing out.\n");
128 return -EINVAL;
129 }
130
131 irq = platform_get_irq(pdev, 0);
132
133 hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
134 if (!hcd)
135 return -ENOMEM;
136
137 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
138 if (!priv) {
139 ret = -ENOMEM;
140 goto err_alloc;
141 }
142
143 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
144 if (!res) {
145 dev_err(dev, "Found HC with no register addr. Check setup!\n");
146 ret = -ENODEV;
147 goto err_get_resource;
148 }
149
150 hcd->rsrc_start = res->start;
151 hcd->rsrc_len = resource_size(res);
152
153 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
154 dev_dbg(dev, "controller already in use\n");
155 ret = -EBUSY;
156 goto err_request_mem;
157 }
158
159 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
160 if (!hcd->regs) {
161 dev_err(dev, "error mapping memory\n");
162 ret = -EFAULT;
163 goto err_ioremap;
164 }
165
166 /* call platform specific init function */
167 if (pdata->init) {
168 ret = pdata->init(pdev);
169 if (ret) {
170 dev_err(dev, "platform init failed\n");
171 goto err_init;
172 }
173 /* platforms need some time to settle changed IO settings */
174 mdelay(10);
175 }
176
177 /* enable clocks */
178 priv->usbclk = clk_get(dev, "usb");
179 if (IS_ERR(priv->usbclk)) {
180 ret = PTR_ERR(priv->usbclk);
181 goto err_clk;
182 }
183 clk_enable(priv->usbclk);
184
185 if (!cpu_is_mx35()) {
186 priv->ahbclk = clk_get(dev, "usb_ahb");
187 if (IS_ERR(priv->ahbclk)) {
188 ret = PTR_ERR(priv->ahbclk);
189 goto err_clk_ahb;
190 }
191 clk_enable(priv->ahbclk);
192 }
193
194 /* set USBMODE to host mode */
195 temp = readl(hcd->regs + USBMODE_OFFSET);
196 writel(temp | USBMODE_CM_HOST, hcd->regs + USBMODE_OFFSET);
197
198 /* set up the PORTSCx register */
199 writel(pdata->portsc, hcd->regs + PORTSC_OFFSET);
200 mdelay(10);
201
202 /* setup USBCONTROL. */
203 ret = mxc_set_usbcontrol(pdev->id, pdata->flags);
204 if (ret < 0)
205 goto err_init;
206
207 /* Initialize the transceiver */
208 if (pdata->otg) {
209 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
210 if (otg_init(pdata->otg) != 0)
211 dev_err(dev, "unable to init transceiver\n");
212 else if (otg_set_vbus(pdata->otg, 1) != 0)
213 dev_err(dev, "unable to enable vbus on transceiver\n");
214 }
215
216 priv->hcd = hcd;
217 platform_set_drvdata(pdev, priv);
218
219 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
220 if (ret)
221 goto err_add;
222
223 return 0;
224
225err_add:
226 if (pdata && pdata->exit)
227 pdata->exit(pdev);
228err_init:
229 if (priv->ahbclk) {
230 clk_disable(priv->ahbclk);
231 clk_put(priv->ahbclk);
232 }
233err_clk_ahb:
234 clk_disable(priv->usbclk);
235 clk_put(priv->usbclk);
236err_clk:
237 iounmap(hcd->regs);
238err_ioremap:
239 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
240err_request_mem:
241err_get_resource:
242 kfree(priv);
243err_alloc:
244 usb_put_hcd(hcd);
245 return ret;
246}
247
248static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
249{
250 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
251 struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
252 struct usb_hcd *hcd = priv->hcd;
253
254 if (pdata && pdata->exit)
255 pdata->exit(pdev);
256
257 if (pdata->otg)
258 otg_shutdown(pdata->otg);
259
260 usb_remove_hcd(hcd);
261 iounmap(hcd->regs);
262 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
263 usb_put_hcd(hcd);
264 platform_set_drvdata(pdev, NULL);
265
266 clk_disable(priv->usbclk);
267 clk_put(priv->usbclk);
268 if (priv->ahbclk) {
269 clk_disable(priv->ahbclk);
270 clk_put(priv->ahbclk);
271 }
272
273 kfree(priv);
274
275 return 0;
276}
277
278static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
279{
280 struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
281 struct usb_hcd *hcd = priv->hcd;
282
283 if (hcd->driver->shutdown)
284 hcd->driver->shutdown(hcd);
285}
286
287MODULE_ALIAS("platform:mxc-ehci");
288
289static struct platform_driver ehci_mxc_driver = {
290 .probe = ehci_mxc_drv_probe,
291 .remove = __exit_p(ehci_mxc_drv_remove),
292 .shutdown = ehci_mxc_drv_shutdown,
293 .driver = {
294 .name = "mxc-ehci",
295 },
296};
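
For context, here is a rough sketch of the board-file side this glue driver expects: a platform device named "mxc-ehci" carrying a struct mxc_usbh_platform_data. This is not part of the patch; the example_* identifiers, register base, and IRQ number are placeholders, and only the fields the probe/remove paths above actually read (.init/.exit, .portsc, .flags, .otg) carry meaning.

/* Hypothetical board-file sketch (not part of this patch). */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <mach/mxc_ehci.h>

static int example_usbh_init(struct platform_device *pdev)
{
	/* board-specific pin muxing and PHY power-up would go here */
	return 0;
}

static struct mxc_usbh_platform_data example_usbh_pdata = {
	.init	= example_usbh_init,
	.portsc	= 0,	/* board-specific PORTSC value (e.g. ULPI mode bits) */
	.flags	= 0,	/* board-specific flags for mxc_set_usbcontrol() */
};

static struct resource example_usbh_resources[] = {
	{
		.start	= 0x10024000,		/* placeholder register base */
		.end	= 0x10024000 + 0x3ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 55,			/* placeholder IRQ number */
		.end	= 55,
		.flags	= IORESOURCE_IRQ,
	},
};

static u64 example_usbh_dmamask = DMA_BIT_MASK(32);

static struct platform_device example_usbh_device = {
	.name		= "mxc-ehci",	/* must match the driver name above */
	.id		= 1,
	.dev		= {
		.platform_data		= &example_usbh_pdata,
		.dma_mask		= &example_usbh_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(example_usbh_resources),
	.resource	= example_usbh_resources,
};

/* A board init function would then call
 * platform_device_register(&example_usbh_device);
 */
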
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
new file mode 100644
index 000000000000..40a858335035
--- /dev/null
+++ b/drivers/usb/host/ehci-omap.c
@@ -0,0 +1,794 @@
1/*
2 * ehci-omap.c - driver for USBHOST on OMAP 34xx processor
3 *
4 * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller
5 * Tested on OMAP3430 ES2.0 SDP
6 *
7 * Copyright (C) 2007-2008 Texas Instruments, Inc.
8 * Author: Vikram Pandita <vikram.pandita@ti.com>
9 *
10 * Copyright (C) 2009 Nokia Corporation
11 * Contact: Felipe Balbi <felipe.balbi@nokia.com>
12 *
13 * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 *
29 * TODO (last updated Feb 12, 2010):
30 * - add kernel-doc
31 * - enable AUTOIDLE
32 * - add suspend/resume
33 * - move workarounds to board-files
34 */
35
36#include <linux/platform_device.h>
37#include <linux/clk.h>
38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h>
40#include <linux/slab.h>
41#include <plat/usb.h>
42
43/*
44 * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
45 * Use ehci_omap_readl()/ehci_omap_writel() functions
46 */
47
48/* TLL Register Set */
49#define OMAP_USBTLL_REVISION (0x00)
50#define OMAP_USBTLL_SYSCONFIG (0x10)
51#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
52#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
53#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
54#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
55#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
56
57#define OMAP_USBTLL_SYSSTATUS (0x14)
58#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
59
60#define OMAP_USBTLL_IRQSTATUS (0x18)
61#define OMAP_USBTLL_IRQENABLE (0x1C)
62
63#define OMAP_TLL_SHARED_CONF (0x30)
64#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
65#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
66#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
67#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
68#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
69
70#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
71#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
72#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
73#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
74#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
75#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
76
77#define OMAP_TLL_ULPI_FUNCTION_CTRL(num) (0x804 + 0x100 * num)
78#define OMAP_TLL_ULPI_INTERFACE_CTRL(num) (0x807 + 0x100 * num)
79#define OMAP_TLL_ULPI_OTG_CTRL(num) (0x80A + 0x100 * num)
80#define OMAP_TLL_ULPI_INT_EN_RISE(num) (0x80D + 0x100 * num)
81#define OMAP_TLL_ULPI_INT_EN_FALL(num) (0x810 + 0x100 * num)
82#define OMAP_TLL_ULPI_INT_STATUS(num) (0x813 + 0x100 * num)
83#define OMAP_TLL_ULPI_INT_LATCH(num) (0x814 + 0x100 * num)
84#define OMAP_TLL_ULPI_DEBUG(num) (0x815 + 0x100 * num)
85#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num)
86
87#define OMAP_TLL_CHANNEL_COUNT 3
88#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 1)
89#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 2)
90#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 4)
91
92/* UHH Register Set */
93#define OMAP_UHH_REVISION (0x00)
94#define OMAP_UHH_SYSCONFIG (0x10)
95#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
96#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
97#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
98#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
99#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
100#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
101
102#define OMAP_UHH_SYSSTATUS (0x14)
103#define OMAP_UHH_HOSTCONFIG (0x40)
104#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
105#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
106#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
107#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
108#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
109#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
110#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
111#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
112#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
113#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
114#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
115
116#define OMAP_UHH_DEBUG_CSR (0x44)
117
118/* EHCI Register Set */
119#define EHCI_INSNREG05_ULPI (0xA4)
120#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
121#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
122#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22
123#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16
124#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
125#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
126
127/*-------------------------------------------------------------------------*/
128
129static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val)
130{
131 __raw_writel(val, base + reg);
132}
133
134static inline u32 ehci_omap_readl(void __iomem *base, u32 reg)
135{
136 return __raw_readl(base + reg);
137}
138
139static inline void ehci_omap_writeb(void __iomem *base, u8 reg, u8 val)
140{
141 __raw_writeb(val, base + reg);
142}
143
144static inline u8 ehci_omap_readb(void __iomem *base, u8 reg)
145{
146 return __raw_readb(base + reg);
147}
148
149/*-------------------------------------------------------------------------*/
150
151struct ehci_hcd_omap {
152 struct ehci_hcd *ehci;
153 struct device *dev;
154
155 struct clk *usbhost_ick;
156 struct clk *usbhost2_120m_fck;
157 struct clk *usbhost1_48m_fck;
158 struct clk *usbtll_fck;
159 struct clk *usbtll_ick;
160
 161	/* FIXME: the following two workarounds are
 162	 * board-specific, not silicon-specific, so they
 163	 * should be moved to the board files instead.
 164	 *
 165	 * Maybe someone from TI will know better which
 166	 * boards are affected and need the workarounds
 167	 * to be applied
 168	 */
169
170 /* gpio for resetting phy */
171 int reset_gpio_port[OMAP3_HS_USB_PORTS];
172
173 /* phy reset workaround */
174 int phy_reset;
175
176 /* desired phy_mode: TLL, PHY */
177 enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS];
178
179 void __iomem *uhh_base;
180 void __iomem *tll_base;
181 void __iomem *ehci_base;
182
183 /* Regulators for USB PHYs.
 184	 * Each PHY can have a separate regulator.
185 */
186 struct regulator *regulator[OMAP3_HS_USB_PORTS];
187};
188
189/*-------------------------------------------------------------------------*/
190
191static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
192{
193 unsigned reg;
194 int i;
195
196 /* Program the 3 TLL channels upfront */
197 for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
198 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
199
200 /* Disable AutoIdle, BitStuffing and use SDR Mode */
201 reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
202 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
203 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
204 ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
205 }
206
207 /* Program Common TLL register */
208 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
209 reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
210 | OMAP_TLL_SHARED_CONF_USB_DIVRATION
211 | OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN);
212 reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
213
214 ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
215
216 /* Enable channels now */
217 for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
218 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
219
220 /* Enable only the reg that is needed */
221 if (!(tll_channel_mask & 1<<i))
222 continue;
223
224 reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
225 ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
226
227 ehci_omap_writeb(omap->tll_base,
228 OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe);
229 dev_dbg(omap->dev, "ULPI_SCRATCH_REG[ch=%d]= 0x%02x\n",
230 i+1, ehci_omap_readb(omap->tll_base,
231 OMAP_TLL_ULPI_SCRATCH_REGISTER(i)));
232 }
233}
234
235/*-------------------------------------------------------------------------*/
236
237/* omap_start_ehc
238 * - Start the TI USBHOST controller
239 */
240static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
241{
242 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
243 u8 tll_ch_mask = 0;
244 unsigned reg = 0;
245 int ret = 0;
246
247 dev_dbg(omap->dev, "starting TI EHCI USB Controller\n");
248
249 /* Enable Clocks for USBHOST */
250 omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
251 if (IS_ERR(omap->usbhost_ick)) {
252 ret = PTR_ERR(omap->usbhost_ick);
253 goto err_host_ick;
254 }
255 clk_enable(omap->usbhost_ick);
256
257 omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
258 if (IS_ERR(omap->usbhost2_120m_fck)) {
259 ret = PTR_ERR(omap->usbhost2_120m_fck);
260 goto err_host_120m_fck;
261 }
262 clk_enable(omap->usbhost2_120m_fck);
263
264 omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
265 if (IS_ERR(omap->usbhost1_48m_fck)) {
266 ret = PTR_ERR(omap->usbhost1_48m_fck);
267 goto err_host_48m_fck;
268 }
269 clk_enable(omap->usbhost1_48m_fck);
270
271 if (omap->phy_reset) {
272 /* Refer: ISSUE1 */
273 if (gpio_is_valid(omap->reset_gpio_port[0])) {
274 gpio_request(omap->reset_gpio_port[0],
275 "USB1 PHY reset");
276 gpio_direction_output(omap->reset_gpio_port[0], 0);
277 }
278
279 if (gpio_is_valid(omap->reset_gpio_port[1])) {
280 gpio_request(omap->reset_gpio_port[1],
281 "USB2 PHY reset");
282 gpio_direction_output(omap->reset_gpio_port[1], 0);
283 }
284
285 /* Hold the PHY in RESET for enough time till DIR is high */
286 udelay(10);
287 }
288
289 /* Configure TLL for 60Mhz clk for ULPI */
290 omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
291 if (IS_ERR(omap->usbtll_fck)) {
292 ret = PTR_ERR(omap->usbtll_fck);
293 goto err_tll_fck;
294 }
295 clk_enable(omap->usbtll_fck);
296
297 omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
298 if (IS_ERR(omap->usbtll_ick)) {
299 ret = PTR_ERR(omap->usbtll_ick);
300 goto err_tll_ick;
301 }
302 clk_enable(omap->usbtll_ick);
303
304 /* perform TLL soft reset, and wait until reset is complete */
305 ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
306 OMAP_USBTLL_SYSCONFIG_SOFTRESET);
307
308 /* Wait for TLL reset to complete */
309 while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
310 & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
311 cpu_relax();
312
313 if (time_after(jiffies, timeout)) {
314 dev_dbg(omap->dev, "operation timed out\n");
315 ret = -EINVAL;
316 goto err_sys_status;
317 }
318 }
319
320 dev_dbg(omap->dev, "TLL RESET DONE\n");
321
322 /* (1<<3) = no idle mode only for initial debugging */
323 ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
324 OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
325 OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
326 OMAP_USBTLL_SYSCONFIG_CACTIVITY);
327
328
329 /* Put UHH in NoIdle/NoStandby mode */
330 reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
331 reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
332 | OMAP_UHH_SYSCONFIG_SIDLEMODE
333 | OMAP_UHH_SYSCONFIG_CACTIVITY
334 | OMAP_UHH_SYSCONFIG_MIDLEMODE);
335 reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
336
337 ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
338
339 reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
340
341 /* setup ULPI bypass and burst configurations */
342 reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
343 | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
344 | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
345 reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
346
347 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
348 reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
349 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
350 reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
351 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
352 reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
353
354 /* Bypass the TLL module for PHY mode operation */
355 if (omap_rev() <= OMAP3430_REV_ES2_1) {
356 dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1 \n");
357 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
358 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
359 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
360 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
361 else
362 reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
363 } else {
364 dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
365 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
366 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
367 else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
368 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
369
370 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
371 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
372 else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
373 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
374
375 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)
376 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
377 else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
378 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
379
380 }
381 ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
382 dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
383
384
385 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
386 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
387 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
388
389 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
390 tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK;
391 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
392 tll_ch_mask |= OMAP_TLL_CHANNEL_2_EN_MASK;
393 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
394 tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;
395
396 /* Enable UTMI mode for required TLL channels */
397 omap_usb_utmi_init(omap, tll_ch_mask);
398 }
399
400 if (omap->phy_reset) {
401 /* Refer ISSUE1:
402 * Hold the PHY in RESET for enough time till
403 * PHY is settled and ready
404 */
405 udelay(10);
406
407 if (gpio_is_valid(omap->reset_gpio_port[0]))
408 gpio_set_value(omap->reset_gpio_port[0], 1);
409
410 if (gpio_is_valid(omap->reset_gpio_port[1]))
411 gpio_set_value(omap->reset_gpio_port[1], 1);
412 }
413
414 return 0;
415
416err_sys_status:
417 clk_disable(omap->usbtll_ick);
418 clk_put(omap->usbtll_ick);
419
420err_tll_ick:
421 clk_disable(omap->usbtll_fck);
422 clk_put(omap->usbtll_fck);
423
424err_tll_fck:
425 clk_disable(omap->usbhost1_48m_fck);
426 clk_put(omap->usbhost1_48m_fck);
427
428 if (omap->phy_reset) {
429 if (gpio_is_valid(omap->reset_gpio_port[0]))
430 gpio_free(omap->reset_gpio_port[0]);
431
432 if (gpio_is_valid(omap->reset_gpio_port[1]))
433 gpio_free(omap->reset_gpio_port[1]);
434 }
435
436err_host_48m_fck:
437 clk_disable(omap->usbhost2_120m_fck);
438 clk_put(omap->usbhost2_120m_fck);
439
440err_host_120m_fck:
441 clk_disable(omap->usbhost_ick);
442 clk_put(omap->usbhost_ick);
443
444err_host_ick:
445 return ret;
446}
447
448static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
449{
450 unsigned long timeout = jiffies + msecs_to_jiffies(100);
451
452 dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
453
454 /* Reset OMAP modules for insmod/rmmod to work */
455 ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
456 OMAP_UHH_SYSCONFIG_SOFTRESET);
457 while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
458 & (1 << 0))) {
459 cpu_relax();
460
461 if (time_after(jiffies, timeout))
462 dev_dbg(omap->dev, "operation timed out\n");
463 }
464
465 while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
466 & (1 << 1))) {
467 cpu_relax();
468
469 if (time_after(jiffies, timeout))
470 dev_dbg(omap->dev, "operation timed out\n");
471 }
472
473 while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
474 & (1 << 2))) {
475 cpu_relax();
476
477 if (time_after(jiffies, timeout))
478 dev_dbg(omap->dev, "operation timed out\n");
479 }
480
481 ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
482
483 while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
484 & (1 << 0))) {
485 cpu_relax();
486
487 if (time_after(jiffies, timeout))
488 dev_dbg(omap->dev, "operation timed out\n");
489 }
490
491 if (omap->usbtll_fck != NULL) {
492 clk_disable(omap->usbtll_fck);
493 clk_put(omap->usbtll_fck);
494 omap->usbtll_fck = NULL;
495 }
496
497 if (omap->usbhost_ick != NULL) {
498 clk_disable(omap->usbhost_ick);
499 clk_put(omap->usbhost_ick);
500 omap->usbhost_ick = NULL;
501 }
502
503 if (omap->usbhost1_48m_fck != NULL) {
504 clk_disable(omap->usbhost1_48m_fck);
505 clk_put(omap->usbhost1_48m_fck);
506 omap->usbhost1_48m_fck = NULL;
507 }
508
509 if (omap->usbhost2_120m_fck != NULL) {
510 clk_disable(omap->usbhost2_120m_fck);
511 clk_put(omap->usbhost2_120m_fck);
512 omap->usbhost2_120m_fck = NULL;
513 }
514
515 if (omap->usbtll_ick != NULL) {
516 clk_disable(omap->usbtll_ick);
517 clk_put(omap->usbtll_ick);
518 omap->usbtll_ick = NULL;
519 }
520
521 if (omap->phy_reset) {
522 if (gpio_is_valid(omap->reset_gpio_port[0]))
523 gpio_free(omap->reset_gpio_port[0]);
524
525 if (gpio_is_valid(omap->reset_gpio_port[1]))
526 gpio_free(omap->reset_gpio_port[1]);
527 }
528
529 dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
530}
531
532/*-------------------------------------------------------------------------*/
533
534static const struct hc_driver ehci_omap_hc_driver;
535
536/* configure so an HC device and id are always provided */
537/* always called with process context; sleeping is OK */
538
539/**
540 * ehci_hcd_omap_probe - initialize TI-based HCDs
541 *
542 * Allocates basic resources for this USB host controller, and
543 * then invokes the start() method for the HCD associated with it
544 * through the hotplug entry's driver_data.
545 */
546static int ehci_hcd_omap_probe(struct platform_device *pdev)
547{
548 struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
549 struct ehci_hcd_omap *omap;
550 struct resource *res;
551 struct usb_hcd *hcd;
552
553 int irq = platform_get_irq(pdev, 0);
554 int ret = -ENODEV;
555 int i;
556 char supply[7];
557
558 if (!pdata) {
559 dev_dbg(&pdev->dev, "missing platform_data\n");
560 goto err_pdata;
561 }
562
563 if (usb_disabled())
564 goto err_disabled;
565
566 omap = kzalloc(sizeof(*omap), GFP_KERNEL);
567 if (!omap) {
568 ret = -ENOMEM;
569 goto err_disabled;
570 }
571
572 hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
573 dev_name(&pdev->dev));
574 if (!hcd) {
575 dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
576 ret = -ENOMEM;
577 goto err_create_hcd;
578 }
579
580 platform_set_drvdata(pdev, omap);
581 omap->dev = &pdev->dev;
582 omap->phy_reset = pdata->phy_reset;
583 omap->reset_gpio_port[0] = pdata->reset_gpio_port[0];
584 omap->reset_gpio_port[1] = pdata->reset_gpio_port[1];
585 omap->reset_gpio_port[2] = pdata->reset_gpio_port[2];
586 omap->port_mode[0] = pdata->port_mode[0];
587 omap->port_mode[1] = pdata->port_mode[1];
588 omap->port_mode[2] = pdata->port_mode[2];
589 omap->ehci = hcd_to_ehci(hcd);
590 omap->ehci->sbrn = 0x20;
591
592 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
593
594 hcd->rsrc_start = res->start;
595 hcd->rsrc_len = resource_size(res);
596
597 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
598 if (!hcd->regs) {
599 dev_err(&pdev->dev, "EHCI ioremap failed\n");
600 ret = -ENOMEM;
601 goto err_ioremap;
602 }
603
604 /* we know this is the memory we want, no need to ioremap again */
605 omap->ehci->caps = hcd->regs;
606 omap->ehci_base = hcd->regs;
607
608 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
609 omap->uhh_base = ioremap(res->start, resource_size(res));
610 if (!omap->uhh_base) {
611 dev_err(&pdev->dev, "UHH ioremap failed\n");
612 ret = -ENOMEM;
613 goto err_uhh_ioremap;
614 }
615
616 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
617 omap->tll_base = ioremap(res->start, resource_size(res));
618 if (!omap->tll_base) {
619 dev_err(&pdev->dev, "TLL ioremap failed\n");
620 ret = -ENOMEM;
621 goto err_tll_ioremap;
622 }
623
624 /* get ehci regulator and enable */
625 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
626 if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) {
627 omap->regulator[i] = NULL;
628 continue;
629 }
630 snprintf(supply, sizeof(supply), "hsusb%d", i);
631 omap->regulator[i] = regulator_get(omap->dev, supply);
632 if (IS_ERR(omap->regulator[i])) {
633 omap->regulator[i] = NULL;
634 dev_dbg(&pdev->dev,
635 "failed to get ehci port%d regulator\n", i);
636 } else {
637 regulator_enable(omap->regulator[i]);
638 }
639 }
640
641 ret = omap_start_ehc(omap, hcd);
642 if (ret) {
643 dev_dbg(&pdev->dev, "failed to start ehci\n");
644 goto err_start;
645 }
646
647 omap->ehci->regs = hcd->regs
648 + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
649
650 dbg_hcs_params(omap->ehci, "reset");
651 dbg_hcc_params(omap->ehci, "reset");
652
653 /* cache this readonly data; minimize chip reads */
654 omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
655
656 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
657 if (ret) {
658 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
659 goto err_add_hcd;
660 }
661
662 return 0;
663
664err_add_hcd:
665 omap_stop_ehc(omap, hcd);
666
667err_start:
668 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
669 if (omap->regulator[i]) {
670 regulator_disable(omap->regulator[i]);
671 regulator_put(omap->regulator[i]);
672 }
673 }
674 iounmap(omap->tll_base);
675
676err_tll_ioremap:
677 iounmap(omap->uhh_base);
678
679err_uhh_ioremap:
680 iounmap(hcd->regs);
681
682err_ioremap:
683 usb_put_hcd(hcd);
684
685err_create_hcd:
686 kfree(omap);
687err_disabled:
688err_pdata:
689 return ret;
690}
691
692/* may be called without controller electrically present */
693/* may be called with controller, bus, and devices active */
694
695/**
696 * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
697 * @pdev: USB Host Controller being removed
698 *
699 * Reverses the effect of usb_ehci_hcd_omap_probe(), first invoking
700 * the HCD's stop() method. It is always called from a thread
701 * context, normally "rmmod", "apmd", or something similar.
702 */
703static int ehci_hcd_omap_remove(struct platform_device *pdev)
704{
705 struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
706 struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
707 int i;
708
709 usb_remove_hcd(hcd);
710 omap_stop_ehc(omap, hcd);
711 iounmap(hcd->regs);
712 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
713 if (omap->regulator[i]) {
714 regulator_disable(omap->regulator[i]);
715 regulator_put(omap->regulator[i]);
716 }
717 }
718 iounmap(omap->tll_base);
719 iounmap(omap->uhh_base);
720 usb_put_hcd(hcd);
721 kfree(omap);
722
723 return 0;
724}
725
726static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
727{
728 struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
729 struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
730
731 if (hcd->driver->shutdown)
732 hcd->driver->shutdown(hcd);
733}
734
735static struct platform_driver ehci_hcd_omap_driver = {
736 .probe = ehci_hcd_omap_probe,
737 .remove = ehci_hcd_omap_remove,
738 .shutdown = ehci_hcd_omap_shutdown,
739 /*.suspend = ehci_hcd_omap_suspend, */
740 /*.resume = ehci_hcd_omap_resume, */
741 .driver = {
742 .name = "ehci-omap",
743 }
744};
745
746/*-------------------------------------------------------------------------*/
747
748static const struct hc_driver ehci_omap_hc_driver = {
749 .description = hcd_name,
750 .product_desc = "OMAP-EHCI Host Controller",
751 .hcd_priv_size = sizeof(struct ehci_hcd),
752
753 /*
754 * generic hardware linkage
755 */
756 .irq = ehci_irq,
757 .flags = HCD_MEMORY | HCD_USB2,
758
759 /*
760 * basic lifecycle operations
761 */
762 .reset = ehci_init,
763 .start = ehci_run,
764 .stop = ehci_stop,
765 .shutdown = ehci_shutdown,
766
767 /*
768 * managing i/o requests and associated device resources
769 */
770 .urb_enqueue = ehci_urb_enqueue,
771 .urb_dequeue = ehci_urb_dequeue,
772 .endpoint_disable = ehci_endpoint_disable,
773 .endpoint_reset = ehci_endpoint_reset,
774
775 /*
776 * scheduling support
777 */
778 .get_frame_number = ehci_get_frame,
779
780 /*
781 * root hub support
782 */
783 .hub_status_data = ehci_hub_status_data,
784 .hub_control = ehci_hub_control,
785 .bus_suspend = ehci_bus_suspend,
786 .bus_resume = ehci_bus_resume,
787
788 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
789};
790
791MODULE_ALIAS("platform:omap-ehci");
792MODULE_AUTHOR("Texas Instruments, Inc.");
793MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
794
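
Editor's note: the probe above acquires its resources in a fixed order (register windows, TLL window, per-port regulators, controller start, HCD registration) and the error labels release them in reverse. A minimal sketch of that goto-unwind idiom, using hypothetical step_a()/step_b()/undo_a() stand-ins rather than anything from ehci-omap.c:

/* Minimal sketch (not ehci-omap.c code) of the goto-unwind idiom used by
 * the probe above. step_a(), step_b() and undo_a() are hypothetical. */
static int step_a(void) { return 0; }	/* e.g. ioremap a register window */
static int step_b(void) { return -1; }	/* e.g. enable a clock or regulator */
static void undo_a(void) { }		/* e.g. iounmap */

static int example_probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_a;		/* nothing acquired yet */

	ret = step_b();
	if (ret)
		goto err_b;		/* step_a() succeeded, so undo it */

	return 0;			/* success: keep everything acquired */

err_b:
	undo_a();
err_a:
	return ret;
}
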
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1d283e1b2b8d..0f87dc72820a 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -222,14 +222,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
222 goto err1; 222 goto err1;
223 } 223 }
224 224
225 if (!request_mem_region(res->start, res->end - res->start + 1, 225 if (!request_mem_region(res->start, resource_size(res),
226 ehci_orion_hc_driver.description)) { 226 ehci_orion_hc_driver.description)) {
227 dev_dbg(&pdev->dev, "controller already in use\n"); 227 dev_dbg(&pdev->dev, "controller already in use\n");
228 err = -EBUSY; 228 err = -EBUSY;
229 goto err1; 229 goto err1;
230 } 230 }
231 231
232 regs = ioremap(res->start, res->end - res->start + 1); 232 regs = ioremap(res->start, resource_size(res));
233 if (regs == NULL) { 233 if (regs == NULL) {
234 dev_dbg(&pdev->dev, "error mapping memory\n"); 234 dev_dbg(&pdev->dev, "error mapping memory\n");
235 err = -EFAULT; 235 err = -EFAULT;
@@ -244,7 +244,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
244 } 244 }
245 245
246 hcd->rsrc_start = res->start; 246 hcd->rsrc_start = res->start;
247 hcd->rsrc_len = res->end - res->start + 1; 247 hcd->rsrc_len = resource_size(res);
248 hcd->regs = regs; 248 hcd->regs = regs;
249 249
250 ehci = hcd_to_ehci(hcd); 250 ehci = hcd_to_ehci(hcd);
@@ -287,7 +287,7 @@ err4:
287err3: 287err3:
288 iounmap(regs); 288 iounmap(regs);
289err2: 289err2:
290 release_mem_region(res->start, res->end - res->start + 1); 290 release_mem_region(res->start, resource_size(res));
291err1: 291err1:
292 dev_err(&pdev->dev, "init %s fail, %d\n", 292 dev_err(&pdev->dev, "init %s fail, %d\n",
293 dev_name(&pdev->dev), err); 293 dev_name(&pdev->dev), err);
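
Editor's note: the Orion hunks replace open-coded res->end - res->start + 1 with resource_size(), which linux/ioport.h defines as exactly that expression. A short, hypothetical claim-and-map helper (not code from ehci-orion.c) showing the idiom:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

/* Illustrative only: a generic MMIO claim/map sequence using resource_size(). */
static void __iomem *map_controller(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/* resource_size(res) == res->end - res->start + 1 */
	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return NULL;

	regs = ioremap(res->start, resource_size(res));
	if (!regs)
		release_mem_region(res->start, resource_size(res));

	return regs;
}
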
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 36f96da129f5..8df33b8a634c 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -134,21 +134,21 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
134 hcd->rsrc_len = res.end - res.start + 1; 134 hcd->rsrc_len = res.end - res.start + 1;
135 135
136 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 136 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
137 printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); 137 printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
138 rv = -EBUSY; 138 rv = -EBUSY;
139 goto err_rmr; 139 goto err_rmr;
140 } 140 }
141 141
142 irq = irq_of_parse_and_map(dn, 0); 142 irq = irq_of_parse_and_map(dn, 0);
143 if (irq == NO_IRQ) { 143 if (irq == NO_IRQ) {
144 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); 144 printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
145 rv = -EBUSY; 145 rv = -EBUSY;
146 goto err_irq; 146 goto err_irq;
147 } 147 }
148 148
149 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 149 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
150 if (!hcd->regs) { 150 if (!hcd->regs) {
151 printk(KERN_ERR __FILE__ ": ioremap failed\n"); 151 printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
152 rv = -ENOMEM; 152 rv = -ENOMEM;
153 goto err_ioremap; 153 goto err_ioremap;
154 } 154 }
@@ -161,9 +161,9 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
161 ehci->ohci_hcctrl_reg = ioremap(res.start + 161 ehci->ohci_hcctrl_reg = ioremap(res.start +
162 OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN); 162 OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
163 else 163 else
164 pr_debug(__FILE__ ": no ohci offset in fdt\n"); 164 pr_debug("%s: no ohci offset in fdt\n", __FILE__);
165 if (!ehci->ohci_hcctrl_reg) { 165 if (!ehci->ohci_hcctrl_reg) {
166 pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n"); 166 pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
167 } else { 167 } else {
168 ehci->has_amcc_usb23 = 1; 168 ehci->has_amcc_usb23 = 1;
169 } 169 }
@@ -241,7 +241,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
241 else 241 else
242 release_mem_region(res.start, 0x4); 242 release_mem_region(res.start, 0x4);
243 else 243 else
244 pr_debug(__FILE__ ": no ohci offset in fdt\n"); 244 pr_debug("%s: no ohci offset in fdt\n", __FILE__);
245 of_node_put(np); 245 of_node_put(np);
246 } 246 }
247 247
@@ -264,7 +264,7 @@ static int ehci_hcd_ppc_of_shutdown(struct of_device *op)
264} 264}
265 265
266 266
267static struct of_device_id ehci_hcd_ppc_of_match[] = { 267static const struct of_device_id ehci_hcd_ppc_of_match[] = {
268 { 268 {
269 .compatible = "usb-ehci", 269 .compatible = "usb-ehci",
270 }, 270 },
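
Editor's note: the ehci-ppc-of.c hunks stop pasting __FILE__ into the printk()/pr_debug() format string and instead pass it as a "%s" argument; the output is unchanged. A two-line illustration (the message text is just an example):

#include <linux/kernel.h>	/* printk(), KERN_ERR */

static void report_failure(void)
{
	/* old style: __FILE__ concatenated into the format string */
	printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
	/* new style used above: __FILE__ passed as a "%s" argument */
	printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
}
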
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 139a2cc3f641..89521775c567 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -616,9 +616,11 @@ qh_urb_transaction (
616) { 616) {
617 struct ehci_qtd *qtd, *qtd_prev; 617 struct ehci_qtd *qtd, *qtd_prev;
618 dma_addr_t buf; 618 dma_addr_t buf;
619 int len, maxpacket; 619 int len, this_sg_len, maxpacket;
620 int is_input; 620 int is_input;
621 u32 token; 621 u32 token;
622 int i;
623 struct scatterlist *sg;
622 624
623 /* 625 /*
624 * URBs map to sequences of QTDs: one logical transaction 626 * URBs map to sequences of QTDs: one logical transaction
@@ -659,7 +661,20 @@ qh_urb_transaction (
659 /* 661 /*
660 * data transfer stage: buffer setup 662 * data transfer stage: buffer setup
661 */ 663 */
662 buf = urb->transfer_dma; 664 i = urb->num_sgs;
665 if (len > 0 && i > 0) {
666 sg = urb->sg->sg;
667 buf = sg_dma_address(sg);
668
669 /* urb->transfer_buffer_length may be smaller than the
670 * size of the scatterlist (or vice versa)
671 */
672 this_sg_len = min_t(int, sg_dma_len(sg), len);
673 } else {
674 sg = NULL;
675 buf = urb->transfer_dma;
676 this_sg_len = len;
677 }
663 678
664 if (is_input) 679 if (is_input)
665 token |= (1 /* "in" */ << 8); 680 token |= (1 /* "in" */ << 8);
@@ -675,7 +690,9 @@ qh_urb_transaction (
675 for (;;) { 690 for (;;) {
676 int this_qtd_len; 691 int this_qtd_len;
677 692
678 this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket); 693 this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
694 maxpacket);
695 this_sg_len -= this_qtd_len;
679 len -= this_qtd_len; 696 len -= this_qtd_len;
680 buf += this_qtd_len; 697 buf += this_qtd_len;
681 698
@@ -691,8 +708,13 @@ qh_urb_transaction (
691 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) 708 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
692 token ^= QTD_TOGGLE; 709 token ^= QTD_TOGGLE;
693 710
694 if (likely (len <= 0)) 711 if (likely(this_sg_len <= 0)) {
695 break; 712 if (--i <= 0 || len <= 0)
713 break;
714 sg = sg_next(sg);
715 buf = sg_dma_address(sg);
716 this_sg_len = min_t(int, sg_dma_len(sg), len);
717 }
696 718
697 qtd_prev = qtd; 719 qtd_prev = qtd;
698 qtd = ehci_qtd_alloc (ehci, flags); 720 qtd = ehci_qtd_alloc (ehci, flags);
@@ -827,9 +849,10 @@ qh_make (
827 * But interval 1 scheduling is simpler, and 849 * But interval 1 scheduling is simpler, and
828 * includes high bandwidth. 850 * includes high bandwidth.
829 */ 851 */
830 dbg ("intr period %d uframes, NYET!", 852 urb->interval = 1;
831 urb->interval); 853 } else if (qh->period > ehci->periodic_size) {
832 goto done; 854 qh->period = ehci->periodic_size;
855 urb->interval = qh->period << 3;
833 } 856 }
834 } else { 857 } else {
835 int think_time; 858 int think_time;
@@ -852,6 +875,10 @@ qh_make (
852 usb_calc_bus_time (urb->dev->speed, 875 usb_calc_bus_time (urb->dev->speed,
853 is_input, 0, max_packet (maxp))); 876 is_input, 0, max_packet (maxp)));
854 qh->period = urb->interval; 877 qh->period = urb->interval;
878 if (qh->period > ehci->periodic_size) {
879 qh->period = ehci->periodic_size;
880 urb->interval = qh->period;
881 }
855 } 882 }
856 } 883 }
857 884
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a5535b5e3fe2..805ec633a652 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,6 +510,8 @@ static int disable_periodic (struct ehci_hcd *ehci)
510 ehci_writel(ehci, cmd, &ehci->regs->command); 510 ehci_writel(ehci, cmd, &ehci->regs->command);
511 /* posted write ... */ 511 /* posted write ... */
512 512
513 free_cached_lists(ehci);
514
513 ehci->next_uframe = -1; 515 ehci->next_uframe = -1;
514 return 0; 516 return 0;
515} 517}
@@ -1121,8 +1123,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
1121 urb->interval); 1123 urb->interval);
1122 } 1124 }
1123 1125
1124 /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */ 1126 /* if dev->ep [epnum] is a QH, hw is set */
1125 } else if (unlikely (stream->hw_info1 != 0)) { 1127 } else if (unlikely (stream->hw != NULL)) {
1126 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", 1128 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
1127 urb->dev->devpath, epnum, 1129 urb->dev->devpath, epnum,
1128 usb_pipein(urb->pipe) ? "in" : "out"); 1130 usb_pipein(urb->pipe) ? "in" : "out");
@@ -1385,7 +1387,7 @@ sitd_slot_ok (
1385 * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! 1387 * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
1386 */ 1388 */
1387 1389
1388#define SCHEDULE_SLOP 10 /* frames */ 1390#define SCHEDULE_SLOP 80 /* microframes */
1389 1391
1390static int 1392static int
1391iso_stream_schedule ( 1393iso_stream_schedule (
@@ -1394,12 +1396,13 @@ iso_stream_schedule (
1394 struct ehci_iso_stream *stream 1396 struct ehci_iso_stream *stream
1395) 1397)
1396{ 1398{
1397 u32 now, start, max, period; 1399 u32 now, next, start, period;
1398 int status; 1400 int status;
1399 unsigned mod = ehci->periodic_size << 3; 1401 unsigned mod = ehci->periodic_size << 3;
1400 struct ehci_iso_sched *sched = urb->hcpriv; 1402 struct ehci_iso_sched *sched = urb->hcpriv;
1403 struct pci_dev *pdev;
1401 1404
1402 if (sched->span > (mod - 8 * SCHEDULE_SLOP)) { 1405 if (sched->span > (mod - SCHEDULE_SLOP)) {
1403 ehci_dbg (ehci, "iso request %p too long\n", urb); 1406 ehci_dbg (ehci, "iso request %p too long\n", urb);
1404 status = -EFBIG; 1407 status = -EFBIG;
1405 goto fail; 1408 goto fail;
@@ -1418,26 +1421,35 @@ iso_stream_schedule (
1418 1421
1419 now = ehci_readl(ehci, &ehci->regs->frame_index) % mod; 1422 now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
1420 1423
1421 /* when's the last uframe this urb could start? */
1422 max = now + mod;
1423
1424 /* Typical case: reuse current schedule, stream is still active. 1424 /* Typical case: reuse current schedule, stream is still active.
1425 * Hopefully there are no gaps from the host falling behind 1425 * Hopefully there are no gaps from the host falling behind
1426 * (irq delays etc), but if there are we'll take the next 1426 * (irq delays etc), but if there are we'll take the next
1427 * slot in the schedule, implicitly assuming URB_ISO_ASAP. 1427 * slot in the schedule, implicitly assuming URB_ISO_ASAP.
1428 */ 1428 */
1429 if (likely (!list_empty (&stream->td_list))) { 1429 if (likely (!list_empty (&stream->td_list))) {
1430 pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
1430 start = stream->next_uframe; 1431 start = stream->next_uframe;
1431 if (start < now) 1432
1432 start += mod; 1433 /* For high speed devices, allow scheduling within the
1434 * isochronous scheduling threshold. For full speed devices,
1435 * don't. (Work around for Intel ICH9 bug.)
1436 */
1437 if (!stream->highspeed &&
1438 pdev->vendor == PCI_VENDOR_ID_INTEL)
1439 next = now + ehci->i_thresh;
1440 else
1441 next = now;
1433 1442
1434 /* Fell behind (by up to twice the slop amount)? */ 1443 /* Fell behind (by up to twice the slop amount)? */
1435 if (start >= max - 2 * 8 * SCHEDULE_SLOP) 1444 if (((start - next) & (mod - 1)) >=
1445 mod - 2 * SCHEDULE_SLOP)
1436 start += period * DIV_ROUND_UP( 1446 start += period * DIV_ROUND_UP(
1437 max - start, period) - mod; 1447 (next - start) & (mod - 1),
1448 period);
1438 1449
1439 /* Tried to schedule too far into the future? */ 1450 /* Tried to schedule too far into the future? */
1440 if (unlikely((start + sched->span) >= max)) { 1451 if (unlikely(((start - now) & (mod - 1)) + sched->span
1452 >= mod - 2 * SCHEDULE_SLOP)) {
1441 status = -EFBIG; 1453 status = -EFBIG;
1442 goto fail; 1454 goto fail;
1443 } 1455 }
@@ -1451,7 +1463,7 @@ iso_stream_schedule (
1451 * can also help high bandwidth if the dma and irq loads don't 1463 * can also help high bandwidth if the dma and irq loads don't
1452 * jump until after the queue is primed. 1464 * jump until after the queue is primed.
1453 */ 1465 */
1454 start = SCHEDULE_SLOP * 8 + (now & ~0x07); 1466 start = SCHEDULE_SLOP + (now & ~0x07);
1455 start %= mod; 1467 start %= mod;
1456 stream->next_uframe = start; 1468 stream->next_uframe = start;
1457 1469
@@ -1482,7 +1494,7 @@ iso_stream_schedule (
1482 /* no room in the schedule */ 1494 /* no room in the schedule */
1483 ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n", 1495 ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
1484 list_empty (&stream->td_list) ? "" : "re", 1496 list_empty (&stream->td_list) ? "" : "re",
1485 urb, now, max); 1497 urb, now, now + mod);
1486 status = -ENOSPC; 1498 status = -ENOSPC;
1487 1499
1488fail: 1500fail:
@@ -1553,13 +1565,27 @@ itd_patch(
1553static inline void 1565static inline void
1554itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) 1566itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1555{ 1567{
1556 /* always prepend ITD/SITD ... only QH tree is order-sensitive */ 1568 union ehci_shadow *prev = &ehci->pshadow[frame];
1557 itd->itd_next = ehci->pshadow [frame]; 1569 __hc32 *hw_p = &ehci->periodic[frame];
1558 itd->hw_next = ehci->periodic [frame]; 1570 union ehci_shadow here = *prev;
1559 ehci->pshadow [frame].itd = itd; 1571 __hc32 type = 0;
1572
1573 /* skip any iso nodes which might belong to previous microframes */
1574 while (here.ptr) {
1575 type = Q_NEXT_TYPE(ehci, *hw_p);
1576 if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1577 break;
1578 prev = periodic_next_shadow(ehci, prev, type);
1579 hw_p = shadow_next_periodic(ehci, &here, type);
1580 here = *prev;
1581 }
1582
1583 itd->itd_next = here;
1584 itd->hw_next = *hw_p;
1585 prev->itd = itd;
1560 itd->frame = frame; 1586 itd->frame = frame;
1561 wmb (); 1587 wmb ();
1562 ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); 1588 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1563} 1589}
1564 1590
1565/* fit urb's itds into the selected schedule slot; activate as needed */ 1591/* fit urb's itds into the selected schedule slot; activate as needed */
@@ -2113,13 +2139,27 @@ sitd_complete (
2113 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); 2139 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
2114 } 2140 }
2115 iso_stream_put (ehci, stream); 2141 iso_stream_put (ehci, stream);
2116 /* OK to recycle this SITD now that its completion callback ran. */ 2142
2117done: 2143done:
2118 sitd->urb = NULL; 2144 sitd->urb = NULL;
2119 sitd->stream = NULL; 2145 if (ehci->clock_frame != sitd->frame) {
2120 list_move(&sitd->sitd_list, &stream->free_list); 2146 /* OK to recycle this SITD now. */
2121 iso_stream_put(ehci, stream); 2147 sitd->stream = NULL;
2122 2148 list_move(&sitd->sitd_list, &stream->free_list);
2149 iso_stream_put(ehci, stream);
2150 } else {
2151 /* HW might remember this SITD, so we can't recycle it yet.
2152 * Move it to a safe place until a new frame starts.
2153 */
2154 list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
2155 if (stream->refcount == 2) {
2156 /* If iso_stream_put() were called here, stream
2157 * would be freed. Instead, just prevent reuse.
2158 */
2159 stream->ep->hcpriv = NULL;
2160 stream->ep = NULL;
2161 }
2162 }
2123 return retval; 2163 return retval;
2124} 2164}
2125 2165
@@ -2185,9 +2225,10 @@ done:
2185 2225
2186/*-------------------------------------------------------------------------*/ 2226/*-------------------------------------------------------------------------*/
2187 2227
2188static void free_cached_itd_list(struct ehci_hcd *ehci) 2228static void free_cached_lists(struct ehci_hcd *ehci)
2189{ 2229{
2190 struct ehci_itd *itd, *n; 2230 struct ehci_itd *itd, *n;
2231 struct ehci_sitd *sitd, *sn;
2191 2232
2192 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { 2233 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
2193 struct ehci_iso_stream *stream = itd->stream; 2234 struct ehci_iso_stream *stream = itd->stream;
@@ -2195,6 +2236,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci)
2195 list_move(&itd->itd_list, &stream->free_list); 2236 list_move(&itd->itd_list, &stream->free_list);
2196 iso_stream_put(ehci, stream); 2237 iso_stream_put(ehci, stream);
2197 } 2238 }
2239
2240 list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
2241 struct ehci_iso_stream *stream = sitd->stream;
2242 sitd->stream = NULL;
2243 list_move(&sitd->sitd_list, &stream->free_list);
2244 iso_stream_put(ehci, stream);
2245 }
2198} 2246}
2199 2247
2200/*-------------------------------------------------------------------------*/ 2248/*-------------------------------------------------------------------------*/
@@ -2221,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci)
2221 clock_frame = -1; 2269 clock_frame = -1;
2222 } 2270 }
2223 if (ehci->clock_frame != clock_frame) { 2271 if (ehci->clock_frame != clock_frame) {
2224 free_cached_itd_list(ehci); 2272 free_cached_lists(ehci);
2225 ehci->clock_frame = clock_frame; 2273 ehci->clock_frame = clock_frame;
2226 } 2274 }
2227 clock %= mod; 2275 clock %= mod;
@@ -2312,9 +2360,13 @@ restart:
2312 * No need to check for activity unless the 2360 * No need to check for activity unless the
2313 * frame is current. 2361 * frame is current.
2314 */ 2362 */
2315 if (frame == clock_frame && live && 2363 if (((frame == clock_frame) ||
2316 (q.sitd->hw_results & 2364 (((frame + 1) % ehci->periodic_size)
2317 SITD_ACTIVE(ehci))) { 2365 == clock_frame))
2366 && live
2367 && (q.sitd->hw_results &
2368 SITD_ACTIVE(ehci))) {
2369
2318 incomplete = true; 2370 incomplete = true;
2319 q_p = &q.sitd->sitd_next; 2371 q_p = &q.sitd->sitd_next;
2320 hw_p = &q.sitd->hw_next; 2372 hw_p = &q.sitd->hw_next;
@@ -2384,7 +2436,7 @@ restart:
2384 clock = now; 2436 clock = now;
2385 clock_frame = clock >> 3; 2437 clock_frame = clock >> 3;
2386 if (ehci->clock_frame != clock_frame) { 2438 if (ehci->clock_frame != clock_frame) {
2387 free_cached_itd_list(ehci); 2439 free_cached_lists(ehci);
2388 ehci->clock_frame = clock_frame; 2440 ehci->clock_frame = clock_frame;
2389 } 2441 }
2390 } else { 2442 } else {
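
Editor's note: the rewritten iso_stream_schedule() compares schedule positions with expressions of the form (a - b) & (mod - 1), which gives the forward distance from b to a on a circular counter when mod is a power of two (here the periodic schedule size in microframes). A standalone illustration of that arithmetic, not driver code:

/* Forward distance from "from" to "to" on a circular counter of size mod;
 * mod must be a power of two. */
static unsigned int fwd_distance(unsigned int from, unsigned int to,
				 unsigned int mod)
{
	return (to - from) & (mod - 1);
}

/* Example: with mod = 8192 microframes, from = 8000 and to = 100, the counter
 * has wrapped, yet fwd_distance(8000, 100, 8192) is still 292, i.e. 292
 * microframes ahead rather than a huge bogus value. */
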
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
new file mode 100644
index 000000000000..f603bb2c0a8e
--- /dev/null
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -0,0 +1,300 @@
1/*
2 * EHCI HCD (Host Controller Driver) for USB.
3 *
4 * Bus Glue for Xilinx EHCI core on the of_platform bus
5 *
6 * Copyright (c) 2009 Xilinx, Inc.
7 *
8 * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
9 * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
10 * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
19 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software Foundation,
24 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 */
27
28#include <linux/signal.h>
29
30#include <linux/of.h>
31#include <linux/of_platform.h>
32
33/**
34 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
35 * @hcd: Pointer to the usb_hcd device to which the host controller is bound
36 *
37 * called during probe() after chip reset completes.
38 */
39static int ehci_xilinx_of_setup(struct usb_hcd *hcd)
40{
41 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
42 int retval;
43
44 retval = ehci_halt(ehci);
45 if (retval)
46 return retval;
47
48 retval = ehci_init(hcd);
49 if (retval)
50 return retval;
51
52 ehci->sbrn = 0x20;
53
54 return ehci_reset(ehci);
55}
56
57/**
58 * ehci_xilinx_port_handed_over - hand the port out if failed to enable it
59 * @hcd: Pointer to the usb_hcd device to which the host controller is bound
60 * @portnum: Port number to which the device is attached.
61 *
62 * This function is used as a place to tell the user that the Xilinx USB host
63 * controller does not support LS devices. And in an HS-only configuration, it
64 * does not support FS devices either. It is hoped that this can help a
65 * confused user.
66 *
67 * There are cases when the host controller fails to enable the port due to,
68 * for example, insufficient power that can be supplied to the device from
69 * the USB bus. In those cases, the messages printed here are not helpful.
70 */
71static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
72{
73 dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum);
74 if (hcd->has_tt) {
75 dev_warn(hcd->self.controller,
76 "Maybe you have connected a low speed device?\n");
77
78 dev_warn(hcd->self.controller,
79 "We do not support low speed devices\n");
80 } else {
81 dev_warn(hcd->self.controller,
82 "Maybe your device is not a high speed device?\n");
83 dev_warn(hcd->self.controller,
84 "The USB host controller does not support full speed "
85 "nor low speed devices\n");
86 dev_warn(hcd->self.controller,
87 "You can reconfigure the host controller to have "
88 "full speed support\n");
89 }
90
91 return 0;
92}
93
94
95static const struct hc_driver ehci_xilinx_of_hc_driver = {
96 .description = hcd_name,
97 .product_desc = "OF EHCI",
98 .hcd_priv_size = sizeof(struct ehci_hcd),
99
100 /*
101 * generic hardware linkage
102 */
103 .irq = ehci_irq,
104 .flags = HCD_MEMORY | HCD_USB2,
105
106 /*
107 * basic lifecycle operations
108 */
109 .reset = ehci_xilinx_of_setup,
110 .start = ehci_run,
111 .stop = ehci_stop,
112 .shutdown = ehci_shutdown,
113
114 /*
115 * managing i/o requests and associated device resources
116 */
117 .urb_enqueue = ehci_urb_enqueue,
118 .urb_dequeue = ehci_urb_dequeue,
119 .endpoint_disable = ehci_endpoint_disable,
120
121 /*
122 * scheduling support
123 */
124 .get_frame_number = ehci_get_frame,
125
126 /*
127 * root hub support
128 */
129 .hub_status_data = ehci_hub_status_data,
130 .hub_control = ehci_hub_control,
131#ifdef CONFIG_PM
132 .bus_suspend = ehci_bus_suspend,
133 .bus_resume = ehci_bus_resume,
134#endif
135 .relinquish_port = NULL,
136 .port_handed_over = ehci_xilinx_port_handed_over,
137
138 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
139};
140
141/**
142 * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
143 * @op: pointer to the of_device to which the host controller is bound
144 * @match: pointer to of_device_id structure, not used
145 *
146 * This function requests resources and sets up appropriate properties for the
147 * host controller. Because the Xilinx USB host controller can be configured
148 * as HS only or HS/FS only, it checks the configuration in the device tree
149 * entry, and sets an appropriate value for hcd->has_tt.
150 */
151static int __devinit
152ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
153{
154 struct device_node *dn = op->node;
155 struct usb_hcd *hcd;
156 struct ehci_hcd *ehci;
157 struct resource res;
158 int irq;
159 int rv;
160 int *value;
161
162 if (usb_disabled())
163 return -ENODEV;
164
165 dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n");
166
167 rv = of_address_to_resource(dn, 0, &res);
168 if (rv)
169 return rv;
170
171 hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev,
172 "XILINX-OF USB");
173 if (!hcd)
174 return -ENOMEM;
175
176 hcd->rsrc_start = res.start;
177 hcd->rsrc_len = res.end - res.start + 1;
178
179 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
180 printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
181 rv = -EBUSY;
182 goto err_rmr;
183 }
184
185 irq = irq_of_parse_and_map(dn, 0);
186 if (irq == NO_IRQ) {
187 printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
188 rv = -EBUSY;
189 goto err_irq;
190 }
191
192 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
193 if (!hcd->regs) {
194 printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
195 rv = -ENOMEM;
196 goto err_ioremap;
197 }
198
199 ehci = hcd_to_ehci(hcd);
200
201 /* This core always has big-endian register interface and uses
202 * big-endian memory descriptors.
203 */
204 ehci->big_endian_mmio = 1;
205 ehci->big_endian_desc = 1;
206
207 /* Check whether the FS support option is selected in the hardware.
208 */
209 value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL);
210 if (value && (*value == 1)) {
211 ehci_dbg(ehci, "USB host controller supports FS devices\n");
212 hcd->has_tt = 1;
213 } else {
214 ehci_dbg(ehci,
215 "USB host controller is HS only\n");
216 hcd->has_tt = 0;
217 }
218
219 /* Debug registers are at the first 0x100 region
220 */
221 ehci->caps = hcd->regs + 0x100;
222 ehci->regs = hcd->regs + 0x100 +
223 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
224
225 /* cache this readonly data; minimize chip reads */
226 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
227
228 rv = usb_add_hcd(hcd, irq, 0);
229 if (rv == 0)
230 return 0;
231
232 iounmap(hcd->regs);
233
234err_ioremap:
235err_irq:
236 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
237err_rmr:
238 usb_put_hcd(hcd);
239
240 return rv;
241}
242
243/**
244 * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
245 * @op: pointer to of_device structure that is to be removed
246 *
247 * Remove the hcd structure, and release resources that have been requested
248 * during probe.
249 */
250static int ehci_hcd_xilinx_of_remove(struct of_device *op)
251{
252 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
253 dev_set_drvdata(&op->dev, NULL);
254
255 dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
256
257 usb_remove_hcd(hcd);
258
259 iounmap(hcd->regs);
260 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
261
262 usb_put_hcd(hcd);
263
264 return 0;
265}
266
267/**
268 * ehci_hcd_xilinx_of_shutdown - shutdown the hcd
269 * @op: pointer to of_device structure that is to be removed
270 *
271 * Properly shut down the hcd by calling the driver's shutdown routine.
272 */
273static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
274{
275 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
276
277 if (hcd->driver->shutdown)
278 hcd->driver->shutdown(hcd);
279
280 return 0;
281}
282
283
284static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
285 {.compatible = "xlnx,xps-usb-host-1.00.a",},
286 {},
287};
288MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
289
290static struct of_platform_driver ehci_hcd_xilinx_of_driver = {
291 .name = "xilinx-of-ehci",
292 .match_table = ehci_hcd_xilinx_of_match,
293 .probe = ehci_hcd_xilinx_of_probe,
294 .remove = ehci_hcd_xilinx_of_remove,
295 .shutdown = ehci_hcd_xilinx_of_shutdown,
296 .driver = {
297 .name = "xilinx-of-ehci",
298 .owner = THIS_MODULE,
299 },
300};
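
Editor's note: the new Xilinx probe reads the xlnx,support-usb-fs cell by casting of_get_property()'s return value straight to int, which works on the big-endian PowerPC/MicroBlaze systems this core targets. A hedged alternative sketch (the helper name is made up) that makes the device-tree byte order explicit:

#include <linux/of.h>
#include <asm/byteorder.h>

/* Illustrative only: read the 32-bit "xlnx,support-usb-fs" property with an
 * explicit big-endian conversion instead of a raw int cast. */
static bool xilinx_ehci_has_fs(struct device_node *dn)
{
	const __be32 *prop = of_get_property(dn, "xlnx,support-usb-fs", NULL);

	return prop && be32_to_cpup(prop) == 1;
}
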
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 2d85e21ff282..556c0b48f3ab 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */
87 int next_uframe; /* scan periodic, start here */ 87 int next_uframe; /* scan periodic, start here */
88 unsigned periodic_sched; /* periodic activity count */ 88 unsigned periodic_sched; /* periodic activity count */
89 89
90 /* list of itds completed while clock_frame was still active */ 90 /* list of itds & sitds completed while clock_frame was still active */
91 struct list_head cached_itd_list; 91 struct list_head cached_itd_list;
92 struct list_head cached_sitd_list;
92 unsigned clock_frame; 93 unsigned clock_frame;
93 94
94 /* per root hub port */ 95 /* per root hub port */
@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
195 clear_bit (action, &ehci->actions); 196 clear_bit (action, &ehci->actions);
196} 197}
197 198
198static void free_cached_itd_list(struct ehci_hcd *ehci); 199static void free_cached_lists(struct ehci_hcd *ehci);
199 200
200/*-------------------------------------------------------------------------*/ 201/*-------------------------------------------------------------------------*/
201 202
@@ -394,9 +395,8 @@ struct ehci_iso_sched {
394 * acts like a qh would, if EHCI had them for ISO. 395 * acts like a qh would, if EHCI had them for ISO.
395 */ 396 */
396struct ehci_iso_stream { 397struct ehci_iso_stream {
397 /* first two fields match QH, but info1 == 0 */ 398 /* first field matches ehci_hq, but is NULL */
398 __hc32 hw_next; 399 struct ehci_qh_hw *hw;
399 __hc32 hw_info1;
400 400
401 u32 refcount; 401 u32 refcount;
402 u8 bEndpointAddress; 402 u8 bEndpointAddress;
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 0951818ef93b..15379c636143 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -27,6 +27,7 @@
27#include <linux/usb.h> 27#include <linux/usb.h>
28#include <linux/of_platform.h> 28#include <linux/of_platform.h>
29#include <linux/of_gpio.h> 29#include <linux/of_gpio.h>
30#include <linux/slab.h>
30#include <asm/qe.h> 31#include <asm/qe.h>
31#include <asm/fsl_gtm.h> 32#include <asm/fsl_gtm.h>
32#include "../core/hcd.h" 33#include "../core/hcd.h"
@@ -242,9 +243,10 @@ err:
242static void fhci_usb_free(void *lld) 243static void fhci_usb_free(void *lld)
243{ 244{
244 struct fhci_usb *usb = lld; 245 struct fhci_usb *usb = lld;
245 struct fhci_hcd *fhci = usb->fhci; 246 struct fhci_hcd *fhci;
246 247
247 if (usb) { 248 if (usb) {
249 fhci = usb->fhci;
248 fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF); 250 fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
249 fhci_ep0_free(usb); 251 fhci_ep0_free(usb);
250 kfree(usb->actual_frame); 252 kfree(usb->actual_frame);
@@ -432,7 +434,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
432 return -ENOMEM; 434 return -ENOMEM;
433 435
434 /* allocate the private part of the URB */ 436 /* allocate the private part of the URB */
435 urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags); 437 urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
436 if (!urb_priv->tds) { 438 if (!urb_priv->tds) {
437 kfree(urb_priv); 439 kfree(urb_priv);
438 return -ENOMEM; 440 return -ENOMEM;
@@ -804,7 +806,7 @@ static int __devexit of_fhci_remove(struct of_device *ofdev)
804 return fhci_remove(&ofdev->dev); 806 return fhci_remove(&ofdev->dev);
805} 807}
806 808
807static struct of_device_id of_fhci_match[] = { 809static const struct of_device_id of_fhci_match[] = {
808 { .compatible = "fsl,mpc8323-qe-usb", }, 810 { .compatible = "fsl,mpc8323-qe-usb", },
809 {}, 811 {},
810}; 812};
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
index 2c0736c99712..5591bfb499d1 100644
--- a/drivers/usb/host/fhci-mem.c
+++ b/drivers/usb/host/fhci-mem.c
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/slab.h>
21#include <linux/list.h> 22#include <linux/list.h>
22#include <linux/usb.h> 23#include <linux/usb.h>
23#include "../core/hcd.h" 24#include "../core/hcd.h"
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
index b0a1446ba292..f73c92359beb 100644
--- a/drivers/usb/host/fhci-q.c
+++ b/drivers/usb/host/fhci-q.c
@@ -19,6 +19,7 @@
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/slab.h>
22#include <linux/list.h> 23#include <linux/list.h>
23#include <linux/usb.h> 24#include <linux/usb.h>
24#include "../core/hcd.h" 25#include "../core/hcd.h"
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 62a226b61670..ff43747a614f 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -37,7 +37,7 @@ static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
37 pkt->info = 0; 37 pkt->info = 0;
38 pkt->priv_data = NULL; 38 pkt->priv_data = NULL;
39 39
40 cq_put(usb->ep0->empty_frame_Q, pkt); 40 cq_put(&usb->ep0->empty_frame_Q, pkt);
41} 41}
42 42
43/* confirm submitted packet */ 43/* confirm submitted packet */
@@ -57,7 +57,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
57 if ((td->data + td->actual_len) && trans_len) 57 if ((td->data + td->actual_len) && trans_len)
58 memcpy(td->data + td->actual_len, pkt->data, 58 memcpy(td->data + td->actual_len, pkt->data,
59 trans_len); 59 trans_len);
60 cq_put(usb->ep0->dummy_packets_Q, pkt->data); 60 cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
61 } 61 }
62 62
63 recycle_frame(usb, pkt); 63 recycle_frame(usb, pkt);
@@ -213,7 +213,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
213 } 213 }
214 214
215 /* update frame object fields before transmitting */ 215 /* update frame object fields before transmitting */
216 pkt = cq_get(usb->ep0->empty_frame_Q); 216 pkt = cq_get(&usb->ep0->empty_frame_Q);
217 if (!pkt) { 217 if (!pkt) {
218 fhci_dbg(usb->fhci, "there is no empty frame\n"); 218 fhci_dbg(usb->fhci, "there is no empty frame\n");
219 return -1; 219 return -1;
@@ -222,7 +222,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
222 222
223 pkt->info = 0; 223 pkt->info = 0;
224 if (data == NULL) { 224 if (data == NULL) {
225 data = cq_get(usb->ep0->dummy_packets_Q); 225 data = cq_get(&usb->ep0->dummy_packets_Q);
226 BUG_ON(!data); 226 BUG_ON(!data);
227 pkt->info = PKT_DUMMY_PACKET; 227 pkt->info = PKT_DUMMY_PACKET;
228 } 228 }
@@ -246,7 +246,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
246 list_del_init(&td->frame_lh); 246 list_del_init(&td->frame_lh);
247 td->status = USB_TD_OK; 247 td->status = USB_TD_OK;
248 if (pkt->info & PKT_DUMMY_PACKET) 248 if (pkt->info & PKT_DUMMY_PACKET)
249 cq_put(usb->ep0->dummy_packets_Q, pkt->data); 249 cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
250 recycle_frame(usb, pkt); 250 recycle_frame(usb, pkt);
251 usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD); 251 usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
252 fhci_err(usb->fhci, "host transaction failed\n"); 252 fhci_err(usb->fhci, "host transaction failed\n");
@@ -627,7 +627,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd)
627 627
628 628
629/* 629/*
630 * Process normal completions(error or sucess) and clean the schedule. 630 * Process normal completions(error or success) and clean the schedule.
631 * 631 *
632 * This is the main path for handing urbs back to drivers. The only other patth 632 * This is the main path for handing urbs back to drivers. The only other patth
633 * is process_del_list(),which unlinks URBs by scanning EDs,instead of scanning 633 * is process_del_list(),which unlinks URBs by scanning EDs,instead of scanning
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index b40332290319..57013479d7f7 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/slab.h>
21#include <linux/list.h> 22#include <linux/list.h>
22#include <linux/io.h> 23#include <linux/io.h>
23#include <linux/usb.h> 24#include <linux/usb.h>
@@ -105,34 +106,34 @@ void fhci_ep0_free(struct fhci_usb *usb)
105 if (ep->td_base) 106 if (ep->td_base)
106 cpm_muram_free(cpm_muram_offset(ep->td_base)); 107 cpm_muram_free(cpm_muram_offset(ep->td_base));
107 108
108 if (ep->conf_frame_Q) { 109 if (kfifo_initialized(&ep->conf_frame_Q)) {
109 size = cq_howmany(ep->conf_frame_Q); 110 size = cq_howmany(&ep->conf_frame_Q);
110 for (; size; size--) { 111 for (; size; size--) {
111 struct packet *pkt = cq_get(ep->conf_frame_Q); 112 struct packet *pkt = cq_get(&ep->conf_frame_Q);
112 113
113 kfree(pkt); 114 kfree(pkt);
114 } 115 }
115 cq_delete(ep->conf_frame_Q); 116 cq_delete(&ep->conf_frame_Q);
116 } 117 }
117 118
118 if (ep->empty_frame_Q) { 119 if (kfifo_initialized(&ep->empty_frame_Q)) {
119 size = cq_howmany(ep->empty_frame_Q); 120 size = cq_howmany(&ep->empty_frame_Q);
120 for (; size; size--) { 121 for (; size; size--) {
121 struct packet *pkt = cq_get(ep->empty_frame_Q); 122 struct packet *pkt = cq_get(&ep->empty_frame_Q);
122 123
123 kfree(pkt); 124 kfree(pkt);
124 } 125 }
125 cq_delete(ep->empty_frame_Q); 126 cq_delete(&ep->empty_frame_Q);
126 } 127 }
127 128
128 if (ep->dummy_packets_Q) { 129 if (kfifo_initialized(&ep->dummy_packets_Q)) {
129 size = cq_howmany(ep->dummy_packets_Q); 130 size = cq_howmany(&ep->dummy_packets_Q);
130 for (; size; size--) { 131 for (; size; size--) {
131 u8 *buff = cq_get(ep->dummy_packets_Q); 132 u8 *buff = cq_get(&ep->dummy_packets_Q);
132 133
133 kfree(buff); 134 kfree(buff);
134 } 135 }
135 cq_delete(ep->dummy_packets_Q); 136 cq_delete(&ep->dummy_packets_Q);
136 } 137 }
137 138
138 kfree(ep); 139 kfree(ep);
@@ -175,10 +176,9 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
175 ep->td_base = cpm_muram_addr(ep_offset); 176 ep->td_base = cpm_muram_addr(ep_offset);
176 177
177 /* zero all queue pointers */ 178 /* zero all queue pointers */
178 ep->conf_frame_Q = cq_new(ring_len + 2); 179 if (cq_new(&ep->conf_frame_Q, ring_len + 2) ||
179 ep->empty_frame_Q = cq_new(ring_len + 2); 180 cq_new(&ep->empty_frame_Q, ring_len + 2) ||
180 ep->dummy_packets_Q = cq_new(ring_len + 2); 181 cq_new(&ep->dummy_packets_Q, ring_len + 2)) {
181 if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) {
182 err_for = "frame_queues"; 182 err_for = "frame_queues";
183 goto err; 183 goto err;
184 } 184 }
@@ -199,8 +199,8 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
199 err_for = "buffer"; 199 err_for = "buffer";
200 goto err; 200 goto err;
201 } 201 }
202 cq_put(ep->empty_frame_Q, pkt); 202 cq_put(&ep->empty_frame_Q, pkt);
203 cq_put(ep->dummy_packets_Q, buff); 203 cq_put(&ep->dummy_packets_Q, buff);
204 } 204 }
205 205
206 /* we put the endpoint parameter RAM right behind the TD ring */ 206 /* we put the endpoint parameter RAM right behind the TD ring */
@@ -319,7 +319,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
319 if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W)) 319 if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
320 continue; 320 continue;
321 321
322 pkt = cq_get(ep->conf_frame_Q); 322 pkt = cq_get(&ep->conf_frame_Q);
323 if (!pkt) 323 if (!pkt)
324 fhci_err(usb->fhci, "no frame to confirm\n"); 324 fhci_err(usb->fhci, "no frame to confirm\n");
325 325
@@ -460,9 +460,9 @@ u32 fhci_host_transaction(struct fhci_usb *usb,
460 out_be16(&td->length, pkt->len); 460 out_be16(&td->length, pkt->len);
461 461
462 /* put the frame to the confirmation queue */ 462 /* put the frame to the confirmation queue */
463 cq_put(ep->conf_frame_Q, pkt); 463 cq_put(&ep->conf_frame_Q, pkt);
464 464
465 if (cq_howmany(ep->conf_frame_Q) == 1) 465 if (cq_howmany(&ep->conf_frame_Q) == 1)
466 out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO); 466 out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
467 467
468 return 0; 468 return 0;
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 7116284ed21a..72dae1c5ab38 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -423,9 +423,9 @@ struct endpoint {
423 struct usb_td __iomem *td_base; /* first TD in the ring */ 423 struct usb_td __iomem *td_base; /* first TD in the ring */
424 struct usb_td __iomem *conf_td; /* next TD for confirm after transac */ 424 struct usb_td __iomem *conf_td; /* next TD for confirm after transac */
425 struct usb_td __iomem *empty_td;/* next TD for new transaction req. */ 425 struct usb_td __iomem *empty_td;/* next TD for new transaction req. */
426 struct kfifo *empty_frame_Q; /* Empty frames list to use */ 426 struct kfifo empty_frame_Q; /* Empty frames list to use */
427 struct kfifo *conf_frame_Q; /* frames passed to TDs,waiting for tx */ 427 struct kfifo conf_frame_Q; /* frames passed to TDs,waiting for tx */
428 struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overun */ 428 struct kfifo dummy_packets_Q;/* dummy packets for the CRC overun */
429 429
430 bool already_pushed_dummy_bd; 430 bool already_pushed_dummy_bd;
431}; 431};
@@ -493,9 +493,9 @@ static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci)
493} 493}
494 494
495/* fifo of pointers */ 495/* fifo of pointers */
496static inline struct kfifo *cq_new(int size) 496static inline int cq_new(struct kfifo *fifo, int size)
497{ 497{
498 return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL); 498 return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL);
499} 499}
500 500
501static inline void cq_delete(struct kfifo *kfifo) 501static inline void cq_delete(struct kfifo *kfifo)
@@ -505,19 +505,19 @@ static inline void cq_delete(struct kfifo *kfifo)
505 505
506static inline unsigned int cq_howmany(struct kfifo *kfifo) 506static inline unsigned int cq_howmany(struct kfifo *kfifo)
507{ 507{
508 return __kfifo_len(kfifo) / sizeof(void *); 508 return kfifo_len(kfifo) / sizeof(void *);
509} 509}
510 510
511static inline int cq_put(struct kfifo *kfifo, void *p) 511static inline int cq_put(struct kfifo *kfifo, void *p)
512{ 512{
513 return __kfifo_put(kfifo, (void *)&p, sizeof(p)); 513 return kfifo_in(kfifo, (void *)&p, sizeof(p));
514} 514}
515 515
516static inline void *cq_get(struct kfifo *kfifo) 516static inline void *cq_get(struct kfifo *kfifo)
517{ 517{
518 void *p = NULL; 518 void *p = NULL;
519 519
520 __kfifo_get(kfifo, (void *)&p, sizeof(p)); 520 kfifo_out(kfifo, (void *)&p, sizeof(p));
521 return p; 521 return p;
522} 522}
523 523
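
Editor's note: the fhci.h hunk moves the cq_* wrappers to the in-place kfifo API: the fifo is now embedded in struct endpoint, cq_new() initialises it and returns nonzero on failure, and kfifo_in()/kfifo_out() copy the pointer values. A brief usage sketch, assuming the cq_* helpers above are in scope and using a hypothetical payload pointer:

#include <linux/kfifo.h>
#include <linux/errno.h>

/* Illustrative caller of the reworked cq_* helpers above. */
static int example_queue_usage(void *payload)
{
	struct kfifo q;
	void *out;

	if (cq_new(&q, 8))		/* room for 8 pointers */
		return -ENOMEM;

	cq_put(&q, payload);		/* kfifo_in() stores the pointer value */
	out = cq_get(&q);		/* kfifo_out() pops it, NULL if empty */

	cq_delete(&q);			/* releases the fifo storage */
	return out == payload ? 0 : -EIO;
}
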
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 88b03214622b..35742f8c7cda 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -55,6 +55,7 @@
55 */ 55 */
56#include <linux/kernel.h> 56#include <linux/kernel.h>
57#include <linux/init.h> 57#include <linux/init.h>
58#include <linux/slab.h>
58#include <linux/module.h> 59#include <linux/module.h>
59#include <linux/workqueue.h> 60#include <linux/workqueue.h>
60#include <linux/wait.h> 61#include <linux/wait.h>
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
new file mode 100644
index 000000000000..512f647448ca
--- /dev/null
+++ b/drivers/usb/host/imx21-dbg.c
@@ -0,0 +1,527 @@
1/*
2 * Copyright (c) 2009 by Martin Fuzzey
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* this file is part of imx21-hcd.c */
20
21#ifndef DEBUG
22
23static inline void create_debug_files(struct imx21 *imx21) { }
24static inline void remove_debug_files(struct imx21 *imx21) { }
25static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
26static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
27 int status) {}
28static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
29static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
30 struct urb *urb) {}
31static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
32 struct urb *urb) {}
33static inline void debug_etd_allocated(struct imx21 *imx21) {}
34static inline void debug_etd_freed(struct imx21 *imx21) {}
35static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
36static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
37static inline void debug_isoc_submitted(struct imx21 *imx21,
38 int frame, struct td *td) {}
39static inline void debug_isoc_completed(struct imx21 *imx21,
40 int frame, struct td *td, int cc, int len) {}
41
42#else
43
44#include <linux/debugfs.h>
45#include <linux/seq_file.h>
46
47static const char *dir_labels[] = {
48 "TD 0",
49 "OUT",
50 "IN",
51 "TD 1"
52};
53
54static const char *speed_labels[] = {
55 "Full",
56 "Low"
57};
58
59static const char *format_labels[] = {
60 "Control",
61 "ISO",
62 "Bulk",
63 "Interrupt"
64};
65
66static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
67 struct urb *urb)
68{
69 return usb_pipeisoc(urb->pipe) ?
70 &imx21->isoc_stats : &imx21->nonisoc_stats;
71}
72
73static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
74{
75 stats_for_urb(imx21, urb)->submitted++;
76}
77
78static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
79{
80 if (st)
81 stats_for_urb(imx21, urb)->completed_failed++;
82 else
83 stats_for_urb(imx21, urb)->completed_ok++;
84}
85
86static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
87{
88 stats_for_urb(imx21, urb)->unlinked++;
89}
90
91static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
92{
93 stats_for_urb(imx21, urb)->queue_etd++;
94}
95
96static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
97{
98 stats_for_urb(imx21, urb)->queue_dmem++;
99}
100
101static inline void debug_etd_allocated(struct imx21 *imx21)
102{
103 imx21->etd_usage.maximum = max(
104 ++(imx21->etd_usage.value),
105 imx21->etd_usage.maximum);
106}
107
108static inline void debug_etd_freed(struct imx21 *imx21)
109{
110 imx21->etd_usage.value--;
111}
112
113static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
114{
115 imx21->dmem_usage.value += size;
116 imx21->dmem_usage.maximum = max(
117 imx21->dmem_usage.value,
118 imx21->dmem_usage.maximum);
119}
120
121static inline void debug_dmem_freed(struct imx21 *imx21, int size)
122{
123 imx21->dmem_usage.value -= size;
124}
125
126
127static void debug_isoc_submitted(struct imx21 *imx21,
128 int frame, struct td *td)
129{
130 struct debug_isoc_trace *trace = &imx21->isoc_trace[
131 imx21->isoc_trace_index++];
132
133 imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
134 trace->schedule_frame = td->frame;
135 trace->submit_frame = frame;
136 trace->request_len = td->len;
137 trace->td = td;
138}
139
140static inline void debug_isoc_completed(struct imx21 *imx21,
141 int frame, struct td *td, int cc, int len)
142{
143 struct debug_isoc_trace *trace, *trace_failed;
144 int i;
145 int found = 0;
146
147 trace = imx21->isoc_trace;
148 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
149 if (trace->td == td) {
150 trace->done_frame = frame;
151 trace->done_len = len;
152 trace->cc = cc;
153 trace->td = NULL;
154 found = 1;
155 break;
156 }
157 }
158
159 if (found && cc) {
160 trace_failed = &imx21->isoc_trace_failed[
161 imx21->isoc_trace_index_failed++];
162
163 imx21->isoc_trace_index_failed %= ARRAY_SIZE(
164 imx21->isoc_trace_failed);
165 *trace_failed = *trace;
166 }
167}
168
169
170static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
171{
172 if (ep)
173 snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
174 ep->desc.bEndpointAddress,
175 usb_endpoint_type(&ep->desc),
176 ep);
177 else
178 snprintf(buf, bufsize, "none");
179 return buf;
180}
181
182static char *format_etd_dword0(u32 value, char *buf, int bufsize)
183{
184 snprintf(buf, bufsize,
185 "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
186 value & 0x7F,
187 (value >> DW0_ENDPNT) & 0x0F,
188 dir_labels[(value >> DW0_DIRECT) & 0x03],
189 speed_labels[(value >> DW0_SPEED) & 0x01],
190 format_labels[(value >> DW0_FORMAT) & 0x03],
191 (value >> DW0_HALTED) & 0x01);
192 return buf;
193}
194
195static int debug_status_show(struct seq_file *s, void *v)
196{
197 struct imx21 *imx21 = s->private;
198 int etds_allocated = 0;
199 int etds_sw_busy = 0;
200 int etds_hw_busy = 0;
201 int dmem_blocks = 0;
202 int queued_for_etd = 0;
203 int queued_for_dmem = 0;
204 unsigned int dmem_bytes = 0;
205 int i;
206 struct etd_priv *etd;
207 u32 etd_enable_mask;
208 unsigned long flags;
209 struct imx21_dmem_area *dmem;
210 struct ep_priv *ep_priv;
211
212 spin_lock_irqsave(&imx21->lock, flags);
213
214 etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
215 for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
216 if (etd->alloc)
217 etds_allocated++;
218 if (etd->urb)
219 etds_sw_busy++;
220 if (etd_enable_mask & (1<<i))
221 etds_hw_busy++;
222 }
223
224 list_for_each_entry(dmem, &imx21->dmem_list, list) {
225 dmem_bytes += dmem->size;
226 dmem_blocks++;
227 }
228
229 list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
230 queued_for_etd++;
231
232 list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
233 queued_for_dmem++;
234
235 spin_unlock_irqrestore(&imx21->lock, flags);
236
237 seq_printf(s,
238 "Frame: %d\n"
239 "ETDs allocated: %d/%d (max=%d)\n"
240 "ETDs in use sw: %d\n"
241 "ETDs in use hw: %d\n"
242		"DMEM allocated: %d/%d (max=%d)\n"
243 "DMEM blocks: %d\n"
244 "Queued waiting for ETD: %d\n"
245 "Queued waiting for DMEM: %d\n",
246 readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
247 etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
248 etds_sw_busy,
249 etds_hw_busy,
250 dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
251 dmem_blocks,
252 queued_for_etd,
253 queued_for_dmem);
254
255 return 0;
256}
257
258static int debug_dmem_show(struct seq_file *s, void *v)
259{
260 struct imx21 *imx21 = s->private;
261 struct imx21_dmem_area *dmem;
262 unsigned long flags;
263 char ep_text[40];
264
265 spin_lock_irqsave(&imx21->lock, flags);
266
267 list_for_each_entry(dmem, &imx21->dmem_list, list)
268 seq_printf(s,
269 "%04X: size=0x%X "
270 "ep=%s\n",
271 dmem->offset, dmem->size,
272 format_ep(dmem->ep, ep_text, sizeof(ep_text)));
273
274 spin_unlock_irqrestore(&imx21->lock, flags);
275
276 return 0;
277}
278
279static int debug_etd_show(struct seq_file *s, void *v)
280{
281 struct imx21 *imx21 = s->private;
282 struct etd_priv *etd;
283 char buf[60];
284 u32 dword;
285 int i, j;
286 unsigned long flags;
287
288 spin_lock_irqsave(&imx21->lock, flags);
289
290 for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
291 int state = -1;
292 struct urb_priv *urb_priv;
293 if (etd->urb) {
294 urb_priv = etd->urb->hcpriv;
295 if (urb_priv)
296 state = urb_priv->state;
297 }
298
299 seq_printf(s,
300 "etd_num: %d\n"
301 "ep: %s\n"
302 "alloc: %d\n"
303 "len: %d\n"
304 "busy sw: %d\n"
305 "busy hw: %d\n"
306 "urb state: %d\n"
307 "current urb: %p\n",
308
309 i,
310 format_ep(etd->ep, buf, sizeof(buf)),
311 etd->alloc,
312 etd->len,
313 etd->urb != NULL,
314 (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
315 state,
316 etd->urb);
317
318 for (j = 0; j < 4; j++) {
319 dword = etd_readl(imx21, i, j);
320 switch (j) {
321 case 0:
322 format_etd_dword0(dword, buf, sizeof(buf));
323 break;
324 case 2:
325 snprintf(buf, sizeof(buf),
326 "cc=0X%02X", dword >> DW2_COMPCODE);
327 break;
328 default:
329 *buf = 0;
330 break;
331 }
332 seq_printf(s,
333 "dword %d: submitted=%08X cur=%08X [%s]\n",
334 j,
335 etd->submitted_dwords[j],
336 dword,
337 buf);
338 }
339 seq_printf(s, "\n");
340 }
341
342 spin_unlock_irqrestore(&imx21->lock, flags);
343
344 return 0;
345}
346
347static void debug_statistics_show_one(struct seq_file *s,
348 const char *name, struct debug_stats *stats)
349{
350 seq_printf(s, "%s:\n"
351 "submitted URBs: %lu\n"
352 "completed OK: %lu\n"
353 "completed failed: %lu\n"
354 "unlinked: %lu\n"
355 "queued for ETD: %lu\n"
356 "queued for DMEM: %lu\n\n",
357 name,
358 stats->submitted,
359 stats->completed_ok,
360 stats->completed_failed,
361 stats->unlinked,
362 stats->queue_etd,
363 stats->queue_dmem);
364}
365
366static int debug_statistics_show(struct seq_file *s, void *v)
367{
368 struct imx21 *imx21 = s->private;
369 unsigned long flags;
370
371 spin_lock_irqsave(&imx21->lock, flags);
372
373 debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
374 debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
375 seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
376 spin_unlock_irqrestore(&imx21->lock, flags);
377
378 return 0;
379}
380
381static void debug_isoc_show_one(struct seq_file *s,
382 const char *name, int index, struct debug_isoc_trace *trace)
383{
384 seq_printf(s, "%s %d:\n"
385 "cc=0X%02X\n"
386 "scheduled frame %d (%d)\n"
387		"submitted frame %d (%d)\n"
388 "completed frame %d (%d)\n"
389 "requested length=%d\n"
390 "completed length=%d\n\n",
391 name, index,
392 trace->cc,
393 trace->schedule_frame, trace->schedule_frame & 0xFFFF,
394 trace->submit_frame, trace->submit_frame & 0xFFFF,
395 trace->done_frame, trace->done_frame & 0xFFFF,
396 trace->request_len,
397 trace->done_len);
398}
399
400static int debug_isoc_show(struct seq_file *s, void *v)
401{
402 struct imx21 *imx21 = s->private;
403 struct debug_isoc_trace *trace;
404 unsigned long flags;
405 int i;
406
407 spin_lock_irqsave(&imx21->lock, flags);
408
409 trace = imx21->isoc_trace_failed;
410 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
411 debug_isoc_show_one(s, "isoc failed", i, trace);
412
413 trace = imx21->isoc_trace;
414 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
415 debug_isoc_show_one(s, "isoc", i, trace);
416
417 spin_unlock_irqrestore(&imx21->lock, flags);
418
419 return 0;
420}
421
422static int debug_status_open(struct inode *inode, struct file *file)
423{
424 return single_open(file, debug_status_show, inode->i_private);
425}
426
427static int debug_dmem_open(struct inode *inode, struct file *file)
428{
429 return single_open(file, debug_dmem_show, inode->i_private);
430}
431
432static int debug_etd_open(struct inode *inode, struct file *file)
433{
434 return single_open(file, debug_etd_show, inode->i_private);
435}
436
437static int debug_statistics_open(struct inode *inode, struct file *file)
438{
439 return single_open(file, debug_statistics_show, inode->i_private);
440}
441
442static int debug_isoc_open(struct inode *inode, struct file *file)
443{
444 return single_open(file, debug_isoc_show, inode->i_private);
445}
446
447static const struct file_operations debug_status_fops = {
448 .open = debug_status_open,
449 .read = seq_read,
450 .llseek = seq_lseek,
451 .release = single_release,
452};
453
454static const struct file_operations debug_dmem_fops = {
455 .open = debug_dmem_open,
456 .read = seq_read,
457 .llseek = seq_lseek,
458 .release = single_release,
459};
460
461static const struct file_operations debug_etd_fops = {
462 .open = debug_etd_open,
463 .read = seq_read,
464 .llseek = seq_lseek,
465 .release = single_release,
466};
467
468static const struct file_operations debug_statistics_fops = {
469 .open = debug_statistics_open,
470 .read = seq_read,
471 .llseek = seq_lseek,
472 .release = single_release,
473};
474
475static const struct file_operations debug_isoc_fops = {
476 .open = debug_isoc_open,
477 .read = seq_read,
478 .llseek = seq_lseek,
479 .release = single_release,
480};
481
482static void create_debug_files(struct imx21 *imx21)
483{
484 imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
485 if (!imx21->debug_root)
486 goto failed_create_rootdir;
487
488 if (!debugfs_create_file("status", S_IRUGO,
489 imx21->debug_root, imx21, &debug_status_fops))
490 goto failed_create;
491
492 if (!debugfs_create_file("dmem", S_IRUGO,
493 imx21->debug_root, imx21, &debug_dmem_fops))
494 goto failed_create;
495
496 if (!debugfs_create_file("etd", S_IRUGO,
497 imx21->debug_root, imx21, &debug_etd_fops))
498 goto failed_create;
499
500 if (!debugfs_create_file("statistics", S_IRUGO,
501 imx21->debug_root, imx21, &debug_statistics_fops))
502 goto failed_create;
503
504 if (!debugfs_create_file("isoc", S_IRUGO,
505 imx21->debug_root, imx21, &debug_isoc_fops))
506 goto failed_create;
507
508 return;
509
510failed_create:
511 debugfs_remove_recursive(imx21->debug_root);
512
513failed_create_rootdir:
514 imx21->debug_root = NULL;
515}
516
517
518static void remove_debug_files(struct imx21 *imx21)
519{
520 if (imx21->debug_root) {
521 debugfs_remove_recursive(imx21->debug_root);
522 imx21->debug_root = NULL;
523 }
524}
525
526#endif
527
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 000000000000..8a12f297645f
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1790 @@
1/*
2 * USB Host Controller Driver for IMX21
3 *
4 * Copyright (C) 2006 Loping Dog Embedded Systems
5 * Copyright (C) 2009 Martin Fuzzey
6 * Originally written by Jay Monkman <jtm@lopingdog.com>
7 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24
25 /*
26 * The i.MX21 USB hardware contains
27 * * 32 transfer descriptors (called ETDs)
28 * * 4KB of data memory
29 *
30 * The data memory is shared between the host and function controllers
31 * (but this driver only supports the host controller)
32 *
33 * So setting up a transfer involves:
34 * * Allocating an ETD
35 * * Filling in the ETD with the appropriate information
36 * * Allocating data memory (and putting the offset in the ETD)
37 * * Activating the ETD
38 * * Getting an interrupt when done.
39 *
40 * An ETD is assigned to each active endpoint.
41 *
42 * Low resource (ETD and Data memory) situations are handled differently for
43 * isochronous and non-isochronous transactions:
44 *
45 * Non-ISOC transfers are queued if either ETDs or data memory are unavailable.
46 *
47 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
48 * They allocate both ETDs and Data memory during URB submission
49 * (and fail if unavailable).
50 */
51
52#include <linux/clk.h>
53#include <linux/io.h>
54#include <linux/kernel.h>
55#include <linux/list.h>
56#include <linux/platform_device.h>
57#include <linux/slab.h>
58#include <linux/usb.h>
59
60#include "../core/hcd.h"
61#include "imx21-hcd.h"
62
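/* When built with DEBUG, record the current frame number at the named ETD event */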
63#ifdef DEBUG
64#define DEBUG_LOG_FRAME(imx21, etd, event) \
65 (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
66#else
67#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
68#endif
69
70static const char hcd_name[] = "imx21-hcd";
71
72static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
73{
74 return (struct imx21 *)hcd->hcd_priv;
75}
76
77
78/* =========================================== */
79/* Hardware access helpers */
80/* =========================================== */
81
82static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
83{
84 void __iomem *reg = imx21->regs + offset;
85 writel(readl(reg) | mask, reg);
86}
87
88static inline void clear_register_bits(struct imx21 *imx21,
89 u32 offset, u32 mask)
90{
91 void __iomem *reg = imx21->regs + offset;
92 writel(readl(reg) & ~mask, reg);
93}
94
95static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
96{
97 void __iomem *reg = imx21->regs + offset;
98
99 if (readl(reg) & mask)
100 writel(mask, reg);
101}
102
103static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
104{
105 void __iomem *reg = imx21->regs + offset;
106
107 if (!(readl(reg) & mask))
108 writel(mask, reg);
109}
110
111static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
112{
113 writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
114}
115
116static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
117{
118 return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
119}
120
121static inline int wrap_frame(int counter)
122{
123 return counter & 0xFFFF;
124}
125
126static inline int frame_after(int frame, int after)
127{
128	/* handle wrapping like jiffies time_after() */
129 return (s16)((s16)after - (s16)frame) < 0;
130}
131
132static int imx21_hc_get_frame(struct usb_hcd *hcd)
133{
134 struct imx21 *imx21 = hcd_to_imx21(hcd);
135
136 return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
137}
138
139
140#include "imx21-dbg.c"
141
142/* =========================================== */
143/* ETD management */
144/* =========================================== */
145
146static int alloc_etd(struct imx21 *imx21)
147{
148 int i;
149 struct etd_priv *etd = imx21->etd;
150
151 for (i = 0; i < USB_NUM_ETD; i++, etd++) {
152 if (etd->alloc == 0) {
153 memset(etd, 0, sizeof(imx21->etd[0]));
154 etd->alloc = 1;
155 debug_etd_allocated(imx21);
156 return i;
157 }
158 }
159 return -1;
160}
161
162static void disactivate_etd(struct imx21 *imx21, int num)
163{
164 int etd_mask = (1 << num);
165 struct etd_priv *etd = &imx21->etd[num];
166
167 writel(etd_mask, imx21->regs + USBH_ETDENCLR);
168 clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
169 writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
170 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
171
172 etd->active_count = 0;
173
174 DEBUG_LOG_FRAME(imx21, etd, disactivated);
175}
176
177static void reset_etd(struct imx21 *imx21, int num)
178{
179 struct etd_priv *etd = imx21->etd + num;
180 int i;
181
182 disactivate_etd(imx21, num);
183
184 for (i = 0; i < 4; i++)
185 etd_writel(imx21, num, i, 0);
186 etd->urb = NULL;
187 etd->ep = NULL;
188	etd->td = NULL;
189}
190
191static void free_etd(struct imx21 *imx21, int num)
192{
193 if (num < 0)
194 return;
195
196 if (num >= USB_NUM_ETD) {
197 dev_err(imx21->dev, "BAD etd=%d!\n", num);
198 return;
199 }
200 if (imx21->etd[num].alloc == 0) {
201 dev_err(imx21->dev, "ETD %d already free!\n", num);
202 return;
203 }
204
205 debug_etd_freed(imx21);
206 reset_etd(imx21, num);
207 memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
208}
209
210
211static void setup_etd_dword0(struct imx21 *imx21,
212 int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
213{
214 etd_writel(imx21, etd_num, 0,
215 ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
216 ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
217 ((u32) dir << DW0_DIRECT) |
218 ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
219 1 : 0) << DW0_SPEED) |
220 ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
221 ((u32) maxpacket << DW0_MAXPKTSIZ));
222}
223
224static void activate_etd(struct imx21 *imx21,
225 int etd_num, dma_addr_t dma, u8 dir)
226{
227 u32 etd_mask = 1 << etd_num;
228 struct etd_priv *etd = &imx21->etd[etd_num];
229
230 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
231 set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
232 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
233 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
234
235 if (dma) {
236 set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
237 clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
238 clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
239 writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
240 set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
241 } else {
242 if (dir != TD_DIR_IN) {
243 /* need to set for ZLP */
244 set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
245 set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
246 }
247 }
248
249 DEBUG_LOG_FRAME(imx21, etd, activated);
250
251#ifdef DEBUG
252 if (!etd->active_count) {
253 int i;
254 etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
255 etd->disactivated_frame = -1;
256 etd->last_int_frame = -1;
257 etd->last_req_frame = -1;
258
259 for (i = 0; i < 4; i++)
260 etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
261 }
262#endif
263
264 etd->active_count = 1;
265 writel(etd_mask, imx21->regs + USBH_ETDENSET);
266}
267
268/* =========================================== */
269/* Data memory management */
270/* =========================================== */
271
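/*
 * imx21->dmem_list holds the currently allocated areas of the 4KB data
 * memory, kept ordered by offset.  alloc_dmem() rounds the request up to
 * a 4 byte multiple and does a first-fit scan for a large enough gap;
 * free_dmem() releases an area and then retries any ETDs that are queued
 * waiting for data memory.
 */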
272static int alloc_dmem(struct imx21 *imx21, unsigned int size,
273 struct usb_host_endpoint *ep)
274{
275 unsigned int offset = 0;
276 struct imx21_dmem_area *area;
277 struct imx21_dmem_area *tmp;
278
279 size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
280
281 if (size > DMEM_SIZE) {
282 dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
283 size, DMEM_SIZE);
284 return -EINVAL;
285 }
286
287 list_for_each_entry(tmp, &imx21->dmem_list, list) {
288 if ((size + offset) < offset)
289 goto fail;
290 if ((size + offset) <= tmp->offset)
291 break;
292 offset = tmp->size + tmp->offset;
293 if ((offset + size) > DMEM_SIZE)
294 goto fail;
295 }
296
297 area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
298 if (area == NULL)
299 return -ENOMEM;
300
301 area->ep = ep;
302 area->offset = offset;
303 area->size = size;
304 list_add_tail(&area->list, &tmp->list);
305 debug_dmem_allocated(imx21, size);
306 return offset;
307
308fail:
309 return -ENOMEM;
310}
311
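/*
 * Illustrative sketch only -- it is not called anywhere in this driver and
 * the function name is invented for the example.  It condenses the ETD
 * lifecycle described in the comment at the top of this file, using the
 * helpers defined above.  Locking, the low-resource queueing paths and the
 * per-transfer control words in ETD dwords 2 and 3 are all omitted; see
 * schedule_nonisoc_etd() and schedule_isoc_etds() for the real versions.
 */
static void __maybe_unused example_etd_lifecycle(struct imx21 *imx21,
	struct urb *urb, struct usb_host_endpoint *ep, u8 dir, u16 maxpacket)
{
	int etd_num, dmem_offset;

	etd_num = alloc_etd(imx21);		/* 1. allocate one of the 32 ETDs */
	if (etd_num < 0)
		return;				/* (the real code queues for an ETD) */

	dmem_offset = alloc_dmem(imx21, maxpacket, ep);	/* 2. allocate data memory */
	if (dmem_offset < 0) {
		free_etd(imx21, etd_num);	/* (the real code queues for DMEM) */
		return;
	}

	/* 3. fill in the ETD: addressing in dword 0, X/Y buffer offsets in dword 1 */
	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
	etd_writel(imx21, etd_num, 1,
		((u32)(dmem_offset + maxpacket) << DW1_YBUFSRTAD) | (u32)dmem_offset);

	/* 4. activate the ETD; the done interrupt completes the transfer */
	activate_etd(imx21, etd_num, urb->transfer_dma, dir);
}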
312/* Memory now available for a queued ETD - activate it */
313static void activate_queued_etd(struct imx21 *imx21,
314 struct etd_priv *etd, u32 dmem_offset)
315{
316 struct urb_priv *urb_priv = etd->urb->hcpriv;
317 int etd_num = etd - &imx21->etd[0];
318 u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
319 u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
320
321 dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
322 etd_num);
323 etd_writel(imx21, etd_num, 1,
324 ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
325
326 urb_priv->active = 1;
327 activate_etd(imx21, etd_num, etd->dma_handle, dir);
328}
329
330static void free_dmem(struct imx21 *imx21, int offset)
331{
332 struct imx21_dmem_area *area;
333 struct etd_priv *etd, *tmp;
334 int found = 0;
335
336 list_for_each_entry(area, &imx21->dmem_list, list) {
337 if (area->offset == offset) {
338 debug_dmem_freed(imx21, area->size);
339 list_del(&area->list);
340 kfree(area);
341 found = 1;
342 break;
343 }
344 }
345
346 if (!found) {
347 dev_err(imx21->dev,
348 "Trying to free unallocated DMEM %d\n", offset);
349 return;
350 }
351
352 /* Try again to allocate memory for anything we've queued */
353 list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
354 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
355 if (offset >= 0) {
356 list_del(&etd->queue);
357 activate_queued_etd(imx21, etd, (u32)offset);
358 }
359 }
360}
361
362static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
363{
364 struct imx21_dmem_area *area, *tmp;
365
366 list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
367 if (area->ep == ep) {
368 dev_err(imx21->dev,
369 "Active DMEM %d for disabled ep=%p\n",
370 area->offset, ep);
371 list_del(&area->list);
372 kfree(area);
373 }
374 }
375}
376
377
378/* =========================================== */
379/* End handling */
380/* =========================================== */
381static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
382
383/* Endpoint now idle - release its ETD(s) or assign them to a queued request */
384static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
385{
386 int etd_num;
387 int i;
388
389 for (i = 0; i < NUM_ISO_ETDS; i++) {
390 etd_num = ep_priv->etd[i];
391 if (etd_num < 0)
392 continue;
393
394 ep_priv->etd[i] = -1;
395 if (list_empty(&imx21->queue_for_etd)) {
396 free_etd(imx21, etd_num);
397 continue;
398 }
399
400 dev_dbg(imx21->dev,
401 "assigning idle etd %d for queued request\n", etd_num);
402 ep_priv = list_first_entry(&imx21->queue_for_etd,
403 struct ep_priv, queue);
404 list_del(&ep_priv->queue);
405 reset_etd(imx21, etd_num);
406 ep_priv->waiting_etd = 0;
407 ep_priv->etd[i] = etd_num;
408
409 if (list_empty(&ep_priv->ep->urb_list)) {
410 dev_err(imx21->dev, "No urb for queued ep!\n");
411 continue;
412 }
413 schedule_nonisoc_etd(imx21, list_first_entry(
414 &ep_priv->ep->urb_list, struct urb, urb_list));
415 }
416}
417
418static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
419__releases(imx21->lock)
420__acquires(imx21->lock)
421{
422 struct imx21 *imx21 = hcd_to_imx21(hcd);
423 struct ep_priv *ep_priv = urb->ep->hcpriv;
424 struct urb_priv *urb_priv = urb->hcpriv;
425
426 debug_urb_completed(imx21, urb, status);
427 dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
428
429 kfree(urb_priv->isoc_td);
430 kfree(urb->hcpriv);
431 urb->hcpriv = NULL;
432 usb_hcd_unlink_urb_from_ep(hcd, urb);
433 spin_unlock(&imx21->lock);
434 usb_hcd_giveback_urb(hcd, urb, status);
435 spin_lock(&imx21->lock);
436 if (list_empty(&ep_priv->ep->urb_list))
437 ep_idle(imx21, ep_priv);
438}
439
440/* =========================================== */
441/* ISOC Handling ... */
442/* =========================================== */
443
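/*
 * Each isochronous endpoint owns NUM_ISO_ETDS (2) ETDs so that one descriptor
 * can be refilled while the other is on the bus.  schedule_isoc_etds() keeps
 * the idle ETDs loaded from the endpoint's td_list; TDs whose start frame has
 * already passed are completed with -EXDEV instead of being submitted.
 */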
444static void schedule_isoc_etds(struct usb_hcd *hcd,
445 struct usb_host_endpoint *ep)
446{
447 struct imx21 *imx21 = hcd_to_imx21(hcd);
448 struct ep_priv *ep_priv = ep->hcpriv;
449 struct etd_priv *etd;
450 struct urb_priv *urb_priv;
451 struct td *td;
452 int etd_num;
453 int i;
454 int cur_frame;
455 u8 dir;
456
457 for (i = 0; i < NUM_ISO_ETDS; i++) {
458too_late:
459 if (list_empty(&ep_priv->td_list))
460 break;
461
462 etd_num = ep_priv->etd[i];
463 if (etd_num < 0)
464 break;
465
466 etd = &imx21->etd[etd_num];
467 if (etd->urb)
468 continue;
469
470 td = list_entry(ep_priv->td_list.next, struct td, list);
471 list_del(&td->list);
472 urb_priv = td->urb->hcpriv;
473
474 cur_frame = imx21_hc_get_frame(hcd);
475 if (frame_after(cur_frame, td->frame)) {
476 dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
477 cur_frame, td->frame);
478 urb_priv->isoc_status = -EXDEV;
479 td->urb->iso_frame_desc[
480 td->isoc_index].actual_length = 0;
481 td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
482 if (--urb_priv->isoc_remaining == 0)
483 urb_done(hcd, td->urb, urb_priv->isoc_status);
484 goto too_late;
485 }
486
487 urb_priv->active = 1;
488 etd->td = td;
489 etd->ep = td->ep;
490 etd->urb = td->urb;
491 etd->len = td->len;
492
493 debug_isoc_submitted(imx21, cur_frame, td);
494
495 dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
496 setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
497 etd_writel(imx21, etd_num, 1, etd->dmem_offset);
498 etd_writel(imx21, etd_num, 2,
499 (TD_NOTACCESSED << DW2_COMPCODE) |
500 ((td->frame & 0xFFFF) << DW2_STARTFRM));
501 etd_writel(imx21, etd_num, 3,
502 (TD_NOTACCESSED << DW3_COMPCODE0) |
503 (td->len << DW3_PKTLEN0));
504
505 activate_etd(imx21, etd_num, td->data, dir);
506 }
507}
508
509static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
510{
511 struct imx21 *imx21 = hcd_to_imx21(hcd);
512 int etd_mask = 1 << etd_num;
513 struct urb_priv *urb_priv = urb->hcpriv;
514 struct etd_priv *etd = imx21->etd + etd_num;
515 struct td *td = etd->td;
516 struct usb_host_endpoint *ep = etd->ep;
517 int isoc_index = td->isoc_index;
518 unsigned int pipe = urb->pipe;
519 int dir_in = usb_pipein(pipe);
520 int cc;
521 int bytes_xfrd;
522
523 disactivate_etd(imx21, etd_num);
524
525 cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
526 bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
527
528	/* Input doesn't always fill the buffer, so don't generate an error
529 * when this happens.
530 */
531 if (dir_in && (cc == TD_DATAUNDERRUN))
532 cc = TD_CC_NOERROR;
533
534 if (cc == TD_NOTACCESSED)
535 bytes_xfrd = 0;
536
537 debug_isoc_completed(imx21,
538 imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
539 if (cc) {
540 urb_priv->isoc_status = -EXDEV;
541 dev_dbg(imx21->dev,
542 "bad iso cc=0x%X frame=%d sched frame=%d "
543 "cnt=%d len=%d urb=%p etd=%d index=%d\n",
544 cc, imx21_hc_get_frame(hcd), td->frame,
545 bytes_xfrd, td->len, urb, etd_num, isoc_index);
546 }
547
548 if (dir_in)
549 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
550
551 urb->actual_length += bytes_xfrd;
552 urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
553 urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
554
555 etd->td = NULL;
556 etd->urb = NULL;
557 etd->ep = NULL;
558
559 if (--urb_priv->isoc_remaining == 0)
560 urb_done(hcd, urb, urb_priv->isoc_status);
561
562 schedule_isoc_etds(hcd, ep);
563}
564
565static struct ep_priv *alloc_isoc_ep(
566 struct imx21 *imx21, struct usb_host_endpoint *ep)
567{
568 struct ep_priv *ep_priv;
569 int i;
570
571 ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
572 if (ep_priv == NULL)
573 return NULL;
574
575 /* Allocate the ETDs */
576 for (i = 0; i < NUM_ISO_ETDS; i++) {
577 ep_priv->etd[i] = alloc_etd(imx21);
578 if (ep_priv->etd[i] < 0) {
579 int j;
580 dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
581 for (j = 0; j < i; j++)
582 free_etd(imx21, ep_priv->etd[j]);
583 goto alloc_etd_failed;
584 }
585 imx21->etd[ep_priv->etd[i]].ep = ep;
586 }
587
588 INIT_LIST_HEAD(&ep_priv->td_list);
589 ep_priv->ep = ep;
590 ep->hcpriv = ep_priv;
591 return ep_priv;
592
593alloc_etd_failed:
594 kfree(ep_priv);
595 return NULL;
596}
597
598static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
599 struct usb_host_endpoint *ep,
600 struct urb *urb, gfp_t mem_flags)
601{
602 struct imx21 *imx21 = hcd_to_imx21(hcd);
603 struct urb_priv *urb_priv;
604 unsigned long flags;
605 struct ep_priv *ep_priv;
606 struct td *td = NULL;
607 int i;
608 int ret;
609 int cur_frame;
610 u16 maxpacket;
611
612 urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
613 if (urb_priv == NULL)
614 return -ENOMEM;
615
616 urb_priv->isoc_td = kzalloc(
617 sizeof(struct td) * urb->number_of_packets, mem_flags);
618 if (urb_priv->isoc_td == NULL) {
619 ret = -ENOMEM;
620 goto alloc_td_failed;
621 }
622
623 spin_lock_irqsave(&imx21->lock, flags);
624
625 if (ep->hcpriv == NULL) {
626 ep_priv = alloc_isoc_ep(imx21, ep);
627 if (ep_priv == NULL) {
628 ret = -ENOMEM;
629 goto alloc_ep_failed;
630 }
631 } else {
632 ep_priv = ep->hcpriv;
633 }
634
635 ret = usb_hcd_link_urb_to_ep(hcd, urb);
636 if (ret)
637 goto link_failed;
638
639 urb->status = -EINPROGRESS;
640 urb->actual_length = 0;
641 urb->error_count = 0;
642 urb->hcpriv = urb_priv;
643 urb_priv->ep = ep;
644
645 /* allocate data memory for largest packets if not already done */
646 maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
647 for (i = 0; i < NUM_ISO_ETDS; i++) {
648 struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
649
650 if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
651 /* not sure if this can really occur.... */
652 dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
653 etd->dmem_size, maxpacket);
654 ret = -EMSGSIZE;
655 goto alloc_dmem_failed;
656 }
657
658 if (etd->dmem_size == 0) {
659 etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
660 if (etd->dmem_offset < 0) {
661 dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
662 ret = -EAGAIN;
663 goto alloc_dmem_failed;
664 }
665 etd->dmem_size = maxpacket;
666 }
667 }
668
669 /* calculate frame */
670 cur_frame = imx21_hc_get_frame(hcd);
671 if (urb->transfer_flags & URB_ISO_ASAP) {
672 if (list_empty(&ep_priv->td_list))
673 urb->start_frame = cur_frame + 5;
674 else
675 urb->start_frame = list_entry(
676 ep_priv->td_list.prev,
677 struct td, list)->frame + urb->interval;
678 }
679 urb->start_frame = wrap_frame(urb->start_frame);
680 if (frame_after(cur_frame, urb->start_frame)) {
681 dev_dbg(imx21->dev,
682 "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
683 urb->start_frame, cur_frame,
684 (urb->transfer_flags & URB_ISO_ASAP) != 0);
685 urb->start_frame = wrap_frame(cur_frame + 1);
686 }
687
688 /* set up transfers */
689 td = urb_priv->isoc_td;
690 for (i = 0; i < urb->number_of_packets; i++, td++) {
691 td->ep = ep;
692 td->urb = urb;
693 td->len = urb->iso_frame_desc[i].length;
694 td->isoc_index = i;
695 td->frame = wrap_frame(urb->start_frame + urb->interval * i);
696 td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
697 list_add_tail(&td->list, &ep_priv->td_list);
698 }
699
700 urb_priv->isoc_remaining = urb->number_of_packets;
701 dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
702		urb->number_of_packets, urb->start_frame, (td - 1)->frame);
703
704 debug_urb_submitted(imx21, urb);
705 schedule_isoc_etds(hcd, ep);
706
707 spin_unlock_irqrestore(&imx21->lock, flags);
708 return 0;
709
710alloc_dmem_failed:
711 usb_hcd_unlink_urb_from_ep(hcd, urb);
712
713link_failed:
714alloc_ep_failed:
715 spin_unlock_irqrestore(&imx21->lock, flags);
716 kfree(urb_priv->isoc_td);
717
718alloc_td_failed:
719 kfree(urb_priv);
720 return ret;
721}
722
723static void dequeue_isoc_urb(struct imx21 *imx21,
724 struct urb *urb, struct ep_priv *ep_priv)
725{
726 struct urb_priv *urb_priv = urb->hcpriv;
727 struct td *td, *tmp;
728 int i;
729
730 if (urb_priv->active) {
731 for (i = 0; i < NUM_ISO_ETDS; i++) {
732 int etd_num = ep_priv->etd[i];
733 if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
734 struct etd_priv *etd = imx21->etd + etd_num;
735
736 reset_etd(imx21, etd_num);
737 if (etd->dmem_size)
738 free_dmem(imx21, etd->dmem_offset);
739 etd->dmem_size = 0;
740 }
741 }
742 }
743
744 list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
745 if (td->urb == urb) {
746 dev_vdbg(imx21->dev, "removing td %p\n", td);
747 list_del(&td->list);
748 }
749 }
750}
751
752/* =========================================== */
753/* NON ISOC Handling ... */
754/* =========================================== */
755
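/*
 * Non-ISOC URBs are driven by a small state machine in urb_priv->state:
 * control transfers step through US_CTRL_SETUP -> US_CTRL_DATA (if there is
 * a data stage) -> US_CTRL_ACK, while bulk transfers use US_BULK with an
 * extra US_BULK0 pass when a trailing zero length packet is required
 * (URB_ZERO_PACKET).  nonisoc_etd_done() advances the state after each stage.
 */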
756static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
757{
758 unsigned int pipe = urb->pipe;
759 struct urb_priv *urb_priv = urb->hcpriv;
760 struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
761 int state = urb_priv->state;
762 int etd_num = ep_priv->etd[0];
763 struct etd_priv *etd;
764 int dmem_offset;
765 u32 count;
766 u16 etd_buf_size;
767 u16 maxpacket;
768 u8 dir;
769 u8 bufround;
770 u8 datatoggle;
771 u8 interval = 0;
772 u8 relpolpos = 0;
773
774 if (etd_num < 0) {
775 dev_err(imx21->dev, "No valid ETD\n");
776 return;
777 }
778 if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
779 dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
780
781 etd = &imx21->etd[etd_num];
782 maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
783 if (!maxpacket)
784 maxpacket = 8;
785
786 if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
787 if (state == US_CTRL_SETUP) {
788 dir = TD_DIR_SETUP;
789 etd->dma_handle = urb->setup_dma;
790 bufround = 0;
791 count = 8;
792 datatoggle = TD_TOGGLE_DATA0;
793 } else { /* US_CTRL_ACK */
794 dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
795 etd->dma_handle = urb->transfer_dma;
796 bufround = 0;
797 count = 0;
798 datatoggle = TD_TOGGLE_DATA1;
799 }
800 } else {
801 dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
802 bufround = (dir == TD_DIR_IN) ? 1 : 0;
803 etd->dma_handle = urb->transfer_dma;
804 if (usb_pipebulk(pipe) && (state == US_BULK0))
805 count = 0;
806 else
807 count = urb->transfer_buffer_length;
808
809 if (usb_pipecontrol(pipe)) {
810 datatoggle = TD_TOGGLE_DATA1;
811 } else {
812 if (usb_gettoggle(
813 urb->dev,
814 usb_pipeendpoint(urb->pipe),
815 usb_pipeout(urb->pipe)))
816 datatoggle = TD_TOGGLE_DATA1;
817 else
818 datatoggle = TD_TOGGLE_DATA0;
819 }
820 }
821
822 etd->urb = urb;
823 etd->ep = urb_priv->ep;
824 etd->len = count;
825
826 if (usb_pipeint(pipe)) {
827 interval = urb->interval;
828 relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
829 }
830
831 /* Write ETD to device memory */
832 setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
833
834 etd_writel(imx21, etd_num, 2,
835 (u32) interval << DW2_POLINTERV |
836 ((u32) relpolpos << DW2_RELPOLPOS) |
837 ((u32) dir << DW2_DIRPID) |
838 ((u32) bufround << DW2_BUFROUND) |
839 ((u32) datatoggle << DW2_DATATOG) |
840 ((u32) TD_NOTACCESSED << DW2_COMPCODE));
841
842	/* DMA will always transfer the full buffer size even if TOBYCNT in DWORD3
843 is smaller. Make sure we don't overrun the buffer!
844 */
845 if (count && count < maxpacket)
846 etd_buf_size = count;
847 else
848 etd_buf_size = maxpacket;
849
850 etd_writel(imx21, etd_num, 3,
851 ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
852
853 if (!count)
854 etd->dma_handle = 0;
855
856 /* allocate x and y buffer space at once */
857 etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
858 dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
859 if (dmem_offset < 0) {
860 /* Setup everything we can in HW and update when we get DMEM */
861 etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
862
863 dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
864 debug_urb_queued_for_dmem(imx21, urb);
865 list_add_tail(&etd->queue, &imx21->queue_for_dmem);
866 return;
867 }
868
869 etd_writel(imx21, etd_num, 1,
870 (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
871 (u32) dmem_offset);
872
873 urb_priv->active = 1;
874
875 /* enable the ETD to kick off transfer */
876 dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
877 etd_num, count, dir != TD_DIR_IN ? "out" : "in");
878 activate_etd(imx21, etd_num, etd->dma_handle, dir);
879
880}
881
882static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
883{
884 struct imx21 *imx21 = hcd_to_imx21(hcd);
885 struct etd_priv *etd = &imx21->etd[etd_num];
886 u32 etd_mask = 1 << etd_num;
887 struct urb_priv *urb_priv = urb->hcpriv;
888 int dir;
889 u16 xbufaddr;
890 int cc;
891 u32 bytes_xfrd;
892 int etd_done;
893
894 disactivate_etd(imx21, etd_num);
895
896 dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
897 xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
898 cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
899 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
900
901 /* save toggle carry */
902 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
903 usb_pipeout(urb->pipe),
904 (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
905
906 if (dir == TD_DIR_IN) {
907 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
908 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
909 }
910 free_dmem(imx21, xbufaddr);
911
912 urb->error_count = 0;
913 if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
914 && (cc == TD_DATAUNDERRUN))
915 cc = TD_CC_NOERROR;
916
917 if (cc != 0)
918 dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
919
920 etd_done = (cc_to_error[cc] != 0); /* stop if error */
921
922 switch (usb_pipetype(urb->pipe)) {
923 case PIPE_CONTROL:
924 switch (urb_priv->state) {
925 case US_CTRL_SETUP:
926 if (urb->transfer_buffer_length > 0)
927 urb_priv->state = US_CTRL_DATA;
928 else
929 urb_priv->state = US_CTRL_ACK;
930 break;
931 case US_CTRL_DATA:
932 urb->actual_length += bytes_xfrd;
933 urb_priv->state = US_CTRL_ACK;
934 break;
935 case US_CTRL_ACK:
936 etd_done = 1;
937 break;
938 default:
939 dev_err(imx21->dev,
940 "Invalid pipe state %d\n", urb_priv->state);
941 etd_done = 1;
942 break;
943 }
944 break;
945
946 case PIPE_BULK:
947 urb->actual_length += bytes_xfrd;
948 if ((urb_priv->state == US_BULK)
949 && (urb->transfer_flags & URB_ZERO_PACKET)
950 && urb->transfer_buffer_length > 0
951 && ((urb->transfer_buffer_length %
952 usb_maxpacket(urb->dev, urb->pipe,
953 usb_pipeout(urb->pipe))) == 0)) {
954 /* need a 0-packet */
955 urb_priv->state = US_BULK0;
956 } else {
957 etd_done = 1;
958 }
959 break;
960
961 case PIPE_INTERRUPT:
962 urb->actual_length += bytes_xfrd;
963 etd_done = 1;
964 break;
965 }
966
967 if (!etd_done) {
968 dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
969 schedule_nonisoc_etd(imx21, urb);
970 } else {
971 struct usb_host_endpoint *ep = urb->ep;
972
973 urb_done(hcd, urb, cc_to_error[cc]);
974 etd->urb = NULL;
975
976 if (!list_empty(&ep->urb_list)) {
977 urb = list_first_entry(&ep->urb_list,
978 struct urb, urb_list);
979 dev_vdbg(imx21->dev, "next URB %p\n", urb);
980 schedule_nonisoc_etd(imx21, urb);
981 }
982 }
983}
984
985static struct ep_priv *alloc_ep(void)
986{
987 int i;
988 struct ep_priv *ep_priv;
989
990 ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
991 if (!ep_priv)
992 return NULL;
993
994 for (i = 0; i < NUM_ISO_ETDS; ++i)
995 ep_priv->etd[i] = -1;
996
997 return ep_priv;
998}
999
1000static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
1001 struct urb *urb, gfp_t mem_flags)
1002{
1003 struct imx21 *imx21 = hcd_to_imx21(hcd);
1004 struct usb_host_endpoint *ep = urb->ep;
1005 struct urb_priv *urb_priv;
1006 struct ep_priv *ep_priv;
1007 struct etd_priv *etd;
1008 int ret;
1009 unsigned long flags;
1010 int new_ep = 0;
1011
1012 dev_vdbg(imx21->dev,
1013 "enqueue urb=%p ep=%p len=%d "
1014 "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
1015 urb, ep,
1016 urb->transfer_buffer_length,
1017 urb->transfer_buffer, urb->transfer_dma,
1018 urb->setup_packet, urb->setup_dma);
1019
1020 if (usb_pipeisoc(urb->pipe))
1021 return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
1022
1023 urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
1024 if (!urb_priv)
1025 return -ENOMEM;
1026
1027 spin_lock_irqsave(&imx21->lock, flags);
1028
1029 ep_priv = ep->hcpriv;
1030 if (ep_priv == NULL) {
1031 ep_priv = alloc_ep();
1032 if (!ep_priv) {
1033 ret = -ENOMEM;
1034 goto failed_alloc_ep;
1035 }
1036 ep->hcpriv = ep_priv;
1037 ep_priv->ep = ep;
1038 new_ep = 1;
1039 }
1040
1041 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1042 if (ret)
1043 goto failed_link;
1044
1045 urb->status = -EINPROGRESS;
1046 urb->actual_length = 0;
1047 urb->error_count = 0;
1048 urb->hcpriv = urb_priv;
1049 urb_priv->ep = ep;
1050
1051 switch (usb_pipetype(urb->pipe)) {
1052 case PIPE_CONTROL:
1053 urb_priv->state = US_CTRL_SETUP;
1054 break;
1055 case PIPE_BULK:
1056 urb_priv->state = US_BULK;
1057 break;
1058 }
1059
1060 debug_urb_submitted(imx21, urb);
1061 if (ep_priv->etd[0] < 0) {
1062 if (ep_priv->waiting_etd) {
1063 dev_dbg(imx21->dev,
1064 "no ETD available already queued %p\n",
1065 ep_priv);
1066 debug_urb_queued_for_etd(imx21, urb);
1067 goto out;
1068 }
1069 ep_priv->etd[0] = alloc_etd(imx21);
1070 if (ep_priv->etd[0] < 0) {
1071 dev_dbg(imx21->dev,
1072 "no ETD available queueing %p\n", ep_priv);
1073 debug_urb_queued_for_etd(imx21, urb);
1074 list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
1075 ep_priv->waiting_etd = 1;
1076 goto out;
1077 }
1078 }
1079
1080 /* Schedule if no URB already active for this endpoint */
1081 etd = &imx21->etd[ep_priv->etd[0]];
1082 if (etd->urb == NULL) {
1083 DEBUG_LOG_FRAME(imx21, etd, last_req);
1084 schedule_nonisoc_etd(imx21, urb);
1085 }
1086
1087out:
1088 spin_unlock_irqrestore(&imx21->lock, flags);
1089 return 0;
1090
1091failed_link:
1092failed_alloc_ep:
1093 spin_unlock_irqrestore(&imx21->lock, flags);
1094 kfree(urb_priv);
1095 return ret;
1096}
1097
1098static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1099 int status)
1100{
1101 struct imx21 *imx21 = hcd_to_imx21(hcd);
1102 unsigned long flags;
1103 struct usb_host_endpoint *ep;
1104 struct ep_priv *ep_priv;
1105 struct urb_priv *urb_priv = urb->hcpriv;
1106 int ret = -EINVAL;
1107
1108 dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
1109 urb, usb_pipeisoc(urb->pipe), status);
1110
1111 spin_lock_irqsave(&imx21->lock, flags);
1112
1113 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1114 if (ret)
1115 goto fail;
1116 ep = urb_priv->ep;
1117 ep_priv = ep->hcpriv;
1118
1119 debug_urb_unlinked(imx21, urb);
1120
1121 if (usb_pipeisoc(urb->pipe)) {
1122 dequeue_isoc_urb(imx21, urb, ep_priv);
1123 schedule_isoc_etds(hcd, ep);
1124 } else if (urb_priv->active) {
1125 int etd_num = ep_priv->etd[0];
1126 if (etd_num != -1) {
1127 disactivate_etd(imx21, etd_num);
1128 free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
1129 imx21->etd[etd_num].urb = NULL;
1130 }
1131 }
1132
1133 urb_done(hcd, urb, status);
1134
1135 spin_unlock_irqrestore(&imx21->lock, flags);
1136 return 0;
1137
1138fail:
1139 spin_unlock_irqrestore(&imx21->lock, flags);
1140 return ret;
1141}
1142
1143/* =========================================== */
1144/* Interrupt dispatch */
1145/* =========================================== */
1146
1147static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1148{
1149 int etd_num;
1150 int enable_sof_int = 0;
1151 unsigned long flags;
1152
1153 spin_lock_irqsave(&imx21->lock, flags);
1154
1155 for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
1156 u32 etd_mask = 1 << etd_num;
1157 u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
1158 u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
1159 struct etd_priv *etd = &imx21->etd[etd_num];
1160
1161
1162 if (done) {
1163 DEBUG_LOG_FRAME(imx21, etd, last_int);
1164 } else {
1165/*
1166 * Kludge warning!
1167 *
1168 * When multiple transfers are using the bus we sometimes get into a state
1169 * where the transfer has completed (the CC field of the ETD is != 0x0F),
1170 * the ETD has self disabled but the ETDDONESTAT flag is not set
1171 * (and hence no interrupt occurs).
1172 * This causes the transfer in question to hang.
1173 * The kludge below checks for this condition at each SOF and processes any
1174 * blocked ETDs (after an arbitrary 10 frame wait)
1175 *
1176 * With a single active transfer the usbtest test suite will run for days
1177 * without the kludge.
1178 * With other bus activity (eg mass storage) even just test1 will hang without
1179 * the kludge.
1180 */
1181 u32 dword0;
1182 int cc;
1183
1184 if (etd->active_count && !enabled) /* suspicious... */
1185 enable_sof_int = 1;
1186
1187 if (!sof || enabled || !etd->active_count)
1188 continue;
1189
1190 cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
1191 if (cc == TD_NOTACCESSED)
1192 continue;
1193
1194 if (++etd->active_count < 10)
1195 continue;
1196
1197 dword0 = etd_readl(imx21, etd_num, 0);
1198 dev_dbg(imx21->dev,
1199 "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
1200 etd_num, dword0 & 0x7F,
1201 (dword0 >> DW0_ENDPNT) & 0x0F,
1202 cc);
1203
1204#ifdef DEBUG
1205 dev_dbg(imx21->dev,
1206 "frame: act=%d disact=%d"
1207 " int=%d req=%d cur=%d\n",
1208 etd->activated_frame,
1209 etd->disactivated_frame,
1210 etd->last_int_frame,
1211 etd->last_req_frame,
1212 readl(imx21->regs + USBH_FRMNUB));
1213 imx21->debug_unblocks++;
1214#endif
1215 etd->active_count = 0;
1216/* End of kludge */
1217 }
1218
1219 if (etd->ep == NULL || etd->urb == NULL) {
1220 dev_dbg(imx21->dev,
1221 "Interrupt for unexpected etd %d"
1222 " ep=%p urb=%p\n",
1223 etd_num, etd->ep, etd->urb);
1224 disactivate_etd(imx21, etd_num);
1225 continue;
1226 }
1227
1228 if (usb_pipeisoc(etd->urb->pipe))
1229 isoc_etd_done(hcd, etd->urb, etd_num);
1230 else
1231 nonisoc_etd_done(hcd, etd->urb, etd_num);
1232 }
1233
1234 /* only enable SOF interrupt if it may be needed for the kludge */
1235 if (enable_sof_int)
1236 set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1237 else
1238 clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1239
1240
1241 spin_unlock_irqrestore(&imx21->lock, flags);
1242}
1243
1244static irqreturn_t imx21_irq(struct usb_hcd *hcd)
1245{
1246 struct imx21 *imx21 = hcd_to_imx21(hcd);
1247 u32 ints = readl(imx21->regs + USBH_SYSISR);
1248
1249 if (ints & USBH_SYSIEN_HERRINT)
1250 dev_dbg(imx21->dev, "Scheduling error\n");
1251
1252 if (ints & USBH_SYSIEN_SORINT)
1253 dev_dbg(imx21->dev, "Scheduling overrun\n");
1254
1255 if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
1256 process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
1257
1258 writel(ints, imx21->regs + USBH_SYSISR);
1259 return IRQ_HANDLED;
1260}
1261
1262static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
1263 struct usb_host_endpoint *ep)
1264{
1265 struct imx21 *imx21 = hcd_to_imx21(hcd);
1266 unsigned long flags;
1267 struct ep_priv *ep_priv;
1268 int i;
1269
1270 if (ep == NULL)
1271 return;
1272
1273 spin_lock_irqsave(&imx21->lock, flags);
1274 ep_priv = ep->hcpriv;
1275 dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
1276
1277 if (!list_empty(&ep->urb_list))
1278 dev_dbg(imx21->dev, "ep's URB list is not empty\n");
1279
1280 if (ep_priv != NULL) {
1281 for (i = 0; i < NUM_ISO_ETDS; i++) {
1282 if (ep_priv->etd[i] > -1)
1283 dev_dbg(imx21->dev, "free etd %d for disable\n",
1284 ep_priv->etd[i]);
1285
1286 free_etd(imx21, ep_priv->etd[i]);
1287 }
1288 kfree(ep_priv);
1289 ep->hcpriv = NULL;
1290 }
1291
1292 for (i = 0; i < USB_NUM_ETD; i++) {
1293 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1294 dev_err(imx21->dev,
1295 "Active etd %d for disabled ep=%p!\n", i, ep);
1296 free_etd(imx21, i);
1297 }
1298 }
1299 free_epdmem(imx21, ep);
1300 spin_unlock_irqrestore(&imx21->lock, flags);
1301}
1302
1303/* =========================================== */
1304/* Hub handling */
1305/* =========================================== */
1306
1307static int get_hub_descriptor(struct usb_hcd *hcd,
1308 struct usb_hub_descriptor *desc)
1309{
1310 struct imx21 *imx21 = hcd_to_imx21(hcd);
1311 desc->bDescriptorType = 0x29; /* HUB descriptor */
1312 desc->bHubContrCurrent = 0;
1313
1314 desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1315 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1316 desc->bDescLength = 9;
1317 desc->bPwrOn2PwrGood = 0;
1318 desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
1319 0x0002 | /* No power switching */
1320 0x0010 | /* No over current protection */
1321 0);
1322
1323 desc->bitmap[0] = 1 << 1;
1324 desc->bitmap[1] = ~0;
1325 return 0;
1326}
1327
1328static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
1329{
1330 struct imx21 *imx21 = hcd_to_imx21(hcd);
1331 int ports;
1332 int changed = 0;
1333 int i;
1334 unsigned long flags;
1335
1336 spin_lock_irqsave(&imx21->lock, flags);
1337 ports = readl(imx21->regs + USBH_ROOTHUBA)
1338 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1339 if (ports > 7) {
1340 ports = 7;
1341 dev_err(imx21->dev, "ports %d > 7\n", ports);
1342 }
1343 for (i = 0; i < ports; i++) {
1344 if (readl(imx21->regs + USBH_PORTSTAT(i)) &
1345 (USBH_PORTSTAT_CONNECTSC |
1346 USBH_PORTSTAT_PRTENBLSC |
1347 USBH_PORTSTAT_PRTSTATSC |
1348 USBH_PORTSTAT_OVRCURIC |
1349 USBH_PORTSTAT_PRTRSTSC)) {
1350
1351 changed = 1;
1352 buf[0] |= 1 << (i + 1);
1353 }
1354 }
1355 spin_unlock_irqrestore(&imx21->lock, flags);
1356
1357 if (changed)
1358 dev_info(imx21->dev, "Hub status changed\n");
1359 return changed;
1360}
1361
1362static int imx21_hc_hub_control(struct usb_hcd *hcd,
1363 u16 typeReq,
1364 u16 wValue, u16 wIndex, char *buf, u16 wLength)
1365{
1366 struct imx21 *imx21 = hcd_to_imx21(hcd);
1367 int rc = 0;
1368 u32 status_write = 0;
1369
1370 switch (typeReq) {
1371 case ClearHubFeature:
1372 dev_dbg(imx21->dev, "ClearHubFeature\n");
1373 switch (wValue) {
1374 case C_HUB_OVER_CURRENT:
1375 dev_dbg(imx21->dev, " OVER_CURRENT\n");
1376 break;
1377 case C_HUB_LOCAL_POWER:
1378 dev_dbg(imx21->dev, " LOCAL_POWER\n");
1379 break;
1380 default:
1381 dev_dbg(imx21->dev, " unknown\n");
1382 rc = -EINVAL;
1383 break;
1384 }
1385 break;
1386
1387 case ClearPortFeature:
1388 dev_dbg(imx21->dev, "ClearPortFeature\n");
1389 switch (wValue) {
1390 case USB_PORT_FEAT_ENABLE:
1391 dev_dbg(imx21->dev, " ENABLE\n");
1392 status_write = USBH_PORTSTAT_CURCONST;
1393 break;
1394 case USB_PORT_FEAT_SUSPEND:
1395 dev_dbg(imx21->dev, " SUSPEND\n");
1396 status_write = USBH_PORTSTAT_PRTOVRCURI;
1397 break;
1398 case USB_PORT_FEAT_POWER:
1399 dev_dbg(imx21->dev, " POWER\n");
1400 status_write = USBH_PORTSTAT_LSDEVCON;
1401 break;
1402 case USB_PORT_FEAT_C_ENABLE:
1403 dev_dbg(imx21->dev, " C_ENABLE\n");
1404 status_write = USBH_PORTSTAT_PRTENBLSC;
1405 break;
1406 case USB_PORT_FEAT_C_SUSPEND:
1407 dev_dbg(imx21->dev, " C_SUSPEND\n");
1408 status_write = USBH_PORTSTAT_PRTSTATSC;
1409 break;
1410 case USB_PORT_FEAT_C_CONNECTION:
1411 dev_dbg(imx21->dev, " C_CONNECTION\n");
1412 status_write = USBH_PORTSTAT_CONNECTSC;
1413 break;
1414 case USB_PORT_FEAT_C_OVER_CURRENT:
1415 dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
1416 status_write = USBH_PORTSTAT_OVRCURIC;
1417 break;
1418 case USB_PORT_FEAT_C_RESET:
1419 dev_dbg(imx21->dev, " C_RESET\n");
1420 status_write = USBH_PORTSTAT_PRTRSTSC;
1421 break;
1422 default:
1423 dev_dbg(imx21->dev, " unknown\n");
1424 rc = -EINVAL;
1425 break;
1426 }
1427
1428 break;
1429
1430 case GetHubDescriptor:
1431 dev_dbg(imx21->dev, "GetHubDescriptor\n");
1432 rc = get_hub_descriptor(hcd, (void *)buf);
1433 break;
1434
1435 case GetHubStatus:
1436 dev_dbg(imx21->dev, " GetHubStatus\n");
1437 *(__le32 *) buf = 0;
1438 break;
1439
1440 case GetPortStatus:
1441 dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
1442 wIndex, USBH_PORTSTAT(wIndex - 1));
1443 *(__le32 *) buf = readl(imx21->regs +
1444 USBH_PORTSTAT(wIndex - 1));
1445 break;
1446
1447 case SetHubFeature:
1448 dev_dbg(imx21->dev, "SetHubFeature\n");
1449 switch (wValue) {
1450 case C_HUB_OVER_CURRENT:
1451 dev_dbg(imx21->dev, " OVER_CURRENT\n");
1452 break;
1453
1454 case C_HUB_LOCAL_POWER:
1455 dev_dbg(imx21->dev, " LOCAL_POWER\n");
1456 break;
1457 default:
1458 dev_dbg(imx21->dev, " unknown\n");
1459 rc = -EINVAL;
1460 break;
1461 }
1462
1463 break;
1464
1465 case SetPortFeature:
1466 dev_dbg(imx21->dev, "SetPortFeature\n");
1467 switch (wValue) {
1468 case USB_PORT_FEAT_SUSPEND:
1469 dev_dbg(imx21->dev, " SUSPEND\n");
1470 status_write = USBH_PORTSTAT_PRTSUSPST;
1471 break;
1472 case USB_PORT_FEAT_POWER:
1473 dev_dbg(imx21->dev, " POWER\n");
1474 status_write = USBH_PORTSTAT_PRTPWRST;
1475 break;
1476 case USB_PORT_FEAT_RESET:
1477 dev_dbg(imx21->dev, " RESET\n");
1478 status_write = USBH_PORTSTAT_PRTRSTST;
1479 break;
1480 default:
1481 dev_dbg(imx21->dev, " unknown\n");
1482 rc = -EINVAL;
1483 break;
1484 }
1485 break;
1486
1487 default:
1488 dev_dbg(imx21->dev, " unknown\n");
1489 rc = -EINVAL;
1490 break;
1491 }
1492
1493 if (status_write)
1494 writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
1495 return rc;
1496}
1497
1498/* =========================================== */
1499/* Host controller management */
1500/* =========================================== */
1501
1502static int imx21_hc_reset(struct usb_hcd *hcd)
1503{
1504 struct imx21 *imx21 = hcd_to_imx21(hcd);
1505 unsigned long timeout;
1506 unsigned long flags;
1507
1508 spin_lock_irqsave(&imx21->lock, flags);
1509
1510	/* Reset the host controller modules */
1511 writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
1512 USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
1513 imx21->regs + USBOTG_RST_CTRL);
1514
1515 /* Wait for reset to finish */
1516 timeout = jiffies + HZ;
1517 while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
1518 if (time_after(jiffies, timeout)) {
1519 spin_unlock_irqrestore(&imx21->lock, flags);
1520 dev_err(imx21->dev, "timeout waiting for reset\n");
1521 return -ETIMEDOUT;
1522 }
1523 spin_unlock_irq(&imx21->lock);
1524 schedule_timeout(1);
1525 spin_lock_irq(&imx21->lock);
1526 }
1527 spin_unlock_irqrestore(&imx21->lock, flags);
1528 return 0;
1529}
1530
1531static int __devinit imx21_hc_start(struct usb_hcd *hcd)
1532{
1533 struct imx21 *imx21 = hcd_to_imx21(hcd);
1534 unsigned long flags;
1535 int i, j;
1536 u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
1537 u32 usb_control = 0;
1538
1539 hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
1540 USBOTG_HWMODE_HOSTXCVR_MASK);
1541 hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
1542 USBOTG_HWMODE_OTGXCVR_MASK);
1543
1544 if (imx21->pdata->host1_txenoe)
1545 usb_control |= USBCTRL_HOST1_TXEN_OE;
1546
1547 if (!imx21->pdata->host1_xcverless)
1548 usb_control |= USBCTRL_HOST1_BYP_TLL;
1549
1550 if (imx21->pdata->otg_ext_xcvr)
1551 usb_control |= USBCTRL_OTC_RCV_RXDP;
1552
1553
1554 spin_lock_irqsave(&imx21->lock, flags);
1555
1556 writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
1557 imx21->regs + USBOTG_CLK_CTRL);
1558 writel(hw_mode, imx21->regs + USBOTG_HWMODE);
1559 writel(usb_control, imx21->regs + USBCTRL);
1560 writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
1561 imx21->regs + USB_MISCCONTROL);
1562
1563 /* Clear the ETDs */
1564 for (i = 0; i < USB_NUM_ETD; i++)
1565 for (j = 0; j < 4; j++)
1566 etd_writel(imx21, i, j, 0);
1567
1568 /* Take the HC out of reset */
1569 writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
1570 imx21->regs + USBH_HOST_CTRL);
1571
1572 /* Enable ports */
1573 if (imx21->pdata->enable_otg_host)
1574 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1575 imx21->regs + USBH_PORTSTAT(0));
1576
1577 if (imx21->pdata->enable_host1)
1578 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1579 imx21->regs + USBH_PORTSTAT(1));
1580
1581 if (imx21->pdata->enable_host2)
1582 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1583 imx21->regs + USBH_PORTSTAT(2));
1584
1585
1586 hcd->state = HC_STATE_RUNNING;
1587
1588 /* Enable host controller interrupts */
1589 set_register_bits(imx21, USBH_SYSIEN,
1590 USBH_SYSIEN_HERRINT |
1591 USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
1592 set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1593
1594 spin_unlock_irqrestore(&imx21->lock, flags);
1595
1596 return 0;
1597}
1598
1599static void imx21_hc_stop(struct usb_hcd *hcd)
1600{
1601 struct imx21 *imx21 = hcd_to_imx21(hcd);
1602 unsigned long flags;
1603
1604 spin_lock_irqsave(&imx21->lock, flags);
1605
1606 writel(0, imx21->regs + USBH_SYSIEN);
1607 clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1608	clear_register_bits(imx21, USBOTG_CLK_CTRL,
1609		USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
1610 spin_unlock_irqrestore(&imx21->lock, flags);
1611}
1612
1613/* =========================================== */
1614/* Driver glue */
1615/* =========================================== */
1616
1617static struct hc_driver imx21_hc_driver = {
1618 .description = hcd_name,
1619 .product_desc = "IMX21 USB Host Controller",
1620 .hcd_priv_size = sizeof(struct imx21),
1621
1622 .flags = HCD_USB11,
1623 .irq = imx21_irq,
1624
1625 .reset = imx21_hc_reset,
1626 .start = imx21_hc_start,
1627 .stop = imx21_hc_stop,
1628
1629 /* I/O requests */
1630 .urb_enqueue = imx21_hc_urb_enqueue,
1631 .urb_dequeue = imx21_hc_urb_dequeue,
1632 .endpoint_disable = imx21_hc_endpoint_disable,
1633
1634 /* scheduling support */
1635 .get_frame_number = imx21_hc_get_frame,
1636
1637 /* Root hub support */
1638 .hub_status_data = imx21_hc_hub_status_data,
1639 .hub_control = imx21_hc_hub_control,
1640
1641};
1642
1643static struct mx21_usbh_platform_data default_pdata = {
1644 .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1645 .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1646 .enable_host1 = 1,
1647 .enable_host2 = 1,
1648 .enable_otg_host = 1,
1649
1650};
1651
1652static int imx21_remove(struct platform_device *pdev)
1653{
1654 struct usb_hcd *hcd = platform_get_drvdata(pdev);
1655 struct imx21 *imx21 = hcd_to_imx21(hcd);
1656 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1657
1658 remove_debug_files(imx21);
1659 usb_remove_hcd(hcd);
1660
1661 if (res != NULL) {
1662 clk_disable(imx21->clk);
1663 clk_put(imx21->clk);
1664 iounmap(imx21->regs);
1665 release_mem_region(res->start, resource_size(res));
1666 }
1667
1668	usb_put_hcd(hcd);
1669 return 0;
1670}
1671
1672
1673static int imx21_probe(struct platform_device *pdev)
1674{
1675 struct usb_hcd *hcd;
1676 struct imx21 *imx21;
1677 struct resource *res;
1678 int ret;
1679 int irq;
1680
1681 printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
1682
1683 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1684 if (!res)
1685 return -ENODEV;
1686 irq = platform_get_irq(pdev, 0);
1687 if (irq < 0)
1688 return -ENXIO;
1689
1690 hcd = usb_create_hcd(&imx21_hc_driver,
1691 &pdev->dev, dev_name(&pdev->dev));
1692 if (hcd == NULL) {
1693 dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
1694 dev_name(&pdev->dev));
1695 return -ENOMEM;
1696 }
1697
1698 imx21 = hcd_to_imx21(hcd);
1699 imx21->dev = &pdev->dev;
1700 imx21->pdata = pdev->dev.platform_data;
1701 if (!imx21->pdata)
1702 imx21->pdata = &default_pdata;
1703
1704 spin_lock_init(&imx21->lock);
1705 INIT_LIST_HEAD(&imx21->dmem_list);
1706 INIT_LIST_HEAD(&imx21->queue_for_etd);
1707 INIT_LIST_HEAD(&imx21->queue_for_dmem);
1708 create_debug_files(imx21);
1709
1710 res = request_mem_region(res->start, resource_size(res), hcd_name);
1711 if (!res) {
1712 ret = -EBUSY;
1713 goto failed_request_mem;
1714 }
1715
1716 imx21->regs = ioremap(res->start, resource_size(res));
1717 if (imx21->regs == NULL) {
1718 dev_err(imx21->dev, "Cannot map registers\n");
1719 ret = -ENOMEM;
1720 goto failed_ioremap;
1721 }
1722
1723	/* Enable the clock source */
1724 imx21->clk = clk_get(imx21->dev, NULL);
1725 if (IS_ERR(imx21->clk)) {
1726 dev_err(imx21->dev, "no clock found\n");
1727 ret = PTR_ERR(imx21->clk);
1728 goto failed_clock_get;
1729 }
1730
1731 ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
1732 if (ret)
1733 goto failed_clock_set;
1734 ret = clk_enable(imx21->clk);
1735 if (ret)
1736 goto failed_clock_enable;
1737
1738 dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
1739 (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
1740
1741 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
1742 if (ret != 0) {
1743 dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
1744 goto failed_add_hcd;
1745 }
1746
1747 return 0;
1748
1749failed_add_hcd:
1750 clk_disable(imx21->clk);
1751failed_clock_enable:
1752failed_clock_set:
1753 clk_put(imx21->clk);
1754failed_clock_get:
1755 iounmap(imx21->regs);
1756failed_ioremap:
1757	release_mem_region(res->start, resource_size(res));
1758failed_request_mem:
1759 remove_debug_files(imx21);
1760 usb_put_hcd(hcd);
1761 return ret;
1762}
1763
1764static struct platform_driver imx21_hcd_driver = {
1765 .driver = {
1766 .name = (char *)hcd_name,
1767 },
1768 .probe = imx21_probe,
1769 .remove = imx21_remove,
1770 .suspend = NULL,
1771 .resume = NULL,
1772};
1773
1774static int __init imx21_hcd_init(void)
1775{
1776 return platform_driver_register(&imx21_hcd_driver);
1777}
1778
1779static void __exit imx21_hcd_cleanup(void)
1780{
1781 platform_driver_unregister(&imx21_hcd_driver);
1782}
1783
1784module_init(imx21_hcd_init);
1785module_exit(imx21_hcd_cleanup);
1786
1787MODULE_DESCRIPTION("i.MX21 USB Host controller");
1788MODULE_AUTHOR("Martin Fuzzey");
1789MODULE_LICENSE("GPL");
1790MODULE_ALIAS("platform:imx21-hcd");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
new file mode 100644
index 000000000000..1b0d913780a5
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.h
@@ -0,0 +1,436 @@
1/*
2 * Macros and prototypes for i.MX21
3 *
4 * Copyright (C) 2006 Loping Dog Embedded Systems
5 * Copyright (C) 2009 Martin Fuzzey
6 * Originally written by Jay Monkman <jtm@lopingdog.com>
7 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#ifndef __LINUX_IMX21_HCD_H__
25#define __LINUX_IMX21_HCD_H__
26
27#include <mach/mx21-usbhost.h>
28
29#define NUM_ISO_ETDS 2
30#define USB_NUM_ETD 32
31#define DMEM_SIZE 4096
32
33/* Register definitions */
34#define USBOTG_HWMODE 0x00
35#define USBOTG_HWMODE_ANASDBEN (1 << 14)
36#define USBOTG_HWMODE_OTGXCVR_SHIFT 6
37#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6)
38#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6)
39#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6)
40#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6)
41#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6)
42#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4
43#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4)
44#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4)
45#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4)
46#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4)
47#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4)
48#define USBOTG_HWMODE_CRECFG_MASK (3 << 0)
49#define USBOTG_HWMODE_CRECFG_HOST (1 << 0)
50#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0)
51#define USBOTG_HWMODE_CRECFG_HNP (3 << 0)
52
53#define USBOTG_CINT_STAT 0x04
54#define USBOTG_CINT_STEN 0x08
55#define USBOTG_ASHNPINT (1 << 5)
56#define USBOTG_ASFCINT (1 << 4)
57#define USBOTG_ASHCINT (1 << 3)
58#define USBOTG_SHNPINT (1 << 2)
59#define USBOTG_FCINT (1 << 1)
60#define USBOTG_HCINT (1 << 0)
61
62#define USBOTG_CLK_CTRL 0x0c
63#define USBOTG_CLK_CTRL_FUNC (1 << 2)
64#define USBOTG_CLK_CTRL_HST (1 << 1)
65#define USBOTG_CLK_CTRL_MAIN (1 << 0)
66
67#define USBOTG_RST_CTRL 0x10
68#define USBOTG_RST_RSTI2C (1 << 15)
69#define USBOTG_RST_RSTCTRL (1 << 5)
70#define USBOTG_RST_RSTFC (1 << 4)
71#define USBOTG_RST_RSTFSKE (1 << 3)
72#define USBOTG_RST_RSTRH (1 << 2)
73#define USBOTG_RST_RSTHSIE (1 << 1)
74#define USBOTG_RST_RSTHC (1 << 0)
75
76#define USBOTG_FRM_INTVL 0x14
77#define USBOTG_FRM_REMAIN 0x18
78#define USBOTG_HNP_CSR 0x1c
79#define USBOTG_HNP_ISR 0x2c
80#define USBOTG_HNP_IEN 0x30
81
82#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x))
83#define USBOTG_I2C_XCVR_DEVAD 0x118
84#define USBOTG_I2C_SEQ_OP_REG 0x119
85#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a
86#define USBOTG_I2C_OP_CTRL_REG 0x11b
87#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e
88#define USBOTG_I2C_MASTER_INT_REG 0x11f
89
90#define USBH_HOST_CTRL 0x80
91#define USBH_HOST_CTRL_HCRESET (1 << 31)
92#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16)
93#define USBH_HOST_CTRL_RMTWUEN (1 << 4)
94#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2)
95#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2)
96#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2)
97#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2)
98#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0)
99#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0)
100#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0)
101#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0)
102
103#define USBH_SYSISR 0x88
104#define USBH_SYSISR_PSCINT (1 << 6)
105#define USBH_SYSISR_FMOFINT (1 << 5)
106#define USBH_SYSISR_HERRINT (1 << 4)
107#define USBH_SYSISR_RESDETINT (1 << 3)
108#define USBH_SYSISR_SOFINT (1 << 2)
109#define USBH_SYSISR_DONEINT (1 << 1)
110#define USBH_SYSISR_SORINT (1 << 0)
111
112#define USBH_SYSIEN 0x8c
113#define USBH_SYSIEN_PSCINT (1 << 6)
114#define USBH_SYSIEN_FMOFINT (1 << 5)
115#define USBH_SYSIEN_HERRINT (1 << 4)
116#define USBH_SYSIEN_RESDETINT (1 << 3)
117#define USBH_SYSIEN_SOFINT (1 << 2)
118#define USBH_SYSIEN_DONEINT (1 << 1)
119#define USBH_SYSIEN_SORINT (1 << 0)
120
121#define USBH_XBUFSTAT 0x98
122#define USBH_YBUFSTAT 0x9c
123#define USBH_XYINTEN 0xa0
124#define USBH_XFILLSTAT 0xa8
125#define USBH_YFILLSTAT 0xac
126#define USBH_ETDENSET 0xc0
127#define USBH_ETDENCLR 0xc4
128#define USBH_IMMEDINT 0xcc
129#define USBH_ETDDONESTAT 0xd0
130#define USBH_ETDDONEEN 0xd4
131#define USBH_FRMNUB 0xe0
132#define USBH_LSTHRESH 0xe4
133
134#define USBH_ROOTHUBA 0xe8
135#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff)
136#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24)
137#define USBH_ROOTHUBA_NOOVRCURP (1 << 12)
138#define USBH_ROOTHUBA_OVRCURPM (1 << 11)
139#define USBH_ROOTHUBA_DEVTYPE (1 << 10)
140#define USBH_ROOTHUBA_PWRSWTMD (1 << 9)
141#define USBH_ROOTHUBA_NOPWRSWT (1 << 8)
142#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff)
143
144#define USBH_ROOTHUBB 0xec
145#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16))
146#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x))
147
148#define USBH_ROOTSTAT 0xf0
149#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31)
150#define USBH_ROOTSTAT_OVRCURCHG (1 << 17)
151#define USBH_ROOTSTAT_DEVCONWUE (1 << 15)
152#define USBH_ROOTSTAT_OVRCURI (1 << 1)
153#define USBH_ROOTSTAT_LOCPWRS (1 << 0)
154
155#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4))
156#define USBH_PORTSTAT_PRTRSTSC (1 << 20)
157#define USBH_PORTSTAT_OVRCURIC (1 << 19)
158#define USBH_PORTSTAT_PRTSTATSC (1 << 18)
159#define USBH_PORTSTAT_PRTENBLSC (1 << 17)
160#define USBH_PORTSTAT_CONNECTSC (1 << 16)
161#define USBH_PORTSTAT_LSDEVCON (1 << 9)
162#define USBH_PORTSTAT_PRTPWRST (1 << 8)
163#define USBH_PORTSTAT_PRTRSTST (1 << 4)
164#define USBH_PORTSTAT_PRTOVRCURI (1 << 3)
165#define USBH_PORTSTAT_PRTSUSPST (1 << 2)
166#define USBH_PORTSTAT_PRTENABST (1 << 1)
167#define USBH_PORTSTAT_CURCONST (1 << 0)
168
169#define USB_DMAREV 0x800
170#define USB_DMAINTSTAT 0x804
171#define USB_DMAINTSTAT_EPERR (1 << 1)
172#define USB_DMAINTSTAT_ETDERR (1 << 0)
173
174#define USB_DMAINTEN 0x808
175#define USB_DMAINTEN_EPERRINTEN (1 << 1)
176#define USB_DMAINTEN_ETDERRINTEN (1 << 0)
177
178#define USB_ETDDMAERSTAT 0x80c
179#define USB_EPDMAERSTAT 0x810
180#define USB_ETDDMAEN 0x820
181#define USB_EPDMAEN 0x824
182#define USB_ETDDMAXTEN 0x828
183#define USB_EPDMAXTEN 0x82c
184#define USB_ETDDMAENXYT 0x830
185#define USB_EPDMAENXYT 0x834
186#define USB_ETDDMABST4EN 0x838
187#define USB_EPDMABST4EN 0x83c
188
189#define USB_MISCCONTROL 0x840
190#define USB_MISCCONTROL_ISOPREVFRM (1 << 3)
191#define USB_MISCCONTROL_SKPRTRY (1 << 2)
192#define USB_MISCCONTROL_ARBMODE (1 << 1)
193#define USB_MISCCONTROL_FILTCC (1 << 0)
194
195#define USB_ETDDMACHANLCLR 0x848
196#define USB_EPDMACHANLCLR 0x84c
197#define USB_ETDSMSA(x) (0x900 + ((x) * 4))
198#define USB_EPSMSA(x) (0x980 + ((x) * 4))
199#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4))
200#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4))
201
202#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4))
203#define DW0_ADDRESS 0
204#define DW0_ENDPNT 7
205#define DW0_DIRECT 11
206#define DW0_SPEED 13
207#define DW0_FORMAT 14
208#define DW0_MAXPKTSIZ 16
209#define DW0_HALTED 27
210#define DW0_TOGCRY 28
211#define DW0_SNDNAK 30
212
213#define DW1_XBUFSRTAD 0
214#define DW1_YBUFSRTAD 16
215
216#define DW2_RTRYDELAY 0
217#define DW2_POLINTERV 0
218#define DW2_STARTFRM 0
219#define DW2_RELPOLPOS 8
220#define DW2_DIRPID 16
221#define DW2_BUFROUND 18
222#define DW2_DELAYINT 19
223#define DW2_DATATOG 22
224#define DW2_ERRORCNT 24
225#define DW2_COMPCODE 28
226
227#define DW3_TOTBYECNT 0
228#define DW3_PKTLEN0 0
229#define DW3_COMPCODE0 12
230#define DW3_PKTLEN1 16
231#define DW3_BUFSIZE 21
232#define DW3_COMPCODE1 28
233
234#define USBCTRL 0x600
235#define USBCTRL_I2C_WU_INT_STAT (1 << 27)
236#define USBCTRL_OTG_WU_INT_STAT (1 << 26)
237#define USBCTRL_HOST_WU_INT_STAT (1 << 25)
238#define USBCTRL_FNT_WU_INT_STAT (1 << 24)
239#define USBCTRL_I2C_WU_INT_EN (1 << 19)
240#define USBCTRL_OTG_WU_INT_EN (1 << 18)
241#define USBCTRL_HOST_WU_INT_EN (1 << 17)
242#define USBCTRL_FNT_WU_INT_EN (1 << 16)
243#define USBCTRL_OTC_RCV_RXDP (1 << 13)
244#define USBCTRL_HOST1_BYP_TLL (1 << 12)
245#define USBCTRL_OTG_BYP_VAL(x) ((x) << 10)
246#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8)
247#define USBCTRL_OTG_PWR_MASK (1 << 6)
248#define USBCTRL_HOST1_PWR_MASK (1 << 5)
249#define USBCTRL_HOST2_PWR_MASK (1 << 4)
250#define USBCTRL_USB_BYP (1 << 2)
251#define USBCTRL_HOST1_TXEN_OE (1 << 1)
252
253
254/* Values in TD blocks */
255#define TD_DIR_SETUP 0
256#define TD_DIR_OUT 1
257#define TD_DIR_IN 2
258#define TD_FORMAT_CONTROL 0
259#define TD_FORMAT_ISO 1
260#define TD_FORMAT_BULK 2
261#define TD_FORMAT_INT 3
262#define TD_TOGGLE_CARRY 0
263#define TD_TOGGLE_DATA0 2
264#define TD_TOGGLE_DATA1 3
265
266/* control transfer states */
267#define US_CTRL_SETUP 2
268#define US_CTRL_DATA 1
269#define US_CTRL_ACK 0
270
271/* bulk transfer main state and 0-length packet */
272#define US_BULK 1
273#define US_BULK0 0
274
275/* ETD format description */
276#define IMX_FMT_CTRL 0x0
277#define IMX_FMT_ISO 0x1
278#define IMX_FMT_BULK 0x2
279#define IMX_FMT_INT 0x3
280
281static char fmt_urb_to_etd[4] = {
282/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO,
283/*PIPE_INTERRUPT*/ IMX_FMT_INT,
284/*PIPE_CONTROL*/ IMX_FMT_CTRL,
285/*PIPE_BULK*/ IMX_FMT_BULK
286};
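
For context, fmt_urb_to_etd is indexed by the USB core's pipe type (PIPE_ISOCHRONOUS through PIPE_BULK map to 0..3 in <linux/usb.h>), so translating an URB into the ETD format field reduces to a table lookup. A minimal sketch, assuming the helper name is illustrative rather than taken from the driver:

	/* Illustrative only: pick the ETD format bits for an URB's pipe type
	 * and place them at the DW0_FORMAT position defined above. */
	static u32 etd_format_bits(struct urb *urb)
	{
		return (u32)fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT;
	}
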
287
288/* condition (error) CC codes and mapping (OHCI like) */
289
290#define TD_CC_NOERROR 0x00
291#define TD_CC_CRC 0x01
292#define TD_CC_BITSTUFFING 0x02
293#define TD_CC_DATATOGGLEM 0x03
294#define TD_CC_STALL 0x04
295#define TD_DEVNOTRESP 0x05
296#define TD_PIDCHECKFAIL 0x06
297/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/
298#define TD_DATAOVERRUN 0x08
299#define TD_DATAUNDERRUN 0x09
300#define TD_BUFFEROVERRUN 0x0C
301#define TD_BUFFERUNDERRUN 0x0D
302#define TD_SCHEDULEOVERRUN 0x0E
303#define TD_NOTACCESSED 0x0F
304
305static const int cc_to_error[16] = {
306 /* No Error */ 0,
307 /* CRC Error */ -EILSEQ,
308 /* Bit Stuff */ -EPROTO,
309 /* Data Togg */ -EILSEQ,
310 /* Stall */ -EPIPE,
311 /* DevNotResp */ -ETIMEDOUT,
312 /* PIDCheck */ -EPROTO,
313 /* UnExpPID */ -EPROTO,
314 /* DataOver */ -EOVERFLOW,
315 /* DataUnder */ -EREMOTEIO,
316 /* (for hw) */ -EIO,
317 /* (for hw) */ -EIO,
318 /* BufferOver */ -ECOMM,
319 /* BuffUnder */ -ENOSR,
320 /* (for HCD) */ -ENOSPC,
321 /* (for HCD) */ -EALREADY
322};
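
The cc_to_error table turns the 4-bit completion code that the hardware writes into ETD dword 2 into a negative errno for the URB status. A hedged sketch of that conversion using the DW2_COMPCODE position defined above (the helper name is an assumption, not driver code):

	/* Illustrative only: extract the completion code from ETD dword 2 and
	 * map it to an URB status (0 on success, negative errno on error). */
	static int etd_cc_to_status(u32 dword2)
	{
		return cc_to_error[(dword2 >> DW2_COMPCODE) & 0xf];
	}
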
323
324/* HCD data associated with a usb core URB */
325struct urb_priv {
326 struct urb *urb;
327 struct usb_host_endpoint *ep;
328 int active;
329 int state;
330 struct td *isoc_td;
331 int isoc_remaining;
332 int isoc_status;
333};
334
335/* HCD data associated with a usb core endpoint */
336struct ep_priv {
337 struct usb_host_endpoint *ep;
338 struct list_head td_list;
339 struct list_head queue;
340 int etd[NUM_ISO_ETDS];
341 int waiting_etd;
342};
343
344/* isoc packet */
345struct td {
346 struct list_head list;
347 struct urb *urb;
348 struct usb_host_endpoint *ep;
349 dma_addr_t data;
350 unsigned long buf_addr;
351 int len;
352 int frame;
353 int isoc_index;
354};
355
356/* HCD data associated with a hardware ETD */
357struct etd_priv {
358 struct usb_host_endpoint *ep;
359 struct urb *urb;
360 struct td *td;
361 struct list_head queue;
362 dma_addr_t dma_handle;
363 int alloc;
364 int len;
365 int dmem_size;
366 int dmem_offset;
367 int active_count;
368#ifdef DEBUG
369 int activated_frame;
370 int disactivated_frame;
371 int last_int_frame;
372 int last_req_frame;
373 u32 submitted_dwords[4];
374#endif
375};
376
377/* Hardware data memory info */
378struct imx21_dmem_area {
379 struct usb_host_endpoint *ep;
380 unsigned int offset;
381 unsigned int size;
382 struct list_head list;
383};
384
385#ifdef DEBUG
386struct debug_usage_stats {
387 unsigned int value;
388 unsigned int maximum;
389};
390
391struct debug_stats {
392 unsigned long submitted;
393 unsigned long completed_ok;
394 unsigned long completed_failed;
395 unsigned long unlinked;
396 unsigned long queue_etd;
397 unsigned long queue_dmem;
398};
399
400struct debug_isoc_trace {
401 int schedule_frame;
402 int submit_frame;
403 int request_len;
404 int done_frame;
405 int done_len;
406 int cc;
407 struct td *td;
408};
409#endif
410
411/* HCD data structure */
412struct imx21 {
413 spinlock_t lock;
414 struct device *dev;
415 struct mx21_usbh_platform_data *pdata;
416 struct list_head dmem_list;
417 struct list_head queue_for_etd; /* eps queued due to etd shortage */
418 struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
419 struct etd_priv etd[USB_NUM_ETD];
420 struct clk *clk;
421 void __iomem *regs;
422#ifdef DEBUG
423 struct dentry *debug_root;
424 struct debug_stats nonisoc_stats;
425 struct debug_stats isoc_stats;
426 struct debug_usage_stats etd_usage;
427 struct debug_usage_stats dmem_usage;
428 struct debug_isoc_trace isoc_trace[20];
429 struct debug_isoc_trace isoc_trace_failed[20];
430 unsigned long debug_unblocks;
431 int isoc_trace_index;
432 int isoc_trace_index_failed;
433#endif
434};
435
436#endif
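
Note that the DW0_* through DW3_* values in this header are bit positions within the four ETD dwords, not masks; the field widths are implied by the spacing between positions. A purely illustrative sketch of composing dword 0 for a transfer (the helper and its parameter names are assumptions, not code from the driver):

	/* Illustrative only: build ETD dword 0 from the bit positions above,
	 * using the TD_DIR_* direction encodings from the header. */
	static u32 etd_dword0(u8 addr, u8 epnum, int is_in, int lowspeed,
			      u32 format, u16 maxpacket)
	{
		return ((u32)addr << DW0_ADDRESS) |
		       ((u32)epnum << DW0_ENDPNT) |
		       ((u32)(is_in ? TD_DIR_IN : TD_DIR_OUT) << DW0_DIRECT) |
		       ((u32)(lowspeed ? 1 : 0) << DW0_SPEED) |
		       (format << DW0_FORMAT) |
		       ((u32)maxpacket << DW0_MAXPKTSIZ);
	}
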
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index a2b305477afe..92de71dc7729 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -62,6 +62,7 @@
62#include <linux/errno.h> 62#include <linux/errno.h>
63#include <linux/init.h> 63#include <linux/init.h>
64#include <linux/list.h> 64#include <linux/list.h>
65#include <linux/slab.h>
65#include <linux/usb.h> 66#include <linux/usb.h>
66#include <linux/usb/isp116x.h> 67#include <linux/usb/isp116x.h>
67#include <linux/platform_device.h> 68#include <linux/platform_device.h>
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 5c774ab98252..217fb5170200 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -80,7 +80,7 @@
80#include <linux/platform_device.h> 80#include <linux/platform_device.h>
81#include <linux/pm.h> 81#include <linux/pm.h>
82#include <linux/io.h> 82#include <linux/io.h>
83#include <linux/bitops.h> 83#include <linux/bitmap.h>
84 84
85#include <asm/irq.h> 85#include <asm/irq.h>
86#include <asm/system.h> 86#include <asm/system.h>
@@ -190,10 +190,8 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
190 struct isp1362_ep *ep, u16 len) 190 struct isp1362_ep *ep, u16 len)
191{ 191{
192 int ptd_offset = -EINVAL; 192 int ptd_offset = -EINVAL;
193 int index;
194 int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1; 193 int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
195 int found = -1; 194 int found;
196 int last = -1;
197 195
198 BUG_ON(len > epq->buf_size); 196 BUG_ON(len > epq->buf_size);
199 197
@@ -205,20 +203,9 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
205 epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map); 203 epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
206 BUG_ON(ep->num_ptds != 0); 204 BUG_ON(ep->num_ptds != 0);
207 205
208 for (index = 0; index <= epq->buf_count - num_ptds; index++) { 206 found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
209 if (test_bit(index, &epq->buf_map)) 207 num_ptds, 0);
210 continue; 208 if (found >= epq->buf_count)
211 found = index;
212 for (last = index + 1; last < index + num_ptds; last++) {
213 if (test_bit(last, &epq->buf_map)) {
214 found = -1;
215 break;
216 }
217 }
218 if (found >= 0)
219 break;
220 }
221 if (found < 0)
222 return -EOVERFLOW; 209 return -EOVERFLOW;
223 210
224 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__, 211 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
@@ -230,8 +217,7 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
230 epq->buf_avail -= num_ptds; 217 epq->buf_avail -= num_ptds;
231 BUG_ON(epq->buf_avail > epq->buf_count); 218 BUG_ON(epq->buf_avail > epq->buf_count);
232 ep->ptd_index = found; 219 ep->ptd_index = found;
233 for (index = found; index < last; index++) 220 bitmap_set(&epq->buf_map, found, num_ptds);
234 __set_bit(index, &epq->buf_map);
235 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n", 221 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
236 __func__, epq->name, ep->ptd_index, ep->ptd_offset, 222 __func__, epq->name, ep->ptd_index, ep->ptd_offset,
237 epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map); 223 epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
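
The hunk above swaps the open-coded scan for bitmap_find_next_zero_area()/bitmap_set() from <linux/bitmap.h>. A small standalone sketch of the helper semantics, using an 8-bit map purely for illustration:

	/* Illustrative only: find a run of 2 free bits in a map where bits
	 * 0, 1, 4 and 5 are already in use, then mark the run allocated. */
	static int ptd_alloc_demo(void)
	{
		DECLARE_BITMAP(map, 8) = { 0x33 };	/* 0b00110011 */
		unsigned long start;

		start = bitmap_find_next_zero_area(map, 8, 0, 2, 0);	/* -> 2 */
		if (start >= 8)
			return -EOVERFLOW;	/* no gap large enough */
		bitmap_set(map, start, 2);	/* map becomes 0x3f */
		return start;
	}
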
@@ -1271,7 +1257,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1271 1257
1272 /* avoid all allocations within spinlocks: request or endpoint */ 1258 /* avoid all allocations within spinlocks: request or endpoint */
1273 if (!hep->hcpriv) { 1259 if (!hep->hcpriv) {
1274 ep = kcalloc(1, sizeof *ep, mem_flags); 1260 ep = kzalloc(sizeof *ep, mem_flags);
1275 if (!ep) 1261 if (!ep)
1276 return -ENOMEM; 1262 return -ENOMEM;
1277 } 1263 }
@@ -2284,10 +2270,10 @@ static int isp1362_mem_config(struct usb_hcd *hcd)
2284 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); 2270 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2285 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", 2271 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2286 istl_size / 2, istl_size, 0, istl_size / 2); 2272 istl_size / 2, istl_size, 0, istl_size / 2);
2287 dev_info(hcd->self.controller, " INTL: %4d * (%3lu+8): %4d @ $%04x\n", 2273 dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
2288 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE, 2274 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2289 intl_size, istl_size); 2275 intl_size, istl_size);
2290 dev_info(hcd->self.controller, " ATL : %4d * (%3lu+8): %4d @ $%04x\n", 2276 dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
2291 atl_buffers, atl_blksize - PTD_HEADER_SIZE, 2277 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2292 atl_size, istl_size + intl_size); 2278 atl_size, istl_size + intl_size);
2293 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, 2279 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
@@ -2711,6 +2697,8 @@ static int __init isp1362_probe(struct platform_device *pdev)
2711 void __iomem *data_reg; 2697 void __iomem *data_reg;
2712 int irq; 2698 int irq;
2713 int retval = 0; 2699 int retval = 0;
2700 struct resource *irq_res;
2701 unsigned int irq_flags = 0;
2714 2702
2715 /* basic sanity checks first. board-specific init logic should 2703 /* basic sanity checks first. board-specific init logic should
2716 * have initialized these three resources and probably board 2704
@@ -2724,30 +2712,18 @@ static int __init isp1362_probe(struct platform_device *pdev)
2724 2712
2725 data = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2713 data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2726 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2714 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2727 irq = platform_get_irq(pdev, 0); 2715 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2728 if (!addr || !data || irq < 0) { 2716 if (!addr || !data || !irq_res) {
2729 retval = -ENODEV; 2717 retval = -ENODEV;
2730 goto err1; 2718 goto err1;
2731 } 2719 }
2720 irq = irq_res->start;
2732 2721
2733#ifdef CONFIG_USB_HCD_DMA
2734 if (pdev->dev.dma_mask) {
2735 struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
2736
2737 if (!dma_res) {
2738 retval = -ENODEV;
2739 goto err1;
2740 }
2741 isp1362_hcd->data_dma = dma_res->start;
2742 isp1362_hcd->max_dma_size = resource_len(dma_res);
2743 }
2744#else
2745 if (pdev->dev.dma_mask) { 2722 if (pdev->dev.dma_mask) {
2746 DBG(1, "won't do DMA"); 2723 DBG(1, "won't do DMA");
2747 retval = -ENODEV; 2724 retval = -ENODEV;
2748 goto err1; 2725 goto err1;
2749 } 2726 }
2750#endif
2751 2727
2752 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { 2728 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
2753 retval = -EBUSY; 2729 retval = -EBUSY;
@@ -2795,12 +2771,16 @@ static int __init isp1362_probe(struct platform_device *pdev)
2795 } 2771 }
2796#endif 2772#endif
2797 2773
2798#ifdef CONFIG_ARM 2774 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2799 if (isp1362_hcd->board) 2775 irq_flags |= IRQF_TRIGGER_RISING;
2800 set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING); 2776 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2801#endif 2777 irq_flags |= IRQF_TRIGGER_FALLING;
2778 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2779 irq_flags |= IRQF_TRIGGER_HIGH;
2780 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2781 irq_flags |= IRQF_TRIGGER_LOW;
2802 2782
2803 retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED); 2783 retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
2804 if (retval != 0) 2784 if (retval != 0)
2805 goto err6; 2785 goto err6;
2806 pr_info("%s, irq %d\n", hcd->product_desc, irq); 2786 pr_info("%s, irq %d\n", hcd->product_desc, irq);
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index 1a253ebf7e50..5151516ea1de 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -534,8 +534,8 @@ struct isp1362_hcd {
534 534
535 /* periodic schedule: isochronous */ 535 /* periodic schedule: isochronous */
536 struct list_head isoc; 536 struct list_head isoc;
537 int istl_flip:1; 537 unsigned int istl_flip:1;
538 int irq_active:1; 538 unsigned int irq_active:1;
539 539
540 /* Schedules for the current frame */ 540 /* Schedules for the current frame */
541 struct isp1362_ep_queue atl_queue; 541 struct isp1362_ep_queue atl_queue;
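
The isp1362.h change above from int to unsigned int matters because a signed one-bit bitfield can only represent 0 and -1 (with gcc, where such bitfields are signed), so storing 1 and later testing for == 1 quietly fails. A standalone sketch of the pitfall, not driver code:

	struct flip_flags {
		int signed_flip:1;		/* can only hold 0 or -1 */
		unsigned int unsigned_flip:1;	/* holds 0 or 1 */
	};

	static int bitfield_demo(void)
	{
		struct flip_flags f;
		int one = 1;

		f.signed_flip = one;	/* reads back as -1 with gcc */
		f.unsigned_flip = one;	/* reads back as 1 */

		/* First comparison is false, second is true -> returns 1. */
		return (f.signed_flip == 1) + (f.unsigned_flip == 1);
	}
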
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 9600a58299db..9f01293600b0 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -17,7 +17,9 @@
17#include <linux/debugfs.h> 17#include <linux/debugfs.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/mm.h>
20#include <asm/unaligned.h> 21#include <asm/unaligned.h>
22#include <asm/cacheflush.h>
21 23
22#include "../core/hcd.h" 24#include "../core/hcd.h"
23#include "isp1760-hcd.h" 25#include "isp1760-hcd.h"
@@ -904,6 +906,14 @@ __acquires(priv->lock)
904 status = 0; 906 status = 0;
905 } 907 }
906 908
909 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
910 void *ptr;
911 for (ptr = urb->transfer_buffer;
912 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
913 ptr += PAGE_SIZE)
914 flush_dcache_page(virt_to_page(ptr));
915 }
916
907 /* complete() can reenter this HCD */ 917 /* complete() can reenter this HCD */
908 usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb); 918 usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
909 spin_unlock(&priv->lock); 919 spin_unlock(&priv->lock);
@@ -1039,12 +1049,12 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
1039 if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) { 1049 if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) {
1040 u32 buffstatus; 1050 u32 buffstatus;
1041 1051
1042 /* XXX 1052 /*
1043 * NAKs are handled in HW by the chip. Usually if the 1053 * NAKs are handled in HW by the chip. Usually if the
1044 * device is not able to send data fast enough. 1054 * device is not able to send data fast enough.
1045 * This did not trigger for a long time now. 1055 * This happens mostly on slower hardware.
1046 */ 1056 */
1047 printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: " 1057 printk(KERN_NOTICE "Reloading ptd %p/%p... qh %p read: "
1048 "%d of %zu done: %08x cur: %08x\n", qtd, 1058 "%d of %zu done: %08x cur: %08x\n", qtd,
1049 urb, qh, PTD_XFERRED_LENGTH(dw3), 1059 urb, qh, PTD_XFERRED_LENGTH(dw3),
1050 qtd->length, done_map, 1060 qtd->length, done_map,
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 1c9f977a5c9c..4293cfd28d61 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -109,7 +109,7 @@ static int of_isp1760_remove(struct of_device *dev)
109 return 0; 109 return 0;
110} 110}
111 111
112static struct of_device_id of_isp1760_match[] = { 112static const struct of_device_id of_isp1760_match[] = {
113 { 113 {
114 .compatible = "nxp,usb-isp1760", 114 .compatible = "nxp,usb-isp1760",
115 }, 115 },
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 7ccffcbe7b6f..944291e10f97 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -35,7 +35,7 @@ extern int usb_disabled(void);
35 35
36static void at91_start_clock(void) 36static void at91_start_clock(void)
37{ 37{
38 if (cpu_is_at91sam9261()) 38 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
39 clk_enable(hclk); 39 clk_enable(hclk);
40 clk_enable(iclk); 40 clk_enable(iclk);
41 clk_enable(fclk); 41 clk_enable(fclk);
@@ -46,7 +46,7 @@ static void at91_stop_clock(void)
46{ 46{
47 clk_disable(fclk); 47 clk_disable(fclk);
48 clk_disable(iclk); 48 clk_disable(iclk);
49 if (cpu_is_at91sam9261()) 49 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
50 clk_disable(hclk); 50 clk_disable(hclk);
51 clocked = 0; 51 clocked = 0;
52} 52}
@@ -142,7 +142,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
142 142
143 iclk = clk_get(&pdev->dev, "ohci_clk"); 143 iclk = clk_get(&pdev->dev, "ohci_clk");
144 fclk = clk_get(&pdev->dev, "uhpck"); 144 fclk = clk_get(&pdev->dev, "uhpck");
145 if (cpu_is_at91sam9261()) 145 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
146 hclk = clk_get(&pdev->dev, "hck0"); 146 hclk = clk_get(&pdev->dev, "hck0");
147 147
148 at91_start_hc(pdev); 148 at91_start_hc(pdev);
@@ -155,7 +155,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
155 /* Error handling */ 155 /* Error handling */
156 at91_stop_hc(pdev); 156 at91_stop_hc(pdev);
157 157
158 if (cpu_is_at91sam9261()) 158 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
159 clk_put(hclk); 159 clk_put(hclk);
160 clk_put(fclk); 160 clk_put(fclk);
161 clk_put(iclk); 161 clk_put(iclk);
@@ -192,7 +192,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
192 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 192 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
193 usb_put_hcd(hcd); 193 usb_put_hcd(hcd);
194 194
195 if (cpu_is_at91sam9261()) 195 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
196 clk_put(hclk); 196 clk_put(hclk);
197 clk_put(fclk); 197 clk_put(fclk);
198 clk_put(iclk); 198 clk_put(iclk);
@@ -331,6 +331,8 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
331 */ 331 */
332 if (at91_suspend_entering_slow_clock()) { 332 if (at91_suspend_entering_slow_clock()) {
333 ohci_usb_reset (ohci); 333 ohci_usb_reset (ohci);
334 /* flush the writes */
335 (void) ohci_readl (ohci, &ohci->regs->control);
334 at91_stop_clock(); 336 at91_stop_clock();
335 } 337 }
336 338
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index e4380082ebb1..17a6043c1fa0 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -294,7 +294,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
294 return 0; 294 return 0;
295} 295}
296 296
297static struct dev_pm_ops au1xxx_ohci_pmops = { 297static const struct dev_pm_ops au1xxx_ohci_pmops = {
298 .suspend = ohci_hcd_au1xxx_drv_suspend, 298 .suspend = ohci_hcd_au1xxx_drv_suspend,
299 .resume = ohci_hcd_au1xxx_drv_resume, 299 .resume = ohci_hcd_au1xxx_drv_resume,
300}; 300};
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
new file mode 100644
index 000000000000..d22fb4d577b7
--- /dev/null
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -0,0 +1,456 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * TI DA8xx (OMAP-L1x) Bus Glue
5 *
6 * Derived from: ohci-omap.c and ohci-s3c2410.c
7 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
8 *
9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any
11 * kind, whether express or implied.
12 */
13
14#include <linux/interrupt.h>
15#include <linux/jiffies.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18
19#include <mach/da8xx.h>
20#include <mach/usb.h>
21
22#ifndef CONFIG_ARCH_DAVINCI_DA8XX
23#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
24#endif
25
26#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
27
28static struct clk *usb11_clk;
29static struct clk *usb20_clk;
30
31/* Over-current indicator change bitmask */
32static volatile u16 ocic_mask;
33
34static void ohci_da8xx_clock(int on)
35{
36 u32 cfgchip2;
37
38 cfgchip2 = __raw_readl(CFGCHIP2);
39 if (on) {
40 clk_enable(usb11_clk);
41
42 /*
43 * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
44 * need to enable the USB 2.0 module clocking, start its PHY,
45 * and not allow it to stop the clock during USB 2.0 suspend.
46 */
47 if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
48 clk_enable(usb20_clk);
49
50 cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
51 cfgchip2 |= CFGCHIP2_PHY_PLLON;
52 __raw_writel(cfgchip2, CFGCHIP2);
53
54 pr_info("Waiting for USB PHY clock good...\n");
55 while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
56 cpu_relax();
57 }
58
59 /* Enable USB 1.1 PHY */
60 cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
61 } else {
62 clk_disable(usb11_clk);
63 if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
64 clk_disable(usb20_clk);
65
66 /* Disable USB 1.1 PHY */
67 cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
68 }
69 __raw_writel(cfgchip2, CFGCHIP2);
70}
71
72/*
73 * Handle the port over-current indicator change.
74 */
75static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
76 unsigned port)
77{
78 ocic_mask |= 1 << port;
79
80 /* Once over-current is detected, the port needs to be powered down */
81 if (hub->get_oci(port) > 0)
82 hub->set_power(port, 0);
83}
84
85static int ohci_da8xx_init(struct usb_hcd *hcd)
86{
87 struct device *dev = hcd->self.controller;
88 struct da8xx_ohci_root_hub *hub = dev->platform_data;
89 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
90 int result;
91 u32 rh_a;
92
93 dev_dbg(dev, "starting USB controller\n");
94
95 ohci_da8xx_clock(1);
96
97 /*
 98 * DA8xx only has 1 port connected to the pins, but the HC root hub
99 * register A reports 2 ports, thus we'll have to override it...
100 */
101 ohci->num_ports = 1;
102
103 result = ohci_init(ohci);
104 if (result < 0)
105 return result;
106
107 /*
108 * Since we're providing a board-specific root hub port power control
109 * and over-current reporting, we have to override the HC root hub A
 110 * register's default value, so that ohci_hub_control() can return
111 * the correct hub descriptor...
112 */
113 rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
114 if (hub->set_power) {
115 rh_a &= ~RH_A_NPS;
116 rh_a |= RH_A_PSM;
117 }
118 if (hub->get_oci) {
119 rh_a &= ~RH_A_NOCP;
120 rh_a |= RH_A_OCPM;
121 }
122 rh_a &= ~RH_A_POTPGT;
123 rh_a |= hub->potpgt << 24;
124 ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
125
126 return result;
127}
128
129static void ohci_da8xx_stop(struct usb_hcd *hcd)
130{
131 ohci_stop(hcd);
132 ohci_da8xx_clock(0);
133}
134
135static int ohci_da8xx_start(struct usb_hcd *hcd)
136{
137 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
138 int result;
139
140 result = ohci_run(ohci);
141 if (result < 0)
142 ohci_da8xx_stop(hcd);
143
144 return result;
145}
146
147/*
148 * Update the status data from the hub with the over-current indicator change.
149 */
150static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
151{
152 int length = ohci_hub_status_data(hcd, buf);
153
154 /* See if we have OCIC bit set on port 1 */
155 if (ocic_mask & (1 << 1)) {
156 dev_dbg(hcd->self.controller, "over-current indicator change "
157 "on port 1\n");
158
159 if (!length)
160 length = 1;
161
162 buf[0] |= 1 << 1;
163 }
164 return length;
165}
166
167/*
168 * Look at the control requests to the root hub and see if we need to override.
169 */
170static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
171 u16 wIndex, char *buf, u16 wLength)
172{
173 struct device *dev = hcd->self.controller;
174 struct da8xx_ohci_root_hub *hub = dev->platform_data;
175 int temp;
176
177 switch (typeReq) {
178 case GetPortStatus:
179 /* Check the port number */
180 if (wIndex != 1)
181 break;
182
183 dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
184
185 temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
186
187 /* The port power status (PPS) bit defaults to 1 */
188 if (hub->get_power && hub->get_power(wIndex) == 0)
189 temp &= ~RH_PS_PPS;
190
191 /* The port over-current indicator (POCI) bit is always 0 */
192 if (hub->get_oci && hub->get_oci(wIndex) > 0)
193 temp |= RH_PS_POCI;
194
195 /* The over-current indicator change (OCIC) bit is 0 too */
196 if (ocic_mask & (1 << wIndex))
197 temp |= RH_PS_OCIC;
198
199 put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
200 return 0;
201 case SetPortFeature:
202 temp = 1;
203 goto check_port;
204 case ClearPortFeature:
205 temp = 0;
206
207check_port:
208 /* Check the port number */
209 if (wIndex != 1)
210 break;
211
212 switch (wValue) {
213 case USB_PORT_FEAT_POWER:
214 dev_dbg(dev, "%sPortFeature(%u): %s\n",
215 temp ? "Set" : "Clear", wIndex, "POWER");
216
217 if (!hub->set_power)
218 return -EPIPE;
219
220 return hub->set_power(wIndex, temp) ? -EPIPE : 0;
221 case USB_PORT_FEAT_C_OVER_CURRENT:
222 dev_dbg(dev, "%sPortFeature(%u): %s\n",
223 temp ? "Set" : "Clear", wIndex,
224 "C_OVER_CURRENT");
225
226 if (temp)
227 ocic_mask |= 1 << wIndex;
228 else
229 ocic_mask &= ~(1 << wIndex);
230 return 0;
231 }
232 }
233
234 return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
235}
236
237static const struct hc_driver ohci_da8xx_hc_driver = {
238 .description = hcd_name,
239 .product_desc = "DA8xx OHCI",
240 .hcd_priv_size = sizeof(struct ohci_hcd),
241
242 /*
243 * generic hardware linkage
244 */
245 .irq = ohci_irq,
246 .flags = HCD_USB11 | HCD_MEMORY,
247
248 /*
249 * basic lifecycle operations
250 */
251 .reset = ohci_da8xx_init,
252 .start = ohci_da8xx_start,
253 .stop = ohci_da8xx_stop,
254 .shutdown = ohci_shutdown,
255
256 /*
257 * managing i/o requests and associated device resources
258 */
259 .urb_enqueue = ohci_urb_enqueue,
260 .urb_dequeue = ohci_urb_dequeue,
261 .endpoint_disable = ohci_endpoint_disable,
262
263 /*
264 * scheduling support
265 */
266 .get_frame_number = ohci_get_frame,
267
268 /*
269 * root hub support
270 */
271 .hub_status_data = ohci_da8xx_hub_status_data,
272 .hub_control = ohci_da8xx_hub_control,
273
274#ifdef CONFIG_PM
275 .bus_suspend = ohci_bus_suspend,
276 .bus_resume = ohci_bus_resume,
277#endif
278 .start_port_reset = ohci_start_port_reset,
279};
280
281/*-------------------------------------------------------------------------*/
282
283
284/**
285 * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
286 * Context: !in_interrupt()
287 *
288 * Allocates basic resources for this USB host controller, and
289 * then invokes the start() method for the HCD associated with it
290 * through the hotplug entry's driver_data.
291 */
292static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
293 struct platform_device *pdev)
294{
295 struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
296 struct usb_hcd *hcd;
297 struct resource *mem;
298 int error, irq;
299
300 if (hub == NULL)
301 return -ENODEV;
302
303 usb11_clk = clk_get(&pdev->dev, "usb11");
304 if (IS_ERR(usb11_clk))
305 return PTR_ERR(usb11_clk);
306
307 usb20_clk = clk_get(&pdev->dev, "usb20");
308 if (IS_ERR(usb20_clk)) {
309 error = PTR_ERR(usb20_clk);
310 goto err0;
311 }
312
313 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
314 if (!hcd) {
315 error = -ENOMEM;
316 goto err1;
317 }
318
319 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
320 if (!mem) {
321 error = -ENODEV;
322 goto err2;
323 }
324 hcd->rsrc_start = mem->start;
325 hcd->rsrc_len = mem->end - mem->start + 1;
326
327 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
328 dev_dbg(&pdev->dev, "request_mem_region failed\n");
329 error = -EBUSY;
330 goto err2;
331 }
332
333 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
334 if (!hcd->regs) {
335 dev_err(&pdev->dev, "ioremap failed\n");
336 error = -ENOMEM;
337 goto err3;
338 }
339
340 ohci_hcd_init(hcd_to_ohci(hcd));
341
342 irq = platform_get_irq(pdev, 0);
343 if (irq < 0) {
344 error = -ENODEV;
345 goto err4;
346 }
347 error = usb_add_hcd(hcd, irq, IRQF_DISABLED);
348 if (error)
349 goto err4;
350
351 if (hub->ocic_notify) {
352 error = hub->ocic_notify(ohci_da8xx_ocic_handler);
353 if (!error)
354 return 0;
355 }
356
357 usb_remove_hcd(hcd);
358err4:
359 iounmap(hcd->regs);
360err3:
361 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
362err2:
363 usb_put_hcd(hcd);
364err1:
365 clk_put(usb20_clk);
366err0:
367 clk_put(usb11_clk);
368 return error;
369}
370
371/**
372 * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
373 * @dev: USB Host Controller being removed
374 * Context: !in_interrupt()
375 *
376 * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
377 * the HCD's stop() method. It is always called from a thread
378 * context, normally "rmmod", "apmd", or something similar.
379 */
380static inline void
381usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
382{
383 struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
384
385 hub->ocic_notify(NULL);
386 usb_remove_hcd(hcd);
387 iounmap(hcd->regs);
388 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
389 usb_put_hcd(hcd);
390 clk_put(usb20_clk);
391 clk_put(usb11_clk);
392}
393
394static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
395{
396 return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
397}
398
399static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
400{
401 struct usb_hcd *hcd = platform_get_drvdata(dev);
402
403 usb_hcd_da8xx_remove(hcd, dev);
404 platform_set_drvdata(dev, NULL);
405
406 return 0;
407}
408
409#ifdef CONFIG_PM
410static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message)
411{
412 struct usb_hcd *hcd = platform_get_drvdata(dev);
413 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
414
415 if (time_before(jiffies, ohci->next_statechange))
416 msleep(5);
417 ohci->next_statechange = jiffies;
418
419 ohci_da8xx_clock(0);
420 hcd->state = HC_STATE_SUSPENDED;
421 dev->dev.power.power_state = PMSG_SUSPEND;
422 return 0;
423}
424
425static int ohci_da8xx_resume(struct platform_device *dev)
426{
427 struct usb_hcd *hcd = platform_get_drvdata(dev);
428 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
429
430 if (time_before(jiffies, ohci->next_statechange))
431 msleep(5);
432 ohci->next_statechange = jiffies;
433
434 ohci_da8xx_clock(1);
435 dev->dev.power.power_state = PMSG_ON;
436 usb_hcd_resume_root_hub(hcd);
437 return 0;
438}
439#endif
440
441/*
442 * Driver definition to register with platform structure.
443 */
444static struct platform_driver ohci_hcd_da8xx_driver = {
445 .probe = ohci_hcd_da8xx_drv_probe,
446 .remove = ohci_hcd_da8xx_drv_remove,
447 .shutdown = usb_hcd_platform_shutdown,
448#ifdef CONFIG_PM
449 .suspend = ohci_da8xx_suspend,
450 .resume = ohci_da8xx_resume,
451#endif
452 .driver = {
453 .owner = THIS_MODULE,
454 .name = "ohci",
455 },
456};
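
The glue above depends entirely on board code filling in struct da8xx_ohci_root_hub (from <mach/usb.h>) with port power and over-current callbacks. A hedged sketch of what a board file might supply; the callback signature is inferred from the calls in ohci_da8xx_init() and ohci_da8xx_hub_control(), and the GPIO name is a placeholder, not anything defined by this patch:

	/* Hypothetical board-side callbacks; only the field names actually
	 * used by the glue (set_power, potpgt, and optionally get_power,
	 * get_oci, ocic_notify) are assumed to exist. */
	static int evm_usb11_set_power(unsigned port, int on)
	{
		gpio_set_value(EVM_USB_DRVVBUS_GPIO, on);	/* placeholder GPIO */
		return 0;
	}

	static struct da8xx_ohci_root_hub evm_usb11_pdata = {
		.set_power	= evm_usb11_set_power,
		.potpgt		= (3 + 1) / 2,	/* 3 ms power-on-to-power-good, in 2 ms units */
	};
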
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 811f5dfdc582..8ad2441b0284 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -53,13 +53,13 @@ urb_print(struct urb * urb, char * str, int small, int status)
53 int i, len; 53 int i, len;
54 54
55 if (usb_pipecontrol (pipe)) { 55 if (usb_pipecontrol (pipe)) {
56 printk (KERN_DEBUG __FILE__ ": setup(8):"); 56 printk (KERN_DEBUG "%s: setup(8):", __FILE__);
57 for (i = 0; i < 8 ; i++) 57 for (i = 0; i < 8 ; i++)
58 printk (" %02x", ((__u8 *) urb->setup_packet) [i]); 58 printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
59 printk ("\n"); 59 printk ("\n");
60 } 60 }
61 if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) { 61 if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
62 printk (KERN_DEBUG __FILE__ ": data(%d/%d):", 62 printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__,
63 urb->actual_length, 63 urb->actual_length,
64 urb->transfer_buffer_length); 64 urb->transfer_buffer_length);
65 len = usb_pipeout (pipe)? 65 len = usb_pipeout (pipe)?
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 24eb74781919..afe59be23645 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1051,6 +1051,11 @@ MODULE_LICENSE ("GPL");
1051#define PLATFORM_DRIVER usb_hcd_pnx4008_driver 1051#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
1052#endif 1052#endif
1053 1053
1054#ifdef CONFIG_ARCH_DAVINCI_DA8XX
1055#include "ohci-da8xx.c"
1056#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
1057#endif
1058
1054#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 1059#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
1055 defined(CONFIG_CPU_SUBTYPE_SH7721) || \ 1060 defined(CONFIG_CPU_SUBTYPE_SH7721) || \
1056 defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1061 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 32bbce9718f0..65cac8cc8921 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -697,7 +697,7 @@ static int ohci_hub_control (
697 u16 wLength 697 u16 wLength
698) { 698) {
699 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 699 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
700 int ports = hcd_to_bus (hcd)->root_hub->maxchild; 700 int ports = ohci->num_ports;
701 u32 temp; 701 u32 temp;
702 int retval = 0; 702 int retval = 0;
703 703
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index de42283149c7..18d39f0463ee 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -28,8 +28,8 @@ extern int usb_disabled(void);
28 28
29static void lh7a404_start_hc(struct platform_device *dev) 29static void lh7a404_start_hc(struct platform_device *dev)
30{ 30{
31 printk(KERN_DEBUG __FILE__ 31 printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n",
32 ": starting LH7A404 OHCI USB Controller\n"); 32 __FILE__);
33 33
34 /* 34 /*
35 * Now, carefully enable the USB clock, and take 35 * Now, carefully enable the USB clock, and take
@@ -39,14 +39,13 @@ static void lh7a404_start_hc(struct platform_device *dev)
39 udelay(1000); 39 udelay(1000);
40 USBH_CMDSTATUS = OHCI_HCR; 40 USBH_CMDSTATUS = OHCI_HCR;
41 41
42 printk(KERN_DEBUG __FILE__ 42 printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__);
43 ": Clock to USB host has been enabled \n");
44} 43}
45 44
46static void lh7a404_stop_hc(struct platform_device *dev) 45static void lh7a404_stop_hc(struct platform_device *dev)
47{ 46{
48 printk(KERN_DEBUG __FILE__ 47 printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n",
49 ": stopping LH7A404 OHCI USB Controller\n"); 48 __FILE__);
50 49
51 CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */ 50 CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
52} 51}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 83cbecd2a1ed..5645f70b9214 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -24,10 +24,10 @@
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/mach-types.h> 25#include <asm/mach-types.h>
26 26
27#include <mach/mux.h> 27#include <plat/mux.h>
28#include <mach/irqs.h> 28#include <mach/irqs.h>
29#include <mach/fpga.h> 29#include <plat/fpga.h>
30#include <mach/usb.h> 30#include <plat/usb.h>
31 31
32 32
33/* OMAP-1510 OHCI has its own MMU for DMA */ 33/* OMAP-1510 OHCI has its own MMU for DMA */
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 100bf3d8437c..cd74bbdd007c 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -98,8 +98,8 @@
98#define ISP1301_I2C_INTERRUPT_RISING 0xE 98#define ISP1301_I2C_INTERRUPT_RISING 0xE
99#define ISP1301_I2C_REG_CLEAR_ADDR 1 99#define ISP1301_I2C_REG_CLEAR_ADDR 1
100 100
101struct i2c_driver isp1301_driver; 101static struct i2c_driver isp1301_driver;
102struct i2c_client *isp1301_i2c_client; 102static struct i2c_client *isp1301_i2c_client;
103 103
104extern int usb_disabled(void); 104extern int usb_disabled(void);
105extern int ocpi_enable(void); 105extern int ocpi_enable(void);
@@ -120,12 +120,12 @@ static int isp1301_remove(struct i2c_client *client)
120 return 0; 120 return 0;
121} 121}
122 122
123const struct i2c_device_id isp1301_id[] = { 123static const struct i2c_device_id isp1301_id[] = {
124 { "isp1301_pnx", 0 }, 124 { "isp1301_pnx", 0 },
125 { } 125 { }
126}; 126};
127 127
128struct i2c_driver isp1301_driver = { 128static struct i2c_driver isp1301_driver = {
129 .driver = { 129 .driver = {
130 .name = "isp1301_pnx", 130 .name = "isp1301_pnx",
131 }, 131 },
@@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
327 } 327 }
328 i2c_adap = i2c_get_adapter(2); 328 i2c_adap = i2c_get_adapter(2);
329 memset(&i2c_info, 0, sizeof(struct i2c_board_info)); 329 memset(&i2c_info, 0, sizeof(struct i2c_board_info));
330 strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); 330 strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
331 isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, 331 isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
332 normal_i2c); 332 normal_i2c);
333 i2c_put_adapter(i2c_adap); 333 i2c_put_adapter(i2c_adap);
@@ -411,7 +411,7 @@ out3:
411out2: 411out2:
412 clk_put(usb_clk); 412 clk_put(usb_clk);
413out1: 413out1:
414 i2c_unregister_client(isp1301_i2c_client); 414 i2c_unregister_device(isp1301_i2c_client);
415 isp1301_i2c_client = NULL; 415 isp1301_i2c_client = NULL;
416out_i2c_driver: 416out_i2c_driver:
417 i2c_del_driver(&isp1301_driver); 417 i2c_del_driver(&isp1301_driver);
@@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev)
430 pnx4008_unset_usb_bits(); 430 pnx4008_unset_usb_bits();
431 clk_disable(usb_clk); 431 clk_disable(usb_clk);
432 clk_put(usb_clk); 432 clk_put(usb_clk);
433 i2c_unregister_client(isp1301_i2c_client); 433 i2c_unregister_device(isp1301_i2c_client);
434 isp1301_i2c_client = NULL; 434 isp1301_i2c_client = NULL;
435 i2c_del_driver(&isp1301_driver); 435 i2c_del_driver(&isp1301_driver);
436 436
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 68a301710297..103263c230cf 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -114,21 +114,21 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
114 hcd->rsrc_len = res.end - res.start + 1; 114 hcd->rsrc_len = res.end - res.start + 1;
115 115
116 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 116 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
117 printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); 117 printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
118 rv = -EBUSY; 118 rv = -EBUSY;
119 goto err_rmr; 119 goto err_rmr;
120 } 120 }
121 121
122 irq = irq_of_parse_and_map(dn, 0); 122 irq = irq_of_parse_and_map(dn, 0);
123 if (irq == NO_IRQ) { 123 if (irq == NO_IRQ) {
124 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); 124 printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
125 rv = -EBUSY; 125 rv = -EBUSY;
126 goto err_irq; 126 goto err_irq;
127 } 127 }
128 128
129 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 129 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
130 if (!hcd->regs) { 130 if (!hcd->regs) {
131 printk(KERN_ERR __FILE__ ": ioremap failed\n"); 131 printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
132 rv = -ENOMEM; 132 rv = -ENOMEM;
133 goto err_ioremap; 133 goto err_ioremap;
134 } 134 }
@@ -169,7 +169,7 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
169 } else 169 } else
170 release_mem_region(res.start, 0x4); 170 release_mem_region(res.start, 0x4);
171 } else 171 } else
172 pr_debug(__FILE__ ": cannot get ehci offset from fdt\n"); 172 pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
173 } 173 }
174 174
175 iounmap(hcd->regs); 175 iounmap(hcd->regs);
@@ -212,7 +212,7 @@ static int ohci_hcd_ppc_of_shutdown(struct of_device *op)
212} 212}
213 213
214 214
215static struct of_device_id ohci_hcd_ppc_of_match[] = { 215static const struct of_device_id ohci_hcd_ppc_of_match[] = {
216#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE 216#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
217 { 217 {
218 .name = "usb", 218 .name = "usb",
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index cd3398b675b2..89e670e38c10 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -41,14 +41,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
41 41
42 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 42 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
43 if (!res) { 43 if (!res) {
44 pr_debug(__FILE__ ": no irq\n"); 44 pr_debug("%s: no irq\n", __FILE__);
45 return -ENODEV; 45 return -ENODEV;
46 } 46 }
47 irq = res->start; 47 irq = res->start;
48 48
49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
50 if (!res) { 50 if (!res) {
51 pr_debug(__FILE__ ": no reg addr\n"); 51 pr_debug("%s: no reg addr\n", __FILE__);
52 return -ENODEV; 52 return -ENODEV;
53 } 53 }
54 54
@@ -59,14 +59,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
59 hcd->rsrc_len = res->end - res->start + 1; 59 hcd->rsrc_len = res->end - res->start + 1;
60 60
61 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 61 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
62 pr_debug(__FILE__ ": request_mem_region failed\n"); 62 pr_debug("%s: request_mem_region failed\n", __FILE__);
63 retval = -EBUSY; 63 retval = -EBUSY;
64 goto err1; 64 goto err1;
65 } 65 }
66 66
67 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 67 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
68 if (!hcd->regs) { 68 if (!hcd->regs) {
69 pr_debug(__FILE__ ": ioremap failed\n"); 69 pr_debug("%s: ioremap failed\n", __FILE__);
70 retval = -ENOMEM; 70 retval = -ENOMEM;
71 goto err2; 71 goto err2;
72 } 72 }
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index f1c06202fdf2..a18debdd79b8 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -518,7 +518,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
518 return 0; 518 return 0;
519} 519}
520 520
521static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { 521static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
522 .suspend = ohci_hcd_pxa27x_drv_suspend, 522 .suspend = ohci_hcd_pxa27x_drv_suspend,
523 .resume = ohci_hcd_pxa27x_drv_resume, 523 .resume = ohci_hcd_pxa27x_drv_resume,
524}; 524};
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 35288bcae0db..83094d067e0f 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/irq.h> 10#include <linux/irq.h>
11#include <linux/slab.h>
11 12
12static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv) 13static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
13{ 14{
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index e4bbe8e188e4..d8eb3bdafabb 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -31,8 +31,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
31{ 31{
32 unsigned int usb_rst = 0; 32 unsigned int usb_rst = 0;
33 33
34 printk(KERN_DEBUG __FILE__ 34 printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
35 ": starting SA-1111 OHCI USB Controller\n"); 35 __FILE__);
36 36
37#ifdef CONFIG_SA1100_BADGE4 37#ifdef CONFIG_SA1100_BADGE4
38 if (machine_is_badge4()) { 38 if (machine_is_badge4()) {
@@ -65,8 +65,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
65static void sa1111_stop_hc(struct sa1111_dev *dev) 65static void sa1111_stop_hc(struct sa1111_dev *dev)
66{ 66{
67 unsigned int usb_rst; 67 unsigned int usb_rst;
68 printk(KERN_DEBUG __FILE__ 68 printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
69 ": stopping SA-1111 OHCI USB Controller\n"); 69 __FILE__);
70 70
71 /* 71 /*
72 * Put the USB host controller into reset. 72 * Put the USB host controller into reset.
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 50f57f468836..e62b30b3e429 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -660,13 +660,13 @@ static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
660 if (qh->dummy == NULL) { 660 if (qh->dummy == NULL) {
661 oxu_dbg(oxu, "no dummy td\n"); 661 oxu_dbg(oxu, "no dummy td\n");
662 oxu->qh_used[i] = 0; 662 oxu->qh_used[i] = 0;
663 663 qh = NULL;
664 return NULL; 664 goto unlock;
665 } 665 }
666 666
667 oxu->qh_used[i] = 1; 667 oxu->qh_used[i] = 1;
668 } 668 }
669 669unlock:
670 spin_unlock(&oxu->mem_lock); 670 spin_unlock(&oxu->mem_lock);
671 671
672 return qh; 672 return qh;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index e33d36256350..d478ffad59b4 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -35,7 +35,10 @@
35#include <linux/usb.h> 35#include <linux/usb.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/mm.h>
38#include <linux/irq.h> 39#include <linux/irq.h>
40#include <linux/slab.h>
41#include <asm/cacheflush.h>
39 42
40#include "../core/hcd.h" 43#include "../core/hcd.h"
41#include "r8a66597.h" 44#include "r8a66597.h"
@@ -216,8 +219,17 @@ static void disable_controller(struct r8a66597 *r8a66597)
216{ 219{
217 int port; 220 int port;
218 221
222 /* disable interrupts */
219 r8a66597_write(r8a66597, 0, INTENB0); 223 r8a66597_write(r8a66597, 0, INTENB0);
220 r8a66597_write(r8a66597, 0, INTSTS0); 224 r8a66597_write(r8a66597, 0, INTENB1);
225 r8a66597_write(r8a66597, 0, BRDYENB);
226 r8a66597_write(r8a66597, 0, BEMPENB);
227 r8a66597_write(r8a66597, 0, NRDYENB);
228
229 /* clear status */
230 r8a66597_write(r8a66597, 0, BRDYSTS);
231 r8a66597_write(r8a66597, 0, NRDYSTS);
232 r8a66597_write(r8a66597, 0, BEMPSTS);
221 233
222 for (port = 0; port < r8a66597->max_root_hub; port++) 234 for (port = 0; port < r8a66597->max_root_hub; port++)
223 r8a66597_disable_port(r8a66597, port); 235 r8a66597_disable_port(r8a66597, port);
@@ -407,7 +419,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
407 419
408/* this function must be called with interrupt disabled */ 420/* this function must be called with interrupt disabled */
409static void free_usb_address(struct r8a66597 *r8a66597, 421static void free_usb_address(struct r8a66597 *r8a66597,
410 struct r8a66597_device *dev) 422 struct r8a66597_device *dev, int reset)
411{ 423{
412 int port; 424 int port;
413 425
@@ -419,7 +431,13 @@ static void free_usb_address(struct r8a66597 *r8a66597,
419 dev->state = USB_STATE_DEFAULT; 431 dev->state = USB_STATE_DEFAULT;
420 r8a66597->address_map &= ~(1 << dev->address); 432 r8a66597->address_map &= ~(1 << dev->address);
421 dev->address = 0; 433 dev->address = 0;
422 dev_set_drvdata(&dev->udev->dev, NULL); 434 /*
 435 * Only when resetting the bus is it necessary to erase drvdata. When
 436 * a USB device behind a hub is disconnected, "dev->udev" has already
 437 * been freed by usb_disconnect(), so we cannot access that data.
438 */
439 if (reset)
440 dev_set_drvdata(&dev->udev->dev, NULL);
423 list_del(&dev->device_list); 441 list_del(&dev->device_list);
424 kfree(dev); 442 kfree(dev);
425 443
@@ -811,6 +829,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
811 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); 829 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
812} 830}
813 831
832static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
833 int status)
834__releases(r8a66597->lock)
835__acquires(r8a66597->lock)
836{
837 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
838 void *ptr;
839
840 for (ptr = urb->transfer_buffer;
841 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
842 ptr += PAGE_SIZE)
843 flush_dcache_page(virt_to_page(ptr));
844 }
845
846 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
847 spin_unlock(&r8a66597->lock);
848 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
849 spin_lock(&r8a66597->lock);
850}
851
814/* this function must be called with interrupt disabled */ 852/* this function must be called with interrupt disabled */
815static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) 853static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
816{ 854{
@@ -822,8 +860,6 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
822 return; 860 return;
823 861
824 list_for_each_entry_safe(td, next, list, queue) { 862 list_for_each_entry_safe(td, next, list, queue) {
825 if (!td)
826 continue;
827 if (td->address != address) 863 if (td->address != address)
828 continue; 864 continue;
829 865
@@ -831,15 +867,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
831 list_del(&td->queue); 867 list_del(&td->queue);
832 kfree(td); 868 kfree(td);
833 869
834 if (urb) { 870 if (urb)
835 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), 871 r8a66597_urb_done(r8a66597, urb, -ENODEV);
836 urb);
837 872
838 spin_unlock(&r8a66597->lock);
839 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
840 -ENODEV);
841 spin_lock(&r8a66597->lock);
842 }
843 break; 873 break;
844 } 874 }
845} 875}
@@ -999,6 +1029,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
999/* this function must be called with interrupt disabled */ 1029/* this function must be called with interrupt disabled */
1000static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, 1030static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1001 u16 syssts) 1031 u16 syssts)
1032__releases(r8a66597->lock)
1033__acquires(r8a66597->lock)
1002{ 1034{
1003 if (syssts == SE0) { 1035 if (syssts == SE0) {
1004 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); 1036 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1016,7 +1048,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1016 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); 1048 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
1017 } 1049 }
1018 1050
1051 spin_unlock(&r8a66597->lock);
1019 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); 1052 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
1053 spin_lock(&r8a66597->lock);
1020} 1054}
1021 1055
1022/* this function must be called with interrupt disabled */ 1056/* this function must be called with interrupt disabled */
@@ -1042,7 +1076,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
1042 struct r8a66597_device *dev = r8a66597->root_hub[port].dev; 1076 struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
1043 1077
1044 disable_r8a66597_pipe_all(r8a66597, dev); 1078 disable_r8a66597_pipe_all(r8a66597, dev);
1045 free_usb_address(r8a66597, dev); 1079 free_usb_address(r8a66597, dev, 0);
1046 1080
1047 start_root_hub_sampling(r8a66597, port, 0); 1081 start_root_hub_sampling(r8a66597, port, 0);
1048} 1082}
@@ -1276,10 +1310,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
1276 if (usb_pipeisoc(urb->pipe)) 1310 if (usb_pipeisoc(urb->pipe))
1277 urb->start_frame = r8a66597_get_frame(hcd); 1311 urb->start_frame = r8a66597_get_frame(hcd);
1278 1312
1279 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); 1313 r8a66597_urb_done(r8a66597, urb, status);
1280 spin_unlock(&r8a66597->lock);
1281 usb_hcd_giveback_urb(hcd, urb, status);
1282 spin_lock(&r8a66597->lock);
1283 } 1314 }
1284 1315
1285 if (restart) { 1316 if (restart) {
@@ -2025,8 +2056,6 @@ static struct r8a66597_device *get_r8a66597_device(struct r8a66597 *r8a66597,
2025 struct list_head *list = &r8a66597->child_device; 2056 struct list_head *list = &r8a66597->child_device;
2026 2057
2027 list_for_each_entry(dev, list, device_list) { 2058 list_for_each_entry(dev, list, device_list) {
2028 if (!dev)
2029 continue;
2030 if (dev->usb_address != addr) 2059 if (dev->usb_address != addr)
2031 continue; 2060 continue;
2032 2061
@@ -2063,7 +2092,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597,
2063 spin_lock_irqsave(&r8a66597->lock, flags); 2092 spin_lock_irqsave(&r8a66597->lock, flags);
2064 dev = get_r8a66597_device(r8a66597, addr); 2093 dev = get_r8a66597_device(r8a66597, addr);
2065 disable_r8a66597_pipe_all(r8a66597, dev); 2094 disable_r8a66597_pipe_all(r8a66597, dev);
2066 free_usb_address(r8a66597, dev); 2095 free_usb_address(r8a66597, dev, 0);
2067 put_child_connect_map(r8a66597, addr); 2096 put_child_connect_map(r8a66597, addr);
2068 spin_unlock_irqrestore(&r8a66597->lock, flags); 2097 spin_unlock_irqrestore(&r8a66597->lock, flags);
2069 } 2098 }
@@ -2206,7 +2235,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2206 rh->port |= (1 << USB_PORT_FEAT_RESET); 2235 rh->port |= (1 << USB_PORT_FEAT_RESET);
2207 2236
2208 disable_r8a66597_pipe_all(r8a66597, dev); 2237 disable_r8a66597_pipe_all(r8a66597, dev);
2209 free_usb_address(r8a66597, dev); 2238 free_usb_address(r8a66597, dev, 1);
2210 2239
2211 r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT, 2240 r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
2212 get_dvstctr_reg(port)); 2241 get_dvstctr_reg(port));
@@ -2357,7 +2386,7 @@ static int r8a66597_resume(struct device *dev)
2357 return 0; 2386 return 0;
2358} 2387}
2359 2388
2360static struct dev_pm_ops r8a66597_dev_pm_ops = { 2389static const struct dev_pm_ops r8a66597_dev_pm_ops = {
2361 .suspend = r8a66597_suspend, 2390 .suspend = r8a66597_suspend,
2362 .resume = r8a66597_resume, 2391 .resume = r8a66597_resume,
2363 .poweroff = r8a66597_suspend, 2392 .poweroff = r8a66597_suspend,
@@ -2470,6 +2499,12 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
2470 r8a66597->rh_timer.data = (unsigned long)r8a66597; 2499 r8a66597->rh_timer.data = (unsigned long)r8a66597;
2471 r8a66597->reg = (unsigned long)reg; 2500 r8a66597->reg = (unsigned long)reg;
2472 2501
2502 /* make sure no interrupts are pending */
2503 ret = r8a66597_clock_enable(r8a66597);
2504 if (ret < 0)
2505 goto clean_up3;
2506 disable_controller(r8a66597);
2507
2473 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) { 2508 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
2474 INIT_LIST_HEAD(&r8a66597->pipe_queue[i]); 2509 INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
2475 init_timer(&r8a66597->td_timer[i]); 2510 init_timer(&r8a66597->td_timer[i]);
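The r8a66597 hunks above annotate r8a66597_check_syssts() with __releases()/__acquires() and drop the driver spinlock around usb_hcd_poll_rh_status(), since that callback must not run with the lock held. A minimal user-space sketch of the unlock/callback/relock pattern, using a pthreads mutex as a stand-in for the kernel spinlock (names are illustrative, not driver code):

#include <pthread.h>

static pthread_mutex_t drv_lock = PTHREAD_MUTEX_INITIALIZER;

static void poll_root_hub_status(void)
{
	/* stands in for usb_hcd_poll_rh_status(), which may take other
	 * locks and therefore must run without the driver lock held */
}

/* called with drv_lock held, matching the __releases/__acquires annotations */
static void check_syssts(void)
{
	pthread_mutex_unlock(&drv_lock);
	poll_root_hub_status();
	pthread_mutex_lock(&drv_lock);
}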
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5b22a4d1c9e4..3b867a8af7b2 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -51,6 +51,7 @@
51#include <asm/irq.h> 51#include <asm/irq.h>
52#include <asm/system.h> 52#include <asm/system.h>
53#include <asm/byteorder.h> 53#include <asm/byteorder.h>
54#include <asm/unaligned.h>
54 55
55#include "../core/hcd.h" 56#include "../core/hcd.h"
56#include "sl811.h" 57#include "sl811.h"
@@ -719,10 +720,10 @@ retry:
719 /* port status seems weird until after reset, so 720 /* port status seems weird until after reset, so
720 * force the reset and make khubd clean up later. 721 * force the reset and make khubd clean up later.
721 */ 722 */
722 if (sl811->stat_insrmv & 1) 723 if (irqstat & SL11H_INTMASK_RD)
723 sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
724 else
725 sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION); 724 sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
725 else
726 sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
726 727
727 sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION; 728 sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION;
728 729
@@ -1272,12 +1273,12 @@ sl811h_hub_control(
1272 sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf); 1273 sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
1273 break; 1274 break;
1274 case GetHubStatus: 1275 case GetHubStatus:
1275 *(__le32 *) buf = cpu_to_le32(0); 1276 put_unaligned_le32(0, buf);
1276 break; 1277 break;
1277 case GetPortStatus: 1278 case GetPortStatus:
1278 if (wIndex != 1) 1279 if (wIndex != 1)
1279 goto error; 1280 goto error;
1280 *(__le32 *) buf = cpu_to_le32(sl811->port1); 1281 put_unaligned_le32(sl811->port1, buf);
1281 1282
1282#ifndef VERBOSE 1283#ifndef VERBOSE
1283 if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ 1284 if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
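The GetHubStatus/GetPortStatus hunks above switch from casting buf to __le32 * to put_unaligned_le32(), because the hub-control buffer is not guaranteed to be 4-byte aligned. A stand-alone sketch of what such an unaligned little-endian store does, assuming nothing about the pointer's alignment (an illustration, not the kernel helper itself):

#include <stdint.h>

static void store_le32(void *p, uint32_t v)
{
	uint8_t *b = p;

	/* byte-wise stores are safe at any offset, unlike a u32 cast */
	b[0] = v & 0xff;
	b[1] = (v >> 8) & 0xff;
	b[2] = (v >> 16) & 0xff;
	b[3] = (v >> 24) & 0xff;
}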
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 516848dd9b48..39d253e841f6 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -37,28 +37,8 @@ MODULE_LICENSE("GPL");
37/* MACROS */ 37/* MACROS */
38/*====================================================================*/ 38/*====================================================================*/
39 39
40#if defined(DEBUG) || defined(PCMCIA_DEBUG)
41
42static int pc_debug = 0;
43module_param(pc_debug, int, 0644);
44
45#define DBG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG "sl811_cs: " args)
46
47#else
48#define DBG(n, args...) do{}while(0)
49#endif /* no debugging */
50
51#define INFO(args...) printk(KERN_INFO "sl811_cs: " args) 40#define INFO(args...) printk(KERN_INFO "sl811_cs: " args)
52 41
53#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
54
55#define CS_CHECK(fn, ret) \
56 do { \
57 last_fn = (fn); \
58 if ((last_ret = (ret)) != 0) \
59 goto cs_failed; \
60 } while (0)
61
62/*====================================================================*/ 42/*====================================================================*/
63/* VARIABLES */ 43/* VARIABLES */
64/*====================================================================*/ 44/*====================================================================*/
@@ -76,7 +56,7 @@ static void sl811_cs_release(struct pcmcia_device * link);
76 56
77static void release_platform_dev(struct device * dev) 57static void release_platform_dev(struct device * dev)
78{ 58{
79 DBG(0, "sl811_cs platform_dev release\n"); 59 dev_dbg(dev, "sl811_cs platform_dev release\n");
80 dev->parent = NULL; 60 dev->parent = NULL;
81} 61}
82 62
@@ -140,7 +120,7 @@ static int sl811_hc_init(struct device *parent, resource_size_t base_addr,
140 120
141static void sl811_cs_detach(struct pcmcia_device *link) 121static void sl811_cs_detach(struct pcmcia_device *link)
142{ 122{
143 DBG(0, "sl811_cs_detach(0x%p)\n", link); 123 dev_dbg(&link->dev, "sl811_cs_detach\n");
144 124
145 sl811_cs_release(link); 125 sl811_cs_release(link);
146 126
@@ -150,7 +130,7 @@ static void sl811_cs_detach(struct pcmcia_device *link)
150 130
151static void sl811_cs_release(struct pcmcia_device * link) 131static void sl811_cs_release(struct pcmcia_device * link)
152{ 132{
153 DBG(0, "sl811_cs_release(0x%p)\n", link); 133 dev_dbg(&link->dev, "sl811_cs_release\n");
154 134
155 pcmcia_disable_device(link); 135 pcmcia_disable_device(link);
156 platform_device_unregister(&platform_dev); 136 platform_device_unregister(&platform_dev);
@@ -205,11 +185,11 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
205 185
206static int sl811_cs_config(struct pcmcia_device *link) 186static int sl811_cs_config(struct pcmcia_device *link)
207{ 187{
208 struct device *parent = &handle_to_dev(link); 188 struct device *parent = &link->dev;
209 local_info_t *dev = link->priv; 189 local_info_t *dev = link->priv;
210 int last_fn, last_ret; 190 int ret;
211 191
212 DBG(0, "sl811_cs_config(0x%p)\n", link); 192 dev_dbg(&link->dev, "sl811_cs_config\n");
213 193
214 if (pcmcia_loop_config(link, sl811_cs_config_check, NULL)) 194 if (pcmcia_loop_config(link, sl811_cs_config_check, NULL))
215 goto failed; 195 goto failed;
@@ -217,14 +197,16 @@ static int sl811_cs_config(struct pcmcia_device *link)
217 /* require an IRQ and two registers */ 197 /* require an IRQ and two registers */
218 if (!link->io.NumPorts1 || link->io.NumPorts1 < 2) 198 if (!link->io.NumPorts1 || link->io.NumPorts1 < 2)
219 goto failed; 199 goto failed;
220 if (link->conf.Attributes & CONF_ENABLE_IRQ) 200 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
221 CS_CHECK(RequestIRQ, 201 ret = pcmcia_request_irq(link, &link->irq);
222 pcmcia_request_irq(link, &link->irq)); 202 if (ret)
223 else 203 goto failed;
204 } else
224 goto failed; 205 goto failed;
225 206
226 CS_CHECK(RequestConfiguration, 207 ret = pcmcia_request_configuration(link, &link->conf);
227 pcmcia_request_configuration(link, &link->conf)); 208 if (ret)
209 goto failed;
228 210
229 sprintf(dev->node.dev_name, driver_name); 211 sprintf(dev->node.dev_name, driver_name);
230 dev->node.major = dev->node.minor = 0; 212 dev->node.major = dev->node.minor = 0;
@@ -241,8 +223,6 @@ static int sl811_cs_config(struct pcmcia_device *link)
241 223
242 if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ) 224 if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ)
243 < 0) { 225 < 0) {
244cs_failed:
245 cs_error(link, last_fn, last_ret);
246failed: 226failed:
247 printk(KERN_WARNING "sl811_cs_config failed\n"); 227 printk(KERN_WARNING "sl811_cs_config failed\n");
248 sl811_cs_release(link); 228 sl811_cs_release(link);
@@ -263,7 +243,6 @@ static int sl811_cs_probe(struct pcmcia_device *link)
263 243
264 /* Initialize */ 244 /* Initialize */
265 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 245 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
266 link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
267 link->irq.Handler = NULL; 246 link->irq.Handler = NULL;
268 247
269 link->conf.Attributes = 0; 248 link->conf.Attributes = 0;
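The sl811_cs hunks above drop the driver-private DBG()/CS_CHECK() macros in favour of dev_dbg() and explicit return-value checks with a single cleanup label. A reduced sketch of that error-handling shape, with placeholder request_a()/request_b()/release_all() functions standing in for the PCMCIA calls:

static int request_a(void) { return 0; }	/* placeholder resource request */
static int request_b(void) { return 0; }	/* placeholder resource request */
static void release_all(void) { }		/* placeholder cleanup */

static int configure(void)
{
	int ret;

	ret = request_a();
	if (ret)
		goto failed;

	ret = request_b();
	if (ret)
		goto failed;

	return 0;

failed:
	release_all();
	return ret;
}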
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index e52b954dda47..98cf0b26b968 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -9,6 +9,7 @@
9 * (C) Copyright 1999-2001 Johannes Erdfelt 9 * (C) Copyright 1999-2001 Johannes Erdfelt
10 */ 10 */
11 11
12#include <linux/slab.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/debugfs.h> 14#include <linux/debugfs.h>
14#include <linux/smp_lock.h> 15#include <linux/smp_lock.h>
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 5cd0e48f67fb..09197067fe6b 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -735,6 +735,7 @@ static void uhci_stop(struct usb_hcd *hcd)
735 uhci_hc_died(uhci); 735 uhci_hc_died(uhci);
736 uhci_scan_schedule(uhci); 736 uhci_scan_schedule(uhci);
737 spin_unlock_irq(&uhci->lock); 737 spin_unlock_irq(&uhci->lock);
738 synchronize_irq(hcd->irq);
738 739
739 del_timer_sync(&uhci->fsbr_timer); 740 del_timer_sync(&uhci->fsbr_timer);
740 release_uhci(uhci); 741 release_uhci(uhci);
@@ -749,7 +750,20 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
749 spin_lock_irq(&uhci->lock); 750 spin_lock_irq(&uhci->lock);
750 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) 751 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
751 rc = -ESHUTDOWN; 752 rc = -ESHUTDOWN;
752 else if (!uhci->dead) 753 else if (uhci->dead)
754 ; /* Dead controllers tell no tales */
755
756 /* Once the controller is stopped, port resumes that are already
757 * in progress won't complete. Hence if remote wakeup is enabled
758 * for the root hub and any ports are in the middle of a resume or
759 * remote wakeup, we must fail the suspend.
760 */
761 else if (hcd->self.root_hub->do_remote_wakeup &&
762 uhci->resuming_ports) {
763 dev_dbg(uhci_dev(uhci), "suspend failed because a port "
764 "is resuming\n");
765 rc = -EBUSY;
766 } else
753 suspend_rh(uhci, UHCI_RH_SUSPENDED); 767 suspend_rh(uhci, UHCI_RH_SUSPENDED);
754 spin_unlock_irq(&uhci->lock); 768 spin_unlock_irq(&uhci->lock);
755 return rc; 769 return rc;
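The uhci_rh_suspend() hunk above refuses to suspend the root hub while remote wakeup is enabled and a port resume is still in progress, since stopping the controller would leave that resume unfinished. A reduced sketch of the same decision chain, using a made-up state structure rather than the driver's:

#include <errno.h>
#include <stdbool.h>

struct rh_state {
	bool hw_accessible;
	bool dead;
	bool do_remote_wakeup;
	unsigned long resuming_ports;	/* bitmask of ports mid-resume */
};

static int rh_suspend(struct rh_state *s)
{
	if (!s->hw_accessible)
		return -ESHUTDOWN;
	if (s->dead)
		return 0;			/* dead controllers tell no tales */
	if (s->do_remote_wakeup && s->resuming_ports)
		return -EBUSY;			/* let the port resume finish first */
	/* ... actually stop the root hub here ... */
	return 0;
}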
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 885b585360b9..8270055848ca 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
167 /* Port received a wakeup request */ 167 /* Port received a wakeup request */
168 set_bit(port, &uhci->resuming_ports); 168 set_bit(port, &uhci->resuming_ports);
169 uhci->ports_timeout = jiffies + 169 uhci->ports_timeout = jiffies +
170 msecs_to_jiffies(20); 170 msecs_to_jiffies(25);
171 171
172 /* Make sure we see the port again 172 /* Make sure we see the port again
173 * after the resuming period is over. */ 173 * after the resuming period is over. */
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
index 562eba108816..773249306031 100644
--- a/drivers/usb/host/whci/asl.c
+++ b/drivers/usb/host/whci/asl.c
@@ -16,6 +16,7 @@
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/gfp.h>
19#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h> 21#include <linux/uwb/umc.h>
21#include <linux/usb.h> 22#include <linux/usb.h>
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index 2273c815941f..c5305b599ca0 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -15,6 +15,7 @@
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18#include <linux/slab.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/debugfs.h> 20#include <linux/debugfs.h>
20#include <linux/seq_file.h> 21#include <linux/seq_file.h>
@@ -31,17 +32,29 @@ struct whc_dbg {
31 32
32void qset_print(struct seq_file *s, struct whc_qset *qset) 33void qset_print(struct seq_file *s, struct whc_qset *qset)
33{ 34{
35 static const char *qh_type[] = {
36 "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
34 struct whc_std *std; 37 struct whc_std *std;
35 struct urb *urb = NULL; 38 struct urb *urb = NULL;
36 int i; 39 int i;
37 40
38 seq_printf(s, "qset %08x\n", (u32)qset->qset_dma); 41 seq_printf(s, "qset %08x", (u32)qset->qset_dma);
42 if (&qset->list_node == qset->whc->async_list.prev) {
43 seq_printf(s, " (dummy)\n");
44 } else {
45 seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
46 qset->qh.info1 & 0x0f,
47 (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
48 qh_type[(qset->qh.info1 >> 5) & 0x7],
49 (qset->qh.info1 >> 16) & 0xffff);
50 }
39 seq_printf(s, " -> %08x\n", (u32)qset->qh.link); 51 seq_printf(s, " -> %08x\n", (u32)qset->qh.link);
40 seq_printf(s, " info: %08x %08x %08x\n", 52 seq_printf(s, " info: %08x %08x %08x\n",
41 qset->qh.info1, qset->qh.info2, qset->qh.info3); 53 qset->qh.info1, qset->qh.info2, qset->qh.info3);
42 seq_printf(s, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); 54 seq_printf(s, " sts: %04x errs: %d curwin: %08x\n",
55 qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
43 seq_printf(s, " TD: sts: %08x opts: %08x\n", 56 seq_printf(s, " TD: sts: %08x opts: %08x\n",
44 qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); 57 qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
45 58
46 for (i = 0; i < WHCI_QSET_TD_MAX; i++) { 59 for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
47 seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", 60 seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
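The qset_print() hunk above decodes qh.info1 into endpoint number (bits 0-3), direction (bit 4), transfer type (bits 5-7) and max packet length (bits 16-31). The same decoding as a stand-alone user-space function, with the field layout taken from the hunk itself:

#include <stdint.h>
#include <stdio.h>

static const char *qh_type[] = {
	"ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr",
};

static void print_info1(uint32_t info1)
{
	printf("ep%u%s-%s maxpkt: %u\n",
	       (unsigned)(info1 & 0x0f),
	       (info1 >> 4) & 0x1 ? "in" : "out",
	       qh_type[(info1 >> 5) & 0x7],
	       (unsigned)((info1 >> 16) & 0xffff));
}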
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 687b622a1612..e0d3401285c8 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -250,6 +250,7 @@ static int whc_probe(struct umc_dev *umc)
250 } 250 }
251 251
252 usb_hcd->wireless = 1; 252 usb_hcd->wireless = 1;
253 usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
253 254
254 wusbhc = usb_hcd_to_wusbhc(usb_hcd); 255 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
255 whc = wusbhc_to_whc(wusbhc); 256 whc = wusbhc_to_whc(wusbhc);
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
index 34a783cb0133..f7582e8e2169 100644
--- a/drivers/usb/host/whci/init.c
+++ b/drivers/usb/host/whci/init.c
@@ -16,6 +16,7 @@
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/gfp.h>
19#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h> 21#include <linux/uwb/umc.h>
21 22
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
index 0db3fb2dc03a..33c5580b4d25 100644
--- a/drivers/usb/host/whci/pzl.c
+++ b/drivers/usb/host/whci/pzl.c
@@ -16,6 +16,7 @@
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/gfp.h>
19#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h> 21#include <linux/uwb/umc.h>
21#include <linux/usb.h> 22#include <linux/usb.h>
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 1b9dc1571570..141d049beb3e 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -17,6 +17,7 @@
17 */ 17 */
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/slab.h>
20#include <linux/uwb/umc.h> 21#include <linux/uwb/umc.h>
21#include <linux/usb.h> 22#include <linux/usb.h>
22 23
@@ -49,16 +50,19 @@ struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
49 * state 50 * state
50 * @urb: an urb for a transfer to this endpoint 51 * @urb: an urb for a transfer to this endpoint
51 */ 52 */
52static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) 53static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
53{ 54{
54 struct usb_device *usb_dev = urb->dev; 55 struct usb_device *usb_dev = urb->dev;
56 struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
55 struct usb_wireless_ep_comp_descriptor *epcd; 57 struct usb_wireless_ep_comp_descriptor *epcd;
56 bool is_out; 58 bool is_out;
59 uint8_t phy_rate;
57 60
58 is_out = usb_pipeout(urb->pipe); 61 is_out = usb_pipeout(urb->pipe);
59 62
60 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; 63 qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
61 64
65 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
62 if (epcd) { 66 if (epcd) {
63 qset->max_seq = epcd->bMaxSequence; 67 qset->max_seq = epcd->bMaxSequence;
64 qset->max_burst = epcd->bMaxBurst; 68 qset->max_burst = epcd->bMaxBurst;
@@ -67,12 +71,28 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
67 qset->max_burst = 1; 71 qset->max_burst = 1;
68 } 72 }
69 73
74 /*
75 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
76 * the maximum supported by the device for other endpoints
77 * (unless limited by the user).
78 */
79 if (usb_pipecontrol(urb->pipe))
80 phy_rate = UWB_PHY_RATE_53;
81 else {
82 uint16_t phy_rates;
83
84 phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
85 phy_rate = fls(phy_rates) - 1;
86 if (phy_rate > whc->wusbhc.phy_rate)
87 phy_rate = whc->wusbhc.phy_rate;
88 }
89
70 qset->qh.info1 = cpu_to_le32( 90 qset->qh.info1 = cpu_to_le32(
71 QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) 91 QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
72 | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) 92 | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
73 | usb_pipe_to_qh_type(urb->pipe) 93 | usb_pipe_to_qh_type(urb->pipe)
74 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) 94 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
75 | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out)) 95 | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
76 ); 96 );
77 qset->qh.info2 = cpu_to_le32( 97 qset->qh.info2 = cpu_to_le32(
78 QH_INFO2_BURST(qset->max_burst) 98 QH_INFO2_BURST(qset->max_burst)
@@ -86,7 +106,7 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
86 * strength and can presumably guess the Tx power required 106 * strength and can presumably guess the Tx power required
87 * from that? */ 107 * from that? */
88 qset->qh.info3 = cpu_to_le32( 108 qset->qh.info3 = cpu_to_le32(
89 QH_INFO3_TX_RATE_53_3 109 QH_INFO3_TX_RATE(phy_rate)
90 | QH_INFO3_TX_PWR(0) /* 0 == max power */ 110 | QH_INFO3_TX_PWR(0) /* 0 == max power */
91 ); 111 );
92 112
@@ -148,7 +168,7 @@ struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
148 168
149 qset->ep = urb->ep; 169 qset->ep = urb->ep;
150 urb->ep->hcpriv = qset; 170 urb->ep->hcpriv = qset;
151 qset_fill_qh(qset, urb); 171 qset_fill_qh(whc, qset, urb);
152 } 172 }
153 return qset; 173 return qset;
154} 174}
@@ -241,6 +261,36 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
241 qset->ntds--; 261 qset->ntds--;
242} 262}
243 263
264static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
265{
266 struct scatterlist *sg;
267 void *bounce;
268 size_t remaining, offset;
269
270 bounce = std->bounce_buf;
271 remaining = std->len;
272
273 sg = std->bounce_sg;
274 offset = std->bounce_offset;
275
276 while (remaining) {
277 size_t len;
278
279 len = min(sg->length - offset, remaining);
280 memcpy(sg_virt(sg) + offset, bounce, len);
281
282 bounce += len;
283 remaining -= len;
284
285 offset += len;
286 if (offset >= sg->length) {
287 sg = sg_next(sg);
288 offset = 0;
289 }
290 }
291
292}
293
244/** 294/**
245 * qset_free_std - remove an sTD and free it. 295 * qset_free_std - remove an sTD and free it.
246 * @whc: the WHCI host controller 296 * @whc: the WHCI host controller
@@ -249,13 +299,29 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
249void qset_free_std(struct whc *whc, struct whc_std *std) 299void qset_free_std(struct whc *whc, struct whc_std *std)
250{ 300{
251 list_del(&std->list_node); 301 list_del(&std->list_node);
252 if (std->num_pointers) { 302 if (std->bounce_buf) {
253 dma_unmap_single(whc->wusbhc.dev, std->dma_addr, 303 bool is_out = usb_pipeout(std->urb->pipe);
254 std->num_pointers * sizeof(struct whc_page_list_entry), 304 dma_addr_t dma_addr;
255 DMA_TO_DEVICE); 305
306 if (std->num_pointers)
307 dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
308 else
309 dma_addr = std->dma_addr;
310
311 dma_unmap_single(whc->wusbhc.dev, dma_addr,
312 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
313 if (!is_out)
314 qset_copy_bounce_to_sg(whc, std);
315 kfree(std->bounce_buf);
316 }
317 if (std->pl_virt) {
318 if (std->dma_addr)
319 dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
320 std->num_pointers * sizeof(struct whc_page_list_entry),
321 DMA_TO_DEVICE);
256 kfree(std->pl_virt); 322 kfree(std->pl_virt);
323 std->pl_virt = NULL;
257 } 324 }
258
259 kfree(std); 325 kfree(std);
260} 326}
261 327
@@ -293,12 +359,17 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
293{ 359{
294 dma_addr_t dma_addr = std->dma_addr; 360 dma_addr_t dma_addr = std->dma_addr;
295 dma_addr_t sp, ep; 361 dma_addr_t sp, ep;
296 size_t std_len = std->len;
297 size_t pl_len; 362 size_t pl_len;
298 int p; 363 int p;
299 364
300 sp = ALIGN(dma_addr, WHCI_PAGE_SIZE); 365 /* Short buffers don't need a page list. */
301 ep = dma_addr + std_len; 366 if (std->len <= WHCI_PAGE_SIZE) {
367 std->num_pointers = 0;
368 return 0;
369 }
370
371 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
372 ep = dma_addr + std->len;
302 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); 373 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
303 374
304 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); 375 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
@@ -309,7 +380,7 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
309 380
310 for (p = 0; p < std->num_pointers; p++) { 381 for (p = 0; p < std->num_pointers; p++) {
311 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); 382 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
312 dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE); 383 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
313 } 384 }
314 385
315 return 0; 386 return 0;
@@ -339,6 +410,218 @@ static void urb_dequeue_work(struct work_struct *work)
339 spin_unlock_irqrestore(&whc->lock, flags); 410 spin_unlock_irqrestore(&whc->lock, flags);
340} 411}
341 412
413static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
414 struct urb *urb, gfp_t mem_flags)
415{
416 struct whc_std *std;
417
418 std = kzalloc(sizeof(struct whc_std), mem_flags);
419 if (std == NULL)
420 return NULL;
421
422 std->urb = urb;
423 std->qtd = NULL;
424
425 INIT_LIST_HEAD(&std->list_node);
426 list_add_tail(&std->list_node, &qset->stds);
427
428 return std;
429}
430
431static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
432 gfp_t mem_flags)
433{
434 size_t remaining;
435 struct scatterlist *sg;
436 int i;
437 int ntds = 0;
438 struct whc_std *std = NULL;
439 struct whc_page_list_entry *entry;
440 dma_addr_t prev_end = 0;
441 size_t pl_len;
442 int p = 0;
443
444 remaining = urb->transfer_buffer_length;
445
446 for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
447 dma_addr_t dma_addr;
448 size_t dma_remaining;
449 dma_addr_t sp, ep;
450 int num_pointers;
451
452 if (remaining == 0) {
453 break;
454 }
455
456 dma_addr = sg_dma_address(sg);
457 dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
458
459 while (dma_remaining) {
460 size_t dma_len;
461
462 /*
463 * We can use the previous std (if it exists) provided that:
464 * - the previous one ended on a page boundary.
465 * - the current one begins on a page boundary.
466 * - the previous one isn't full.
467 *
468 * If a new std is needed but the previous one
469 * was not a whole number of packets then this
470 * sg list cannot be mapped onto multiple
471 * qTDs. Return an error and let the caller
472 * sort it out.
473 */
474 if (!std
475 || (prev_end & (WHCI_PAGE_SIZE-1))
476 || (dma_addr & (WHCI_PAGE_SIZE-1))
477 || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
478 if (std->len % qset->max_packet != 0)
479 return -EINVAL;
480 std = qset_new_std(whc, qset, urb, mem_flags);
481 if (std == NULL) {
482 return -ENOMEM;
483 }
484 ntds++;
485 p = 0;
486 }
487
488 dma_len = dma_remaining;
489
490 /*
491 * If the remainder of this element doesn't
492 * fit in a single qTD, limit the qTD to a
493 * whole number of packets. This allows the
494 * remainder to go into the next qTD.
495 */
496 if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
497 dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
498 * qset->max_packet - std->len;
499 }
500
501 std->len += dma_len;
502 std->ntds_remaining = -1; /* filled in later */
503
504 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
505 ep = dma_addr + dma_len;
506 num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
507 std->num_pointers += num_pointers;
508
509 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
510
511 std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
512 if (std->pl_virt == NULL) {
513 return -ENOMEM;
514 }
515
516 for (;p < std->num_pointers; p++, entry++) {
517 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
518 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
519 }
520
521 prev_end = dma_addr = ep;
522 dma_remaining -= dma_len;
523 remaining -= dma_len;
524 }
525 }
526
527 /* Now the number of stds is known, go back and fill in
528 std->ntds_remaining. */
529 list_for_each_entry(std, &qset->stds, list_node) {
530 if (std->ntds_remaining == -1) {
531 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
532 std->ntds_remaining = ntds--;
533 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
534 pl_len, DMA_TO_DEVICE);
535 }
536 }
537 return 0;
538}
539
540/**
541 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
542 *
543 * If the URB contains an sg list whose elements cannot be directly
544 * mapped to qTDs then the data must be transferred via bounce
545 * buffers.
546 */
547static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
548 struct urb *urb, gfp_t mem_flags)
549{
550 bool is_out = usb_pipeout(urb->pipe);
551 size_t max_std_len;
552 size_t remaining;
553 int ntds = 0;
554 struct whc_std *std = NULL;
555 void *bounce = NULL;
556 struct scatterlist *sg;
557 int i;
558
559 /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 56 k */
560 max_std_len = qset->max_burst * qset->max_packet;
561
562 remaining = urb->transfer_buffer_length;
563
564 for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
565 size_t len;
566 size_t sg_remaining;
567 void *orig;
568
569 if (remaining == 0) {
570 break;
571 }
572
573 sg_remaining = min_t(size_t, remaining, sg->length);
574 orig = sg_virt(sg);
575
576 while (sg_remaining) {
577 if (!std || std->len == max_std_len) {
578 std = qset_new_std(whc, qset, urb, mem_flags);
579 if (std == NULL)
580 return -ENOMEM;
581 std->bounce_buf = kmalloc(max_std_len, mem_flags);
582 if (std->bounce_buf == NULL)
583 return -ENOMEM;
584 std->bounce_sg = sg;
585 std->bounce_offset = orig - sg_virt(sg);
586 bounce = std->bounce_buf;
587 ntds++;
588 }
589
590 len = min(sg_remaining, max_std_len - std->len);
591
592 if (is_out)
593 memcpy(bounce, orig, len);
594
595 std->len += len;
596 std->ntds_remaining = -1; /* filled in later */
597
598 bounce += len;
599 orig += len;
600 sg_remaining -= len;
601 remaining -= len;
602 }
603 }
604
605 /*
606 * For each of the new sTDs, map the bounce buffers, create
607 * page lists (if necessary), and fill in std->ntds_remaining.
608 */
609 list_for_each_entry(std, &qset->stds, list_node) {
610 if (std->ntds_remaining != -1)
611 continue;
612
613 std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
614 is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
615
616 if (qset_fill_page_list(whc, std, mem_flags) < 0)
617 return -ENOMEM;
618
619 std->ntds_remaining = ntds--;
620 }
621
622 return 0;
623}
624
342/** 625/**
343 * qset_add_urb - add an urb to the qset's queue. 626 * qset_add_urb - add an urb to the qset's queue.
344 * 627 *
@@ -353,10 +636,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
353 int remaining = urb->transfer_buffer_length; 636 int remaining = urb->transfer_buffer_length;
354 u64 transfer_dma = urb->transfer_dma; 637 u64 transfer_dma = urb->transfer_dma;
355 int ntds_remaining; 638 int ntds_remaining;
356 639 int ret;
357 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
358 if (ntds_remaining == 0)
359 ntds_remaining = 1;
360 640
361 wurb = kzalloc(sizeof(struct whc_urb), mem_flags); 641 wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
362 if (wurb == NULL) 642 if (wurb == NULL)
@@ -366,32 +646,39 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
366 wurb->urb = urb; 646 wurb->urb = urb;
367 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); 647 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
368 648
649 if (urb->sg) {
650 ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
651 if (ret == -EINVAL) {
652 qset_free_stds(qset, urb);
653 ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
654 }
655 if (ret < 0)
656 goto err_no_mem;
657 return 0;
658 }
659
660 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
661 if (ntds_remaining == 0)
662 ntds_remaining = 1;
663
369 while (ntds_remaining) { 664 while (ntds_remaining) {
370 struct whc_std *std; 665 struct whc_std *std;
371 size_t std_len; 666 size_t std_len;
372 667
373 std = kmalloc(sizeof(struct whc_std), mem_flags);
374 if (std == NULL)
375 goto err_no_mem;
376
377 std_len = remaining; 668 std_len = remaining;
378 if (std_len > QTD_MAX_XFER_SIZE) 669 if (std_len > QTD_MAX_XFER_SIZE)
379 std_len = QTD_MAX_XFER_SIZE; 670 std_len = QTD_MAX_XFER_SIZE;
380 671
381 std->urb = urb; 672 std = qset_new_std(whc, qset, urb, mem_flags);
673 if (std == NULL)
674 goto err_no_mem;
675
382 std->dma_addr = transfer_dma; 676 std->dma_addr = transfer_dma;
383 std->len = std_len; 677 std->len = std_len;
384 std->ntds_remaining = ntds_remaining; 678 std->ntds_remaining = ntds_remaining;
385 std->qtd = NULL;
386 679
387 INIT_LIST_HEAD(&std->list_node); 680 if (qset_fill_page_list(whc, std, mem_flags) < 0)
388 list_add_tail(&std->list_node, &qset->stds); 681 goto err_no_mem;
389
390 if (std_len > WHCI_PAGE_SIZE) {
391 if (qset_fill_page_list(whc, std, mem_flags) < 0)
392 goto err_no_mem;
393 } else
394 std->num_pointers = 0;
395 682
396 ntds_remaining--; 683 ntds_remaining--;
397 remaining -= std_len; 684 remaining -= std_len;
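The qset.c changes above add scatter-gather support: qset_add_urb_sg() maps sg elements straight onto qTD page lists when their boundaries line up with WHCI pages, and qset_add_urb_sg_linearize() falls back to bounce buffers otherwise, with qset_copy_bounce_to_sg() copying IN data back on completion. A user-space sketch of that copy-back walk, with a simplified element type standing in for struct scatterlist:

#include <stddef.h>
#include <string.h>

struct sg_elem {
	void *buf;
	size_t length;
};

static void copy_bounce_to_sg(struct sg_elem *sg, size_t offset,
			      const void *bounce, size_t len)
{
	while (len) {
		size_t n = sg->length - offset;

		if (n > len)
			n = len;
		memcpy((char *)sg->buf + offset, bounce, n);

		bounce = (const char *)bounce + n;
		len -= n;

		offset += n;
		if (offset >= sg->length) {
			sg++;		/* move to the next scatterlist element */
			offset = 0;
		}
	}
}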
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
index 24e94d983c5e..c80c7d93bc4a 100644
--- a/drivers/usb/host/whci/whcd.h
+++ b/drivers/usb/host/whci/whcd.h
@@ -84,6 +84,11 @@ struct whc {
84 * @len: the length of data in the associated TD. 84 * @len: the length of data in the associated TD.
85 * @ntds_remaining: number of TDs (starting from this one) in this transfer. 85 * @ntds_remaining: number of TDs (starting from this one) in this transfer.
86 * 86 *
87 * @bounce_buf: a bounce buffer if the std was from an urb with a sg
88 * list that could not be mapped to qTDs directly.
89 * @bounce_sg: the first scatterlist element bounce_buf is for.
90 * @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
91 *
87 * Queued URBs may require more TDs than are available in a qset so we 92 * Queued URBs may require more TDs than are available in a qset so we
88 * use a list of these "software TDs" (sTDs) to hold per-TD data. 93 * use a list of these "software TDs" (sTDs) to hold per-TD data.
89 */ 94 */
@@ -97,6 +102,10 @@ struct whc_std {
97 int num_pointers; 102 int num_pointers;
98 dma_addr_t dma_addr; 103 dma_addr_t dma_addr;
99 struct whc_page_list_entry *pl_virt; 104 struct whc_page_list_entry *pl_virt;
105
106 void *bounce_buf;
107 struct scatterlist *bounce_sg;
108 unsigned bounce_offset;
100}; 109};
101 110
102/** 111/**
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
index e8d0001605be..4d4cbc0730bf 100644
--- a/drivers/usb/host/whci/whci-hc.h
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -172,14 +172,7 @@ struct whc_qhead {
172#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */ 172#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
173#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */ 173#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
174 174
175#define QH_INFO3_TX_RATE_53_3 (0 << 24) 175#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
176#define QH_INFO3_TX_RATE_80 (1 << 24)
177#define QH_INFO3_TX_RATE_106_7 (2 << 24)
178#define QH_INFO3_TX_RATE_160 (3 << 24)
179#define QH_INFO3_TX_RATE_200 (4 << 24)
180#define QH_INFO3_TX_RATE_320 (5 << 24)
181#define QH_INFO3_TX_RATE_400 (6 << 24)
182#define QH_INFO3_TX_RATE_480 (7 << 24)
183#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */ 176#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
184 177
185#define QH_STATUS_FLOW_CTRL (1 << 15) 178#define QH_STATUS_FLOW_CTRL (1 << 15)
@@ -267,8 +260,9 @@ struct whc_qset {
267 unsigned reset:1; 260 unsigned reset:1;
268 struct urb *pause_after_urb; 261 struct urb *pause_after_urb;
269 struct completion remove_complete; 262 struct completion remove_complete;
270 int max_burst; 263 uint16_t max_packet;
271 int max_seq; 264 uint8_t max_burst;
265 uint8_t max_seq;
272}; 266};
273 267
274static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) 268static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
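The whci-hc.h hunk above replaces the fixed QH_INFO3_TX_RATE_* constants with a parameterised QH_INFO3_TX_RATE(r) field macro, which qset_fill_qh() now feeds with the highest PHY rate the device advertises, clamped to the host's limit. A stand-alone sketch of that selection, with highest_bit() standing in for the kernel's fls():

#include <stdint.h>

#define QH_INFO3_TX_RATE(r)	((uint32_t)(r) << 24)	/* PHY rate index field */

static unsigned highest_bit(uint16_t mask)	/* fls(mask) - 1, valid for mask != 0 */
{
	unsigned i = 0;

	while (mask >>= 1)
		i++;
	return i;
}

static uint32_t pick_tx_rate(uint16_t dev_phy_rates, unsigned host_max_rate)
{
	unsigned rate = highest_bit(dev_phy_rates);

	if (rate > host_max_rate)
		rate = host_max_rate;	/* clamp to the host controller's limit */
	return QH_INFO3_TX_RATE(rate);
}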
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 33128d52f212..105fa8b025bb 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -406,6 +406,25 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
406 } 406 }
407} 407}
408 408
409char *xhci_get_slot_state(struct xhci_hcd *xhci,
410 struct xhci_container_ctx *ctx)
411{
412 struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
413
414 switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
415 case 0:
416 return "enabled/disabled";
417 case 1:
418 return "default";
419 case 2:
420 return "addressed";
421 case 3:
422 return "configured";
423 default:
424 return "reserved";
425 }
426}
427
409void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) 428void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
410{ 429{
411 /* Fields are 32 bits wide, DMA addresses are in bytes */ 430 /* Fields are 32 bits wide, DMA addresses are in bytes */
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index ecc131c3fe33..78c4edac1db1 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -101,12 +101,15 @@ static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
101 101
102 next = readl(base + ext_offset); 102 next = readl(base + ext_offset);
103 103
104 if (ext_offset == XHCI_HCC_PARAMS_OFFSET) 104 if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
105 /* Find the first extended capability */ 105 /* Find the first extended capability */
106 next = XHCI_HCC_EXT_CAPS(next); 106 next = XHCI_HCC_EXT_CAPS(next);
107 else 107 ext_offset = 0;
108 } else {
108 /* Find the next extended capability */ 109 /* Find the next extended capability */
109 next = XHCI_EXT_CAPS_NEXT(next); 110 next = XHCI_EXT_CAPS_NEXT(next);
111 }
112
110 if (!next) 113 if (!next)
111 return 0; 114 return 0;
112 /* 115 /*
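The hunk above adjusts xhci_find_next_cap_offset() so the value read out of HCCPARAMS is treated as the first-capability pointer rather than a next pointer. For context, a stand-alone sketch of the extended-capability walk this header supports: each capability register holds an ID in bits 7:0 and a next pointer (in 32-bit words) in bits 15:8, and a next pointer of 0 ends the list. A plain array stands in for the MMIO register file:

#include <stdint.h>

static unsigned find_ext_cap(const uint32_t *regs, unsigned first_dw,
			     uint8_t want_id)
{
	unsigned off = first_dw;		/* dword index of the first capability */

	while (off) {
		uint32_t cap = regs[off];

		if ((cap & 0xff) == want_id)
			return off;		/* found: dword offset of the capability */
		if (!((cap >> 8) & 0xff))
			break;			/* next pointer of 0 terminates the list */
		off += (cap >> 8) & 0xff;	/* next pointer is in dwords */
	}
	return 0;
}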
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index eac5b53aa9e7..208b805b80eb 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -129,6 +129,50 @@ static u32 xhci_port_state_to_neutral(u32 state)
129 return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS); 129 return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
130} 130}
131 131
132static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
133 u32 __iomem *addr, u32 port_status)
134{
135 /* Write 1 to disable the port */
136 xhci_writel(xhci, port_status | PORT_PE, addr);
137 port_status = xhci_readl(xhci, addr);
138 xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
139 wIndex, port_status);
140}
141
142static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
143 u16 wIndex, u32 __iomem *addr, u32 port_status)
144{
145 char *port_change_bit;
146 u32 status;
147
148 switch (wValue) {
149 case USB_PORT_FEAT_C_RESET:
150 status = PORT_RC;
151 port_change_bit = "reset";
152 break;
153 case USB_PORT_FEAT_C_CONNECTION:
154 status = PORT_CSC;
155 port_change_bit = "connect";
156 break;
157 case USB_PORT_FEAT_C_OVER_CURRENT:
158 status = PORT_OCC;
159 port_change_bit = "over-current";
160 break;
161 case USB_PORT_FEAT_C_ENABLE:
162 status = PORT_PEC;
163 port_change_bit = "enable/disable";
164 break;
165 default:
166 /* Should never happen */
167 return;
168 }
169 /* Change bits are all write 1 to clear */
170 xhci_writel(xhci, port_status | status, addr);
171 port_status = xhci_readl(xhci, addr);
172 xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
173 port_change_bit, wIndex, port_status);
174}
175
132int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, 176int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
133 u16 wIndex, char *buf, u16 wLength) 177 u16 wIndex, char *buf, u16 wLength)
134{ 178{
@@ -138,7 +182,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
138 u32 temp, status; 182 u32 temp, status;
139 int retval = 0; 183 int retval = 0;
140 u32 __iomem *addr; 184 u32 __iomem *addr;
141 char *port_change_bit;
142 185
143 ports = HCS_MAX_PORTS(xhci->hcs_params1); 186 ports = HCS_MAX_PORTS(xhci->hcs_params1);
144 187
@@ -229,26 +272,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
229 temp = xhci_port_state_to_neutral(temp); 272 temp = xhci_port_state_to_neutral(temp);
230 switch (wValue) { 273 switch (wValue) {
231 case USB_PORT_FEAT_C_RESET: 274 case USB_PORT_FEAT_C_RESET:
232 status = PORT_RC;
233 port_change_bit = "reset";
234 break;
235 case USB_PORT_FEAT_C_CONNECTION: 275 case USB_PORT_FEAT_C_CONNECTION:
236 status = PORT_CSC;
237 port_change_bit = "connect";
238 break;
239 case USB_PORT_FEAT_C_OVER_CURRENT: 276 case USB_PORT_FEAT_C_OVER_CURRENT:
240 status = PORT_OCC; 277 case USB_PORT_FEAT_C_ENABLE:
241 port_change_bit = "over-current"; 278 xhci_clear_port_change_bit(xhci, wValue, wIndex,
279 addr, temp);
280 break;
281 case USB_PORT_FEAT_ENABLE:
282 xhci_disable_port(xhci, wIndex, addr, temp);
242 break; 283 break;
243 default: 284 default:
244 goto error; 285 goto error;
245 } 286 }
246 /* Change bits are all write 1 to clear */
247 xhci_writel(xhci, temp | status, addr);
248 temp = xhci_readl(xhci, addr);
249 xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
250 port_change_bit, wIndex, temp);
251 temp = xhci_readl(xhci, addr); /* unblock any posted writes */
252 break; 287 break;
253 default: 288 default:
254error: 289error:
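The xhci-hub.c hunks above factor the ClearPortFeature handling into xhci_clear_port_change_bit() and xhci_disable_port(); both depend on the PORTSC change bits being write-1-to-clear and on xhci_port_state_to_neutral() masking the register so a write-back does not clear unrelated bits. A reduced sketch of that idiom on a plain variable, showing only the connect-status-change bit:

#include <stdint.h>

#define PORT_CSC	(1u << 17)	/* connect status change, write 1 to clear */

/* keep only the read-only and read/write-same bits, as
 * xhci_port_state_to_neutral() does, so writing the value back does not
 * accidentally clear other change bits */
static uint32_t port_state_to_neutral(uint32_t state, uint32_t ro_mask,
				      uint32_t rws_mask)
{
	return (state & ro_mask) | (state & rws_mask);
}

static uint32_t clear_connect_change(uint32_t portsc, uint32_t ro_mask,
				     uint32_t rws_mask)
{
	/* value to write back: neutralised state plus the one change bit */
	return port_state_to_neutral(portsc, ro_mask, rws_mask) | PORT_CSC;
}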
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b8fd270a8b0d..d64f5724bfc4 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/usb.h> 23#include <linux/usb.h>
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/slab.h>
25#include <linux/dmapool.h> 26#include <linux/dmapool.h>
26 27
27#include "xhci.h" 28#include "xhci.h"
@@ -125,6 +126,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
125 kfree(ring); 126 kfree(ring);
126} 127}
127 128
129static void xhci_initialize_ring_info(struct xhci_ring *ring)
130{
131 /* The ring is empty, so the enqueue pointer == dequeue pointer */
132 ring->enqueue = ring->first_seg->trbs;
133 ring->enq_seg = ring->first_seg;
134 ring->dequeue = ring->enqueue;
135 ring->deq_seg = ring->first_seg;
136 /* The ring is initialized to 0. The producer must write 1 to the cycle
137 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
138 * compare CCS to the cycle bit to check ownership, so CCS = 1.
139 */
140 ring->cycle_state = 1;
141 /* Not necessary for new rings, but needed for re-initialized rings */
142 ring->enq_updates = 0;
143 ring->deq_updates = 0;
144}
145
128/** 146/**
129 * Create a new ring with zero or more segments. 147 * Create a new ring with zero or more segments.
130 * 148 *
@@ -173,17 +191,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
173 " segment %p (virtual), 0x%llx (DMA)\n", 191 " segment %p (virtual), 0x%llx (DMA)\n",
174 prev, (unsigned long long)prev->dma); 192 prev, (unsigned long long)prev->dma);
175 } 193 }
176 /* The ring is empty, so the enqueue pointer == dequeue pointer */ 194 xhci_initialize_ring_info(ring);
177 ring->enqueue = ring->first_seg->trbs;
178 ring->enq_seg = ring->first_seg;
179 ring->dequeue = ring->enqueue;
180 ring->deq_seg = ring->first_seg;
181 /* The ring is initialized to 0. The producer must write 1 to the cycle
182 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
183 * compare CCS to the cycle bit to check ownership, so CCS = 1.
184 */
185 ring->cycle_state = 1;
186
187 return ring; 195 return ring;
188 196
189fail: 197fail:
@@ -191,6 +199,52 @@ fail:
191 return 0; 199 return 0;
192} 200}
193 201
202void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
203 struct xhci_virt_device *virt_dev,
204 unsigned int ep_index)
205{
206 int rings_cached;
207
208 rings_cached = virt_dev->num_rings_cached;
209 if (rings_cached < XHCI_MAX_RINGS_CACHED) {
210 virt_dev->num_rings_cached++;
211 rings_cached = virt_dev->num_rings_cached;
212 virt_dev->ring_cache[rings_cached] =
213 virt_dev->eps[ep_index].ring;
214 xhci_dbg(xhci, "Cached old ring, "
215 "%d ring%s cached\n",
216 rings_cached,
217 (rings_cached > 1) ? "s" : "");
218 } else {
219 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
220 xhci_dbg(xhci, "Ring cache full (%d rings), "
221 "freeing ring\n",
222 virt_dev->num_rings_cached);
223 }
224 virt_dev->eps[ep_index].ring = NULL;
225}
226
227/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
228 * pointers to the beginning of the ring.
229 */
230static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
231 struct xhci_ring *ring)
232{
233 struct xhci_segment *seg = ring->first_seg;
234 do {
235 memset(seg->trbs, 0,
236 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
237 /* All endpoint rings have link TRBs */
238 xhci_link_segments(xhci, seg, seg->next, 1);
239 seg = seg->next;
240 } while (seg != ring->first_seg);
241 xhci_initialize_ring_info(ring);
242 /* td list should be empty since all URBs have been cancelled,
243 * but just in case...
244 */
245 INIT_LIST_HEAD(&ring->td_list);
246}
247
194#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 248#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
195 249
196struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 250struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -214,6 +268,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
214void xhci_free_container_ctx(struct xhci_hcd *xhci, 268void xhci_free_container_ctx(struct xhci_hcd *xhci,
215 struct xhci_container_ctx *ctx) 269 struct xhci_container_ctx *ctx)
216{ 270{
271 if (!ctx)
272 return;
217 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); 273 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
218 kfree(ctx); 274 kfree(ctx);
219} 275}
@@ -248,6 +304,15 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
248 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 304 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
249} 305}
250 306
307static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
308 struct xhci_virt_ep *ep)
309{
310 init_timer(&ep->stop_cmd_timer);
311 ep->stop_cmd_timer.data = (unsigned long) ep;
312 ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
313 ep->xhci = xhci;
314}
315
251/* All the xhci_tds in the ring's TD list should be freed at this point */ 316/* All the xhci_tds in the ring's TD list should be freed at this point */
252void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 317void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
253{ 318{
@@ -267,6 +332,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
267 if (dev->eps[i].ring) 332 if (dev->eps[i].ring)
268 xhci_ring_free(xhci, dev->eps[i].ring); 333 xhci_ring_free(xhci, dev->eps[i].ring);
269 334
335 if (dev->ring_cache) {
336 for (i = 0; i < dev->num_rings_cached; i++)
337 xhci_ring_free(xhci, dev->ring_cache[i]);
338 kfree(dev->ring_cache);
339 }
340
270 if (dev->in_ctx) 341 if (dev->in_ctx)
271 xhci_free_container_ctx(xhci, dev->in_ctx); 342 xhci_free_container_ctx(xhci, dev->in_ctx);
272 if (dev->out_ctx) 343 if (dev->out_ctx)
@@ -309,15 +380,25 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
309 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 380 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
310 (unsigned long long)dev->in_ctx->dma); 381 (unsigned long long)dev->in_ctx->dma);
311 382
312 /* Initialize the cancellation list for each endpoint */ 383 /* Initialize the cancellation list and watchdog timers for each ep */
313 for (i = 0; i < 31; i++) 384 for (i = 0; i < 31; i++) {
385 xhci_init_endpoint_timer(xhci, &dev->eps[i]);
314 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); 386 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
387 }
315 388
316 /* Allocate endpoint 0 ring */ 389 /* Allocate endpoint 0 ring */
317 dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags); 390 dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
318 if (!dev->eps[0].ring) 391 if (!dev->eps[0].ring)
319 goto fail; 392 goto fail;
320 393
394 /* Allocate pointers to the ring cache */
395 dev->ring_cache = kzalloc(
396 sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
397 flags);
398 if (!dev->ring_cache)
399 goto fail;
400 dev->num_rings_cached = 0;
401
321 init_completion(&dev->cmd_completion); 402 init_completion(&dev->cmd_completion);
322 INIT_LIST_HEAD(&dev->cmd_list); 403 INIT_LIST_HEAD(&dev->cmd_list);
323 404
@@ -374,7 +455,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
374 case USB_SPEED_LOW: 455 case USB_SPEED_LOW:
375 slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; 456 slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
376 break; 457 break;
377 case USB_SPEED_VARIABLE: 458 case USB_SPEED_WIRELESS:
378 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 459 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
379 return -EINVAL; 460 return -EINVAL;
380 break; 461 break;
@@ -418,7 +499,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
418 case USB_SPEED_LOW: 499 case USB_SPEED_LOW:
419 ep0_ctx->ep_info2 |= MAX_PACKET(8); 500 ep0_ctx->ep_info2 |= MAX_PACKET(8);
420 break; 501 break;
421 case USB_SPEED_VARIABLE: 502 case USB_SPEED_WIRELESS:
422 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 503 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
423 return -EINVAL; 504 return -EINVAL;
424 break; 505 break;
@@ -486,8 +567,13 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
486 if (interval < 3) 567 if (interval < 3)
487 interval = 3; 568 interval = 3;
488 if ((1 << interval) != 8*ep->desc.bInterval) 569 if ((1 << interval) != 8*ep->desc.bInterval)
489 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", 570 dev_warn(&udev->dev,
490 ep->desc.bEndpointAddress, 1 << interval); 571 "ep %#x - rounding interval"
572 " to %d microframes, "
573 "ep desc says %d microframes\n",
574 ep->desc.bEndpointAddress,
575 1 << interval,
576 8*ep->desc.bInterval);
491 } 577 }
492 break; 578 break;
493 default: 579 default:
@@ -496,6 +582,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
496 return EP_INTERVAL(interval); 582 return EP_INTERVAL(interval);
497} 583}
498 584
585/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
586 * High speed endpoint descriptors can define "the number of additional
587 * transaction opportunities per microframe", but that goes in the Max Burst
588 * endpoint context field.
589 */
590static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
591 struct usb_host_endpoint *ep)
592{
593 if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
594 return 0;
595 return ep->ss_ep_comp->desc.bmAttributes;
596}
597
499static inline u32 xhci_get_endpoint_type(struct usb_device *udev, 598static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
500 struct usb_host_endpoint *ep) 599 struct usb_host_endpoint *ep)
501{ 600{
@@ -526,6 +625,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
526 return type; 625 return type;
527} 626}
528 627
628/* Return the maximum endpoint service interval time (ESIT) payload.
629 * Basically, this is the maxpacket size, multiplied by the burst size
630 * and mult size.
631 */
632static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
633 struct usb_device *udev,
634 struct usb_host_endpoint *ep)
635{
636 int max_burst;
637 int max_packet;
638
639 /* Only applies for interrupt or isochronous endpoints */
640 if (usb_endpoint_xfer_control(&ep->desc) ||
641 usb_endpoint_xfer_bulk(&ep->desc))
642 return 0;
643
644 if (udev->speed == USB_SPEED_SUPER) {
645 if (ep->ss_ep_comp)
646 return ep->ss_ep_comp->desc.wBytesPerInterval;
647 xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
648 /* Assume no bursts, no multiple opportunities to send. */
649 return ep->desc.wMaxPacketSize;
650 }
651
652 max_packet = ep->desc.wMaxPacketSize & 0x3ff;
653 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
654 /* A 0 in max burst means 1 transfer per ESIT */
655 return max_packet * (max_burst + 1);
656}
657
529int xhci_endpoint_init(struct xhci_hcd *xhci, 658int xhci_endpoint_init(struct xhci_hcd *xhci,
530 struct xhci_virt_device *virt_dev, 659 struct xhci_virt_device *virt_dev,
531 struct usb_device *udev, 660 struct usb_device *udev,
@@ -537,6 +666,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
537 struct xhci_ring *ep_ring; 666 struct xhci_ring *ep_ring;
538 unsigned int max_packet; 667 unsigned int max_packet;
539 unsigned int max_burst; 668 unsigned int max_burst;
669 u32 max_esit_payload;
540 670
541 ep_index = xhci_get_endpoint_index(&ep->desc); 671 ep_index = xhci_get_endpoint_index(&ep->desc);
542 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 672 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
@@ -544,12 +674,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
544 /* Set up the endpoint ring */ 674 /* Set up the endpoint ring */
545 virt_dev->eps[ep_index].new_ring = 675 virt_dev->eps[ep_index].new_ring =
546 xhci_ring_alloc(xhci, 1, true, mem_flags); 676 xhci_ring_alloc(xhci, 1, true, mem_flags);
547 if (!virt_dev->eps[ep_index].new_ring) 677 if (!virt_dev->eps[ep_index].new_ring) {
548 return -ENOMEM; 678 /* Attempt to use the ring cache */
679 if (virt_dev->num_rings_cached == 0)
680 return -ENOMEM;
681 virt_dev->eps[ep_index].new_ring =
682 virt_dev->ring_cache[virt_dev->num_rings_cached];
683 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
684 virt_dev->num_rings_cached--;
685 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
686 }
549 ep_ring = virt_dev->eps[ep_index].new_ring; 687 ep_ring = virt_dev->eps[ep_index].new_ring;
550 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; 688 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
551 689
552 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 690 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
691 ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
553 692
554 /* FIXME dig Mult and streams info out of ep companion desc */ 693 /* FIXME dig Mult and streams info out of ep companion desc */
555 694
@@ -595,6 +734,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
595 default: 734 default:
596 BUG(); 735 BUG();
597 } 736 }
737 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
738 ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
739
740 /*
741 * XXX no idea how to calculate the average TRB buffer length for bulk
742 * endpoints, as the driver gives us no clue how big each scatter gather
743 * list entry (or buffer) is going to be.
744 *
745 * For isochronous and interrupt endpoints, we set it to the max
746 * available, until we have new API in the USB core to allow drivers to
747 * declare how much bandwidth they actually need.
748 *
749 * Normally, it would be calculated by taking the total of the buffer
750 * lengths in the TD and then dividing by the number of TRBs in a TD,
751 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
752 * use Event Data TRBs, and we don't chain in a link TRB on short
753 * transfers, we're basically dividing by 1.
754 */
755 ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
756
598 /* FIXME Debug endpoint context */ 757 /* FIXME Debug endpoint context */
599 return 0; 758 return 0;
600} 759}
@@ -758,7 +917,8 @@ static void scratchpad_free(struct xhci_hcd *xhci)
758} 917}
759 918
760struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, 919struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
761 bool allocate_completion, gfp_t mem_flags) 920 bool allocate_in_ctx, bool allocate_completion,
921 gfp_t mem_flags)
762{ 922{
763 struct xhci_command *command; 923 struct xhci_command *command;
764 924
@@ -766,16 +926,22 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
766 if (!command) 926 if (!command)
767 return NULL; 927 return NULL;
768 928
769 command->in_ctx = 929 if (allocate_in_ctx) {
770 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); 930 command->in_ctx =
771 if (!command->in_ctx) 931 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
772 return NULL; 932 mem_flags);
933 if (!command->in_ctx) {
934 kfree(command);
935 return NULL;
936 }
937 }
773 938
774 if (allocate_completion) { 939 if (allocate_completion) {
775 command->completion = 940 command->completion =
776 kzalloc(sizeof(struct completion), mem_flags); 941 kzalloc(sizeof(struct completion), mem_flags);
777 if (!command->completion) { 942 if (!command->completion) {
778 xhci_free_container_ctx(xhci, command->in_ctx); 943 xhci_free_container_ctx(xhci, command->in_ctx);
944 kfree(command);
779 return NULL; 945 return NULL;
780 } 946 }
781 init_completion(command->completion); 947 init_completion(command->completion);
@@ -848,6 +1014,163 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
848 xhci->page_shift = 0; 1014 xhci->page_shift = 0;
849} 1015}
850 1016
1017static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1018 struct xhci_segment *input_seg,
1019 union xhci_trb *start_trb,
1020 union xhci_trb *end_trb,
1021 dma_addr_t input_dma,
1022 struct xhci_segment *result_seg,
1023 char *test_name, int test_number)
1024{
1025 unsigned long long start_dma;
1026 unsigned long long end_dma;
1027 struct xhci_segment *seg;
1028
1029 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1030 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1031
1032 seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
1033 if (seg != result_seg) {
1034 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1035 test_name, test_number);
1036 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1037 "input DMA 0x%llx\n",
1038 input_seg,
1039 (unsigned long long) input_dma);
1040 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1041 "ending TRB %p (0x%llx DMA)\n",
1042 start_trb, start_dma,
1043 end_trb, end_dma);
1044 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1045 result_seg, seg);
1046 return -1;
1047 }
1048 return 0;
1049}
1050
1051/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1052static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
1053{
1054 struct {
1055 dma_addr_t input_dma;
1056 struct xhci_segment *result_seg;
1057 } simple_test_vector [] = {
1058 /* A zeroed DMA field should fail */
1059 { 0, NULL },
1060 /* One TRB before the ring start should fail */
1061 { xhci->event_ring->first_seg->dma - 16, NULL },
1062 /* One byte before the ring start should fail */
1063 { xhci->event_ring->first_seg->dma - 1, NULL },
1064 /* Starting TRB should succeed */
1065 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1066 /* Ending TRB should succeed */
1067 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1068 xhci->event_ring->first_seg },
1069 /* One byte after the ring end should fail */
1070 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1071 /* One TRB after the ring end should fail */
1072 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1073 /* An address of all ones should fail */
1074 { (dma_addr_t) (~0), NULL },
1075 };
1076 struct {
1077 struct xhci_segment *input_seg;
1078 union xhci_trb *start_trb;
1079 union xhci_trb *end_trb;
1080 dma_addr_t input_dma;
1081 struct xhci_segment *result_seg;
1082 } complex_test_vector [] = {
1083 /* Test feeding a valid DMA address from a different ring */
1084 { .input_seg = xhci->event_ring->first_seg,
1085 .start_trb = xhci->event_ring->first_seg->trbs,
1086 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1087 .input_dma = xhci->cmd_ring->first_seg->dma,
1088 .result_seg = NULL,
1089 },
1090 /* Test feeding a valid end TRB from a different ring */
1091 { .input_seg = xhci->event_ring->first_seg,
1092 .start_trb = xhci->event_ring->first_seg->trbs,
1093 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1094 .input_dma = xhci->cmd_ring->first_seg->dma,
1095 .result_seg = NULL,
1096 },
1097 /* Test feeding a valid start and end TRB from a different ring */
1098 { .input_seg = xhci->event_ring->first_seg,
1099 .start_trb = xhci->cmd_ring->first_seg->trbs,
1100 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1101 .input_dma = xhci->cmd_ring->first_seg->dma,
1102 .result_seg = NULL,
1103 },
1104 /* TRB in this ring, but after this TD */
1105 { .input_seg = xhci->event_ring->first_seg,
1106 .start_trb = &xhci->event_ring->first_seg->trbs[0],
1107 .end_trb = &xhci->event_ring->first_seg->trbs[3],
1108 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
1109 .result_seg = NULL,
1110 },
1111 /* TRB in this ring, but before this TD */
1112 { .input_seg = xhci->event_ring->first_seg,
1113 .start_trb = &xhci->event_ring->first_seg->trbs[3],
1114 .end_trb = &xhci->event_ring->first_seg->trbs[6],
1115 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1116 .result_seg = NULL,
1117 },
1118 /* TRB in this ring, but after this wrapped TD */
1119 { .input_seg = xhci->event_ring->first_seg,
1120 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1121 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1122 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1123 .result_seg = NULL,
1124 },
1125 /* TRB in this ring, but before this wrapped TD */
1126 { .input_seg = xhci->event_ring->first_seg,
1127 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1128 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1129 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
1130 .result_seg = NULL,
1131 },
1132 /* TRB not in this ring, and we have a wrapped TD */
1133 { .input_seg = xhci->event_ring->first_seg,
1134 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1135 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1136 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
1137 .result_seg = NULL,
1138 },
1139 };
1140
1141 unsigned int num_tests;
1142 int i, ret;
1143
1144 num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
1145 for (i = 0; i < num_tests; i++) {
1146 ret = xhci_test_trb_in_td(xhci,
1147 xhci->event_ring->first_seg,
1148 xhci->event_ring->first_seg->trbs,
1149 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1150 simple_test_vector[i].input_dma,
1151 simple_test_vector[i].result_seg,
1152 "Simple", i);
1153 if (ret < 0)
1154 return ret;
1155 }
1156
1157 num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
1158 for (i = 0; i < num_tests; i++) {
1159 ret = xhci_test_trb_in_td(xhci,
1160 complex_test_vector[i].input_seg,
1161 complex_test_vector[i].start_trb,
1162 complex_test_vector[i].end_trb,
1163 complex_test_vector[i].input_dma,
1164 complex_test_vector[i].result_seg,
1165 "Complex", i);
1166 if (ret < 0)
1167 return ret;
1168 }
1169 xhci_dbg(xhci, "TRB math tests passed.\n");
1170 return 0;
1171}
1172
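The vectors above exercise trb_in_td(), which decides whether a suspect DMA address (reported in a transfer event) falls inside the TD delimited by start_trb and end_trb. A simplified single-segment sketch of that containment check, for orientation only; the driver's version additionally follows link TRBs across segments:

	static bool td_contains_dma_in_seg(struct xhci_segment *seg,
			union xhci_trb *start_trb, union xhci_trb *end_trb,
			dma_addr_t suspect_dma)
	{
		dma_addr_t start_dma = xhci_trb_virt_to_dma(seg, start_trb);
		dma_addr_t end_dma = xhci_trb_virt_to_dma(seg, end_trb);
		dma_addr_t seg_start = seg->dma;
		dma_addr_t seg_end = seg->dma +
				(TRBS_PER_SEGMENT - 1) * sizeof(union xhci_trb);

		if (!start_dma || !end_dma)
			return false;
		/* The suspect TRB must at least lie inside this segment. */
		if (suspect_dma < seg_start || suspect_dma > seg_end)
			return false;
		if (start_dma <= end_dma)
			/* TD laid out in order within the segment. */
			return suspect_dma >= start_dma && suspect_dma <= end_dma;
		/* TD wraps from the segment end back to its beginning. */
		return suspect_dma >= start_dma || suspect_dma <= end_dma;
	}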
1173
851int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 1174int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
852{ 1175{
853 dma_addr_t dma; 1176 dma_addr_t dma;
@@ -951,6 +1274,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
951 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); 1274 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
952 if (!xhci->event_ring) 1275 if (!xhci->event_ring)
953 goto fail; 1276 goto fail;
1277 if (xhci_check_trb_in_td_math(xhci, flags) < 0)
1278 goto fail;
954 1279
955 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), 1280 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
956 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); 1281 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 06595ec27bb7..417d37aff8d7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,6 +54,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
54 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 54 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
55 int retval; 55 int retval;
56 56
57 hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
58
57 xhci->cap_regs = hcd->regs; 59 xhci->cap_regs = hcd->regs;
58 xhci->op_regs = hcd->regs + 60 xhci->op_regs = hcd->regs +
59 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); 61 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
@@ -137,6 +139,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
137 .reset_bandwidth = xhci_reset_bandwidth, 139 .reset_bandwidth = xhci_reset_bandwidth,
138 .address_device = xhci_address_device, 140 .address_device = xhci_address_device,
139 .update_hub_device = xhci_update_hub_device, 141 .update_hub_device = xhci_update_hub_device,
142 .reset_device = xhci_reset_device,
140 143
141 /* 144 /*
142 * scheduling support 145 * scheduling support
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 821b7b4709de..85d7e8f2085e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -65,6 +65,7 @@
65 */ 65 */
66 66
67#include <linux/scatterlist.h> 67#include <linux/scatterlist.h>
68#include <linux/slab.h>
68#include "xhci.h" 69#include "xhci.h"
69 70
70/* 71/*
@@ -306,7 +307,7 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
306 /* Don't ring the doorbell for this endpoint if there are pending 307 /* Don't ring the doorbell for this endpoint if there are pending
307  * cancellations because we don't want to interrupt processing. 308  * cancellations because we don't want to interrupt processing.
308 */ 309 */
309 if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING) 310 if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
310 && !(ep_state & EP_HALTED)) { 311 && !(ep_state & EP_HALTED)) {
311 field = xhci_readl(xhci, db_addr) & DB_MASK; 312 field = xhci_readl(xhci, db_addr) & DB_MASK;
312 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); 313 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
@@ -475,6 +476,35 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
475 ep->ep_state |= SET_DEQ_PENDING; 476 ep->ep_state |= SET_DEQ_PENDING;
476} 477}
477 478
479static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
480 struct xhci_virt_ep *ep)
481{
482 ep->ep_state &= ~EP_HALT_PENDING;
483 /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
484 * timer is running on another CPU, we don't decrement stop_cmds_pending
485 * (since we didn't successfully stop the watchdog timer).
486 */
487 if (del_timer(&ep->stop_cmd_timer))
488 ep->stop_cmds_pending--;
489}
490
491/* Must be called with xhci->lock held in interrupt context */
492static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
493 struct xhci_td *cur_td, int status, char *adjective)
494{
495 struct usb_hcd *hcd = xhci_to_hcd(xhci);
496
497 cur_td->urb->hcpriv = NULL;
498 usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
499 xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
500
501 spin_unlock(&xhci->lock);
502 usb_hcd_giveback_urb(hcd, cur_td->urb, status);
503 kfree(cur_td);
504 spin_lock(&xhci->lock);
505 xhci_dbg(xhci, "%s URB given back\n", adjective);
506}
507
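xhci_giveback_urb_in_irq() exists because usb_hcd_giveback_urb() invokes the class driver's completion handler, which may immediately resubmit the URB and re-enter the HCD; holding xhci->lock across that call would deadlock. An illustration (not part of this patch) of the kind of completion handler that forces the unlock/giveback/lock pattern:

	/* Typical class-driver completion handler: resubmits on success, which
	 * re-enters xhci_urb_enqueue() and takes xhci->lock again.
	 */
	static void example_urb_complete(struct urb *urb)
	{
		if (urb->status == 0)
			usb_submit_urb(urb, GFP_ATOMIC);
	}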
478/* 508/*
479 * When we get a command completion for a Stop Endpoint Command, we need to 509 * When we get a command completion for a Stop Endpoint Command, we need to
480 * unlink any cancelled TDs from the ring. There are two ways to do that: 510 * unlink any cancelled TDs from the ring. There are two ways to do that:
@@ -497,9 +527,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
497 struct xhci_td *last_unlinked_td; 527 struct xhci_td *last_unlinked_td;
498 528
499 struct xhci_dequeue_state deq_state; 529 struct xhci_dequeue_state deq_state;
500#ifdef CONFIG_USB_HCD_STAT
501 ktime_t stop_time = ktime_get();
502#endif
503 530
504 memset(&deq_state, 0, sizeof(deq_state)); 531 memset(&deq_state, 0, sizeof(deq_state));
505 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 532 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
@@ -507,8 +534,11 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
507 ep = &xhci->devs[slot_id]->eps[ep_index]; 534 ep = &xhci->devs[slot_id]->eps[ep_index];
508 ep_ring = ep->ring; 535 ep_ring = ep->ring;
509 536
510 if (list_empty(&ep->cancelled_td_list)) 537 if (list_empty(&ep->cancelled_td_list)) {
538 xhci_stop_watchdog_timer_in_irq(xhci, ep);
539 ring_ep_doorbell(xhci, slot_id, ep_index);
511 return; 540 return;
541 }
512 542
513 /* Fix up the ep ring first, so HW stops executing cancelled TDs. 543 /* Fix up the ep ring first, so HW stops executing cancelled TDs.
514 * We have the xHCI lock, so nothing can modify this list until we drop 544 * We have the xHCI lock, so nothing can modify this list until we drop
@@ -535,9 +565,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
535 * the cancelled TD list for URB completion later. 565 * the cancelled TD list for URB completion later.
536 */ 566 */
537 list_del(&cur_td->td_list); 567 list_del(&cur_td->td_list);
538 ep->cancels_pending--;
539 } 568 }
540 last_unlinked_td = cur_td; 569 last_unlinked_td = cur_td;
570 xhci_stop_watchdog_timer_in_irq(xhci, ep);
541 571
542 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ 572 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
543 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { 573 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
@@ -561,27 +591,136 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
561 list_del(&cur_td->cancelled_td_list); 591 list_del(&cur_td->cancelled_td_list);
562 592
563 /* Clean up the cancelled URB */ 593 /* Clean up the cancelled URB */
564#ifdef CONFIG_USB_HCD_STAT
565 hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
566 ktime_sub(stop_time, cur_td->start_time));
567#endif
568 cur_td->urb->hcpriv = NULL;
569 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
570
571 xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
572 spin_unlock(&xhci->lock);
573 /* Doesn't matter what we pass for status, since the core will 594 /* Doesn't matter what we pass for status, since the core will
574 * just overwrite it (because the URB has been unlinked). 595 * just overwrite it (because the URB has been unlinked).
575 */ 596 */
576 usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0); 597 xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
577 kfree(cur_td);
578 598
579 spin_lock(&xhci->lock); 599 /* Stop processing the cancelled list if the watchdog timer is
600 * running.
601 */
602 if (xhci->xhc_state & XHCI_STATE_DYING)
603 return;
580 } while (cur_td != last_unlinked_td); 604 } while (cur_td != last_unlinked_td);
581 605
582 /* Return to the event handler with xhci->lock re-acquired */ 606 /* Return to the event handler with xhci->lock re-acquired */
583} 607}
584 608
609/* Watchdog timer function for when a stop endpoint command fails to complete.
610 * In this case, we assume the host controller is broken or dying or dead. The
611 * host may still be completing some other events, so we have to be careful to
612 * let the event ring handler and the URB dequeueing/enqueueing functions know
613 * through xhci->xhc_state.
614 *
615 * The timer may also fire if the host takes a very long time to respond to the
616 * command, and the stop endpoint command completion handler cannot delete the
617 * timer before the timer function is called. Another endpoint cancellation may
618 * sneak in before the timer function can grab the lock, and that may queue
619 * another stop endpoint command and add the timer back. So we cannot use a
620 * simple flag to say whether there is a pending stop endpoint command for a
621 * particular endpoint.
622 *
623 * Instead we use a combination of that flag and a counter for the number of
624 * pending stop endpoint commands. If the timer is the tail end of the last
625 * stop endpoint command, and the endpoint's command is still pending, we assume
626 * the host is dying.
627 */
628void xhci_stop_endpoint_command_watchdog(unsigned long arg)
629{
630 struct xhci_hcd *xhci;
631 struct xhci_virt_ep *ep;
632 struct xhci_virt_ep *temp_ep;
633 struct xhci_ring *ring;
634 struct xhci_td *cur_td;
635 int ret, i, j;
636
637 ep = (struct xhci_virt_ep *) arg;
638 xhci = ep->xhci;
639
640 spin_lock(&xhci->lock);
641
642 ep->stop_cmds_pending--;
643 if (xhci->xhc_state & XHCI_STATE_DYING) {
644 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
645 "xHCI as DYING, exiting.\n");
646 spin_unlock(&xhci->lock);
647 return;
648 }
649 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
650 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
651 "exiting.\n");
652 spin_unlock(&xhci->lock);
653 return;
654 }
655
656 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
657 xhci_warn(xhci, "Assuming host is dying, halting host.\n");
658 /* Oops, HC is dead or dying or at least not responding to the stop
659 * endpoint command.
660 */
661 xhci->xhc_state |= XHCI_STATE_DYING;
662 /* Disable interrupts from the host controller and start halting it */
663 xhci_quiesce(xhci);
664 spin_unlock(&xhci->lock);
665
666 ret = xhci_halt(xhci);
667
668 spin_lock(&xhci->lock);
669 if (ret < 0) {
670 /* This is bad; the host is not responding to commands and it's
671 * not allowing itself to be halted. At least interrupts are
672 * disabled, so we can set HC_STATE_HALT and notify the
673 * USB core. But if we call usb_hc_died(), it will attempt to
674 * disconnect all device drivers under this host. Those
675 * disconnect() methods will wait for all URBs to be unlinked,
676 * so we must complete them.
677 */
678 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
679 xhci_warn(xhci, "Completing active URBs anyway.\n");
680 /* We could turn all TDs on the rings to no-ops. This won't
681 * help if the host has cached part of the ring, and is slow if
682 * we want to preserve the cycle bit. Skip it and hope the host
683 * doesn't touch the memory.
684 */
685 }
686 for (i = 0; i < MAX_HC_SLOTS; i++) {
687 if (!xhci->devs[i])
688 continue;
689 for (j = 0; j < 31; j++) {
690 temp_ep = &xhci->devs[i]->eps[j];
691 ring = temp_ep->ring;
692 if (!ring)
693 continue;
694 xhci_dbg(xhci, "Killing URBs for slot ID %u, "
695 "ep index %u\n", i, j);
696 while (!list_empty(&ring->td_list)) {
697 cur_td = list_first_entry(&ring->td_list,
698 struct xhci_td,
699 td_list);
700 list_del(&cur_td->td_list);
701 if (!list_empty(&cur_td->cancelled_td_list))
702 list_del(&cur_td->cancelled_td_list);
703 xhci_giveback_urb_in_irq(xhci, cur_td,
704 -ESHUTDOWN, "killed");
705 }
706 while (!list_empty(&temp_ep->cancelled_td_list)) {
707 cur_td = list_first_entry(
708 &temp_ep->cancelled_td_list,
709 struct xhci_td,
710 cancelled_td_list);
711 list_del(&cur_td->cancelled_td_list);
712 xhci_giveback_urb_in_irq(xhci, cur_td,
713 -ESHUTDOWN, "killed");
714 }
715 }
716 }
717 spin_unlock(&xhci->lock);
718 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
719 xhci_dbg(xhci, "Calling usb_hc_died()\n");
720 usb_hc_died(xhci_to_hcd(xhci));
721 xhci_dbg(xhci, "xHCI host controller is dead.\n");
722}
723
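The watchdog only works if every xhci_virt_ep has its stop_cmd_timer and xhci back-pointer initialized when the virtual device is set up; that side of the change lives in xhci-mem.c and is not shown in this hunk. A sketch of what the per-endpoint setup would look like on a kernel of this vintage (the function name here is illustrative):

	static void example_init_endpoint_timer(struct xhci_hcd *xhci,
			struct xhci_virt_ep *ep)
	{
		init_timer(&ep->stop_cmd_timer);
		ep->stop_cmd_timer.data = (unsigned long) ep;
		ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
		ep->xhci = xhci;
	}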
585/* 724/*
586 * When we get a completion for a Set Transfer Ring Dequeue Pointer command, 725 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
587 * we need to clear the set deq pending flag in the endpoint ring state, so that 726 * we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -765,28 +904,32 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
765 virt_dev->in_ctx); 904 virt_dev->in_ctx);
766 /* Input ctx add_flags are the endpoint index plus one */ 905 /* Input ctx add_flags are the endpoint index plus one */
767 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; 906 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
768 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 907 /* A usb_set_interface() call directly after clearing a halted
769 if (!ep_ring) { 908 * condition may race on this quirky hardware.
770 /* This must have been an initial configure endpoint */ 909 * Not worth worrying about, since this is prototype hardware.
771 xhci->devs[slot_id]->cmd_status = 910 */
772 GET_COMP_CODE(event->status);
773 complete(&xhci->devs[slot_id]->cmd_completion);
774 break;
775 }
776 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
777 xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
778 "state = %d\n", ep_index, ep_state);
779 if (xhci->quirks & XHCI_RESET_EP_QUIRK && 911 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
780 ep_state & EP_HALTED) { 912 ep_index != (unsigned int) -1 &&
913 ctrl_ctx->add_flags - SLOT_FLAG ==
914 ctrl_ctx->drop_flags) {
915 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
916 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
917 if (!(ep_state & EP_HALTED))
918 goto bandwidth_change;
919 xhci_dbg(xhci, "Completed config ep cmd - "
920 "last ep index = %d, state = %d\n",
921 ep_index, ep_state);
781 /* Clear our internal halted state and restart ring */ 922 /* Clear our internal halted state and restart ring */
782 xhci->devs[slot_id]->eps[ep_index].ep_state &= 923 xhci->devs[slot_id]->eps[ep_index].ep_state &=
783 ~EP_HALTED; 924 ~EP_HALTED;
784 ring_ep_doorbell(xhci, slot_id, ep_index); 925 ring_ep_doorbell(xhci, slot_id, ep_index);
785 } else { 926 break;
786 xhci->devs[slot_id]->cmd_status =
787 GET_COMP_CODE(event->status);
788 complete(&xhci->devs[slot_id]->cmd_completion);
789 } 927 }
928bandwidth_change:
929 xhci_dbg(xhci, "Completed config ep cmd\n");
930 xhci->devs[slot_id]->cmd_status =
931 GET_COMP_CODE(event->status);
932 complete(&xhci->devs[slot_id]->cmd_completion);
790 break; 933 break;
791 case TRB_TYPE(TRB_EVAL_CONTEXT): 934 case TRB_TYPE(TRB_EVAL_CONTEXT):
792 virt_dev = xhci->devs[slot_id]; 935 virt_dev = xhci->devs[slot_id];
@@ -811,6 +954,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
811 case TRB_TYPE(TRB_RESET_EP): 954 case TRB_TYPE(TRB_RESET_EP):
812 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); 955 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
813 break; 956 break;
957 case TRB_TYPE(TRB_RESET_DEV):
958 xhci_dbg(xhci, "Completed reset device command.\n");
959 slot_id = TRB_TO_SLOT_ID(
960 xhci->cmd_ring->dequeue->generic.field[3]);
961 virt_dev = xhci->devs[slot_id];
962 if (virt_dev)
963 handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
964 else
965 xhci_warn(xhci, "Reset device command completion "
966 "for disabled slot %u\n", slot_id);
967 break;
814 default: 968 default:
815 /* Skip over unknown commands on the event ring */ 969 /* Skip over unknown commands on the event ring */
816 xhci->error_bitmask |= 1 << 6; 970 xhci->error_bitmask |= 1 << 6;
@@ -849,8 +1003,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
849 * TRB in this TD, this function returns that TRB's segment. Otherwise it 1003 * TRB in this TD, this function returns that TRB's segment. Otherwise it
850 * returns 0. 1004 * returns 0.
851 */ 1005 */
852static struct xhci_segment *trb_in_td( 1006struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
853 struct xhci_segment *start_seg,
854 union xhci_trb *start_trb, 1007 union xhci_trb *start_trb,
855 union xhci_trb *end_trb, 1008 union xhci_trb *end_trb,
856 dma_addr_t suspect_dma) 1009 dma_addr_t suspect_dma)
@@ -900,6 +1053,59 @@ static struct xhci_segment *trb_in_td(
900 return 0; 1053 return 0;
901} 1054}
902 1055
1056static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1057 unsigned int slot_id, unsigned int ep_index,
1058 struct xhci_td *td, union xhci_trb *event_trb)
1059{
1060 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1061 ep->ep_state |= EP_HALTED;
1062 ep->stopped_td = td;
1063 ep->stopped_trb = event_trb;
1064 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1065 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1066 xhci_ring_cmd_db(xhci);
1067}
1068
1069/* Check if an error has halted the endpoint ring. The class driver will
1070 * clean up the halt for a non-default control endpoint if we indicate a stall.
1071 * However, a babble and other errors also halt the endpoint ring, and the class
1072 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1073 * Ring Dequeue Pointer command manually.
1074 */
1075static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1076 struct xhci_ep_ctx *ep_ctx,
1077 unsigned int trb_comp_code)
1078{
1079 /* TRB completion codes that may require a manual halt cleanup */
1080 if (trb_comp_code == COMP_TX_ERR ||
1081 trb_comp_code == COMP_BABBLE ||
1082 trb_comp_code == COMP_SPLIT_ERR)
1083		/* The 0.95 spec says a babbling control endpoint
1084 * is not halted. The 0.96 spec says it is. Some HW
1085 * claims to be 0.95 compliant, but it halts the control
1086 * endpoint anyway. Check if a babble halted the
1087 * endpoint.
1088 */
1089 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
1090 return 1;
1091
1092 return 0;
1093}
1094
1095int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1096{
1097 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1098 /* Vendor defined "informational" completion code,
1099 * treat as not-an-error.
1100 */
1101 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1102 trb_comp_code);
1103 xhci_dbg(xhci, "Treating code as success.\n");
1104 return 1;
1105 }
1106 return 0;
1107}
1108
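Taken together, the two helpers above split completion codes into three classes: vendor-defined informational codes that count as success, codes that halt the ring and need HCD-side cleanup, and everything else. A condensed switch-case fragment showing how they are meant to combine (the full logic is in handle_tx_event() below):

	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			/* Vendor "informational" codes (224-255): success. */
			status = 0;
			break;
		}
		if (xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code))
			/* Babble or transaction errors halt the ring and the
			 * class driver will not clear it, so reset the
			 * endpoint and move the dequeue pointer ourselves.
			 */
			xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
						td, event_trb);
		break;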
903/* 1109/*
904 * If this function returns an error condition, it means it got a Transfer 1110 * If this function returns an error condition, it means it got a Transfer
905 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 1111 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1002,6 +1208,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1002 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); 1208 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
1003 status = -EILSEQ; 1209 status = -EILSEQ;
1004 break; 1210 break;
1211 case COMP_SPLIT_ERR:
1005 case COMP_TX_ERR: 1212 case COMP_TX_ERR:
1006 xhci_warn(xhci, "WARN: transfer error on endpoint\n"); 1213 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
1007 status = -EPROTO; 1214 status = -EPROTO;
@@ -1015,6 +1222,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1015 status = -ENOSR; 1222 status = -ENOSR;
1016 break; 1223 break;
1017 default: 1224 default:
1225 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
1226 status = 0;
1227 break;
1228 }
1018 xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n"); 1229 xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
1019 urb = NULL; 1230 urb = NULL;
1020 goto cleanup; 1231 goto cleanup;
@@ -1043,15 +1254,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1043 else 1254 else
1044 status = 0; 1255 status = 0;
1045 break; 1256 break;
1046 case COMP_BABBLE: 1257
1047 /* The 0.96 spec says a babbling control endpoint 1258 default:
1048 * is not halted. The 0.96 spec says it is. Some HW 1259 if (!xhci_requires_manual_halt_cleanup(xhci,
1049 * claims to be 0.95 compliant, but it halts the control 1260 ep_ctx, trb_comp_code))
1050 * endpoint anyway. Check if a babble halted the
1051 * endpoint.
1052 */
1053 if (ep_ctx->ep_info != EP_STATE_HALTED)
1054 break; 1261 break;
1262 xhci_dbg(xhci, "TRB error code %u, "
1263 "halted endpoint index = %u\n",
1264 trb_comp_code, ep_index);
1055 /* else fall through */ 1265 /* else fall through */
1056 case COMP_STALL: 1266 case COMP_STALL:
1057 /* Did we transfer part of the data (middle) phase? */ 1267 /* Did we transfer part of the data (middle) phase? */
@@ -1063,15 +1273,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1063 else 1273 else
1064 td->urb->actual_length = 0; 1274 td->urb->actual_length = 0;
1065 1275
1066 ep->stopped_td = td; 1276 xhci_cleanup_halted_endpoint(xhci,
1067 ep->stopped_trb = event_trb; 1277 slot_id, ep_index, td, event_trb);
1068 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1069 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1070 xhci_ring_cmd_db(xhci);
1071 goto td_cleanup; 1278 goto td_cleanup;
1072 default:
1073 /* Others already handled above */
1074 break;
1075 } 1279 }
1076 /* 1280 /*
1077 * Did we transfer any data, despite the errors that might have 1281 * Did we transfer any data, despite the errors that might have
@@ -1209,16 +1413,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1209 ep->stopped_td = td; 1413 ep->stopped_td = td;
1210 ep->stopped_trb = event_trb; 1414 ep->stopped_trb = event_trb;
1211 } else { 1415 } else {
1212 if (trb_comp_code == COMP_STALL || 1416 if (trb_comp_code == COMP_STALL) {
1213 trb_comp_code == COMP_BABBLE) {
1214 /* The transfer is completed from the driver's 1417 /* The transfer is completed from the driver's
1215 * perspective, but we need to issue a set dequeue 1418 * perspective, but we need to issue a set dequeue
1216 * command for this stalled endpoint to move the dequeue 1419 * command for this stalled endpoint to move the dequeue
1217 * pointer past the TD. We can't do that here because 1420 * pointer past the TD. We can't do that here because
1218 * the halt condition must be cleared first. 1421 * the halt condition must be cleared first. Let the
1422 * USB class driver clear the stall later.
1219 */ 1423 */
1220 ep->stopped_td = td; 1424 ep->stopped_td = td;
1221 ep->stopped_trb = event_trb; 1425 ep->stopped_trb = event_trb;
1426 } else if (xhci_requires_manual_halt_cleanup(xhci,
1427 ep_ctx, trb_comp_code)) {
1428 /* Other types of errors halt the endpoint, but the
1429 * class driver doesn't call usb_reset_endpoint() unless
1430 * the error is -EPIPE. Clear the halted status in the
1431 * xHCI hardware manually.
1432 */
1433 xhci_cleanup_halted_endpoint(xhci,
1434 slot_id, ep_index, td, event_trb);
1222 } else { 1435 } else {
1223 /* Update ring dequeue pointer */ 1436 /* Update ring dequeue pointer */
1224 while (ep_ring->dequeue != td->last_trb) 1437 while (ep_ring->dequeue != td->last_trb)
@@ -1249,10 +1462,9 @@ td_cleanup:
1249 } 1462 }
1250 list_del(&td->td_list); 1463 list_del(&td->td_list);
1251 /* Was this TD slated to be cancelled but completed anyway? */ 1464 /* Was this TD slated to be cancelled but completed anyway? */
1252 if (!list_empty(&td->cancelled_td_list)) { 1465 if (!list_empty(&td->cancelled_td_list))
1253 list_del(&td->cancelled_td_list); 1466 list_del(&td->cancelled_td_list);
1254 ep->cancels_pending--; 1467
1255 }
1256 /* Leave the TD around for the reset endpoint function to use 1468 /* Leave the TD around for the reset endpoint function to use
1257 * (but only if it's not a control endpoint, since we already 1469 * (but only if it's not a control endpoint, since we already
1258 * queued the Set TR dequeue pointer command for stalled 1470 * queued the Set TR dequeue pointer command for stalled
@@ -1331,6 +1543,14 @@ void xhci_handle_event(struct xhci_hcd *xhci)
1331 default: 1543 default:
1332 xhci->error_bitmask |= 1 << 3; 1544 xhci->error_bitmask |= 1 << 3;
1333 } 1545 }
1546 /* Any of the above functions may drop and re-acquire the lock, so check
1547 * to make sure a watchdog timer didn't mark the host as non-responsive.
1548 */
1549 if (xhci->xhc_state & XHCI_STATE_DYING) {
1550 xhci_dbg(xhci, "xHCI host dying, returning from "
1551 "event handler.\n");
1552 return;
1553 }
1334 1554
1335 if (update_ptrs) { 1555 if (update_ptrs) {
1336 /* Update SW and HC event ring dequeue pointer */ 1556 /* Update SW and HC event ring dequeue pointer */
@@ -1555,6 +1775,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1555 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); 1775 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
1556} 1776}
1557 1777
1778/*
1779 * The TD size is the number of bytes remaining in the TD (including this TRB),
1780 * right shifted by 10.
1781 * It must fit in bits 21:17, so it can't be bigger than 31.
1782 */
1783static u32 xhci_td_remainder(unsigned int remainder)
1784{
1785 u32 max = (1 << (21 - 17 + 1)) - 1;
1786
1787 if ((remainder >> 10) >= max)
1788 return max << 17;
1789 else
1790 return (remainder >> 10) << 17;
1791}
1792
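xhci_td_remainder() encodes the TD Size field: the bytes remaining in the TD are shifted right by 10, saturated at 31, and placed in bits 21:17 of the TRB length field. A stand-alone worked example of the arithmetic, for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t td_remainder(unsigned int remainder)
	{
		uint32_t max = (1 << (21 - 17 + 1)) - 1;	/* 31: a 5-bit field */

		if ((remainder >> 10) >= max)
			return max << 17;
		return (remainder >> 10) << 17;
	}

	int main(void)
	{
		/* 64 KiB bulk transfer with 16 KiB already queued: 48 KiB remain.
		 * 49152 >> 10 = 48, which exceeds 31, so the field saturates.
		 */
		printf("0x%08x\n", td_remainder(64 * 1024 - 16 * 1024));	/* 0x003e0000 */

		/* 8 KiB remaining: 8192 >> 10 = 8, so bits 21:17 hold 8. */
		printf("0x%08x\n", td_remainder(8 * 1024));	/* 0x00100000 */
		return 0;
	}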
1558static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 1793static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1559 struct urb *urb, int slot_id, unsigned int ep_index) 1794 struct urb *urb, int slot_id, unsigned int ep_index)
1560{ 1795{
@@ -1612,6 +1847,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1612 do { 1847 do {
1613 u32 field = 0; 1848 u32 field = 0;
1614 u32 length_field = 0; 1849 u32 length_field = 0;
1850 u32 remainder = 0;
1615 1851
1616 /* Don't change the cycle bit of the first TRB until later */ 1852 /* Don't change the cycle bit of the first TRB until later */
1617 if (first_trb) 1853 if (first_trb)
@@ -1641,8 +1877,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1641 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 1877 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1642 (unsigned int) addr + trb_buff_len); 1878 (unsigned int) addr + trb_buff_len);
1643 } 1879 }
1880 remainder = xhci_td_remainder(urb->transfer_buffer_length -
1881 running_total) ;
1644 length_field = TRB_LEN(trb_buff_len) | 1882 length_field = TRB_LEN(trb_buff_len) |
1645 TD_REMAINDER(urb->transfer_buffer_length - running_total) | 1883 remainder |
1646 TRB_INTR_TARGET(0); 1884 TRB_INTR_TARGET(0);
1647 queue_trb(xhci, ep_ring, false, 1885 queue_trb(xhci, ep_ring, false,
1648 lower_32_bits(addr), 1886 lower_32_bits(addr),
@@ -1755,6 +1993,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1755 1993
1756 /* Queue the first TRB, even if it's zero-length */ 1994 /* Queue the first TRB, even if it's zero-length */
1757 do { 1995 do {
1996 u32 remainder = 0;
1758 field = 0; 1997 field = 0;
1759 1998
1760 /* Don't change the cycle bit of the first TRB until later */ 1999 /* Don't change the cycle bit of the first TRB until later */
@@ -1773,8 +2012,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1773 td->last_trb = ep_ring->enqueue; 2012 td->last_trb = ep_ring->enqueue;
1774 field |= TRB_IOC; 2013 field |= TRB_IOC;
1775 } 2014 }
2015 remainder = xhci_td_remainder(urb->transfer_buffer_length -
2016 running_total);
1776 length_field = TRB_LEN(trb_buff_len) | 2017 length_field = TRB_LEN(trb_buff_len) |
1777 TD_REMAINDER(urb->transfer_buffer_length - running_total) | 2018 remainder |
1778 TRB_INTR_TARGET(0); 2019 TRB_INTR_TARGET(0);
1779 queue_trb(xhci, ep_ring, false, 2020 queue_trb(xhci, ep_ring, false,
1780 lower_32_bits(addr), 2021 lower_32_bits(addr),
@@ -1862,7 +2103,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1862 /* If there's data, queue data TRBs */ 2103 /* If there's data, queue data TRBs */
1863 field = 0; 2104 field = 0;
1864 length_field = TRB_LEN(urb->transfer_buffer_length) | 2105 length_field = TRB_LEN(urb->transfer_buffer_length) |
1865 TD_REMAINDER(urb->transfer_buffer_length) | 2106 xhci_td_remainder(urb->transfer_buffer_length) |
1866 TRB_INTR_TARGET(0); 2107 TRB_INTR_TARGET(0);
1867 if (urb->transfer_buffer_length > 0) { 2108 if (urb->transfer_buffer_length > 0) {
1868 if (setup->bRequestType & USB_DIR_IN) 2109 if (setup->bRequestType & USB_DIR_IN)
@@ -1960,6 +2201,14 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1960 false); 2201 false);
1961} 2202}
1962 2203
2204/* Queue a reset device command TRB */
2205int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
2206{
2207 return queue_command(xhci, 0, 0, 0,
2208 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
2209 false);
2210}
2211
1963/* Queue a configure endpoint command TRB */ 2212/* Queue a configure endpoint command TRB */
1964int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 2213int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1965 u32 slot_id, bool command_must_succeed) 2214 u32 slot_id, bool command_must_succeed)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci.c
index 932f99938481..7e4277273908 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci.c
@@ -23,6 +23,7 @@
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
26#include <linux/slab.h>
26 27
27#include "xhci.h" 28#include "xhci.h"
28 29
@@ -67,22 +68,14 @@ static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
67} 68}
68 69
69/* 70/*
70 * Force HC into halt state. 71 * Disable interrupts and begin the xHCI halting process.
71 *
72 * Disable any IRQs and clear the run/stop bit.
73 * HC will complete any current and actively pipelined transactions, and
74 * should halt within 16 microframes of the run/stop bit being cleared.
75 * Read HC Halted bit in the status register to see when the HC is finished.
76 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
77 */ 72 */
78int xhci_halt(struct xhci_hcd *xhci) 73void xhci_quiesce(struct xhci_hcd *xhci)
79{ 74{
80 u32 halted; 75 u32 halted;
81 u32 cmd; 76 u32 cmd;
82 u32 mask; 77 u32 mask;
83 78
84 xhci_dbg(xhci, "// Halt the HC\n");
85 /* Disable all interrupts from the host controller */
86 mask = ~(XHCI_IRQS); 79 mask = ~(XHCI_IRQS);
87 halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT; 80 halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
88 if (!halted) 81 if (!halted)
@@ -91,6 +84,21 @@ int xhci_halt(struct xhci_hcd *xhci)
91 cmd = xhci_readl(xhci, &xhci->op_regs->command); 84 cmd = xhci_readl(xhci, &xhci->op_regs->command);
92 cmd &= mask; 85 cmd &= mask;
93 xhci_writel(xhci, cmd, &xhci->op_regs->command); 86 xhci_writel(xhci, cmd, &xhci->op_regs->command);
87}
88
89/*
90 * Force HC into halt state.
91 *
92 * Disable any IRQs and clear the run/stop bit.
93 * HC will complete any current and actively pipelined transactions, and
94 * should halt within 16 microframes of the run/stop bit being cleared.
95 * Read HC Halted bit in the status register to see when the HC is finished.
96 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
97 */
98int xhci_halt(struct xhci_hcd *xhci)
99{
100 xhci_dbg(xhci, "// Halt the HC\n");
101 xhci_quiesce(xhci);
94 102
95 return handshake(xhci, &xhci->op_regs->status, 103 return handshake(xhci, &xhci->op_regs->status,
96 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); 104 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
@@ -124,28 +132,6 @@ int xhci_reset(struct xhci_hcd *xhci)
124 return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); 132 return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
125} 133}
126 134
127/*
128 * Stop the HC from processing the endpoint queues.
129 */
130static void xhci_quiesce(struct xhci_hcd *xhci)
131{
132 /*
133 * Queues are per endpoint, so we need to disable an endpoint or slot.
134 *
135 * To disable a slot, we need to insert a disable slot command on the
136 * command ring and ring the doorbell. This will also free any internal
137 * resources associated with the slot (which might not be what we want).
138 *
139 * A Release Endpoint command sounds better - doesn't free internal HC
140 * memory, but removes the endpoints from the schedule and releases the
141 * bandwidth, disables the doorbells, and clears the endpoint enable
142 * flag. Usually used prior to a set interface command.
143 *
144 * TODO: Implement after command ring code is done.
145 */
146 BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
147 xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
148}
149 135
150#if 0 136#if 0
151/* Set up MSI-X table for entry 0 (may claim other entries later) */ 137/* Set up MSI-X table for entry 0 (may claim other entries later) */
@@ -261,8 +247,14 @@ static void xhci_work(struct xhci_hcd *xhci)
261 /* Flush posted writes */ 247 /* Flush posted writes */
262 xhci_readl(xhci, &xhci->ir_set->irq_pending); 248 xhci_readl(xhci, &xhci->ir_set->irq_pending);
263 249
264 /* FIXME this should be a delayed service routine that clears the EHB */ 250 if (xhci->xhc_state & XHCI_STATE_DYING)
265 xhci_handle_event(xhci); 251 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
252 "Shouldn't IRQs be disabled?\n");
253 else
254 /* FIXME this should be a delayed service routine
255 * that clears the EHB.
256 */
257 xhci_handle_event(xhci);
266 258
267 /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ 259 /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
268 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 260 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
@@ -335,7 +327,7 @@ void xhci_event_ring_work(unsigned long arg)
335 spin_lock_irqsave(&xhci->lock, flags); 327 spin_lock_irqsave(&xhci->lock, flags);
336 temp = xhci_readl(xhci, &xhci->op_regs->status); 328 temp = xhci_readl(xhci, &xhci->op_regs->status);
337 xhci_dbg(xhci, "op reg status = 0x%x\n", temp); 329 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
338 if (temp == 0xffffffff) { 330 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
339 xhci_dbg(xhci, "HW died, polling stopped.\n"); 331 xhci_dbg(xhci, "HW died, polling stopped.\n");
340 spin_unlock_irqrestore(&xhci->lock, flags); 332 spin_unlock_irqrestore(&xhci->lock, flags);
341 return; 333 return;
@@ -490,8 +482,6 @@ void xhci_stop(struct usb_hcd *hcd)
490 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 482 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
491 483
492 spin_lock_irq(&xhci->lock); 484 spin_lock_irq(&xhci->lock);
493 if (HC_IS_RUNNING(hcd->state))
494 xhci_quiesce(xhci);
495 xhci_halt(xhci); 485 xhci_halt(xhci);
496 xhci_reset(xhci); 486 xhci_reset(xhci);
497 spin_unlock_irq(&xhci->lock); 487 spin_unlock_irq(&xhci->lock);
@@ -727,16 +717,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
727 * atomic context to this function, which may allocate memory. 717 * atomic context to this function, which may allocate memory.
728 */ 718 */
729 spin_lock_irqsave(&xhci->lock, flags); 719 spin_lock_irqsave(&xhci->lock, flags);
720 if (xhci->xhc_state & XHCI_STATE_DYING)
721 goto dying;
730 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, 722 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
731 slot_id, ep_index); 723 slot_id, ep_index);
732 spin_unlock_irqrestore(&xhci->lock, flags); 724 spin_unlock_irqrestore(&xhci->lock, flags);
733 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { 725 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
734 spin_lock_irqsave(&xhci->lock, flags); 726 spin_lock_irqsave(&xhci->lock, flags);
727 if (xhci->xhc_state & XHCI_STATE_DYING)
728 goto dying;
735 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 729 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
736 slot_id, ep_index); 730 slot_id, ep_index);
737 spin_unlock_irqrestore(&xhci->lock, flags); 731 spin_unlock_irqrestore(&xhci->lock, flags);
738 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { 732 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
739 spin_lock_irqsave(&xhci->lock, flags); 733 spin_lock_irqsave(&xhci->lock, flags);
734 if (xhci->xhc_state & XHCI_STATE_DYING)
735 goto dying;
740 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, 736 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
741 slot_id, ep_index); 737 slot_id, ep_index);
742 spin_unlock_irqrestore(&xhci->lock, flags); 738 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -745,6 +741,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
745 } 741 }
746exit: 742exit:
747 return ret; 743 return ret;
744dying:
745 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
746 "non-responsive xHCI host.\n",
747 urb->ep->desc.bEndpointAddress, urb);
748 spin_unlock_irqrestore(&xhci->lock, flags);
749 return -ESHUTDOWN;
748} 750}
749 751
750/* 752/*
@@ -806,6 +808,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
806 kfree(td); 808 kfree(td);
807 return ret; 809 return ret;
808 } 810 }
811 if (xhci->xhc_state & XHCI_STATE_DYING) {
812 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
813 "non-responsive xHCI host.\n",
814 urb->ep->desc.bEndpointAddress, urb);
815 /* Let the stop endpoint command watchdog timer (which set this
816 * state) finish cleaning up the endpoint TD lists. We must
817 * have caught it in the middle of dropping a lock and giving
818 * back an URB.
819 */
820 goto done;
821 }
809 822
810 xhci_dbg(xhci, "Cancel URB %p\n", urb); 823 xhci_dbg(xhci, "Cancel URB %p\n", urb);
811 xhci_dbg(xhci, "Event ring:\n"); 824 xhci_dbg(xhci, "Event ring:\n");
@@ -817,12 +830,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
817 xhci_debug_ring(xhci, ep_ring); 830 xhci_debug_ring(xhci, ep_ring);
818 td = (struct xhci_td *) urb->hcpriv; 831 td = (struct xhci_td *) urb->hcpriv;
819 832
820 ep->cancels_pending++;
821 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); 833 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
822 /* Queue a stop endpoint command, but only if this is 834 /* Queue a stop endpoint command, but only if this is
823 * the first cancellation to be handled. 835 * the first cancellation to be handled.
824 */ 836 */
825 if (ep->cancels_pending == 1) { 837 if (!(ep->ep_state & EP_HALT_PENDING)) {
838 ep->ep_state |= EP_HALT_PENDING;
839 ep->stop_cmds_pending++;
840 ep->stop_cmd_timer.expires = jiffies +
841 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
842 add_timer(&ep->stop_cmd_timer);
826 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); 843 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
827 xhci_ring_cmd_db(xhci); 844 xhci_ring_cmd_db(xhci);
828 } 845 }
@@ -991,7 +1008,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
991 * for usb_set_interface() and usb_set_configuration() claim). 1008 * for usb_set_interface() and usb_set_configuration() claim).
992 */ 1009 */
993 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], 1010 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
994 udev, ep, GFP_KERNEL) < 0) { 1011 udev, ep, GFP_NOIO) < 0) {
995 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", 1012 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
996 __func__, ep->desc.bEndpointAddress); 1013 __func__, ep->desc.bEndpointAddress);
997 return -ENOMEM; 1014 return -ENOMEM;
@@ -1157,6 +1174,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1157 cmd_completion = &virt_dev->cmd_completion; 1174 cmd_completion = &virt_dev->cmd_completion;
1158 cmd_status = &virt_dev->cmd_status; 1175 cmd_status = &virt_dev->cmd_status;
1159 } 1176 }
1177 init_completion(cmd_completion);
1160 1178
1161 if (!ctx_change) 1179 if (!ctx_change)
1162 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 1180 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
@@ -1165,6 +1183,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1165 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, 1183 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
1166 udev->slot_id); 1184 udev->slot_id);
1167 if (ret < 0) { 1185 if (ret < 0) {
1186 if (command)
1187 list_del(&command->cmd_list);
1168 spin_unlock_irqrestore(&xhci->lock, flags); 1188 spin_unlock_irqrestore(&xhci->lock, flags);
1169 xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); 1189 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
1170 return -ENOMEM; 1190 return -ENOMEM;
@@ -1246,13 +1266,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1246 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); 1266 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1247 1267
1248 xhci_zero_in_ctx(xhci, virt_dev); 1268 xhci_zero_in_ctx(xhci, virt_dev);
1249 /* Free any old rings */ 1269 /* Install new rings and free or cache any old rings */
1250 for (i = 1; i < 31; ++i) { 1270 for (i = 1; i < 31; ++i) {
1251 if (virt_dev->eps[i].new_ring) { 1271 if (!virt_dev->eps[i].new_ring)
1252 xhci_ring_free(xhci, virt_dev->eps[i].ring); 1272 continue;
1253 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 1273 /* Only cache or free the old ring if it exists.
1254 virt_dev->eps[i].new_ring = NULL; 1274 * It may not if this is the first add of an endpoint.
1275 */
1276 if (virt_dev->eps[i].ring) {
1277 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1255 } 1278 }
1279 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1280 virt_dev->eps[i].new_ring = NULL;
1256 } 1281 }
1257 1282
1258 return ret; 1283 return ret;
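xhci_free_or_cache_endpoint_ring() itself is added in xhci-mem.c and does not appear in this diff; a sketch of the intended behaviour, using only the ring_cache, num_rings_cached and XHCI_MAX_RINGS_CACHED fields introduced in xhci.h below (details such as re-initializing a cached ring before reuse are assumptions):

	static void example_free_or_cache_ring(struct xhci_hcd *xhci,
			struct xhci_virt_device *virt_dev, unsigned int ep_index)
	{
		struct xhci_ring *ring = virt_dev->eps[ep_index].ring;

		if (!ring)
			return;
		if (virt_dev->num_rings_cached < XHCI_MAX_RINGS_CACHED) {
			/* Keep the segments so a later alt setting change can
			 * reuse them without going back to the DMA pool; the
			 * reuser must reset the enqueue/dequeue pointers and
			 * cycle state.
			 */
			virt_dev->ring_cache[virt_dev->num_rings_cached++] = ring;
		} else {
			xhci_ring_free(xhci, ring);
		}
		virt_dev->eps[ep_index].ring = NULL;
	}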
@@ -1420,6 +1445,131 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1420} 1445}
1421 1446
1422/* 1447/*
1448 * This submits a Reset Device Command, which will set the device state to 0,
1449 * set the device address to 0, and disable all the endpoints except the default
1450 * control endpoint. The USB core should come back and call
1451 * xhci_address_device(), and then re-set up the configuration. If this is
1452 * called because of a usb_reset_and_verify_device(), then the old alternate
1453 * settings will be re-installed through the normal bandwidth allocation
1454 * functions.
1455 *
1456 * Wait for the Reset Device command to finish. Remove all structures
1457 * associated with the endpoints that were disabled. Clear the input device
1458 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
1459 */
1460int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
1461{
1462 int ret, i;
1463 unsigned long flags;
1464 struct xhci_hcd *xhci;
1465 unsigned int slot_id;
1466 struct xhci_virt_device *virt_dev;
1467 struct xhci_command *reset_device_cmd;
1468 int timeleft;
1469 int last_freed_endpoint;
1470
1471 ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1472 if (ret <= 0)
1473 return ret;
1474 xhci = hcd_to_xhci(hcd);
1475 slot_id = udev->slot_id;
1476 virt_dev = xhci->devs[slot_id];
1477 if (!virt_dev) {
1478 xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
1479 __func__, slot_id);
1480 return -EINVAL;
1481 }
1482
1483 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
1484 /* Allocate the command structure that holds the struct completion.
1485 * Assume we're in process context, since the normal device reset
1486 * process has to wait for the device anyway. Storage devices are
1487 * reset as part of error handling, so use GFP_NOIO instead of
1488 * GFP_KERNEL.
1489 */
1490 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
1491 if (!reset_device_cmd) {
1492 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
1493 return -ENOMEM;
1494 }
1495
1496 /* Attempt to submit the Reset Device command to the command ring */
1497 spin_lock_irqsave(&xhci->lock, flags);
1498 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
1499 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
1500 ret = xhci_queue_reset_device(xhci, slot_id);
1501 if (ret) {
1502 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1503 list_del(&reset_device_cmd->cmd_list);
1504 spin_unlock_irqrestore(&xhci->lock, flags);
1505 goto command_cleanup;
1506 }
1507 xhci_ring_cmd_db(xhci);
1508 spin_unlock_irqrestore(&xhci->lock, flags);
1509
1510 /* Wait for the Reset Device command to finish */
1511 timeleft = wait_for_completion_interruptible_timeout(
1512 reset_device_cmd->completion,
1513 USB_CTRL_SET_TIMEOUT);
1514 if (timeleft <= 0) {
1515 xhci_warn(xhci, "%s while waiting for reset device command\n",
1516 timeleft == 0 ? "Timeout" : "Signal");
1517 spin_lock_irqsave(&xhci->lock, flags);
1518 /* The timeout might have raced with the event ring handler, so
1519 * only delete from the list if the item isn't poisoned.
1520 */
1521 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
1522 list_del(&reset_device_cmd->cmd_list);
1523 spin_unlock_irqrestore(&xhci->lock, flags);
1524 ret = -ETIME;
1525 goto command_cleanup;
1526 }
1527
1528 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
1529 * unless we tried to reset a slot ID that wasn't enabled,
1530 * or the device wasn't in the addressed or configured state.
1531 */
1532 ret = reset_device_cmd->status;
1533 switch (ret) {
1534 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
1535 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
1536 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
1537 slot_id,
1538 xhci_get_slot_state(xhci, virt_dev->out_ctx));
1539 xhci_info(xhci, "Not freeing device rings.\n");
1540 /* Don't treat this as an error. May change my mind later. */
1541 ret = 0;
1542 goto command_cleanup;
1543 case COMP_SUCCESS:
1544 xhci_dbg(xhci, "Successful reset device command.\n");
1545 break;
1546 default:
1547 if (xhci_is_vendor_info_code(xhci, ret))
1548 break;
1549 xhci_warn(xhci, "Unknown completion code %u for "
1550 "reset device command.\n", ret);
1551 ret = -EINVAL;
1552 goto command_cleanup;
1553 }
1554
1555 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
1556 last_freed_endpoint = 1;
1557 for (i = 1; i < 31; ++i) {
1558 if (!virt_dev->eps[i].ring)
1559 continue;
1560 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1561 last_freed_endpoint = i;
1562 }
1563 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
1564 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
1565 ret = 0;
1566
1567command_cleanup:
1568 xhci_free_command(xhci, reset_device_cmd);
1569 return ret;
1570}
1571
1572/*
1423 * At this point, the struct usb_device is about to go away, the device has 1573 * At this point, the struct usb_device is about to go away, the device has
1424 * disconnected, and all traffic has been stopped and the endpoints have been 1574 * disconnected, and all traffic has been stopped and the endpoints have been
1425 * disabled. Free any HC data structures associated with that device. 1575 * disabled. Free any HC data structures associated with that device.
@@ -1427,16 +1577,27 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1427void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 1577void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
1428{ 1578{
1429 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1579 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1580 struct xhci_virt_device *virt_dev;
1430 unsigned long flags; 1581 unsigned long flags;
1431 u32 state; 1582 u32 state;
1583 int i;
1432 1584
1433 if (udev->slot_id == 0) 1585 if (udev->slot_id == 0)
1434 return; 1586 return;
1587 virt_dev = xhci->devs[udev->slot_id];
1588 if (!virt_dev)
1589 return;
1590
1591 /* Stop any wayward timer functions (which may grab the lock) */
1592 for (i = 0; i < 31; ++i) {
1593 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
1594 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
1595 }
1435 1596
1436 spin_lock_irqsave(&xhci->lock, flags); 1597 spin_lock_irqsave(&xhci->lock, flags);
1437 /* Don't disable the slot if the host controller is dead. */ 1598 /* Don't disable the slot if the host controller is dead. */
1438 state = xhci_readl(xhci, &xhci->op_regs->status); 1599 state = xhci_readl(xhci, &xhci->op_regs->status);
1439 if (state == 0xffffffff) { 1600 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
1440 xhci_free_virt_device(xhci, udev->slot_id); 1601 xhci_free_virt_device(xhci, udev->slot_id);
1441 spin_unlock_irqrestore(&xhci->lock, flags); 1602 spin_unlock_irqrestore(&xhci->lock, flags);
1442 return; 1603 return;
@@ -1645,7 +1806,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
1645 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 1806 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
1646 return -EINVAL; 1807 return -EINVAL;
1647 } 1808 }
1648 config_cmd = xhci_alloc_command(xhci, true, mem_flags); 1809 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
1649 if (!config_cmd) { 1810 if (!config_cmd) {
1650 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 1811 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
1651 return -ENOMEM; 1812 return -ENOMEM;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4b254b6fa245..ea389e9a4931 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -609,6 +609,10 @@ struct xhci_ep_ctx {
609#define MAX_PACKET_MASK (0xffff << 16) 609#define MAX_PACKET_MASK (0xffff << 16)
610#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) 610#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
611 611
612/* tx_info bitmasks */
613#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
614#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
615
612 616
613/** 617/**
614 * struct xhci_input_control_context 618 * struct xhci_input_control_context
@@ -652,13 +656,17 @@ struct xhci_virt_ep {
652 struct xhci_ring *new_ring; 656 struct xhci_ring *new_ring;
653 unsigned int ep_state; 657 unsigned int ep_state;
654#define SET_DEQ_PENDING (1 << 0) 658#define SET_DEQ_PENDING (1 << 0)
655#define EP_HALTED (1 << 1) 659#define EP_HALTED (1 << 1) /* For stall handling */
660#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */
656 /* ---- Related to URB cancellation ---- */ 661 /* ---- Related to URB cancellation ---- */
657 struct list_head cancelled_td_list; 662 struct list_head cancelled_td_list;
658 unsigned int cancels_pending;
659 /* The TRB that was last reported in a stopped endpoint ring */ 663 /* The TRB that was last reported in a stopped endpoint ring */
660 union xhci_trb *stopped_trb; 664 union xhci_trb *stopped_trb;
661 struct xhci_td *stopped_td; 665 struct xhci_td *stopped_td;
666 /* Watchdog timer for stop endpoint command to cancel URBs */
667 struct timer_list stop_cmd_timer;
668 int stop_cmds_pending;
669 struct xhci_hcd *xhci;
662}; 670};
663 671
664struct xhci_virt_device { 672struct xhci_virt_device {
@@ -673,6 +681,10 @@ struct xhci_virt_device {
673 struct xhci_container_ctx *out_ctx; 681 struct xhci_container_ctx *out_ctx;
674 /* Used for addressing devices and configuration changes */ 682 /* Used for addressing devices and configuration changes */
675 struct xhci_container_ctx *in_ctx; 683 struct xhci_container_ctx *in_ctx;
684 /* Rings saved to ensure old alt settings can be re-instated */
685 struct xhci_ring **ring_cache;
686 int num_rings_cached;
687#define XHCI_MAX_RINGS_CACHED 31
676 struct xhci_virt_ep eps[31]; 688 struct xhci_virt_ep eps[31];
677 struct completion cmd_completion; 689 struct completion cmd_completion;
678 /* Status of the last command issued for this device */ 690 /* Status of the last command issued for this device */
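ring_cache lets a device keep up to XHCI_MAX_RINGS_CACHED retired endpoint rings around instead of freeing them, so switching back to an old alternate setting does not have to re-allocate ring memory. It pairs with the xhci_free_or_cache_endpoint_ring() prototype added below. A hedged sketch of the cache-or-free decision:

    /* Sketch only: park a retired ring in the cache if there is room,
     * otherwise free it.  The upstream helper for this is
     * xhci_free_or_cache_endpoint_ring() in xhci-mem.c.
     */
    if (virt_dev->num_rings_cached < XHCI_MAX_RINGS_CACHED) {
            virt_dev->ring_cache[virt_dev->num_rings_cached++] =
                    virt_dev->eps[ep_index].ring;
    } else {
            xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
    }
    virt_dev->eps[ep_index].ring = NULL;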
@@ -824,9 +836,6 @@ struct xhci_event_cmd {
824/* Normal TRB fields */ 836/* Normal TRB fields */
825/* transfer_len bitmasks - bits 0:16 */ 837/* transfer_len bitmasks - bits 0:16 */
826#define TRB_LEN(p) ((p) & 0x1ffff) 838#define TRB_LEN(p) ((p) & 0x1ffff)
827/* TD size - number of bytes remaining in the TD (including this TRB):
828 * bits 17 - 21. Shift the number of bytes by 10. */
829#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17)
830/* Interrupter Target - which MSI-X vector to target the completion event at */ 839/* Interrupter Target - which MSI-X vector to target the completion event at */
831#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) 840#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
832#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) 841#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
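With TD_REMAINDER dropped, the status dword of a normal transfer TRB is built from just the transfer length and the interrupter target. Illustrative use of the remaining macros (trb_buff_len is a placeholder):

    /* Illustrative: length/status dword for a normal TRB aimed at
     * interrupter 0, plus the matching decode on the event side.
     */
    u32 length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
    unsigned int vector = GET_INTR_TARGET(length_field);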
@@ -1022,6 +1031,8 @@ struct xhci_scratchpad {
1022#define ERST_ENTRIES 1 1031#define ERST_ENTRIES 1
1023/* Poll every 60 seconds */ 1032/* Poll every 60 seconds */
1024#define POLL_TIMEOUT 60 1033#define POLL_TIMEOUT 60
1034/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
1035#define XHCI_STOP_EP_CMD_TIMEOUT 5
1025/* XXX: Make these module parameters */ 1036/* XXX: Make these module parameters */
1026 1037
1027 1038
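XHCI_STOP_EP_CMD_TIMEOUT is how long (in seconds) the cancellation path waits for a Stop Endpoint command before the watchdog declares the host dying. A hedged sketch of arming the timer when such a command is queued, assuming the driver's existing xhci_queue_stop_endpoint() helper and the xhci_ring_cmd_db() prototyped below:

    /* Sketch: arm the per-endpoint watchdog alongside the Stop Endpoint
     * command used for URB cancellation.  If the command never completes,
     * the watchdog fires after XHCI_STOP_EP_CMD_TIMEOUT seconds.
     */
    ep->ep_state |= EP_HALT_PENDING;
    ep->stop_cmds_pending++;
    mod_timer(&ep->stop_cmd_timer,
              jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
    xhci_queue_stop_endpoint(xhci, slot_id, ep_index);
    xhci_ring_cmd_db(xhci);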
@@ -1083,6 +1094,21 @@ struct xhci_hcd {
1083 struct timer_list event_ring_timer; 1094 struct timer_list event_ring_timer;
1084 int zombie; 1095 int zombie;
1085#endif 1096#endif
1097 /* Host controller watchdog timer structures */
1098 unsigned int xhc_state;
1099/* Host controller is dying - not responding to commands. "I'm not dead yet!"
1100 *
1101 * xHC interrupts have been disabled and a watchdog timer will halt (or has
1102 * already halted) the xHCI host and complete all URBs with an -ESHUTDOWN code. Any code
1103 * that sees this status (other than the timer that set it) should stop touching
1104 * hardware immediately. Interrupt handlers should return immediately when
1105 * they see this status (any time they drop and re-acquire xhci->lock).
1106 * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
1107 * putting the TD on the canceled list, etc.
1108 *
1109 * There are no reports of xHCI host controllers that display this issue.
1110 */
1111#define XHCI_STATE_DYING (1 << 0)
1086 /* Statistics */ 1112 /* Statistics */
1087 int noops_submitted; 1113 int noops_submitted;
1088 int noops_handled; 1114 int noops_handled;
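The comment above sets the contract for XHCI_STATE_DYING: once it is set, any path that re-takes xhci->lock must stop touching the hardware, and urb dequeue should only do the core unlink bookkeeping. A sketch of a dequeue path following that rule (names mirror the driver's, but this is illustrative, not the exact upstream body):

    static int example_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
    {
            struct xhci_hcd *xhci = hcd_to_xhci(hcd);
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&xhci->lock, flags);
            ret = usb_hcd_check_unlink_urb(hcd, urb, status);
            if (ret || (xhci->xhc_state & XHCI_STATE_DYING))
                    goto done;      /* the watchdog owns cleanup; touch no hardware */

            /* ... normal path: put the TD on cancelled_td_list, queue a
             *     Stop Endpoint command, ring the command doorbell ... */
    done:
            spin_unlock_irqrestore(&xhci->lock, flags);
            return ret;
    }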
@@ -1188,6 +1214,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
1188void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); 1214void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
1189void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); 1215void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
1190void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); 1216void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
1217char *xhci_get_slot_state(struct xhci_hcd *xhci,
1218 struct xhci_container_ctx *ctx);
1191 1219
1192/* xHCI memory management */ 1220/* xHCI memory management */
1193void xhci_mem_cleanup(struct xhci_hcd *xhci); 1221void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1211,8 +1239,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
1211 struct usb_device *udev, struct usb_host_endpoint *ep, 1239 struct usb_device *udev, struct usb_host_endpoint *ep,
1212 gfp_t mem_flags); 1240 gfp_t mem_flags);
1213void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); 1241void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
1242void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
1243 struct xhci_virt_device *virt_dev,
1244 unsigned int ep_index);
1214struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, 1245struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1215 bool allocate_completion, gfp_t mem_flags); 1246 bool allocate_in_ctx, bool allocate_completion,
1247 gfp_t mem_flags);
1216void xhci_free_command(struct xhci_hcd *xhci, 1248void xhci_free_command(struct xhci_hcd *xhci,
1217 struct xhci_command *command); 1249 struct xhci_command *command);
1218 1250
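xhci_alloc_command() gains an allocate_in_ctx flag ahead of allocate_completion, which is why the xhci.c hunk earlier now passes two booleans. A caller that wants both an input context and a completion, such as the xhci_update_hub_device() change above, does:

    /* Matches the updated call site in the xhci_update_hub_device() hunk. */
    config_cmd = xhci_alloc_command(xhci, true /* allocate_in_ctx */,
                                    true /* allocate_completion */, mem_flags);
    if (!config_cmd)
            return -ENOMEM;
    /* ... use config_cmd, then release it ... */
    xhci_free_command(xhci, config_cmd);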
@@ -1223,6 +1255,7 @@ void xhci_unregister_pci(void);
1223#endif 1255#endif
1224 1256
1225/* xHCI host controller glue */ 1257/* xHCI host controller glue */
1258void xhci_quiesce(struct xhci_hcd *xhci);
1226int xhci_halt(struct xhci_hcd *xhci); 1259int xhci_halt(struct xhci_hcd *xhci);
1227int xhci_reset(struct xhci_hcd *xhci); 1260int xhci_reset(struct xhci_hcd *xhci);
1228int xhci_init(struct usb_hcd *hcd); 1261int xhci_init(struct usb_hcd *hcd);
@@ -1241,11 +1274,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
1241int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); 1274int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1242int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); 1275int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1243void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); 1276void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
1277int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
1244int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); 1278int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1245void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); 1279void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1246 1280
1247/* xHCI ring, segment, TRB, and TD functions */ 1281/* xHCI ring, segment, TRB, and TD functions */
1248dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); 1282dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
1283struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1284 union xhci_trb *start_trb, union xhci_trb *end_trb,
1285 dma_addr_t suspect_dma);
1286int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
1249void xhci_ring_cmd_db(struct xhci_hcd *xhci); 1287void xhci_ring_cmd_db(struct xhci_hcd *xhci);
1250void *xhci_setup_one_noop(struct xhci_hcd *xhci); 1288void *xhci_setup_one_noop(struct xhci_hcd *xhci);
1251void xhci_handle_event(struct xhci_hcd *xhci); 1289void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1267,6 +1305,7 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1267 u32 slot_id); 1305 u32 slot_id);
1268int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, 1306int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
1269 unsigned int ep_index); 1307 unsigned int ep_index);
1308int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
1270void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, 1309void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
1271 unsigned int slot_id, unsigned int ep_index, 1310 unsigned int slot_id, unsigned int ep_index,
1272 struct xhci_td *cur_td, struct xhci_dequeue_state *state); 1311 struct xhci_td *cur_td, struct xhci_dequeue_state *state);
@@ -1278,6 +1317,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1278void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, 1317void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
1279 unsigned int slot_id, unsigned int ep_index, 1318 unsigned int slot_id, unsigned int ep_index,
1280 struct xhci_dequeue_state *deq_state); 1319 struct xhci_dequeue_state *deq_state);
1320void xhci_stop_endpoint_command_watchdog(unsigned long arg);
1281 1321
1282/* xHCI roothub code */ 1322/* xHCI roothub code */
1283int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, 1323int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,