Diffstat (limited to 'drivers/usb/host')
41 files changed, 6014 insertions, 914 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 1a920c70b5a1..9b43b226817f 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
| @@ -113,6 +113,12 @@ config USB_EHCI_HCD_PPC_OF | |||
| 113 | Enables support for the USB controller present on the PowerPC | 113 | Enables support for the USB controller present on the PowerPC |
| 114 | OpenFirmware platform bus. | 114 | OpenFirmware platform bus. |
| 115 | 115 | ||
| 116 | config USB_W90X900_EHCI | ||
| 117 | bool "W90X900(W90P910) EHCI support" | ||
| 118 | depends on USB_EHCI_HCD && ARCH_W90X900 | ||
| 119 | ---help--- | ||
| 120 | Enables support for the W90X900 USB controller | ||
| 121 | |||
| 116 | config USB_OXU210HP_HCD | 122 | config USB_OXU210HP_HCD |
| 117 | tristate "OXU210HP HCD support" | 123 | tristate "OXU210HP HCD support" |
| 118 | depends on USB | 124 | depends on USB |
| @@ -153,6 +159,18 @@ config USB_ISP1760_HCD | |||
| 153 | To compile this driver as a module, choose M here: the | 159 | To compile this driver as a module, choose M here: the |
| 154 | module will be called isp1760. | 160 | module will be called isp1760. |
| 155 | 161 | ||
| 162 | config USB_ISP1362_HCD | ||
| 163 | tristate "ISP1362 HCD support" | ||
| 164 | depends on USB | ||
| 165 | default N | ||
| 166 | ---help--- | ||
| 167 | Supports the Philips ISP1362 chip as a host controller | ||
| 168 | |||
| 169 | This driver does not support isochronous transfers. | ||
| 170 | |||
| 171 | To compile this driver as a module, choose M here: the | ||
| 172 | module will be called isp1362-hcd. | ||
| 173 | |||
| 156 | config USB_OHCI_HCD | 174 | config USB_OHCI_HCD |
| 157 | tristate "OHCI HCD support" | 175 | tristate "OHCI HCD support" |
| 158 | depends on USB && USB_ARCH_HAS_OHCI | 176 | depends on USB && USB_ARCH_HAS_OHCI |
| @@ -336,13 +354,6 @@ config USB_R8A66597_HCD | |||
| 336 | To compile this driver as a module, choose M here: the | 354 | To compile this driver as a module, choose M here: the |
| 337 | module will be called r8a66597-hcd. | 355 | module will be called r8a66597-hcd. |
| 338 | 356 | ||
| 339 | config SUPERH_ON_CHIP_R8A66597 | ||
| 340 | boolean "Enable SuperH on-chip R8A66597 USB" | ||
| 341 | depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724) | ||
| 342 | help | ||
| 343 | This driver enables support for the on-chip R8A66597 in the | ||
| 344 | SH7366, SH7723 and SH7724 processors. | ||
| 345 | |||
| 346 | config USB_WHCI_HCD | 357 | config USB_WHCI_HCD |
| 347 | tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" | 358 | tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" |
| 348 | depends on EXPERIMENTAL | 359 | depends on EXPERIMENTAL |
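For reference, once the respective platform and USB core support are enabled, the two options added above show up in .config like this (values are only an example, not part of the patch); USB_W90X900_EHCI is a bool so it can only be built in, while USB_ISP1362_HCD is tristate:

CONFIG_USB_W90X900_EHCI=y
# tristate, so =m builds the isp1362-hcd.ko module
CONFIG_USB_ISP1362_HCD=m
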
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 289d748bb414..f58b2494c44a 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
| @@ -21,6 +21,7 @@ obj-$(CONFIG_PCI) += pci-quirks.o | |||
| 21 | obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o | 21 | obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o |
| 22 | obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o | 22 | obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o |
| 23 | obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o | 23 | obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o |
| 24 | obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o | ||
| 24 | obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o | 25 | obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o |
| 25 | obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o | 26 | obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o |
| 26 | obj-$(CONFIG_USB_FHCI_HCD) += fhci.o | 27 | obj-$(CONFIG_USB_FHCI_HCD) += fhci.o |
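The single line added above follows the usual kbuild convention: the value of the config symbol is substituted into the variable name, which decides whether and how the object is built (an illustrative expansion, not literal make output):

obj-y += isp1362-hcd.o    # CONFIG_USB_ISP1362_HCD=y, linked into the kernel
obj-m += isp1362-hcd.o    # CONFIG_USB_ISP1362_HCD=m, built as isp1362-hcd.ko
obj-  += isp1362-hcd.o    # symbol unset, object not built at all
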
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
new file mode 100644
index 000000000000..87c1b7c34c0e
--- /dev/null
+++ b/drivers/usb/host/ehci-atmel.c
| @@ -0,0 +1,230 @@ | |||
| 1 | /* | ||
| 2 | * Driver for EHCI UHP on Atmel chips | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Atmel Corporation, | ||
| 5 | * Nicolas Ferre <nicolas.ferre@atmel.com> | ||
| 6 | * | ||
| 7 | * Based on various ehci-*.c drivers | ||
| 8 | * | ||
| 9 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 10 | * License. See the file COPYING in the main directory of this archive for | ||
| 11 | * more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/platform_device.h> | ||
| 16 | |||
| 17 | /* interface and function clocks */ | ||
| 18 | static struct clk *iclk, *fclk; | ||
| 19 | static int clocked; | ||
| 20 | |||
| 21 | /*-------------------------------------------------------------------------*/ | ||
| 22 | |||
| 23 | static void atmel_start_clock(void) | ||
| 24 | { | ||
| 25 | clk_enable(iclk); | ||
| 26 | clk_enable(fclk); | ||
| 27 | clocked = 1; | ||
| 28 | } | ||
| 29 | |||
| 30 | static void atmel_stop_clock(void) | ||
| 31 | { | ||
| 32 | clk_disable(fclk); | ||
| 33 | clk_disable(iclk); | ||
| 34 | clocked = 0; | ||
| 35 | } | ||
| 36 | |||
| 37 | static void atmel_start_ehci(struct platform_device *pdev) | ||
| 38 | { | ||
| 39 | dev_dbg(&pdev->dev, "start\n"); | ||
| 40 | atmel_start_clock(); | ||
| 41 | } | ||
| 42 | |||
| 43 | static void atmel_stop_ehci(struct platform_device *pdev) | ||
| 44 | { | ||
| 45 | dev_dbg(&pdev->dev, "stop\n"); | ||
| 46 | atmel_stop_clock(); | ||
| 47 | } | ||
| 48 | |||
| 49 | /*-------------------------------------------------------------------------*/ | ||
| 50 | |||
| 51 | static int ehci_atmel_setup(struct usb_hcd *hcd) | ||
| 52 | { | ||
| 53 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
| 54 | int retval = 0; | ||
| 55 | |||
| 56 | /* registers start at offset 0x0 */ | ||
| 57 | ehci->caps = hcd->regs; | ||
| 58 | ehci->regs = hcd->regs + | ||
| 59 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | ||
| 60 | dbg_hcs_params(ehci, "reset"); | ||
| 61 | dbg_hcc_params(ehci, "reset"); | ||
| 62 | |||
| 63 | /* cache this readonly data; minimize chip reads */ | ||
| 64 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | ||
| 65 | |||
| 66 | retval = ehci_halt(ehci); | ||
| 67 | if (retval) | ||
| 68 | return retval; | ||
| 69 | |||
| 70 | /* data structure init */ | ||
| 71 | retval = ehci_init(hcd); | ||
| 72 | if (retval) | ||
| 73 | return retval; | ||
| 74 | |||
| 75 | ehci->sbrn = 0x20; | ||
| 76 | |||
| 77 | ehci_reset(ehci); | ||
| 78 | ehci_port_power(ehci, 0); | ||
| 79 | |||
| 80 | return retval; | ||
| 81 | } | ||
| 82 | |||
| 83 | static const struct hc_driver ehci_atmel_hc_driver = { | ||
| 84 | .description = hcd_name, | ||
| 85 | .product_desc = "Atmel EHCI UHP HS", | ||
| 86 | .hcd_priv_size = sizeof(struct ehci_hcd), | ||
| 87 | |||
| 88 | /* generic hardware linkage */ | ||
| 89 | .irq = ehci_irq, | ||
| 90 | .flags = HCD_MEMORY | HCD_USB2, | ||
| 91 | |||
| 92 | /* basic lifecycle operations */ | ||
| 93 | .reset = ehci_atmel_setup, | ||
| 94 | .start = ehci_run, | ||
| 95 | .stop = ehci_stop, | ||
| 96 | .shutdown = ehci_shutdown, | ||
| 97 | |||
| 98 | /* managing i/o requests and associated device resources */ | ||
| 99 | .urb_enqueue = ehci_urb_enqueue, | ||
| 100 | .urb_dequeue = ehci_urb_dequeue, | ||
| 101 | .endpoint_disable = ehci_endpoint_disable, | ||
| 102 | |||
| 103 | /* scheduling support */ | ||
| 104 | .get_frame_number = ehci_get_frame, | ||
| 105 | |||
| 106 | /* root hub support */ | ||
| 107 | .hub_status_data = ehci_hub_status_data, | ||
| 108 | .hub_control = ehci_hub_control, | ||
| 109 | .bus_suspend = ehci_bus_suspend, | ||
| 110 | .bus_resume = ehci_bus_resume, | ||
| 111 | .relinquish_port = ehci_relinquish_port, | ||
| 112 | .port_handed_over = ehci_port_handed_over, | ||
| 113 | }; | ||
| 114 | |||
| 115 | static int __init ehci_atmel_drv_probe(struct platform_device *pdev) | ||
| 116 | { | ||
| 117 | struct usb_hcd *hcd; | ||
| 118 | const struct hc_driver *driver = &ehci_atmel_hc_driver; | ||
| 119 | struct resource *res; | ||
| 120 | int irq; | ||
| 121 | int retval; | ||
| 122 | |||
| 123 | if (usb_disabled()) | ||
| 124 | return -ENODEV; | ||
| 125 | |||
| 126 | pr_debug("Initializing Atmel-SoC USB Host Controller\n"); | ||
| 127 | |||
| 128 | irq = platform_get_irq(pdev, 0); | ||
| 129 | if (irq <= 0) { | ||
| 130 | dev_err(&pdev->dev, | ||
| 131 | "Found HC with no IRQ. Check %s setup!\n", | ||
| 132 | dev_name(&pdev->dev)); | ||
| 133 | retval = -ENODEV; | ||
| 134 | goto fail_create_hcd; | ||
| 135 | } | ||
| 136 | |||
| 137 | hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); | ||
| 138 | if (!hcd) { | ||
| 139 | retval = -ENOMEM; | ||
| 140 | goto fail_create_hcd; | ||
| 141 | } | ||
| 142 | |||
| 143 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 144 | if (!res) { | ||
| 145 | dev_err(&pdev->dev, | ||
| 146 | "Found HC with no register addr. Check %s setup!\n", | ||
| 147 | dev_name(&pdev->dev)); | ||
| 148 | retval = -ENODEV; | ||
| 149 | goto fail_request_resource; | ||
| 150 | } | ||
| 151 | hcd->rsrc_start = res->start; | ||
| 152 | hcd->rsrc_len = res->end - res->start + 1; | ||
| 153 | |||
| 154 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, | ||
| 155 | driver->description)) { | ||
| 156 | dev_dbg(&pdev->dev, "controller already in use\n"); | ||
| 157 | retval = -EBUSY; | ||
| 158 | goto fail_request_resource; | ||
| 159 | } | ||
| 160 | |||
| 161 | hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); | ||
| 162 | if (hcd->regs == NULL) { | ||
| 163 | dev_dbg(&pdev->dev, "error mapping memory\n"); | ||
| 164 | retval = -EFAULT; | ||
| 165 | goto fail_ioremap; | ||
| 166 | } | ||
| 167 | |||
| 168 | iclk = clk_get(&pdev->dev, "ehci_clk"); | ||
| 169 | if (IS_ERR(iclk)) { | ||
| 170 | dev_err(&pdev->dev, "Error getting interface clock\n"); | ||
| 171 | retval = -ENOENT; | ||
| 172 | goto fail_get_iclk; | ||
| 173 | } | ||
| 174 | fclk = clk_get(&pdev->dev, "uhpck"); | ||
| 175 | if (IS_ERR(fclk)) { | ||
| 176 | dev_err(&pdev->dev, "Error getting function clock\n"); | ||
| 177 | retval = -ENOENT; | ||
| 178 | goto fail_get_fclk; | ||
| 179 | } | ||
| 180 | |||
| 181 | atmel_start_ehci(pdev); | ||
| 182 | |||
| 183 | retval = usb_add_hcd(hcd, irq, IRQF_SHARED); | ||
| 184 | if (retval) | ||
| 185 | goto fail_add_hcd; | ||
| 186 | |||
| 187 | return retval; | ||
| 188 | |||
| 189 | fail_add_hcd: | ||
| 190 | atmel_stop_ehci(pdev); | ||
| 191 | clk_put(fclk); | ||
| 192 | fail_get_fclk: | ||
| 193 | clk_put(iclk); | ||
| 194 | fail_get_iclk: | ||
| 195 | iounmap(hcd->regs); | ||
| 196 | fail_ioremap: | ||
| 197 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
| 198 | fail_request_resource: | ||
| 199 | usb_put_hcd(hcd); | ||
| 200 | fail_create_hcd: | ||
| 201 | dev_err(&pdev->dev, "init %s fail, %d\n", | ||
| 202 | dev_name(&pdev->dev), retval); | ||
| 203 | |||
| 204 | return retval; | ||
| 205 | } | ||
| 206 | |||
| 207 | static int __exit ehci_atmel_drv_remove(struct platform_device *pdev) | ||
| 208 | { | ||
| 209 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||
| 210 | |||
| 211 | ehci_shutdown(hcd); | ||
| 212 | usb_remove_hcd(hcd); | ||
| 213 | iounmap(hcd->regs); | ||
| 214 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
| 215 | usb_put_hcd(hcd); | ||
| 216 | |||
| 217 | atmel_stop_ehci(pdev); | ||
| 218 | clk_put(fclk); | ||
| 219 | clk_put(iclk); | ||
| 220 | fclk = iclk = NULL; | ||
| 221 | |||
| 222 | return 0; | ||
| 223 | } | ||
| 224 | |||
| 225 | static struct platform_driver ehci_atmel_driver = { | ||
| 226 | .probe = ehci_atmel_drv_probe, | ||
| 227 | .remove = __exit_p(ehci_atmel_drv_remove), | ||
| 228 | .shutdown = usb_hcd_platform_shutdown, | ||
| 229 | .driver.name = "atmel-ehci", | ||
| 230 | }; | ||
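The probe routine above gets its registers, IRQ and clocks from platform resources, so it relies on board code registering a matching device. A minimal board-side sketch follows; the device name and resource kinds come from the driver, but the base address, IRQ number and the clkdev wiring behind clk_get(..., "ehci_clk") / clk_get(..., "uhpck") are placeholders, not real AT91 values.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource atmel_ehci_resources[] = {
	{
		.start	= 0x00800000,		/* placeholder UHP base address */
		.end	= 0x008fffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 22,			/* placeholder interrupt number */
		.end	= 22,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device atmel_ehci_device = {
	.name		= "atmel-ehci",		/* must match ehci_atmel_driver.driver.name */
	.id		= -1,
	.resource	= atmel_ehci_resources,
	.num_resources	= ARRAY_SIZE(atmel_ehci_resources),
};

static int __init board_add_ehci(void)
{
	return platform_device_register(&atmel_ehci_device);
}
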
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 59d208d94d4e..ed77be76d6bb 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
| @@ -199,10 +199,9 @@ static int ehci_hcd_au1xxx_drv_remove(struct platform_device *pdev) | |||
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | #ifdef CONFIG_PM | 201 | #ifdef CONFIG_PM |
| 202 | static int ehci_hcd_au1xxx_drv_suspend(struct platform_device *pdev, | 202 | static int ehci_hcd_au1xxx_drv_suspend(struct device *dev) |
| 203 | pm_message_t message) | ||
| 204 | { | 203 | { |
| 205 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 204 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 206 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 205 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
| 207 | unsigned long flags; | 206 | unsigned long flags; |
| 208 | int rc; | 207 | int rc; |
| @@ -229,12 +228,6 @@ static int ehci_hcd_au1xxx_drv_suspend(struct platform_device *pdev, | |||
| 229 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); | 228 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); |
| 230 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); | 229 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); |
| 231 | 230 | ||
| 232 | /* make sure snapshot being resumed re-enumerates everything */ | ||
| 233 | if (message.event == PM_EVENT_PRETHAW) { | ||
| 234 | ehci_halt(ehci); | ||
| 235 | ehci_reset(ehci); | ||
| 236 | } | ||
| 237 | |||
| 238 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 231 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
| 239 | 232 | ||
| 240 | au1xxx_stop_ehc(); | 233 | au1xxx_stop_ehc(); |
| @@ -248,10 +241,9 @@ bail: | |||
| 248 | return rc; | 241 | return rc; |
| 249 | } | 242 | } |
| 250 | 243 | ||
| 251 | 244 | static int ehci_hcd_au1xxx_drv_resume(struct device *dev) | |
| 252 | static int ehci_hcd_au1xxx_drv_resume(struct platform_device *pdev) | ||
| 253 | { | 245 | { |
| 254 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 246 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 255 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 247 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
| 256 | 248 | ||
| 257 | au1xxx_start_ehc(); | 249 | au1xxx_start_ehc(); |
| @@ -305,20 +297,25 @@ static int ehci_hcd_au1xxx_drv_resume(struct platform_device *pdev) | |||
| 305 | return 0; | 297 | return 0; |
| 306 | } | 298 | } |
| 307 | 299 | ||
| 300 | static struct dev_pm_ops au1xxx_ehci_pmops = { | ||
| 301 | .suspend = ehci_hcd_au1xxx_drv_suspend, | ||
| 302 | .resume = ehci_hcd_au1xxx_drv_resume, | ||
| 303 | }; | ||
| 304 | |||
| 305 | #define AU1XXX_EHCI_PMOPS &au1xxx_ehci_pmops | ||
| 306 | |||
| 308 | #else | 307 | #else |
| 309 | #define ehci_hcd_au1xxx_drv_suspend NULL | 308 | #define AU1XXX_EHCI_PMOPS NULL |
| 310 | #define ehci_hcd_au1xxx_drv_resume NULL | ||
| 311 | #endif | 309 | #endif |
| 312 | 310 | ||
| 313 | static struct platform_driver ehci_hcd_au1xxx_driver = { | 311 | static struct platform_driver ehci_hcd_au1xxx_driver = { |
| 314 | .probe = ehci_hcd_au1xxx_drv_probe, | 312 | .probe = ehci_hcd_au1xxx_drv_probe, |
| 315 | .remove = ehci_hcd_au1xxx_drv_remove, | 313 | .remove = ehci_hcd_au1xxx_drv_remove, |
| 316 | .shutdown = usb_hcd_platform_shutdown, | 314 | .shutdown = usb_hcd_platform_shutdown, |
| 317 | .suspend = ehci_hcd_au1xxx_drv_suspend, | ||
| 318 | .resume = ehci_hcd_au1xxx_drv_resume, | ||
| 319 | .driver = { | 315 | .driver = { |
| 320 | .name = "au1xxx-ehci", | 316 | .name = "au1xxx-ehci", |
| 321 | .owner = THIS_MODULE, | 317 | .owner = THIS_MODULE, |
| 318 | .pm = AU1XXX_EHCI_PMOPS, | ||
| 322 | } | 319 | } |
| 323 | }; | 320 | }; |
| 324 | 321 | ||
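The hunks above convert the Au1xxx glue from the legacy platform_driver .suspend/.resume methods to dev_pm_ops: the callbacks now take a struct device (so drvdata comes from dev_get_drvdata() instead of platform_get_drvdata()), the pm_message_t argument disappears along with the PM_EVENT_PRETHAW special case, and the table is attached through driver.pm. The same pattern in generic form, for a made-up "foo" driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

struct foo_priv { void __iomem *regs; };

static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* quiesce the hardware, mask interrupts, gate clocks ... */
	(void)priv;
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* ungate clocks, reprogram lost register state, restart ... */
	(void)priv;
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,	/* replaces .suspend/.resume in the platform_driver */
	},
};
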
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 7f4ace73d44a..874d2000bf92 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
| @@ -134,10 +134,11 @@ dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd) | |||
| 134 | static void __maybe_unused | 134 | static void __maybe_unused |
| 135 | dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) | 135 | dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 136 | { | 136 | { |
| 137 | struct ehci_qh_hw *hw = qh->hw; | ||
| 138 | |||
| 137 | ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label, | 139 | ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label, |
| 138 | qh, qh->hw_next, qh->hw_info1, qh->hw_info2, | 140 | qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current); |
| 139 | qh->hw_current); | 141 | dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next); |
| 140 | dbg_qtd ("overlay", ehci, (struct ehci_qtd *) &qh->hw_qtd_next); | ||
| 141 | } | 142 | } |
| 142 | 143 | ||
| 143 | static void __maybe_unused | 144 | static void __maybe_unused |
| @@ -400,31 +401,32 @@ static void qh_lines ( | |||
| 400 | char *next = *nextp; | 401 | char *next = *nextp; |
| 401 | char mark; | 402 | char mark; |
| 402 | __le32 list_end = EHCI_LIST_END(ehci); | 403 | __le32 list_end = EHCI_LIST_END(ehci); |
| 404 | struct ehci_qh_hw *hw = qh->hw; | ||
| 403 | 405 | ||
| 404 | if (qh->hw_qtd_next == list_end) /* NEC does this */ | 406 | if (hw->hw_qtd_next == list_end) /* NEC does this */ |
| 405 | mark = '@'; | 407 | mark = '@'; |
| 406 | else | 408 | else |
| 407 | mark = token_mark(ehci, qh->hw_token); | 409 | mark = token_mark(ehci, hw->hw_token); |
| 408 | if (mark == '/') { /* qh_alt_next controls qh advance? */ | 410 | if (mark == '/') { /* qh_alt_next controls qh advance? */ |
| 409 | if ((qh->hw_alt_next & QTD_MASK(ehci)) | 411 | if ((hw->hw_alt_next & QTD_MASK(ehci)) |
| 410 | == ehci->async->hw_alt_next) | 412 | == ehci->async->hw->hw_alt_next) |
| 411 | mark = '#'; /* blocked */ | 413 | mark = '#'; /* blocked */ |
| 412 | else if (qh->hw_alt_next == list_end) | 414 | else if (hw->hw_alt_next == list_end) |
| 413 | mark = '.'; /* use hw_qtd_next */ | 415 | mark = '.'; /* use hw_qtd_next */ |
| 414 | /* else alt_next points to some other qtd */ | 416 | /* else alt_next points to some other qtd */ |
| 415 | } | 417 | } |
| 416 | scratch = hc32_to_cpup(ehci, &qh->hw_info1); | 418 | scratch = hc32_to_cpup(ehci, &hw->hw_info1); |
| 417 | hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &qh->hw_current) : 0; | 419 | hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0; |
| 418 | temp = scnprintf (next, size, | 420 | temp = scnprintf (next, size, |
| 419 | "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)", | 421 | "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)", |
| 420 | qh, scratch & 0x007f, | 422 | qh, scratch & 0x007f, |
| 421 | speed_char (scratch), | 423 | speed_char (scratch), |
| 422 | (scratch >> 8) & 0x000f, | 424 | (scratch >> 8) & 0x000f, |
| 423 | scratch, hc32_to_cpup(ehci, &qh->hw_info2), | 425 | scratch, hc32_to_cpup(ehci, &hw->hw_info2), |
| 424 | hc32_to_cpup(ehci, &qh->hw_token), mark, | 426 | hc32_to_cpup(ehci, &hw->hw_token), mark, |
| 425 | (cpu_to_hc32(ehci, QTD_TOGGLE) & qh->hw_token) | 427 | (cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token) |
| 426 | ? "data1" : "data0", | 428 | ? "data1" : "data0", |
| 427 | (hc32_to_cpup(ehci, &qh->hw_alt_next) >> 1) & 0x0f); | 429 | (hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f); |
| 428 | size -= temp; | 430 | size -= temp; |
| 429 | next += temp; | 431 | next += temp; |
| 430 | 432 | ||
| @@ -435,10 +437,10 @@ static void qh_lines ( | |||
| 435 | mark = ' '; | 437 | mark = ' '; |
| 436 | if (hw_curr == td->qtd_dma) | 438 | if (hw_curr == td->qtd_dma) |
| 437 | mark = '*'; | 439 | mark = '*'; |
| 438 | else if (qh->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) | 440 | else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) |
| 439 | mark = '+'; | 441 | mark = '+'; |
| 440 | else if (QTD_LENGTH (scratch)) { | 442 | else if (QTD_LENGTH (scratch)) { |
| 441 | if (td->hw_alt_next == ehci->async->hw_alt_next) | 443 | if (td->hw_alt_next == ehci->async->hw->hw_alt_next) |
| 442 | mark = '#'; | 444 | mark = '#'; |
| 443 | else if (td->hw_alt_next != list_end) | 445 | else if (td->hw_alt_next != list_end) |
| 444 | mark = '/'; | 446 | mark = '/'; |
| @@ -550,12 +552,15 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf) | |||
| 550 | next += temp; | 552 | next += temp; |
| 551 | 553 | ||
| 552 | do { | 554 | do { |
| 555 | struct ehci_qh_hw *hw; | ||
| 556 | |||
| 553 | switch (hc32_to_cpu(ehci, tag)) { | 557 | switch (hc32_to_cpu(ehci, tag)) { |
| 554 | case Q_TYPE_QH: | 558 | case Q_TYPE_QH: |
| 559 | hw = p.qh->hw; | ||
| 555 | temp = scnprintf (next, size, " qh%d-%04x/%p", | 560 | temp = scnprintf (next, size, " qh%d-%04x/%p", |
| 556 | p.qh->period, | 561 | p.qh->period, |
| 557 | hc32_to_cpup(ehci, | 562 | hc32_to_cpup(ehci, |
| 558 | &p.qh->hw_info2) | 563 | &hw->hw_info2) |
| 559 | /* uframe masks */ | 564 | /* uframe masks */ |
| 560 | & (QH_CMASK | QH_SMASK), | 565 | & (QH_CMASK | QH_SMASK), |
| 561 | p.qh); | 566 | p.qh); |
| @@ -576,7 +581,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf) | |||
| 576 | /* show more info the first time around */ | 581 | /* show more info the first time around */ |
| 577 | if (temp == seen_count) { | 582 | if (temp == seen_count) { |
| 578 | u32 scratch = hc32_to_cpup(ehci, | 583 | u32 scratch = hc32_to_cpup(ehci, |
| 579 | &p.qh->hw_info1); | 584 | &hw->hw_info1); |
| 580 | struct ehci_qtd *qtd; | 585 | struct ehci_qtd *qtd; |
| 581 | char *type = ""; | 586 | char *type = ""; |
| 582 | 587 | ||
| @@ -609,7 +614,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf) | |||
| 609 | } else | 614 | } else |
| 610 | temp = 0; | 615 | temp = 0; |
| 611 | if (p.qh) { | 616 | if (p.qh) { |
| 612 | tag = Q_NEXT_TYPE(ehci, p.qh->hw_next); | 617 | tag = Q_NEXT_TYPE(ehci, hw->hw_next); |
| 613 | p = p.qh->qh_next; | 618 | p = p.qh->qh_next; |
| 614 | } | 619 | } |
| 615 | break; | 620 | break; |
| @@ -879,8 +884,7 @@ static int debug_close(struct inode *inode, struct file *file) | |||
| 879 | struct debug_buffer *buf = file->private_data; | 884 | struct debug_buffer *buf = file->private_data; |
| 880 | 885 | ||
| 881 | if (buf) { | 886 | if (buf) { |
| 882 | if (buf->output_buf) | 887 | vfree(buf->output_buf); |
| 883 | vfree(buf->output_buf); | ||
| 884 | kfree(buf); | 888 | kfree(buf); |
| 885 | } | 889 | } |
| 886 | 890 | ||
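Every qh->hw_* access above turns into qh->hw->hw_* because this series splits the queue head into a software part and a hardware part. A simplified sketch of that split (the real ehci.h declarations carry more fields, e.g. the buffer-pointer arrays):

/* hardware-visible queue head: laid out as the EHCI spec requires,
 * allocated from the "ehci_qh" dma_pool and read/written by the HC */
struct ehci_qh_hw {
	__hc32	hw_next;
	__hc32	hw_info1;	/* endpoint characteristics */
	__hc32	hw_info2;	/* endpoint capabilities, uframe masks */
	__hc32	hw_current;
	/* transfer overlay */
	__hc32	hw_qtd_next;
	__hc32	hw_alt_next;
	__hc32	hw_token;
	/* ... buffer pointers ... */
};

/* software bookkeeping: ordinary kernel memory the HC never sees */
struct ehci_qh {
	struct ehci_qh_hw *hw;		/* the DMA-coherent part above */
	dma_addr_t	qh_dma;		/* bus address of *hw */
	/* ... qtd_list, dummy, qh_state, refcount, scheduling data ... */
};
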
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 11c627ce6022..9835e0713943 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
| @@ -30,7 +30,6 @@ | |||
| 30 | #include <linux/timer.h> | 30 | #include <linux/timer.h> |
| 31 | #include <linux/list.h> | 31 | #include <linux/list.h> |
| 32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
| 33 | #include <linux/reboot.h> | ||
| 34 | #include <linux/usb.h> | 33 | #include <linux/usb.h> |
| 35 | #include <linux/moduleparam.h> | 34 | #include <linux/moduleparam.h> |
| 36 | #include <linux/dma-mapping.h> | 35 | #include <linux/dma-mapping.h> |
| @@ -127,6 +126,8 @@ timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action) | |||
| 127 | 126 | ||
| 128 | switch (action) { | 127 | switch (action) { |
| 129 | case TIMER_IO_WATCHDOG: | 128 | case TIMER_IO_WATCHDOG: |
| 129 | if (!ehci->need_io_watchdog) | ||
| 130 | return; | ||
| 130 | t = EHCI_IO_JIFFIES; | 131 | t = EHCI_IO_JIFFIES; |
| 131 | break; | 132 | break; |
| 132 | case TIMER_ASYNC_OFF: | 133 | case TIMER_ASYNC_OFF: |
| @@ -239,6 +240,11 @@ static int ehci_reset (struct ehci_hcd *ehci) | |||
| 239 | int retval; | 240 | int retval; |
| 240 | u32 command = ehci_readl(ehci, &ehci->regs->command); | 241 | u32 command = ehci_readl(ehci, &ehci->regs->command); |
| 241 | 242 | ||
| 243 | /* If the EHCI debug controller is active, special care must be | ||
| 244 | * taken before and after a host controller reset */ | ||
| 245 | if (ehci->debug && !dbgp_reset_prep()) | ||
| 246 | ehci->debug = NULL; | ||
| 247 | |||
| 242 | command |= CMD_RESET; | 248 | command |= CMD_RESET; |
| 243 | dbg_cmd (ehci, "reset", command); | 249 | dbg_cmd (ehci, "reset", command); |
| 244 | ehci_writel(ehci, command, &ehci->regs->command); | 250 | ehci_writel(ehci, command, &ehci->regs->command); |
| @@ -247,12 +253,21 @@ static int ehci_reset (struct ehci_hcd *ehci) | |||
| 247 | retval = handshake (ehci, &ehci->regs->command, | 253 | retval = handshake (ehci, &ehci->regs->command, |
| 248 | CMD_RESET, 0, 250 * 1000); | 254 | CMD_RESET, 0, 250 * 1000); |
| 249 | 255 | ||
| 256 | if (ehci->has_hostpc) { | ||
| 257 | ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS, | ||
| 258 | (u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX)); | ||
| 259 | ehci_writel(ehci, TXFIFO_DEFAULT, | ||
| 260 | (u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING)); | ||
| 261 | } | ||
| 250 | if (retval) | 262 | if (retval) |
| 251 | return retval; | 263 | return retval; |
| 252 | 264 | ||
| 253 | if (ehci_is_TDI(ehci)) | 265 | if (ehci_is_TDI(ehci)) |
| 254 | tdi_reset (ehci); | 266 | tdi_reset (ehci); |
| 255 | 267 | ||
| 268 | if (ehci->debug) | ||
| 269 | dbgp_external_startup(); | ||
| 270 | |||
| 256 | return retval; | 271 | return retval; |
| 257 | } | 272 | } |
| 258 | 273 | ||
| @@ -505,9 +520,14 @@ static int ehci_init(struct usb_hcd *hcd) | |||
| 505 | u32 temp; | 520 | u32 temp; |
| 506 | int retval; | 521 | int retval; |
| 507 | u32 hcc_params; | 522 | u32 hcc_params; |
| 523 | struct ehci_qh_hw *hw; | ||
| 508 | 524 | ||
| 509 | spin_lock_init(&ehci->lock); | 525 | spin_lock_init(&ehci->lock); |
| 510 | 526 | ||
| 527 | /* | ||
| 528 | * keep io watchdog by default, those good HCDs could turn off it later | ||
| 529 | */ | ||
| 530 | ehci->need_io_watchdog = 1; | ||
| 511 | init_timer(&ehci->watchdog); | 531 | init_timer(&ehci->watchdog); |
| 512 | ehci->watchdog.function = ehci_watchdog; | 532 | ehci->watchdog.function = ehci_watchdog; |
| 513 | ehci->watchdog.data = (unsigned long) ehci; | 533 | ehci->watchdog.data = (unsigned long) ehci; |
| @@ -544,12 +564,13 @@ static int ehci_init(struct usb_hcd *hcd) | |||
| 544 | * from automatically advancing to the next td after short reads. | 564 | * from automatically advancing to the next td after short reads. |
| 545 | */ | 565 | */ |
| 546 | ehci->async->qh_next.qh = NULL; | 566 | ehci->async->qh_next.qh = NULL; |
| 547 | ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); | 567 | hw = ehci->async->hw; |
| 548 | ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); | 568 | hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); |
| 549 | ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); | 569 | hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); |
| 550 | ehci->async->hw_qtd_next = EHCI_LIST_END(ehci); | 570 | hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); |
| 571 | hw->hw_qtd_next = EHCI_LIST_END(ehci); | ||
| 551 | ehci->async->qh_state = QH_STATE_LINKED; | 572 | ehci->async->qh_state = QH_STATE_LINKED; |
| 552 | ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma); | 573 | hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma); |
| 553 | 574 | ||
| 554 | /* clear interrupt enables, set irq latency */ | 575 | /* clear interrupt enables, set irq latency */ |
| 555 | if (log2_irq_thresh < 0 || log2_irq_thresh > 6) | 576 | if (log2_irq_thresh < 0 || log2_irq_thresh > 6) |
| @@ -850,12 +871,18 @@ static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 850 | if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim) | 871 | if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim) |
| 851 | end_unlink_async(ehci); | 872 | end_unlink_async(ehci); |
| 852 | 873 | ||
| 853 | /* if it's not linked then there's nothing to do */ | 874 | /* If the QH isn't linked then there's nothing we can do |
| 854 | if (qh->qh_state != QH_STATE_LINKED) | 875 | * unless we were called during a giveback, in which case |
| 855 | ; | 876 | * qh_completions() has to deal with it. |
| 877 | */ | ||
| 878 | if (qh->qh_state != QH_STATE_LINKED) { | ||
| 879 | if (qh->qh_state == QH_STATE_COMPLETING) | ||
| 880 | qh->needs_rescan = 1; | ||
| 881 | return; | ||
| 882 | } | ||
| 856 | 883 | ||
| 857 | /* defer till later if busy */ | 884 | /* defer till later if busy */ |
| 858 | else if (ehci->reclaim) { | 885 | if (ehci->reclaim) { |
| 859 | struct ehci_qh *last; | 886 | struct ehci_qh *last; |
| 860 | 887 | ||
| 861 | for (last = ehci->reclaim; | 888 | for (last = ehci->reclaim; |
| @@ -915,8 +942,9 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
| 915 | break; | 942 | break; |
| 916 | switch (qh->qh_state) { | 943 | switch (qh->qh_state) { |
| 917 | case QH_STATE_LINKED: | 944 | case QH_STATE_LINKED: |
| 945 | case QH_STATE_COMPLETING: | ||
| 918 | intr_deschedule (ehci, qh); | 946 | intr_deschedule (ehci, qh); |
| 919 | /* FALL THROUGH */ | 947 | break; |
| 920 | case QH_STATE_IDLE: | 948 | case QH_STATE_IDLE: |
| 921 | qh_completions (ehci, qh); | 949 | qh_completions (ehci, qh); |
| 922 | break; | 950 | break; |
| @@ -925,23 +953,6 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
| 925 | qh, qh->qh_state); | 953 | qh, qh->qh_state); |
| 926 | goto done; | 954 | goto done; |
| 927 | } | 955 | } |
| 928 | |||
| 929 | /* reschedule QH iff another request is queued */ | ||
| 930 | if (!list_empty (&qh->qtd_list) | ||
| 931 | && HC_IS_RUNNING (hcd->state)) { | ||
| 932 | rc = qh_schedule(ehci, qh); | ||
| 933 | |||
| 934 | /* An error here likely indicates handshake failure | ||
| 935 | * or no space left in the schedule. Neither fault | ||
| 936 | * should happen often ... | ||
| 937 | * | ||
| 938 | * FIXME kill the now-dysfunctional queued urbs | ||
| 939 | */ | ||
| 940 | if (rc != 0) | ||
| 941 | ehci_err(ehci, | ||
| 942 | "can't reschedule qh %p, err %d", | ||
| 943 | qh, rc); | ||
| 944 | } | ||
| 945 | break; | 956 | break; |
| 946 | 957 | ||
| 947 | case PIPE_ISOCHRONOUS: | 958 | case PIPE_ISOCHRONOUS: |
| @@ -979,7 +990,7 @@ rescan: | |||
| 979 | /* endpoints can be iso streams. for now, we don't | 990 | /* endpoints can be iso streams. for now, we don't |
| 980 | * accelerate iso completions ... so spin a while. | 991 | * accelerate iso completions ... so spin a while. |
| 981 | */ | 992 | */ |
| 982 | if (qh->hw_info1 == 0) { | 993 | if (qh->hw->hw_info1 == 0) { |
| 983 | ehci_vdbg (ehci, "iso delay\n"); | 994 | ehci_vdbg (ehci, "iso delay\n"); |
| 984 | goto idle_timeout; | 995 | goto idle_timeout; |
| 985 | } | 996 | } |
| @@ -988,6 +999,7 @@ rescan: | |||
| 988 | qh->qh_state = QH_STATE_IDLE; | 999 | qh->qh_state = QH_STATE_IDLE; |
| 989 | switch (qh->qh_state) { | 1000 | switch (qh->qh_state) { |
| 990 | case QH_STATE_LINKED: | 1001 | case QH_STATE_LINKED: |
| 1002 | case QH_STATE_COMPLETING: | ||
| 991 | for (tmp = ehci->async->qh_next.qh; | 1003 | for (tmp = ehci->async->qh_next.qh; |
| 992 | tmp && tmp != qh; | 1004 | tmp && tmp != qh; |
| 993 | tmp = tmp->qh_next.qh) | 1005 | tmp = tmp->qh_next.qh) |
| @@ -1052,18 +1064,17 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) | |||
| 1052 | usb_settoggle(qh->dev, epnum, is_out, 0); | 1064 | usb_settoggle(qh->dev, epnum, is_out, 0); |
| 1053 | if (!list_empty(&qh->qtd_list)) { | 1065 | if (!list_empty(&qh->qtd_list)) { |
| 1054 | WARN_ONCE(1, "clear_halt for a busy endpoint\n"); | 1066 | WARN_ONCE(1, "clear_halt for a busy endpoint\n"); |
| 1055 | } else if (qh->qh_state == QH_STATE_LINKED) { | 1067 | } else if (qh->qh_state == QH_STATE_LINKED || |
| 1068 | qh->qh_state == QH_STATE_COMPLETING) { | ||
| 1056 | 1069 | ||
| 1057 | /* The toggle value in the QH can't be updated | 1070 | /* The toggle value in the QH can't be updated |
| 1058 | * while the QH is active. Unlink it now; | 1071 | * while the QH is active. Unlink it now; |
| 1059 | * re-linking will call qh_refresh(). | 1072 | * re-linking will call qh_refresh(). |
| 1060 | */ | 1073 | */ |
| 1061 | if (eptype == USB_ENDPOINT_XFER_BULK) { | 1074 | if (eptype == USB_ENDPOINT_XFER_BULK) |
| 1062 | unlink_async(ehci, qh); | 1075 | unlink_async(ehci, qh); |
| 1063 | } else { | 1076 | else |
| 1064 | intr_deschedule(ehci, qh); | 1077 | intr_deschedule(ehci, qh); |
| 1065 | (void) qh_schedule(ehci, qh); | ||
| 1066 | } | ||
| 1067 | } | 1078 | } |
| 1068 | } | 1079 | } |
| 1069 | spin_unlock_irqrestore(&ehci->lock, flags); | 1080 | spin_unlock_irqrestore(&ehci->lock, flags); |
| @@ -1117,6 +1128,16 @@ MODULE_LICENSE ("GPL"); | |||
| 1117 | #define PLATFORM_DRIVER ixp4xx_ehci_driver | 1128 | #define PLATFORM_DRIVER ixp4xx_ehci_driver |
| 1118 | #endif | 1129 | #endif |
| 1119 | 1130 | ||
| 1131 | #ifdef CONFIG_USB_W90X900_EHCI | ||
| 1132 | #include "ehci-w90x900.c" | ||
| 1133 | #define PLATFORM_DRIVER ehci_hcd_w90x900_driver | ||
| 1134 | #endif | ||
| 1135 | |||
| 1136 | #ifdef CONFIG_ARCH_AT91 | ||
| 1137 | #include "ehci-atmel.c" | ||
| 1138 | #define PLATFORM_DRIVER ehci_atmel_driver | ||
| 1139 | #endif | ||
| 1140 | |||
| 1120 | #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ | 1141 | #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ |
| 1121 | !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) | 1142 | !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) |
| 1122 | #error "missing bus glue for ehci-hcd" | 1143 | #error "missing bus glue for ehci-hcd" |
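The W90X900 and Atmel hunks above use the standard way bus glue is attached to ehci-hcd.c: the glue source file is #included and one of the *_DRIVER macros is defined so the registration code at the bottom of the file picks it up, with the #error guarding against a build that selects no glue at all. A hypothetical further glue would follow the same shape (names invented):

#ifdef CONFIG_USB_FOO_EHCI
#include "ehci-foo.c"
#define	PLATFORM_DRIVER		ehci_foo_driver
#endif

Since PLATFORM_DRIVER can only be defined once, this scheme assumes the per-platform config options are mutually exclusive.
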
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index f46ad27c9a90..1b6f1c0e5cee 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
| @@ -111,6 +111,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | |||
| 111 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); | 111 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); |
| 112 | int port; | 112 | int port; |
| 113 | int mask; | 113 | int mask; |
| 114 | u32 __iomem *hostpc_reg = NULL; | ||
| 114 | 115 | ||
| 115 | ehci_dbg(ehci, "suspend root hub\n"); | 116 | ehci_dbg(ehci, "suspend root hub\n"); |
| 116 | 117 | ||
| @@ -142,6 +143,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | |||
| 142 | u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; | 143 | u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; |
| 143 | u32 t2 = t1; | 144 | u32 t2 = t1; |
| 144 | 145 | ||
| 146 | if (ehci->has_hostpc) | ||
| 147 | hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs | ||
| 148 | + HOSTPC0 + 4 * (port & 0xff)); | ||
| 145 | /* keep track of which ports we suspend */ | 149 | /* keep track of which ports we suspend */ |
| 146 | if (t1 & PORT_OWNER) | 150 | if (t1 & PORT_OWNER) |
| 147 | set_bit(port, &ehci->owned_ports); | 151 | set_bit(port, &ehci->owned_ports); |
| @@ -151,15 +155,37 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | |||
| 151 | } | 155 | } |
| 152 | 156 | ||
| 153 | /* enable remote wakeup on all ports */ | 157 | /* enable remote wakeup on all ports */ |
| 154 | if (hcd->self.root_hub->do_remote_wakeup) | 158 | if (hcd->self.root_hub->do_remote_wakeup) { |
| 155 | t2 |= PORT_WAKE_BITS; | 159 | /* only enable appropriate wake bits, otherwise the |
| 156 | else | 160 | * hardware can not go phy low power mode. If a race |
| 161 | * condition happens here(connection change during bits | ||
| 162 | * set), the port change detection will finally fix it. | ||
| 163 | */ | ||
| 164 | if (t1 & PORT_CONNECT) { | ||
| 165 | t2 |= PORT_WKOC_E | PORT_WKDISC_E; | ||
| 166 | t2 &= ~PORT_WKCONN_E; | ||
| 167 | } else { | ||
| 168 | t2 |= PORT_WKOC_E | PORT_WKCONN_E; | ||
| 169 | t2 &= ~PORT_WKDISC_E; | ||
| 170 | } | ||
| 171 | } else | ||
| 157 | t2 &= ~PORT_WAKE_BITS; | 172 | t2 &= ~PORT_WAKE_BITS; |
| 158 | 173 | ||
| 159 | if (t1 != t2) { | 174 | if (t1 != t2) { |
| 160 | ehci_vdbg (ehci, "port %d, %08x -> %08x\n", | 175 | ehci_vdbg (ehci, "port %d, %08x -> %08x\n", |
| 161 | port + 1, t1, t2); | 176 | port + 1, t1, t2); |
| 162 | ehci_writel(ehci, t2, reg); | 177 | ehci_writel(ehci, t2, reg); |
| 178 | if (hostpc_reg) { | ||
| 179 | u32 t3; | ||
| 180 | |||
| 181 | msleep(5);/* 5ms for HCD enter low pwr mode */ | ||
| 182 | t3 = ehci_readl(ehci, hostpc_reg); | ||
| 183 | ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); | ||
| 184 | t3 = ehci_readl(ehci, hostpc_reg); | ||
| 185 | ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", | ||
| 186 | port, (t3 & HOSTPC_PHCD) ? | ||
| 187 | "succeeded" : "failed"); | ||
| 188 | } | ||
| 163 | } | 189 | } |
| 164 | } | 190 | } |
| 165 | 191 | ||
| @@ -183,6 +209,11 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | |||
| 183 | 209 | ||
| 184 | ehci->next_statechange = jiffies + msecs_to_jiffies(10); | 210 | ehci->next_statechange = jiffies + msecs_to_jiffies(10); |
| 185 | spin_unlock_irq (&ehci->lock); | 211 | spin_unlock_irq (&ehci->lock); |
| 212 | |||
| 213 | /* ehci_work() may have re-enabled the watchdog timer, which we do not | ||
| 214 | * want, and so we must delete any pending watchdog timer events. | ||
| 215 | */ | ||
| 216 | del_timer_sync(&ehci->watchdog); | ||
| 186 | return 0; | 217 | return 0; |
| 187 | } | 218 | } |
| 188 | 219 | ||
| @@ -204,6 +235,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd) | |||
| 204 | return -ESHUTDOWN; | 235 | return -ESHUTDOWN; |
| 205 | } | 236 | } |
| 206 | 237 | ||
| 238 | if (unlikely(ehci->debug)) { | ||
| 239 | if (ehci->debug && !dbgp_reset_prep()) | ||
| 240 | ehci->debug = NULL; | ||
| 241 | else | ||
| 242 | dbgp_external_startup(); | ||
| 243 | } | ||
| 244 | |||
| 207 | /* Ideally and we've got a real resume here, and no port's power | 245 | /* Ideally and we've got a real resume here, and no port's power |
| 208 | * was lost. (For PCI, that means Vaux was maintained.) But we | 246 | * was lost. (For PCI, that means Vaux was maintained.) But we |
| 209 | * could instead be restoring a swsusp snapshot -- so that BIOS was | 247 | * could instead be restoring a swsusp snapshot -- so that BIOS was |
| @@ -563,7 +601,8 @@ static int ehci_hub_control ( | |||
| 563 | int ports = HCS_N_PORTS (ehci->hcs_params); | 601 | int ports = HCS_N_PORTS (ehci->hcs_params); |
| 564 | u32 __iomem *status_reg = &ehci->regs->port_status[ | 602 | u32 __iomem *status_reg = &ehci->regs->port_status[ |
| 565 | (wIndex & 0xff) - 1]; | 603 | (wIndex & 0xff) - 1]; |
| 566 | u32 temp, status; | 604 | u32 __iomem *hostpc_reg = NULL; |
| 605 | u32 temp, temp1, status; | ||
| 567 | unsigned long flags; | 606 | unsigned long flags; |
| 568 | int retval = 0; | 607 | int retval = 0; |
| 569 | unsigned selector; | 608 | unsigned selector; |
| @@ -575,6 +614,9 @@ static int ehci_hub_control ( | |||
| 575 | * power, "this is the one", etc. EHCI spec supports this. | 614 | * power, "this is the one", etc. EHCI spec supports this. |
| 576 | */ | 615 | */ |
| 577 | 616 | ||
| 617 | if (ehci->has_hostpc) | ||
| 618 | hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs | ||
| 619 | + HOSTPC0 + 4 * ((wIndex & 0xff) - 1)); | ||
| 578 | spin_lock_irqsave (&ehci->lock, flags); | 620 | spin_lock_irqsave (&ehci->lock, flags); |
| 579 | switch (typeReq) { | 621 | switch (typeReq) { |
| 580 | case ClearHubFeature: | 622 | case ClearHubFeature: |
| @@ -773,7 +815,11 @@ static int ehci_hub_control ( | |||
| 773 | if (temp & PORT_CONNECT) { | 815 | if (temp & PORT_CONNECT) { |
| 774 | status |= 1 << USB_PORT_FEAT_CONNECTION; | 816 | status |= 1 << USB_PORT_FEAT_CONNECTION; |
| 775 | // status may be from integrated TT | 817 | // status may be from integrated TT |
| 776 | status |= ehci_port_speed(ehci, temp); | 818 | if (ehci->has_hostpc) { |
| 819 | temp1 = ehci_readl(ehci, hostpc_reg); | ||
| 820 | status |= ehci_port_speed(ehci, temp1); | ||
| 821 | } else | ||
| 822 | status |= ehci_port_speed(ehci, temp); | ||
| 777 | } | 823 | } |
| 778 | if (temp & PORT_PE) | 824 | if (temp & PORT_PE) |
| 779 | status |= 1 << USB_PORT_FEAT_ENABLE; | 825 | status |= 1 << USB_PORT_FEAT_ENABLE; |
| @@ -816,6 +862,15 @@ static int ehci_hub_control ( | |||
| 816 | case SetPortFeature: | 862 | case SetPortFeature: |
| 817 | selector = wIndex >> 8; | 863 | selector = wIndex >> 8; |
| 818 | wIndex &= 0xff; | 864 | wIndex &= 0xff; |
| 865 | if (unlikely(ehci->debug)) { | ||
| 866 | /* If the debug port is active any port | ||
| 867 | * feature requests should get denied */ | ||
| 868 | if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) && | ||
| 869 | (readl(&ehci->debug->control) & DBGP_ENABLED)) { | ||
| 870 | retval = -ENODEV; | ||
| 871 | goto error_exit; | ||
| 872 | } | ||
| 873 | } | ||
| 819 | if (!wIndex || wIndex > ports) | 874 | if (!wIndex || wIndex > ports) |
| 820 | goto error; | 875 | goto error; |
| 821 | wIndex--; | 876 | wIndex--; |
| @@ -832,6 +887,24 @@ static int ehci_hub_control ( | |||
| 832 | || (temp & PORT_RESET) != 0) | 887 | || (temp & PORT_RESET) != 0) |
| 833 | goto error; | 888 | goto error; |
| 834 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); | 889 | ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); |
| 890 | /* After above check the port must be connected. | ||
| 891 | * Set appropriate bit thus could put phy into low power | ||
| 892 | * mode if we have hostpc feature | ||
| 893 | */ | ||
| 894 | if (hostpc_reg) { | ||
| 895 | temp &= ~PORT_WKCONN_E; | ||
| 896 | temp |= (PORT_WKDISC_E | PORT_WKOC_E); | ||
| 897 | ehci_writel(ehci, temp | PORT_SUSPEND, | ||
| 898 | status_reg); | ||
| 899 | msleep(5);/* 5ms for HCD enter low pwr mode */ | ||
| 900 | temp1 = ehci_readl(ehci, hostpc_reg); | ||
| 901 | ehci_writel(ehci, temp1 | HOSTPC_PHCD, | ||
| 902 | hostpc_reg); | ||
| 903 | temp1 = ehci_readl(ehci, hostpc_reg); | ||
| 904 | ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", | ||
| 905 | wIndex, (temp1 & HOSTPC_PHCD) ? | ||
| 906 | "succeeded" : "failed"); | ||
| 907 | } | ||
| 835 | set_bit(wIndex, &ehci->suspended_ports); | 908 | set_bit(wIndex, &ehci->suspended_ports); |
| 836 | break; | 909 | break; |
| 837 | case USB_PORT_FEAT_POWER: | 910 | case USB_PORT_FEAT_POWER: |
| @@ -894,6 +967,7 @@ error: | |||
| 894 | /* "stall" on error */ | 967 | /* "stall" on error */ |
| 895 | retval = -EPIPE; | 968 | retval = -EPIPE; |
| 896 | } | 969 | } |
| 970 | error_exit: | ||
| 897 | spin_unlock_irqrestore (&ehci->lock, flags); | 971 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 898 | return retval; | 972 | return retval; |
| 899 | } | 973 | } |
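The suspend-path hunk above arms only the wake events that can actually fire for the port's current state, because a stray wake bit keeps the PHY from entering low-power mode. Restated as a small helper (a sketch only; the patch keeps this logic inline in ehci_bus_suspend() and the SetPortFeature suspend case):

/* over-current wake is always armed; beyond that, a connected port can
 * only wake on disconnect and an empty port can only wake on connect */
static u32 ehci_port_wake_bits(u32 portsc)
{
	if (portsc & PORT_CONNECT)
		return PORT_WKOC_E | PORT_WKDISC_E;
	else
		return PORT_WKOC_E | PORT_WKCONN_E;
}
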
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 10d52919abbb..aeda96e0af67 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
| @@ -75,7 +75,8 @@ static void qh_destroy(struct ehci_qh *qh) | |||
| 75 | } | 75 | } |
| 76 | if (qh->dummy) | 76 | if (qh->dummy) |
| 77 | ehci_qtd_free (ehci, qh->dummy); | 77 | ehci_qtd_free (ehci, qh->dummy); |
| 78 | dma_pool_free (ehci->qh_pool, qh, qh->qh_dma); | 78 | dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); |
| 79 | kfree(qh); | ||
| 79 | } | 80 | } |
| 80 | 81 | ||
| 81 | static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) | 82 | static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) |
| @@ -83,12 +84,14 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) | |||
| 83 | struct ehci_qh *qh; | 84 | struct ehci_qh *qh; |
| 84 | dma_addr_t dma; | 85 | dma_addr_t dma; |
| 85 | 86 | ||
| 86 | qh = (struct ehci_qh *) | 87 | qh = kzalloc(sizeof *qh, GFP_ATOMIC); |
| 87 | dma_pool_alloc (ehci->qh_pool, flags, &dma); | ||
| 88 | if (!qh) | 88 | if (!qh) |
| 89 | return qh; | 89 | goto done; |
| 90 | 90 | qh->hw = (struct ehci_qh_hw *) | |
| 91 | memset (qh, 0, sizeof *qh); | 91 | dma_pool_alloc(ehci->qh_pool, flags, &dma); |
| 92 | if (!qh->hw) | ||
| 93 | goto fail; | ||
| 94 | memset(qh->hw, 0, sizeof *qh->hw); | ||
| 92 | qh->refcount = 1; | 95 | qh->refcount = 1; |
| 93 | qh->ehci = ehci; | 96 | qh->ehci = ehci; |
| 94 | qh->qh_dma = dma; | 97 | qh->qh_dma = dma; |
| @@ -99,10 +102,15 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) | |||
| 99 | qh->dummy = ehci_qtd_alloc (ehci, flags); | 102 | qh->dummy = ehci_qtd_alloc (ehci, flags); |
| 100 | if (qh->dummy == NULL) { | 103 | if (qh->dummy == NULL) { |
| 101 | ehci_dbg (ehci, "no dummy td\n"); | 104 | ehci_dbg (ehci, "no dummy td\n"); |
| 102 | dma_pool_free (ehci->qh_pool, qh, qh->qh_dma); | 105 | goto fail1; |
| 103 | qh = NULL; | ||
| 104 | } | 106 | } |
| 107 | done: | ||
| 105 | return qh; | 108 | return qh; |
| 109 | fail1: | ||
| 110 | dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); | ||
| 111 | fail: | ||
| 112 | kfree(qh); | ||
| 113 | return NULL; | ||
| 106 | } | 114 | } |
| 107 | 115 | ||
| 108 | /* to share a qh (cpu threads, or hc) */ | 116 | /* to share a qh (cpu threads, or hc) */ |
| @@ -180,7 +188,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) | |||
| 180 | /* QHs for control/bulk/intr transfers */ | 188 | /* QHs for control/bulk/intr transfers */ |
| 181 | ehci->qh_pool = dma_pool_create ("ehci_qh", | 189 | ehci->qh_pool = dma_pool_create ("ehci_qh", |
| 182 | ehci_to_hcd(ehci)->self.controller, | 190 | ehci_to_hcd(ehci)->self.controller, |
| 183 | sizeof (struct ehci_qh), | 191 | sizeof(struct ehci_qh_hw), |
| 184 | 32 /* byte alignment (for hw parts) */, | 192 | 32 /* byte alignment (for hw parts) */, |
| 185 | 4096 /* can't cross 4K */); | 193 | 4096 /* can't cross 4K */); |
| 186 | if (!ehci->qh_pool) { | 194 | if (!ehci->qh_pool) { |
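ehci_qh_alloc() now allocates in two steps, and the dma_pool element shrinks from sizeof(struct ehci_qh) to sizeof(struct ehci_qh_hw), so only the hardware-visible words occupy DMA-coherent memory. The same pattern in generic form (a sketch with invented "foo" names):

#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct foo_desc_hw { __le32 word[12]; };	/* device-visible descriptor */

struct foo_desc {
	struct foo_desc_hw *hw;			/* element from the dma_pool */
	dma_addr_t	hw_dma;			/* its bus address */
	/* ... software-only state ... */
};

static struct foo_desc *foo_desc_alloc(struct dma_pool *pool, gfp_t flags)
{
	struct foo_desc *d = kzalloc(sizeof(*d), flags);

	if (!d)
		return NULL;
	d->hw = dma_pool_alloc(pool, flags, &d->hw_dma);
	if (!d->hw) {
		kfree(d);
		return NULL;
	}
	memset(d->hw, 0, sizeof(*d->hw));
	return d;
}

static void foo_desc_free(struct dma_pool *pool, struct foo_desc *d)
{
	dma_pool_free(pool, d->hw, d->hw_dma);
	kfree(d);
}
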
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index c2f1b7df918c..378861b9d79a 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
| @@ -27,28 +27,8 @@ | |||
| 27 | /* called after powerup, by probe or system-pm "wakeup" */ | 27 | /* called after powerup, by probe or system-pm "wakeup" */ |
| 28 | static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) | 28 | static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) |
| 29 | { | 29 | { |
| 30 | u32 temp; | ||
| 31 | int retval; | 30 | int retval; |
| 32 | 31 | ||
| 33 | /* optional debug port, normally in the first BAR */ | ||
| 34 | temp = pci_find_capability(pdev, 0x0a); | ||
| 35 | if (temp) { | ||
| 36 | pci_read_config_dword(pdev, temp, &temp); | ||
| 37 | temp >>= 16; | ||
| 38 | if ((temp & (3 << 13)) == (1 << 13)) { | ||
| 39 | temp &= 0x1fff; | ||
| 40 | ehci->debug = ehci_to_hcd(ehci)->regs + temp; | ||
| 41 | temp = ehci_readl(ehci, &ehci->debug->control); | ||
| 42 | ehci_info(ehci, "debug port %d%s\n", | ||
| 43 | HCS_DEBUG_PORT(ehci->hcs_params), | ||
| 44 | (temp & DBGP_ENABLED) | ||
| 45 | ? " IN USE" | ||
| 46 | : ""); | ||
| 47 | if (!(temp & DBGP_ENABLED)) | ||
| 48 | ehci->debug = NULL; | ||
| 49 | } | ||
| 50 | } | ||
| 51 | |||
| 52 | /* we expect static quirk code to handle the "extended capabilities" | 32 | /* we expect static quirk code to handle the "extended capabilities" |
| 53 | * (currently just BIOS handoff) allowed starting with EHCI 0.96 | 33 | * (currently just BIOS handoff) allowed starting with EHCI 0.96 |
| 54 | */ | 34 | */ |
| @@ -129,6 +109,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
| 129 | return retval; | 109 | return retval; |
| 130 | 110 | ||
| 131 | switch (pdev->vendor) { | 111 | switch (pdev->vendor) { |
| 112 | case PCI_VENDOR_ID_INTEL: | ||
| 113 | ehci->need_io_watchdog = 0; | ||
| 114 | break; | ||
| 132 | case PCI_VENDOR_ID_TDI: | 115 | case PCI_VENDOR_ID_TDI: |
| 133 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { | 116 | if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { |
| 134 | hcd->has_tt = 1; | 117 | hcd->has_tt = 1; |
| @@ -192,6 +175,25 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
| 192 | break; | 175 | break; |
| 193 | } | 176 | } |
| 194 | 177 | ||
| 178 | /* optional debug port, normally in the first BAR */ | ||
| 179 | temp = pci_find_capability(pdev, 0x0a); | ||
| 180 | if (temp) { | ||
| 181 | pci_read_config_dword(pdev, temp, &temp); | ||
| 182 | temp >>= 16; | ||
| 183 | if ((temp & (3 << 13)) == (1 << 13)) { | ||
| 184 | temp &= 0x1fff; | ||
| 185 | ehci->debug = ehci_to_hcd(ehci)->regs + temp; | ||
| 186 | temp = ehci_readl(ehci, &ehci->debug->control); | ||
| 187 | ehci_info(ehci, "debug port %d%s\n", | ||
| 188 | HCS_DEBUG_PORT(ehci->hcs_params), | ||
| 189 | (temp & DBGP_ENABLED) | ||
| 190 | ? " IN USE" | ||
| 191 | : ""); | ||
| 192 | if (!(temp & DBGP_ENABLED)) | ||
| 193 | ehci->debug = NULL; | ||
| 194 | } | ||
| 195 | } | ||
| 196 | |||
| 195 | ehci_reset(ehci); | 197 | ehci_reset(ehci); |
| 196 | 198 | ||
| 197 | /* at least the Genesys GL880S needs fixup here */ | 199 | /* at least the Genesys GL880S needs fixup here */ |
| @@ -242,7 +244,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
| 242 | * System suspend currently expects to be able to suspend the entire | 244 | * System suspend currently expects to be able to suspend the entire |
| 243 | * device tree, device-at-a-time. If we failed selective suspend | 245 | * device tree, device-at-a-time. If we failed selective suspend |
| 244 | * reports, system suspend would fail; so the root hub code must claim | 246 | * reports, system suspend would fail; so the root hub code must claim |
| 245 | * success. That's lying to usbcore, and it matters for for runtime | 247 | * success. That's lying to usbcore, and it matters for runtime |
| 246 | * PM scenarios with selective suspend and remote wakeup... | 248 | * PM scenarios with selective suspend and remote wakeup... |
| 247 | */ | 249 | */ |
| 248 | if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) | 250 | if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) |
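The debug-port probe that moves later into ehci_pci_setup() above decodes the EHCI Debug Port PCI capability (capability ID 0x0a). An annotated restatement of the bit layout it relies on, with invented variable names (cap_dword stands for the dword read by pci_read_config_dword() at the capability offset):

u32 offset_word = cap_dword >> 16;		/* upper half: debug-port base offset */
unsigned bar    = (offset_word >> 13) & 0x7;	/* bits 15:13 - which BAR holds the registers */
unsigned offset = offset_word & 0x1fff;		/* bits 12:0  - byte offset inside that BAR */

/* the driver keeps ehci->debug only when the BAR field is 1, i.e. the
 * debug registers sit in the first BAR, which is already mapped at
 * hcd->regs, hence ehci->debug = hcd->regs + offset */
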
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 7673554fa64d..00ad9ce392ed 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
| @@ -87,31 +87,33 @@ qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf, | |||
| 87 | static inline void | 87 | static inline void |
| 88 | qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) | 88 | qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) |
| 89 | { | 89 | { |
| 90 | struct ehci_qh_hw *hw = qh->hw; | ||
| 91 | |||
| 90 | /* writes to an active overlay are unsafe */ | 92 | /* writes to an active overlay are unsafe */ |
| 91 | BUG_ON(qh->qh_state != QH_STATE_IDLE); | 93 | BUG_ON(qh->qh_state != QH_STATE_IDLE); |
| 92 | 94 | ||
| 93 | qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); | 95 | hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); |
| 94 | qh->hw_alt_next = EHCI_LIST_END(ehci); | 96 | hw->hw_alt_next = EHCI_LIST_END(ehci); |
| 95 | 97 | ||
| 96 | /* Except for control endpoints, we make hardware maintain data | 98 | /* Except for control endpoints, we make hardware maintain data |
| 97 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, | 99 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, |
| 98 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will | 100 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will |
| 99 | * ever clear it. | 101 | * ever clear it. |
| 100 | */ | 102 | */ |
| 101 | if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { | 103 | if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { |
| 102 | unsigned is_out, epnum; | 104 | unsigned is_out, epnum; |
| 103 | 105 | ||
| 104 | is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8)); | 106 | is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8)); |
| 105 | epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f; | 107 | epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f; |
| 106 | if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { | 108 | if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { |
| 107 | qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); | 109 | hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); |
| 108 | usb_settoggle (qh->dev, epnum, is_out, 1); | 110 | usb_settoggle (qh->dev, epnum, is_out, 1); |
| 109 | } | 111 | } |
| 110 | } | 112 | } |
| 111 | 113 | ||
| 112 | /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ | 114 | /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ |
| 113 | wmb (); | 115 | wmb (); |
| 114 | qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); | 116 | hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); |
| 115 | } | 117 | } |
| 116 | 118 | ||
| 117 | /* if it weren't for a common silicon quirk (writing the dummy into the qh | 119 | /* if it weren't for a common silicon quirk (writing the dummy into the qh |
| @@ -129,7 +131,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 129 | qtd = list_entry (qh->qtd_list.next, | 131 | qtd = list_entry (qh->qtd_list.next, |
| 130 | struct ehci_qtd, qtd_list); | 132 | struct ehci_qtd, qtd_list); |
| 131 | /* first qtd may already be partially processed */ | 133 | /* first qtd may already be partially processed */ |
| 132 | if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current) | 134 | if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) |
| 133 | qtd = NULL; | 135 | qtd = NULL; |
| 134 | } | 136 | } |
| 135 | 137 | ||
| @@ -260,7 +262,7 @@ __acquires(ehci->lock) | |||
| 260 | struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; | 262 | struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; |
| 261 | 263 | ||
| 262 | /* S-mask in a QH means it's an interrupt urb */ | 264 | /* S-mask in a QH means it's an interrupt urb */ |
| 263 | if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { | 265 | if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { |
| 264 | 266 | ||
| 265 | /* ... update hc-wide periodic stats (for usbfs) */ | 267 | /* ... update hc-wide periodic stats (for usbfs) */ |
| 266 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; | 268 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; |
| @@ -297,7 +299,6 @@ __acquires(ehci->lock) | |||
| 297 | static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); | 299 | static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); |
| 298 | static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); | 300 | static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); |
| 299 | 301 | ||
| 300 | static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh); | ||
| 301 | static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); | 302 | static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); |
| 302 | 303 | ||
| 303 | /* | 304 | /* |
| @@ -308,13 +309,14 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); | |||
| 308 | static unsigned | 309 | static unsigned |
| 309 | qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | 310 | qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 310 | { | 311 | { |
| 311 | struct ehci_qtd *last = NULL, *end = qh->dummy; | 312 | struct ehci_qtd *last, *end = qh->dummy; |
| 312 | struct list_head *entry, *tmp; | 313 | struct list_head *entry, *tmp; |
| 313 | int last_status = -EINPROGRESS; | 314 | int last_status; |
| 314 | int stopped; | 315 | int stopped; |
| 315 | unsigned count = 0; | 316 | unsigned count = 0; |
| 316 | u8 state; | 317 | u8 state; |
| 317 | __le32 halt = HALT_BIT(ehci); | 318 | const __le32 halt = HALT_BIT(ehci); |
| 319 | struct ehci_qh_hw *hw = qh->hw; | ||
| 318 | 320 | ||
| 319 | if (unlikely (list_empty (&qh->qtd_list))) | 321 | if (unlikely (list_empty (&qh->qtd_list))) |
| 320 | return count; | 322 | return count; |
| @@ -324,11 +326,20 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 324 | * they add urbs to this qh's queue or mark them for unlinking. | 326 | * they add urbs to this qh's queue or mark them for unlinking. |
| 325 | * | 327 | * |
| 326 | * NOTE: unlinking expects to be done in queue order. | 328 | * NOTE: unlinking expects to be done in queue order. |
| 329 | * | ||
| 330 | * It's a bug for qh->qh_state to be anything other than | ||
| 331 | * QH_STATE_IDLE, unless our caller is scan_async() or | ||
| 332 | * scan_periodic(). | ||
| 327 | */ | 333 | */ |
| 328 | state = qh->qh_state; | 334 | state = qh->qh_state; |
| 329 | qh->qh_state = QH_STATE_COMPLETING; | 335 | qh->qh_state = QH_STATE_COMPLETING; |
| 330 | stopped = (state == QH_STATE_IDLE); | 336 | stopped = (state == QH_STATE_IDLE); |
| 331 | 337 | ||
| 338 | rescan: | ||
| 339 | last = NULL; | ||
| 340 | last_status = -EINPROGRESS; | ||
| 341 | qh->needs_rescan = 0; | ||
| 342 | |||
| 332 | /* remove de-activated QTDs from front of queue. | 343 | /* remove de-activated QTDs from front of queue. |
| 333 | * after faults (including short reads), cleanup this urb | 344 | * after faults (including short reads), cleanup this urb |
| 334 | * then let the queue advance. | 345 | * then let the queue advance. |
| @@ -392,7 +403,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 392 | qtd->hw_token = cpu_to_hc32(ehci, | 403 | qtd->hw_token = cpu_to_hc32(ehci, |
| 393 | token); | 404 | token); |
| 394 | wmb(); | 405 | wmb(); |
| 395 | qh->hw_token = cpu_to_hc32(ehci, token); | 406 | hw->hw_token = cpu_to_hc32(ehci, |
| 407 | token); | ||
| 396 | goto retry_xacterr; | 408 | goto retry_xacterr; |
| 397 | } | 409 | } |
| 398 | stopped = 1; | 410 | stopped = 1; |
| @@ -435,8 +447,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 435 | /* qh unlinked; token in overlay may be most current */ | 447 | /* qh unlinked; token in overlay may be most current */ |
| 436 | if (state == QH_STATE_IDLE | 448 | if (state == QH_STATE_IDLE |
| 437 | && cpu_to_hc32(ehci, qtd->qtd_dma) | 449 | && cpu_to_hc32(ehci, qtd->qtd_dma) |
| 438 | == qh->hw_current) { | 450 | == hw->hw_current) { |
| 439 | token = hc32_to_cpu(ehci, qh->hw_token); | 451 | token = hc32_to_cpu(ehci, hw->hw_token); |
| 440 | 452 | ||
| 441 | /* An unlink may leave an incomplete | 453 | /* An unlink may leave an incomplete |
| 442 | * async transaction in the TT buffer. | 454 | * async transaction in the TT buffer. |
| @@ -449,9 +461,9 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 449 | * patch the qh later and so that completions can't | 461 | * patch the qh later and so that completions can't |
| 450 | * activate it while we "know" it's stopped. | 462 | * activate it while we "know" it's stopped. |
| 451 | */ | 463 | */ |
| 452 | if ((halt & qh->hw_token) == 0) { | 464 | if ((halt & hw->hw_token) == 0) { |
| 453 | halt: | 465 | halt: |
| 454 | qh->hw_token |= halt; | 466 | hw->hw_token |= halt; |
| 455 | wmb (); | 467 | wmb (); |
| 456 | } | 468 | } |
| 457 | } | 469 | } |
| @@ -503,6 +515,21 @@ halt: | |||
| 503 | ehci_qtd_free (ehci, last); | 515 | ehci_qtd_free (ehci, last); |
| 504 | } | 516 | } |
| 505 | 517 | ||
| 518 | /* Do we need to rescan for URBs dequeued during a giveback? */ | ||
| 519 | if (unlikely(qh->needs_rescan)) { | ||
| 520 | /* If the QH is already unlinked, do the rescan now. */ | ||
| 521 | if (state == QH_STATE_IDLE) | ||
| 522 | goto rescan; | ||
| 523 | |||
| 524 | /* Otherwise we have to wait until the QH is fully unlinked. | ||
| 525 | * Our caller will start an unlink if qh->needs_rescan is | ||
| 526 | * set. But if an unlink has already started, nothing needs | ||
| 527 | * to be done. | ||
| 528 | */ | ||
| 529 | if (state != QH_STATE_LINKED) | ||
| 530 | qh->needs_rescan = 0; | ||
| 531 | } | ||
| 532 | |||
| 506 | /* restore original state; caller must unlink or relink */ | 533 | /* restore original state; caller must unlink or relink */ |
| 507 | qh->qh_state = state; | 534 | qh->qh_state = state; |
| 508 | 535 | ||
| @@ -510,7 +537,7 @@ halt: | |||
| 510 | * it after fault cleanup, or recovering from silicon wrongly | 537 | * it after fault cleanup, or recovering from silicon wrongly |
| 511 | * overlaying the dummy qtd (which reduces DMA chatter). | 538 | * overlaying the dummy qtd (which reduces DMA chatter). |
| 512 | */ | 539 | */ |
| 513 | if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) { | 540 | if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) { |
| 514 | switch (state) { | 541 | switch (state) { |
| 515 | case QH_STATE_IDLE: | 542 | case QH_STATE_IDLE: |
| 516 | qh_refresh(ehci, qh); | 543 | qh_refresh(ehci, qh); |
| @@ -527,12 +554,9 @@ halt: | |||
| 527 | * That should be rare for interrupt transfers, | 554 | * That should be rare for interrupt transfers, |
| 528 | * except maybe high bandwidth ... | 555 | * except maybe high bandwidth ... |
| 529 | */ | 556 | */ |
| 530 | if ((cpu_to_hc32(ehci, QH_SMASK) | 557 | |
| 531 | & qh->hw_info2) != 0) { | 558 | /* Tell the caller to start an unlink */ |
| 532 | intr_deschedule (ehci, qh); | 559 | qh->needs_rescan = 1; |
| 533 | (void) qh_schedule (ehci, qh); | ||
| 534 | } else | ||
| 535 | unlink_async (ehci, qh); | ||
| 536 | break; | 560 | break; |
| 537 | /* otherwise, unlink already started */ | 561 | /* otherwise, unlink already started */ |
| 538 | } | 562 | } |
| @@ -649,7 +673,7 @@ qh_urb_transaction ( | |||
| 649 | * (this will usually be overridden later.) | 673 | * (this will usually be overridden later.) |
| 650 | */ | 674 | */ |
| 651 | if (is_input) | 675 | if (is_input) |
| 652 | qtd->hw_alt_next = ehci->async->hw_alt_next; | 676 | qtd->hw_alt_next = ehci->async->hw->hw_alt_next; |
| 653 | 677 | ||
| 654 | /* qh makes control packets use qtd toggle; maybe switch it */ | 678 | /* qh makes control packets use qtd toggle; maybe switch it */ |
| 655 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) | 679 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) |
| @@ -744,6 +768,7 @@ qh_make ( | |||
| 744 | int is_input, type; | 768 | int is_input, type; |
| 745 | int maxp = 0; | 769 | int maxp = 0; |
| 746 | struct usb_tt *tt = urb->dev->tt; | 770 | struct usb_tt *tt = urb->dev->tt; |
| 771 | struct ehci_qh_hw *hw; | ||
| 747 | 772 | ||
| 748 | if (!qh) | 773 | if (!qh) |
| 749 | return qh; | 774 | return qh; |
| @@ -890,8 +915,9 @@ done: | |||
| 890 | 915 | ||
| 891 | /* init as live, toggle clear, advance to dummy */ | 916 | /* init as live, toggle clear, advance to dummy */ |
| 892 | qh->qh_state = QH_STATE_IDLE; | 917 | qh->qh_state = QH_STATE_IDLE; |
| 893 | qh->hw_info1 = cpu_to_hc32(ehci, info1); | 918 | hw = qh->hw; |
| 894 | qh->hw_info2 = cpu_to_hc32(ehci, info2); | 919 | hw->hw_info1 = cpu_to_hc32(ehci, info1); |
| 920 | hw->hw_info2 = cpu_to_hc32(ehci, info2); | ||
| 895 | usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); | 921 | usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); |
| 896 | qh_refresh (ehci, qh); | 922 | qh_refresh (ehci, qh); |
| 897 | return qh; | 923 | return qh; |
| @@ -910,6 +936,8 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 910 | if (unlikely(qh->clearing_tt)) | 936 | if (unlikely(qh->clearing_tt)) |
| 911 | return; | 937 | return; |
| 912 | 938 | ||
| 939 | WARN_ON(qh->qh_state != QH_STATE_IDLE); | ||
| 940 | |||
| 913 | /* (re)start the async schedule? */ | 941 | /* (re)start the async schedule? */ |
| 914 | head = ehci->async; | 942 | head = ehci->async; |
| 915 | timer_action_done (ehci, TIMER_ASYNC_OFF); | 943 | timer_action_done (ehci, TIMER_ASYNC_OFF); |
| @@ -928,16 +956,15 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 928 | } | 956 | } |
| 929 | 957 | ||
| 930 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ | 958 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ |
| 931 | if (qh->qh_state == QH_STATE_IDLE) | 959 | qh_refresh(ehci, qh); |
| 932 | qh_refresh (ehci, qh); | ||
| 933 | 960 | ||
| 934 | /* splice right after start */ | 961 | /* splice right after start */ |
| 935 | qh->qh_next = head->qh_next; | 962 | qh->qh_next = head->qh_next; |
| 936 | qh->hw_next = head->hw_next; | 963 | qh->hw->hw_next = head->hw->hw_next; |
| 937 | wmb (); | 964 | wmb (); |
| 938 | 965 | ||
| 939 | head->qh_next.qh = qh; | 966 | head->qh_next.qh = qh; |
| 940 | head->hw_next = dma; | 967 | head->hw->hw_next = dma; |
| 941 | 968 | ||
| 942 | qh_get(qh); | 969 | qh_get(qh); |
| 943 | qh->xacterrs = 0; | 970 | qh->xacterrs = 0; |
| @@ -984,7 +1011,7 @@ static struct ehci_qh *qh_append_tds ( | |||
| 984 | 1011 | ||
| 985 | /* usb_reset_device() briefly reverts to address 0 */ | 1012 | /* usb_reset_device() briefly reverts to address 0 */ |
| 986 | if (usb_pipedevice (urb->pipe) == 0) | 1013 | if (usb_pipedevice (urb->pipe) == 0) |
| 987 | qh->hw_info1 &= ~qh_addr_mask; | 1014 | qh->hw->hw_info1 &= ~qh_addr_mask; |
| 988 | } | 1015 | } |
| 989 | 1016 | ||
| 990 | /* just one way to queue requests: swap with the dummy qtd. | 1017 | /* just one way to queue requests: swap with the dummy qtd. |
| @@ -1169,7 +1196,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 1169 | while (prev->qh_next.qh != qh) | 1196 | while (prev->qh_next.qh != qh) |
| 1170 | prev = prev->qh_next.qh; | 1197 | prev = prev->qh_next.qh; |
| 1171 | 1198 | ||
| 1172 | prev->hw_next = qh->hw_next; | 1199 | prev->hw->hw_next = qh->hw->hw_next; |
| 1173 | prev->qh_next = qh->qh_next; | 1200 | prev->qh_next = qh->qh_next; |
| 1174 | wmb (); | 1201 | wmb (); |
| 1175 | 1202 | ||
| @@ -1214,6 +1241,8 @@ rescan: | |||
| 1214 | qh = qh_get (qh); | 1241 | qh = qh_get (qh); |
| 1215 | qh->stamp = ehci->stamp; | 1242 | qh->stamp = ehci->stamp; |
| 1216 | temp = qh_completions (ehci, qh); | 1243 | temp = qh_completions (ehci, qh); |
| 1244 | if (qh->needs_rescan) | ||
| 1245 | unlink_async(ehci, qh); | ||
| 1217 | qh_put (qh); | 1246 | qh_put (qh); |
| 1218 | if (temp != 0) { | 1247 | if (temp != 0) { |
| 1219 | goto rescan; | 1248 | goto rescan; |
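The ehci-q.c hunks above replace the old path, in which qh_completions() itself called intr_deschedule()/qh_schedule() or unlink_async(), with a needs_rescan flag: completions only mark the QH, and the caller (scan_async(), or scan_periodic() via intr_deschedule()) performs the unlink and re-walks the queue once the QH is idle. The stand-alone C sketch below models only that hand-off; every name in it (model_qh, model_td, giveback_model, ...) is hypothetical and it is not the kernel API.

/* A stand-alone model of the qh_completions() rescan hand-off shown above.
 * Every name here (model_qh, model_td, giveback_model, ...) is hypothetical;
 * this is the control flow in miniature, not the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

struct model_td {
	int id;
	struct model_td *next;
};

struct model_qh {
	struct model_td *queue;		/* pending descriptors, head first */
	int linked;			/* 0 = idle, 1 = still on the schedule */
	int needs_rescan;		/* a dequeue happened during giveback */
};

/* Giving one descriptor back may dequeue another; the model just flags
 * that, as the patch does, instead of recursing into the scan. */
static void giveback_model(struct model_qh *qh, struct model_td *td)
{
	printf("complete td %d\n", td->id);
	if (td->id == 2 && qh->queue) {		/* pretend td 3 is dequeued here */
		struct model_td *victim = qh->queue;
		qh->queue = victim->next;
		printf("dequeued td %d during giveback\n", victim->id);
		free(victim);
		qh->needs_rescan = 1;
	}
	free(td);
}

static void qh_completions_model(struct model_qh *qh)
{
rescan:
	qh->needs_rescan = 0;
	while (qh->queue) {
		struct model_td *td = qh->queue;
		qh->queue = td->next;
		giveback_model(qh, td);
	}
	if (qh->needs_rescan) {
		if (!qh->linked)	/* idle: safe to walk the queue again now */
			goto rescan;
		/* otherwise leave the flag set; the caller starts the unlink */
	}
}

int main(void)
{
	struct model_qh qh = { .queue = NULL, .linked = 0, .needs_rescan = 0 };
	int id;

	for (id = 3; id >= 1; id--) {		/* queue td 1, td 2, td 3 */
		struct model_td *td = malloc(sizeof(*td));
		if (!td)
			return 1;
		td->id = id;
		td->next = qh.queue;
		qh.queue = td;
	}
	qh_completions_model(&qh);
	return 0;
}

In the real driver the "caller starts the unlink" half shows up in scan_async() (the unlink_async() call added above) and in intr_deschedule(), which now sets needs_rescan when it is invoked during a giveback.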
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index edd61ee90323..3ea05936851f 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
| @@ -60,6 +60,20 @@ periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic, | |||
| 60 | } | 60 | } |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static __hc32 * | ||
| 64 | shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic, | ||
| 65 | __hc32 tag) | ||
| 66 | { | ||
| 67 | switch (hc32_to_cpu(ehci, tag)) { | ||
| 68 | /* our ehci_shadow.qh is actually software part */ | ||
| 69 | case Q_TYPE_QH: | ||
| 70 | return &periodic->qh->hw->hw_next; | ||
| 71 | /* others are hw parts */ | ||
| 72 | default: | ||
| 73 | return periodic->hw_next; | ||
| 74 | } | ||
| 75 | } | ||
| 76 | |||
| 63 | /* caller must hold ehci->lock */ | 77 | /* caller must hold ehci->lock */ |
| 64 | static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) | 78 | static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) |
| 65 | { | 79 | { |
| @@ -71,7 +85,8 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) | |||
| 71 | while (here.ptr && here.ptr != ptr) { | 85 | while (here.ptr && here.ptr != ptr) { |
| 72 | prev_p = periodic_next_shadow(ehci, prev_p, | 86 | prev_p = periodic_next_shadow(ehci, prev_p, |
| 73 | Q_NEXT_TYPE(ehci, *hw_p)); | 87 | Q_NEXT_TYPE(ehci, *hw_p)); |
| 74 | hw_p = here.hw_next; | 88 | hw_p = shadow_next_periodic(ehci, &here, |
| 89 | Q_NEXT_TYPE(ehci, *hw_p)); | ||
| 75 | here = *prev_p; | 90 | here = *prev_p; |
| 76 | } | 91 | } |
| 77 | /* an interrupt entry (at list end) could have been shared */ | 92 | /* an interrupt entry (at list end) could have been shared */ |
| @@ -83,7 +98,7 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) | |||
| 83 | */ | 98 | */ |
| 84 | *prev_p = *periodic_next_shadow(ehci, &here, | 99 | *prev_p = *periodic_next_shadow(ehci, &here, |
| 85 | Q_NEXT_TYPE(ehci, *hw_p)); | 100 | Q_NEXT_TYPE(ehci, *hw_p)); |
| 86 | *hw_p = *here.hw_next; | 101 | *hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)); |
| 87 | } | 102 | } |
| 88 | 103 | ||
| 89 | /* how many of the uframe's 125 usecs are allocated? */ | 104 | /* how many of the uframe's 125 usecs are allocated? */ |
| @@ -93,18 +108,20 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) | |||
| 93 | __hc32 *hw_p = &ehci->periodic [frame]; | 108 | __hc32 *hw_p = &ehci->periodic [frame]; |
| 94 | union ehci_shadow *q = &ehci->pshadow [frame]; | 109 | union ehci_shadow *q = &ehci->pshadow [frame]; |
| 95 | unsigned usecs = 0; | 110 | unsigned usecs = 0; |
| 111 | struct ehci_qh_hw *hw; | ||
| 96 | 112 | ||
| 97 | while (q->ptr) { | 113 | while (q->ptr) { |
| 98 | switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { | 114 | switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { |
| 99 | case Q_TYPE_QH: | 115 | case Q_TYPE_QH: |
| 116 | hw = q->qh->hw; | ||
| 100 | /* is it in the S-mask? */ | 117 | /* is it in the S-mask? */ |
| 101 | if (q->qh->hw_info2 & cpu_to_hc32(ehci, 1 << uframe)) | 118 | if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe)) |
| 102 | usecs += q->qh->usecs; | 119 | usecs += q->qh->usecs; |
| 103 | /* ... or C-mask? */ | 120 | /* ... or C-mask? */ |
| 104 | if (q->qh->hw_info2 & cpu_to_hc32(ehci, | 121 | if (hw->hw_info2 & cpu_to_hc32(ehci, |
| 105 | 1 << (8 + uframe))) | 122 | 1 << (8 + uframe))) |
| 106 | usecs += q->qh->c_usecs; | 123 | usecs += q->qh->c_usecs; |
| 107 | hw_p = &q->qh->hw_next; | 124 | hw_p = &hw->hw_next; |
| 108 | q = &q->qh->qh_next; | 125 | q = &q->qh->qh_next; |
| 109 | break; | 126 | break; |
| 110 | // case Q_TYPE_FSTN: | 127 | // case Q_TYPE_FSTN: |
| @@ -237,10 +254,10 @@ periodic_tt_usecs ( | |||
| 237 | continue; | 254 | continue; |
| 238 | case Q_TYPE_QH: | 255 | case Q_TYPE_QH: |
| 239 | if (same_tt(dev, q->qh->dev)) { | 256 | if (same_tt(dev, q->qh->dev)) { |
| 240 | uf = tt_start_uframe(ehci, q->qh->hw_info2); | 257 | uf = tt_start_uframe(ehci, q->qh->hw->hw_info2); |
| 241 | tt_usecs[uf] += q->qh->tt_usecs; | 258 | tt_usecs[uf] += q->qh->tt_usecs; |
| 242 | } | 259 | } |
| 243 | hw_p = &q->qh->hw_next; | 260 | hw_p = &q->qh->hw->hw_next; |
| 244 | q = &q->qh->qh_next; | 261 | q = &q->qh->qh_next; |
| 245 | continue; | 262 | continue; |
| 246 | case Q_TYPE_SITD: | 263 | case Q_TYPE_SITD: |
| @@ -375,6 +392,7 @@ static int tt_no_collision ( | |||
| 375 | for (; frame < ehci->periodic_size; frame += period) { | 392 | for (; frame < ehci->periodic_size; frame += period) { |
| 376 | union ehci_shadow here; | 393 | union ehci_shadow here; |
| 377 | __hc32 type; | 394 | __hc32 type; |
| 395 | struct ehci_qh_hw *hw; | ||
| 378 | 396 | ||
| 379 | here = ehci->pshadow [frame]; | 397 | here = ehci->pshadow [frame]; |
| 380 | type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]); | 398 | type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]); |
| @@ -385,17 +403,18 @@ static int tt_no_collision ( | |||
| 385 | here = here.itd->itd_next; | 403 | here = here.itd->itd_next; |
| 386 | continue; | 404 | continue; |
| 387 | case Q_TYPE_QH: | 405 | case Q_TYPE_QH: |
| 406 | hw = here.qh->hw; | ||
| 388 | if (same_tt (dev, here.qh->dev)) { | 407 | if (same_tt (dev, here.qh->dev)) { |
| 389 | u32 mask; | 408 | u32 mask; |
| 390 | 409 | ||
| 391 | mask = hc32_to_cpu(ehci, | 410 | mask = hc32_to_cpu(ehci, |
| 392 | here.qh->hw_info2); | 411 | hw->hw_info2); |
| 393 | /* "knows" no gap is needed */ | 412 | /* "knows" no gap is needed */ |
| 394 | mask |= mask >> 8; | 413 | mask |= mask >> 8; |
| 395 | if (mask & uf_mask) | 414 | if (mask & uf_mask) |
| 396 | break; | 415 | break; |
| 397 | } | 416 | } |
| 398 | type = Q_NEXT_TYPE(ehci, here.qh->hw_next); | 417 | type = Q_NEXT_TYPE(ehci, hw->hw_next); |
| 399 | here = here.qh->qh_next; | 418 | here = here.qh->qh_next; |
| 400 | continue; | 419 | continue; |
| 401 | case Q_TYPE_SITD: | 420 | case Q_TYPE_SITD: |
| @@ -498,7 +517,8 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 498 | 517 | ||
| 499 | dev_dbg (&qh->dev->dev, | 518 | dev_dbg (&qh->dev->dev, |
| 500 | "link qh%d-%04x/%p start %d [%d/%d us]\n", | 519 | "link qh%d-%04x/%p start %d [%d/%d us]\n", |
| 501 | period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK), | 520 | period, hc32_to_cpup(ehci, &qh->hw->hw_info2) |
| 521 | & (QH_CMASK | QH_SMASK), | ||
| 502 | qh, qh->start, qh->usecs, qh->c_usecs); | 522 | qh, qh->start, qh->usecs, qh->c_usecs); |
| 503 | 523 | ||
| 504 | /* high bandwidth, or otherwise every microframe */ | 524 | /* high bandwidth, or otherwise every microframe */ |
| @@ -517,7 +537,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 517 | if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) | 537 | if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) |
| 518 | break; | 538 | break; |
| 519 | prev = periodic_next_shadow(ehci, prev, type); | 539 | prev = periodic_next_shadow(ehci, prev, type); |
| 520 | hw_p = &here.qh->hw_next; | 540 | hw_p = shadow_next_periodic(ehci, &here, type); |
| 521 | here = *prev; | 541 | here = *prev; |
| 522 | } | 542 | } |
| 523 | 543 | ||
| @@ -528,14 +548,14 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 528 | if (qh->period > here.qh->period) | 548 | if (qh->period > here.qh->period) |
| 529 | break; | 549 | break; |
| 530 | prev = &here.qh->qh_next; | 550 | prev = &here.qh->qh_next; |
| 531 | hw_p = &here.qh->hw_next; | 551 | hw_p = &here.qh->hw->hw_next; |
| 532 | here = *prev; | 552 | here = *prev; |
| 533 | } | 553 | } |
| 534 | /* link in this qh, unless some earlier pass did that */ | 554 | /* link in this qh, unless some earlier pass did that */ |
| 535 | if (qh != here.qh) { | 555 | if (qh != here.qh) { |
| 536 | qh->qh_next = here; | 556 | qh->qh_next = here; |
| 537 | if (here.qh) | 557 | if (here.qh) |
| 538 | qh->hw_next = *hw_p; | 558 | qh->hw->hw_next = *hw_p; |
| 539 | wmb (); | 559 | wmb (); |
| 540 | prev->qh = qh; | 560 | prev->qh = qh; |
| 541 | *hw_p = QH_NEXT (ehci, qh->qh_dma); | 561 | *hw_p = QH_NEXT (ehci, qh->qh_dma); |
| @@ -581,7 +601,7 @@ static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 581 | dev_dbg (&qh->dev->dev, | 601 | dev_dbg (&qh->dev->dev, |
| 582 | "unlink qh%d-%04x/%p start %d [%d/%d us]\n", | 602 | "unlink qh%d-%04x/%p start %d [%d/%d us]\n", |
| 583 | qh->period, | 603 | qh->period, |
| 584 | hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK), | 604 | hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), |
| 585 | qh, qh->start, qh->usecs, qh->c_usecs); | 605 | qh, qh->start, qh->usecs, qh->c_usecs); |
| 586 | 606 | ||
| 587 | /* qh->qh_next still "live" to HC */ | 607 | /* qh->qh_next still "live" to HC */ |
| @@ -595,7 +615,19 @@ static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 595 | 615 | ||
| 596 | static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) | 616 | static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 597 | { | 617 | { |
| 598 | unsigned wait; | 618 | unsigned wait; |
| 619 | struct ehci_qh_hw *hw = qh->hw; | ||
| 620 | int rc; | ||
| 621 | |||
| 622 | /* If the QH isn't linked then there's nothing we can do | ||
| 623 | * unless we were called during a giveback, in which case | ||
| 624 | * qh_completions() has to deal with it. | ||
| 625 | */ | ||
| 626 | if (qh->qh_state != QH_STATE_LINKED) { | ||
| 627 | if (qh->qh_state == QH_STATE_COMPLETING) | ||
| 628 | qh->needs_rescan = 1; | ||
| 629 | return; | ||
| 630 | } | ||
| 599 | 631 | ||
| 600 | qh_unlink_periodic (ehci, qh); | 632 | qh_unlink_periodic (ehci, qh); |
| 601 | 633 | ||
| @@ -606,15 +638,33 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 606 | */ | 638 | */ |
| 607 | if (list_empty (&qh->qtd_list) | 639 | if (list_empty (&qh->qtd_list) |
| 608 | || (cpu_to_hc32(ehci, QH_CMASK) | 640 | || (cpu_to_hc32(ehci, QH_CMASK) |
| 609 | & qh->hw_info2) != 0) | 641 | & hw->hw_info2) != 0) |
| 610 | wait = 2; | 642 | wait = 2; |
| 611 | else | 643 | else |
| 612 | wait = 55; /* worst case: 3 * 1024 */ | 644 | wait = 55; /* worst case: 3 * 1024 */ |
| 613 | 645 | ||
| 614 | udelay (wait); | 646 | udelay (wait); |
| 615 | qh->qh_state = QH_STATE_IDLE; | 647 | qh->qh_state = QH_STATE_IDLE; |
| 616 | qh->hw_next = EHCI_LIST_END(ehci); | 648 | hw->hw_next = EHCI_LIST_END(ehci); |
| 617 | wmb (); | 649 | wmb (); |
| 650 | |||
| 651 | qh_completions(ehci, qh); | ||
| 652 | |||
| 653 | /* reschedule QH iff another request is queued */ | ||
| 654 | if (!list_empty(&qh->qtd_list) && | ||
| 655 | HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) { | ||
| 656 | rc = qh_schedule(ehci, qh); | ||
| 657 | |||
| 658 | /* An error here likely indicates handshake failure | ||
| 659 | * or no space left in the schedule. Neither fault | ||
| 660 | * should happen often ... | ||
| 661 | * | ||
| 662 | * FIXME kill the now-dysfunctional queued urbs | ||
| 663 | */ | ||
| 664 | if (rc != 0) | ||
| 665 | ehci_err(ehci, "can't reschedule qh %p, err %d\n", | ||
| 666 | qh, rc); | ||
| 667 | } | ||
| 618 | } | 668 | } |
| 619 | 669 | ||
| 620 | /*-------------------------------------------------------------------------*/ | 670 | /*-------------------------------------------------------------------------*/ |
| @@ -739,14 +789,15 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 739 | unsigned uframe; | 789 | unsigned uframe; |
| 740 | __hc32 c_mask; | 790 | __hc32 c_mask; |
| 741 | unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ | 791 | unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ |
| 792 | struct ehci_qh_hw *hw = qh->hw; | ||
| 742 | 793 | ||
| 743 | qh_refresh(ehci, qh); | 794 | qh_refresh(ehci, qh); |
| 744 | qh->hw_next = EHCI_LIST_END(ehci); | 795 | hw->hw_next = EHCI_LIST_END(ehci); |
| 745 | frame = qh->start; | 796 | frame = qh->start; |
| 746 | 797 | ||
| 747 | /* reuse the previous schedule slots, if we can */ | 798 | /* reuse the previous schedule slots, if we can */ |
| 748 | if (frame < qh->period) { | 799 | if (frame < qh->period) { |
| 749 | uframe = ffs(hc32_to_cpup(ehci, &qh->hw_info2) & QH_SMASK); | 800 | uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK); |
| 750 | status = check_intr_schedule (ehci, frame, --uframe, | 801 | status = check_intr_schedule (ehci, frame, --uframe, |
| 751 | qh, &c_mask); | 802 | qh, &c_mask); |
| 752 | } else { | 803 | } else { |
| @@ -784,11 +835,11 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
| 784 | qh->start = frame; | 835 | qh->start = frame; |
| 785 | 836 | ||
| 786 | /* reset S-frame and (maybe) C-frame masks */ | 837 | /* reset S-frame and (maybe) C-frame masks */ |
| 787 | qh->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); | 838 | hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); |
| 788 | qh->hw_info2 |= qh->period | 839 | hw->hw_info2 |= qh->period |
| 789 | ? cpu_to_hc32(ehci, 1 << uframe) | 840 | ? cpu_to_hc32(ehci, 1 << uframe) |
| 790 | : cpu_to_hc32(ehci, QH_SMASK); | 841 | : cpu_to_hc32(ehci, QH_SMASK); |
| 791 | qh->hw_info2 |= c_mask; | 842 | hw->hw_info2 |= c_mask; |
| 792 | } else | 843 | } else |
| 793 | ehci_dbg (ehci, "reused qh %p schedule\n", qh); | 844 | ehci_dbg (ehci, "reused qh %p schedule\n", qh); |
| 794 | 845 | ||
| @@ -2188,10 +2239,11 @@ restart: | |||
| 2188 | case Q_TYPE_QH: | 2239 | case Q_TYPE_QH: |
| 2189 | /* handle any completions */ | 2240 | /* handle any completions */ |
| 2190 | temp.qh = qh_get (q.qh); | 2241 | temp.qh = qh_get (q.qh); |
| 2191 | type = Q_NEXT_TYPE(ehci, q.qh->hw_next); | 2242 | type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next); |
| 2192 | q = q.qh->qh_next; | 2243 | q = q.qh->qh_next; |
| 2193 | modified = qh_completions (ehci, temp.qh); | 2244 | modified = qh_completions (ehci, temp.qh); |
| 2194 | if (unlikely (list_empty (&temp.qh->qtd_list))) | 2245 | if (unlikely(list_empty(&temp.qh->qtd_list) || |
| 2246 | temp.qh->needs_rescan)) | ||
| 2195 | intr_deschedule (ehci, temp.qh); | 2247 | intr_deschedule (ehci, temp.qh); |
| 2196 | qh_put (temp.qh); | 2248 | qh_put (temp.qh); |
| 2197 | break; | 2249 | break; |
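A recurring detail in the ehci-sched.c hunks is that a QH's hardware link word now lives behind qh->hw, while iTD/siTD/FSTN entries still embed theirs, so the periodic-list walkers go through the new shadow_next_periodic() helper instead of touching hw_next directly. Below is a stand-alone sketch of that access pattern; the types and names are illustrative only, not the kernel's.

/* Stand-alone sketch of the shadow_next_periodic() idea: one entry type
 * reaches its hardware link through an extra indirection, the others keep
 * it inline. All names are illustrative; only the access pattern matches.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

enum entry_type { TYPE_QH, TYPE_ITD };

struct qh_hw { uint32_t hw_next; };	/* the part a controller would DMA */
struct qh    { struct qh_hw *hw; };	/* driver-private wrapper */
struct itd   { uint32_t hw_next; };	/* hw link still embedded */

union shadow {
	struct qh  *qh;
	struct itd *itd;
	void       *ptr;
};

/* Return a pointer to the entry's hardware link word, whatever its type. */
static uint32_t *shadow_next(union shadow *e, enum entry_type type)
{
	switch (type) {
	case TYPE_QH:
		return &e->qh->hw->hw_next;	/* software wrapper -> hw part */
	default:
		return &e->itd->hw_next;	/* hw link lives in the entry */
	}
}

int main(void)
{
	struct qh_hw hw = { .hw_next = 0x1001 };
	struct qh q = { .hw = &hw };
	struct itd it = { .hw_next = 0x2001 };
	union shadow a = { .qh = &q };
	union shadow b = { .itd = &it };

	printf("qh  link %#" PRIx32 "\n", *shadow_next(&a, TYPE_QH));
	printf("itd link %#" PRIx32 "\n", *shadow_next(&b, TYPE_ITD));
	return 0;
}

periodic_unlink() and qh_link_periodic() above switch to exactly this helper so they keep following the hardware chain correctly even when a QH sits between other entry types.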

diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c new file mode 100644 index 000000000000..cfa21ea20f82 --- /dev/null +++ b/drivers/usb/host/ehci-w90x900.c | |||
| @@ -0,0 +1,181 @@ | |||
| 1 | /* | ||
| 2 | * linux/drivers/usb/host/ehci-w90x900.c | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008 Nuvoton technology corporation. | ||
| 5 | * | ||
| 6 | * Wan ZongShun <mcuos.com@gmail.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; version 2 of the License. | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/platform_device.h> | ||
| 15 | |||
| 16 | /* enable phy0 and phy1 for w90p910 */ | ||
| 17 | #define ENPHY (0x01<<8) | ||
| 18 | #define PHY0_CTR (0xA4) | ||
| 19 | #define PHY1_CTR (0xA8) | ||
| 20 | |||
| 21 | static int __devinit usb_w90x900_probe(const struct hc_driver *driver, | ||
| 22 | struct platform_device *pdev) | ||
| 23 | { | ||
| 24 | struct usb_hcd *hcd; | ||
| 25 | struct ehci_hcd *ehci; | ||
| 26 | struct resource *res; | ||
| 27 | int retval = 0, irq; | ||
| 28 | unsigned long val; | ||
| 29 | |||
| 30 | |||
| 31 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 32 | if (!res) { | ||
| 33 | retval = -ENXIO; | ||
| 34 | goto err1; | ||
| 35 | } | ||
| 36 | |||
| 37 | hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI"); | ||
| 38 | if (!hcd) { | ||
| 39 | retval = -ENOMEM; | ||
| 40 | goto err1; | ||
| 41 | } | ||
| 42 | |||
| 43 | hcd->rsrc_start = res->start; | ||
| 44 | hcd->rsrc_len = res->end - res->start + 1; | ||
| 45 | |||
| 46 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { | ||
| 47 | retval = -EBUSY; | ||
| 48 | goto err2; | ||
| 49 | } | ||
| 50 | |||
| 51 | hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); | ||
| 52 | if (hcd->regs == NULL) { | ||
| 53 | retval = -EFAULT; | ||
| 54 | goto err3; | ||
| 55 | } | ||
| 56 | |||
| 57 | ehci = hcd_to_ehci(hcd); | ||
| 58 | ehci->caps = hcd->regs; | ||
| 59 | ehci->regs = hcd->regs + | ||
| 60 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | ||
| 61 | |||
| 62 | /* enable PHY 0 and 1; these regs only apply to the w90p910. | ||
| 63 | * 0xA4 and 0xA8 are the offsets of the PHY0 and PHY1 controllers | ||
| 64 | * of the w90p910 IC, relative to ehci->regs. | ||
| 65 | */ | ||
| 66 | val = __raw_readl(ehci->regs+PHY0_CTR); | ||
| 67 | val |= ENPHY; | ||
| 68 | __raw_writel(val, ehci->regs+PHY0_CTR); | ||
| 69 | |||
| 70 | val = __raw_readl(ehci->regs+PHY1_CTR); | ||
| 71 | val |= ENPHY; | ||
| 72 | __raw_writel(val, ehci->regs+PHY1_CTR); | ||
| 73 | |||
| 74 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | ||
| 75 | ehci->sbrn = 0x20; | ||
| 76 | |||
| 77 | irq = platform_get_irq(pdev, 0); | ||
| 78 | if (irq < 0) | ||
| 79 | goto err4; | ||
| 80 | |||
| 81 | retval = usb_add_hcd(hcd, irq, IRQF_SHARED); | ||
| 82 | if (retval != 0) | ||
| 83 | goto err4; | ||
| 84 | |||
| 85 | ehci_writel(ehci, 1, &ehci->regs->configured_flag); | ||
| 86 | |||
| 87 | return retval; | ||
| 88 | err4: | ||
| 89 | iounmap(hcd->regs); | ||
| 90 | err3: | ||
| 91 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
| 92 | err2: | ||
| 93 | usb_put_hcd(hcd); | ||
| 94 | err1: | ||
| 95 | return retval; | ||
| 96 | } | ||
| 97 | |||
| 98 | static | ||
| 99 | void usb_w90x900_remove(struct usb_hcd *hcd, struct platform_device *pdev) | ||
| 100 | { | ||
| 101 | usb_remove_hcd(hcd); | ||
| 102 | iounmap(hcd->regs); | ||
| 103 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
| 104 | usb_put_hcd(hcd); | ||
| 105 | } | ||
| 106 | |||
| 107 | static const struct hc_driver ehci_w90x900_hc_driver = { | ||
| 108 | .description = hcd_name, | ||
| 109 | .product_desc = "Nuvoton w90x900 EHCI Host Controller", | ||
| 110 | .hcd_priv_size = sizeof(struct ehci_hcd), | ||
| 111 | |||
| 112 | /* | ||
| 113 | * generic hardware linkage | ||
| 114 | */ | ||
| 115 | .irq = ehci_irq, | ||
| 116 | .flags = HCD_USB2|HCD_MEMORY, | ||
| 117 | |||
| 118 | /* | ||
| 119 | * basic lifecycle operations | ||
| 120 | */ | ||
| 121 | .reset = ehci_init, | ||
| 122 | .start = ehci_run, | ||
| 123 | |||
| 124 | .stop = ehci_stop, | ||
| 125 | .shutdown = ehci_shutdown, | ||
| 126 | |||
| 127 | /* | ||
| 128 | * managing i/o requests and associated device resources | ||
| 129 | */ | ||
| 130 | .urb_enqueue = ehci_urb_enqueue, | ||
| 131 | .urb_dequeue = ehci_urb_dequeue, | ||
| 132 | .endpoint_disable = ehci_endpoint_disable, | ||
| 133 | |||
| 134 | /* | ||
| 135 | * scheduling support | ||
| 136 | */ | ||
| 137 | .get_frame_number = ehci_get_frame, | ||
| 138 | |||
| 139 | /* | ||
| 140 | * root hub support | ||
| 141 | */ | ||
| 142 | .hub_status_data = ehci_hub_status_data, | ||
| 143 | .hub_control = ehci_hub_control, | ||
| 144 | #ifdef CONFIG_PM | ||
| 145 | .bus_suspend = ehci_bus_suspend, | ||
| 146 | .bus_resume = ehci_bus_resume, | ||
| 147 | #endif | ||
| 148 | .relinquish_port = ehci_relinquish_port, | ||
| 149 | .port_handed_over = ehci_port_handed_over, | ||
| 150 | }; | ||
| 151 | |||
| 152 | static int __devinit ehci_w90x900_probe(struct platform_device *pdev) | ||
| 153 | { | ||
| 154 | if (usb_disabled()) | ||
| 155 | return -ENODEV; | ||
| 156 | |||
| 157 | return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev); | ||
| 158 | } | ||
| 159 | |||
| 160 | static int __devexit ehci_w90x900_remove(struct platform_device *pdev) | ||
| 161 | { | ||
| 162 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||
| 163 | |||
| 164 | usb_w90x900_remove(hcd, pdev); | ||
| 165 | |||
| 166 | return 0; | ||
| 167 | } | ||
| 168 | |||
| 169 | static struct platform_driver ehci_hcd_w90x900_driver = { | ||
| 170 | .probe = ehci_w90x900_probe, | ||
| 171 | .remove = __devexit_p(ehci_w90x900_remove), | ||
| 172 | .driver = { | ||
| 173 | .name = "w90x900-ehci", | ||
| 174 | .owner = THIS_MODULE, | ||
| 175 | }, | ||
| 176 | }; | ||
| 177 | |||
| 178 | MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); | ||
| 179 | MODULE_DESCRIPTION("w90p910 usb ehci driver!"); | ||
| 180 | MODULE_LICENSE("GPL"); | ||
| 181 | MODULE_ALIAS("platform:w90p910-ehci"); | ||
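The probe routine above brings up both on-chip transceivers with a plain read-modify-write of the ENPHY bit at the (w90p910-only) PHY0_CTR/PHY1_CTR offsets inside the EHCI register window. Here is a minimal stand-alone model of that sequence, with an ordinary array standing in for the ioremap()ed registers; only ENPHY and the two offsets are taken from the driver, everything else is made up.

/* Stand-alone model of the PHY-enable sequence in usb_w90x900_probe().
 * fake_regs[] stands in for the ioremap()ed EHCI window; set_bits() plays
 * the role of the __raw_readl()/__raw_writel() pair in the driver.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define ENPHY		(0x01 << 8)	/* values taken from the driver */
#define PHY0_CTR	0xA4
#define PHY1_CTR	0xA8

static uint32_t fake_regs[0x100 / 4];	/* pretend register window */

static void set_bits(uint32_t *base, unsigned int offset, uint32_t bits)
{
	uint32_t val = base[offset / 4];	/* __raw_readl() in the driver */

	val |= bits;
	base[offset / 4] = val;			/* __raw_writel() in the driver */
}

int main(void)
{
	set_bits(fake_regs, PHY0_CTR, ENPHY);
	set_bits(fake_regs, PHY1_CTR, ENPHY);
	printf("PHY0_CTR=%#" PRIx32 " PHY1_CTR=%#" PRIx32 "\n",
	       fake_regs[PHY0_CTR / 4], fake_regs[PHY1_CTR / 4]);
	return 0;
}

Note that, like the other EHCI bus-glue files, this one defines a platform_driver but no module_init(); it is presumably pulled into ehci-hcd.c, which registers the driver on its behalf.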
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 2bfff30f4704..064e76821ff5 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
| @@ -37,7 +37,7 @@ typedef __u16 __bitwise __hc16; | |||
| 37 | #define __hc16 __le16 | 37 | #define __hc16 __le16 |
| 38 | #endif | 38 | #endif |
| 39 | 39 | ||
| 40 | /* statistics can be kept for for tuning/monitoring */ | 40 | /* statistics can be kept for tuning/monitoring */ |
| 41 | struct ehci_stats { | 41 | struct ehci_stats { |
| 42 | /* irq usage */ | 42 | /* irq usage */ |
| 43 | unsigned long normal; | 43 | unsigned long normal; |
| @@ -126,6 +126,7 @@ struct ehci_hcd { /* one per controller */ | |||
| 126 | unsigned big_endian_mmio:1; | 126 | unsigned big_endian_mmio:1; |
| 127 | unsigned big_endian_desc:1; | 127 | unsigned big_endian_desc:1; |
| 128 | unsigned has_amcc_usb23:1; | 128 | unsigned has_amcc_usb23:1; |
| 129 | unsigned need_io_watchdog:1; | ||
| 129 | 130 | ||
| 130 | /* required for usb32 quirk */ | 131 | /* required for usb32 quirk */ |
| 131 | #define OHCI_CTRL_HCFS (3 << 6) | 132 | #define OHCI_CTRL_HCFS (3 << 6) |
| @@ -135,6 +136,7 @@ struct ehci_hcd { /* one per controller */ | |||
| 135 | #define OHCI_HCCTRL_OFFSET 0x4 | 136 | #define OHCI_HCCTRL_OFFSET 0x4 |
| 136 | #define OHCI_HCCTRL_LEN 0x4 | 137 | #define OHCI_HCCTRL_LEN 0x4 |
| 137 | __hc32 *ohci_hcctrl_reg; | 138 | __hc32 *ohci_hcctrl_reg; |
| 139 | unsigned has_hostpc:1; | ||
| 138 | 140 | ||
| 139 | u8 sbrn; /* packed release number */ | 141 | u8 sbrn; /* packed release number */ |
| 140 | 142 | ||
| @@ -298,8 +300,8 @@ union ehci_shadow { | |||
| 298 | * These appear in both the async and (for interrupt) periodic schedules. | 300 | * These appear in both the async and (for interrupt) periodic schedules. |
| 299 | */ | 301 | */ |
| 300 | 302 | ||
| 301 | struct ehci_qh { | 303 | /* first part defined by EHCI spec */ |
| 302 | /* first part defined by EHCI spec */ | 304 | struct ehci_qh_hw { |
| 303 | __hc32 hw_next; /* see EHCI 3.6.1 */ | 305 | __hc32 hw_next; /* see EHCI 3.6.1 */ |
| 304 | __hc32 hw_info1; /* see EHCI 3.6.2 */ | 306 | __hc32 hw_info1; /* see EHCI 3.6.2 */ |
| 305 | #define QH_HEAD 0x00008000 | 307 | #define QH_HEAD 0x00008000 |
| @@ -317,7 +319,10 @@ struct ehci_qh { | |||
| 317 | __hc32 hw_token; | 319 | __hc32 hw_token; |
| 318 | __hc32 hw_buf [5]; | 320 | __hc32 hw_buf [5]; |
| 319 | __hc32 hw_buf_hi [5]; | 321 | __hc32 hw_buf_hi [5]; |
| 322 | } __attribute__ ((aligned(32))); | ||
| 320 | 323 | ||
| 324 | struct ehci_qh { | ||
| 325 | struct ehci_qh_hw *hw; | ||
| 321 | /* the rest is HCD-private */ | 326 | /* the rest is HCD-private */ |
| 322 | dma_addr_t qh_dma; /* address of qh */ | 327 | dma_addr_t qh_dma; /* address of qh */ |
| 323 | union ehci_shadow qh_next; /* ptr to qh; or periodic */ | 328 | union ehci_shadow qh_next; /* ptr to qh; or periodic */ |
| @@ -336,6 +341,7 @@ struct ehci_qh { | |||
| 336 | u32 refcount; | 341 | u32 refcount; |
| 337 | unsigned stamp; | 342 | unsigned stamp; |
| 338 | 343 | ||
| 344 | u8 needs_rescan; /* Dequeue during giveback */ | ||
| 339 | u8 qh_state; | 345 | u8 qh_state; |
| 340 | #define QH_STATE_LINKED 1 /* HC sees this */ | 346 | #define QH_STATE_LINKED 1 /* HC sees this */ |
| 341 | #define QH_STATE_UNLINK 2 /* HC may still see this */ | 347 | #define QH_STATE_UNLINK 2 /* HC may still see this */ |
| @@ -357,7 +363,7 @@ struct ehci_qh { | |||
| 357 | 363 | ||
| 358 | struct usb_device *dev; /* access to TT */ | 364 | struct usb_device *dev; /* access to TT */ |
| 359 | unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ | 365 | unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ |
| 360 | } __attribute__ ((aligned (32))); | 366 | }; |
| 361 | 367 | ||
| 362 | /*-------------------------------------------------------------------------*/ | 368 | /*-------------------------------------------------------------------------*/ |
| 363 | 369 | ||
| @@ -544,7 +550,7 @@ static inline unsigned int | |||
| 544 | ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) | 550 | ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) |
| 545 | { | 551 | { |
| 546 | if (ehci_is_TDI(ehci)) { | 552 | if (ehci_is_TDI(ehci)) { |
| 547 | switch ((portsc>>26)&3) { | 553 | switch ((portsc >> (ehci->has_hostpc ? 25 : 26)) & 3) { |
| 548 | case 0: | 554 | case 0: |
| 549 | return 0; | 555 | return 0; |
| 550 | case 1: | 556 | case 1: |
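The ehci.h hunk formalizes the change the earlier hunks rely on: the EHCI-defined part of a queue head becomes its own 32-byte-aligned struct ehci_qh_hw, and struct ehci_qh keeps only a pointer to it alongside the driver-private bookkeeping (refcount, qh_state, the new needs_rescan flag, and so on), which is why every qh->hw_* access above turned into qh->hw->hw_*. Below is a stand-alone sketch of the same split; the field names are illustrative and aligned_alloc() merely stands in for the driver's DMA pool.

/* Stand-alone sketch of splitting a descriptor into a hardware-visible,
 * alignment-constrained part and a software part that only points at it.
 * Field names are illustrative; aligned_alloc() stands in for a DMA pool.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct qh_hw {				/* what the controller reads via DMA */
	uint32_t hw_next;
	uint32_t hw_info1;
	uint32_t hw_info2;
	uint32_t hw_token;
} __attribute__((aligned(32)));

struct qh {				/* driver-private bookkeeping only */
	struct qh_hw *hw;		/* -> aligned hardware descriptor */
	int qh_state;
	int needs_rescan;
};

int main(void)
{
	struct qh_hw *hw = aligned_alloc(32, sizeof(*hw));
	struct qh *qh = calloc(1, sizeof(*qh));

	if (!hw || !qh)
		return 1;

	qh->hw = hw;
	qh->hw->hw_token = 1 << 6;	/* e.g. set a halt-style bit */

	printf("sizeof(struct qh_hw) = %zu, 32-byte aligned: %s\n",
	       sizeof(*hw), ((uintptr_t)hw % 32) == 0 ? "yes" : "no");

	free(qh);
	free(hw);
	return 0;
}

The same header also gains the has_hostpc flag, used above to pick bit 25 instead of 26 when decoding the port speed on controllers that expose the HOSTPC extension registers.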
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c new file mode 100644 index 000000000000..e35d82808bab --- /dev/null +++ b/drivers/usb/host/isp1362-hcd.c | |||
| @@ -0,0 +1,2909 @@ | |||
| 1 | /* | ||
| 2 | * ISP1362 HCD (Host Controller Driver) for USB. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de> | ||
| 5 | * | ||
| 6 | * Derived from the SL811 HCD, rewritten for ISP116x. | ||
| 7 | * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee> | ||
| 8 | * | ||
| 9 | * Portions: | ||
| 10 | * Copyright (C) 2004 Psion Teklogix (for NetBook PRO) | ||
| 11 | * Copyright (C) 2004 David Brownell | ||
| 12 | */ | ||
| 13 | |||
| 14 | /* | ||
| 15 | * The ISP1362 chip requires a large delay (300ns and 462ns) between | ||
| 16 | * accesses to the address and data register. | ||
| 17 | * The following timing options exist: | ||
| 18 | * | ||
| 19 | * 1. Configure your memory controller to add such delays if it can (the best) | ||
| 20 | * 2. Implement platform-specific delay function possibly | ||
| 21 | * combined with configuring the memory controller; see | ||
| 22 | * include/linux/usb_isp1362.h for more info. | ||
| 23 | * 3. Use ndelay (easiest, poorest). | ||
| 24 | * | ||
| 25 | * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the | ||
| 26 | * platform specific section of isp1362.h to select the appropriate variant. | ||
| 27 | * | ||
| 28 | * Also note that according to the Philips "ISP1362 Errata" document | ||
| 29 | * Rev 1.00 from 27 May, data corruption may occur when the #WR signal | ||
| 30 | * is reasserted (even with #CS deasserted) within 132ns after a | ||
| 31 | * write cycle to any controller register. If the hardware doesn't | ||
| 32 | * implement the recommended fix (gating the #WR with #CS) software | ||
| 33 | * must ensure that no further write cycle (not necessarily to the chip!) | ||
| 34 | * is issued by the CPU within this interval. | ||
| 35 | |||
| 36 | * For PXA25x this can be ensured by using VLIO with the maximum | ||
| 37 | * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz. | ||
| 38 | */ | ||
| 39 | |||
| 40 | #ifdef CONFIG_USB_DEBUG | ||
| 41 | # define ISP1362_DEBUG | ||
| 42 | #else | ||
| 43 | # undef ISP1362_DEBUG | ||
| 44 | #endif | ||
| 45 | |||
| 46 | /* | ||
| 47 | * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and | ||
| 48 | * GET_INTERFACE requests correctly when the SETUP and DATA stages of the | ||
| 49 | * requests are carried out in separate frames. This will delay any SETUP | ||
| 50 | * packets until the start of the next frame so that this situation is | ||
| 51 | * unlikely to occur (and makes usbtest happy running with a PXA255 target | ||
| 52 | * device). | ||
| 53 | */ | ||
| 54 | #undef BUGGY_PXA2XX_UDC_USBTEST | ||
| 55 | |||
| 56 | #undef PTD_TRACE | ||
| 57 | #undef URB_TRACE | ||
| 58 | #undef VERBOSE | ||
| 59 | #undef REGISTERS | ||
| 60 | |||
| 61 | /* This enables a memory test on the ISP1362 chip memory to make sure the | ||
| 62 | * chip access timing is correct. | ||
| 63 | */ | ||
| 64 | #undef CHIP_BUFFER_TEST | ||
| 65 | |||
| 66 | #include <linux/module.h> | ||
| 67 | #include <linux/moduleparam.h> | ||
| 68 | #include <linux/kernel.h> | ||
| 69 | #include <linux/delay.h> | ||
| 70 | #include <linux/ioport.h> | ||
| 71 | #include <linux/sched.h> | ||
| 72 | #include <linux/slab.h> | ||
| 73 | #include <linux/smp_lock.h> | ||
| 74 | #include <linux/errno.h> | ||
| 75 | #include <linux/init.h> | ||
| 76 | #include <linux/list.h> | ||
| 77 | #include <linux/interrupt.h> | ||
| 78 | #include <linux/usb.h> | ||
| 79 | #include <linux/usb/isp1362.h> | ||
| 80 | #include <linux/platform_device.h> | ||
| 81 | #include <linux/pm.h> | ||
| 82 | #include <linux/io.h> | ||
| 83 | #include <linux/bitops.h> | ||
| 84 | |||
| 85 | #include <asm/irq.h> | ||
| 86 | #include <asm/system.h> | ||
| 87 | #include <asm/byteorder.h> | ||
| 88 | #include <asm/unaligned.h> | ||
| 89 | |||
| 90 | static int dbg_level; | ||
| 91 | #ifdef ISP1362_DEBUG | ||
| 92 | module_param(dbg_level, int, 0644); | ||
| 93 | #else | ||
| 94 | module_param(dbg_level, int, 0); | ||
| 95 | #define STUB_DEBUG_FILE | ||
| 96 | #endif | ||
| 97 | |||
| 98 | #include "../core/hcd.h" | ||
| 99 | #include "../core/usb.h" | ||
| 100 | #include "isp1362.h" | ||
| 101 | |||
| 102 | |||
| 103 | #define DRIVER_VERSION "2005-04-04" | ||
| 104 | #define DRIVER_DESC "ISP1362 USB Host Controller Driver" | ||
| 105 | |||
| 106 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
| 107 | MODULE_LICENSE("GPL"); | ||
| 108 | |||
| 109 | static const char hcd_name[] = "isp1362-hcd"; | ||
| 110 | |||
| 111 | static void isp1362_hc_stop(struct usb_hcd *hcd); | ||
| 112 | static int isp1362_hc_start(struct usb_hcd *hcd); | ||
| 113 | |||
| 114 | /*-------------------------------------------------------------------------*/ | ||
| 115 | |||
| 116 | /* | ||
| 117 | * When called from the interrupt handler, only isp1362_hcd->irqenb is modified, | ||
| 118 | * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon | ||
| 119 | * completion. | ||
| 120 | * We don't need a 'disable' counterpart, since interrupts will be disabled | ||
| 121 | * only by the interrupt handler. | ||
| 122 | */ | ||
| 123 | static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask) | ||
| 124 | { | ||
| 125 | if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb) | ||
| 126 | return; | ||
| 127 | if (mask & ~isp1362_hcd->irqenb) | ||
| 128 | isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb); | ||
| 129 | isp1362_hcd->irqenb |= mask; | ||
| 130 | if (isp1362_hcd->irq_active) | ||
| 131 | return; | ||
| 132 | isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); | ||
| 133 | } | ||
| 134 | |||
| 135 | /*-------------------------------------------------------------------------*/ | ||
| 136 | |||
| 137 | static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd, | ||
| 138 | u16 offset) | ||
| 139 | { | ||
| 140 | struct isp1362_ep_queue *epq = NULL; | ||
| 141 | |||
| 142 | if (offset < isp1362_hcd->istl_queue[1].buf_start) | ||
| 143 | epq = &isp1362_hcd->istl_queue[0]; | ||
| 144 | else if (offset < isp1362_hcd->intl_queue.buf_start) | ||
| 145 | epq = &isp1362_hcd->istl_queue[1]; | ||
| 146 | else if (offset < isp1362_hcd->atl_queue.buf_start) | ||
| 147 | epq = &isp1362_hcd->intl_queue; | ||
| 148 | else if (offset < isp1362_hcd->atl_queue.buf_start + | ||
| 149 | isp1362_hcd->atl_queue.buf_size) | ||
| 150 | epq = &isp1362_hcd->atl_queue; | ||
| 151 | |||
| 152 | if (epq) | ||
| 153 | DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name); | ||
| 154 | else | ||
| 155 | pr_warning("%s: invalid PTD $%04x\n", __func__, offset); | ||
| 156 | |||
| 157 | return epq; | ||
| 158 | } | ||
| 159 | |||
| 160 | static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index) | ||
| 161 | { | ||
| 162 | int offset; | ||
| 163 | |||
| 164 | if (index * epq->blk_size > epq->buf_size) { | ||
| 165 | pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index, | ||
| 166 | epq->buf_size / epq->blk_size); | ||
| 167 | return -EINVAL; | ||
| 168 | } | ||
| 169 | offset = epq->buf_start + index * epq->blk_size; | ||
| 170 | DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset); | ||
| 171 | |||
| 172 | return offset; | ||
| 173 | } | ||
| 174 | |||
| 175 | /*-------------------------------------------------------------------------*/ | ||
| 176 | |||
| 177 | static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size, | ||
| 178 | int mps) | ||
| 179 | { | ||
| 180 | u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size); | ||
| 181 | |||
| 182 | xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE); | ||
| 183 | if (xfer_size < size && xfer_size % mps) | ||
| 184 | xfer_size -= xfer_size % mps; | ||
| 185 | |||
| 186 | return xfer_size; | ||
| 187 | } | ||
| 188 | |||
| 189 | static int claim_ptd_buffers(struct isp1362_ep_queue *epq, | ||
| 190 | struct isp1362_ep *ep, u16 len) | ||
| 191 | { | ||
| 192 | int ptd_offset = -EINVAL; | ||
| 193 | int index; | ||
| 194 | int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1; | ||
| 195 | int found = -1; | ||
| 196 | int last = -1; | ||
| 197 | |||
| 198 | BUG_ON(len > epq->buf_size); | ||
| 199 | |||
| 200 | if (!epq->buf_avail) | ||
| 201 | return -ENOMEM; | ||
| 202 | |||
| 203 | if (ep->num_ptds) | ||
| 204 | pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__, | ||
| 205 | epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map); | ||
| 206 | BUG_ON(ep->num_ptds != 0); | ||
| 207 | |||
| 208 | for (index = 0; index <= epq->buf_count - num_ptds; index++) { | ||
| 209 | if (test_bit(index, &epq->buf_map)) | ||
| 210 | continue; | ||
| 211 | found = index; | ||
| 212 | for (last = index + 1; last < index + num_ptds; last++) { | ||
| 213 | if (test_bit(last, &epq->buf_map)) { | ||
| 214 | found = -1; | ||
| 215 | break; | ||
| 216 | } | ||
| 217 | } | ||
| 218 | if (found >= 0) | ||
| 219 | break; | ||
| 220 | } | ||
| 221 | if (found < 0) | ||
| 222 | return -EOVERFLOW; | ||
| 223 | |||
| 224 | DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__, | ||
| 225 | num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE)); | ||
| 226 | ptd_offset = get_ptd_offset(epq, found); | ||
| 227 | WARN_ON(ptd_offset < 0); | ||
| 228 | ep->ptd_offset = ptd_offset; | ||
| 229 | ep->num_ptds += num_ptds; | ||
| 230 | epq->buf_avail -= num_ptds; | ||
| 231 | BUG_ON(epq->buf_avail > epq->buf_count); | ||
| 232 | ep->ptd_index = found; | ||
| 233 | for (index = found; index < last; index++) | ||
| 234 | __set_bit(index, &epq->buf_map); | ||
| 235 | DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n", | ||
| 236 | __func__, epq->name, ep->ptd_index, ep->ptd_offset, | ||
| 237 | epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map); | ||
| 238 | |||
| 239 | return found; | ||
| 240 | } | ||
| 241 | |||
| 242 | static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) | ||
| 243 | { | ||
| 244 | int index = ep->ptd_index; | ||
| 245 | int last = ep->ptd_index + ep->num_ptds; | ||
| 246 | |||
| 247 | if (last > epq->buf_count) | ||
| 248 | pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n", | ||
| 249 | __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index, | ||
| 250 | ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail, | ||
| 251 | epq->buf_map, epq->skip_map); | ||
| 252 | BUG_ON(last > epq->buf_count); | ||
| 253 | |||
| 254 | for (; index < last; index++) { | ||
| 255 | __clear_bit(index, &epq->buf_map); | ||
| 256 | __set_bit(index, &epq->skip_map); | ||
| 257 | } | ||
| 258 | epq->buf_avail += ep->num_ptds; | ||
| 259 | epq->ptd_count--; | ||
| 260 | |||
| 261 | BUG_ON(epq->buf_avail > epq->buf_count); | ||
| 262 | BUG_ON(epq->ptd_count > epq->buf_count); | ||
| 263 | |||
| 264 | DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n", | ||
| 265 | __func__, epq->name, | ||
| 266 | ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count); | ||
| 267 | DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__, | ||
| 268 | epq->buf_map, epq->skip_map); | ||
| 269 | |||
| 270 | ep->num_ptds = 0; | ||
| 271 | ep->ptd_offset = -EINVAL; | ||
| 272 | ep->ptd_index = -EINVAL; | ||
| 273 | } | ||
| 274 | |||
| 275 | /*-------------------------------------------------------------------------*/ | ||
| 276 | |||
| 277 | /* | ||
| 278 | Set up PTDs. | ||
| 279 | */ | ||
| 280 | static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb, | ||
| 281 | struct isp1362_ep *ep, struct isp1362_ep_queue *epq, | ||
| 282 | u16 fno) | ||
| 283 | { | ||
| 284 | struct ptd *ptd; | ||
| 285 | int toggle; | ||
| 286 | int dir; | ||
| 287 | u16 len; | ||
| 288 | size_t buf_len = urb->transfer_buffer_length - urb->actual_length; | ||
| 289 | |||
| 290 | DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep); | ||
| 291 | |||
| 292 | ptd = &ep->ptd; | ||
| 293 | |||
| 294 | ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length; | ||
| 295 | |||
| 296 | switch (ep->nextpid) { | ||
| 297 | case USB_PID_IN: | ||
| 298 | toggle = usb_gettoggle(urb->dev, ep->epnum, 0); | ||
| 299 | dir = PTD_DIR_IN; | ||
| 300 | if (usb_pipecontrol(urb->pipe)) { | ||
| 301 | len = min_t(size_t, ep->maxpacket, buf_len); | ||
| 302 | } else if (usb_pipeisoc(urb->pipe)) { | ||
| 303 | len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE); | ||
| 304 | ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset; | ||
| 305 | } else | ||
| 306 | len = max_transfer_size(epq, buf_len, ep->maxpacket); | ||
| 307 | DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, | ||
| 308 | (int)buf_len); | ||
| 309 | break; | ||
| 310 | case USB_PID_OUT: | ||
| 311 | toggle = usb_gettoggle(urb->dev, ep->epnum, 1); | ||
| 312 | dir = PTD_DIR_OUT; | ||
| 313 | if (usb_pipecontrol(urb->pipe)) | ||
| 314 | len = min_t(size_t, ep->maxpacket, buf_len); | ||
| 315 | else if (usb_pipeisoc(urb->pipe)) | ||
| 316 | len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE); | ||
| 317 | else | ||
| 318 | len = max_transfer_size(epq, buf_len, ep->maxpacket); | ||
| 319 | if (len == 0) | ||
| 320 | pr_info("%s: Sending ZERO packet: %d\n", __func__, | ||
| 321 | urb->transfer_flags & URB_ZERO_PACKET); | ||
| 322 | DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, | ||
| 323 | (int)buf_len); | ||
| 324 | break; | ||
| 325 | case USB_PID_SETUP: | ||
| 326 | toggle = 0; | ||
| 327 | dir = PTD_DIR_SETUP; | ||
| 328 | len = sizeof(struct usb_ctrlrequest); | ||
| 329 | DBG(1, "%s: SETUP len %d\n", __func__, len); | ||
| 330 | ep->data = urb->setup_packet; | ||
| 331 | break; | ||
| 332 | case USB_PID_ACK: | ||
| 333 | toggle = 1; | ||
| 334 | len = 0; | ||
| 335 | dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ? | ||
| 336 | PTD_DIR_OUT : PTD_DIR_IN; | ||
| 337 | DBG(1, "%s: ACK len %d\n", __func__, len); | ||
| 338 | break; | ||
| 339 | default: | ||
| 340 | toggle = dir = len = 0; | ||
| 341 | pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid); | ||
| 342 | BUG_ON(1); | ||
| 343 | } | ||
| 344 | |||
| 345 | ep->length = len; | ||
| 346 | if (!len) | ||
| 347 | ep->data = NULL; | ||
| 348 | |||
| 349 | ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle); | ||
| 350 | ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) | | ||
| 351 | PTD_EP(ep->epnum); | ||
| 352 | ptd->len = PTD_LEN(len) | PTD_DIR(dir); | ||
| 353 | ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe)); | ||
| 354 | |||
| 355 | if (usb_pipeint(urb->pipe)) { | ||
| 356 | ptd->faddr |= PTD_SF_INT(ep->branch); | ||
| 357 | ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0); | ||
| 358 | } | ||
| 359 | if (usb_pipeisoc(urb->pipe)) | ||
| 360 | ptd->faddr |= PTD_SF_ISO(fno); | ||
| 361 | |||
| 362 | DBG(1, "%s: Finished\n", __func__); | ||
| 363 | } | ||
| 364 | |||
| 365 | static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, | ||
| 366 | struct isp1362_ep_queue *epq) | ||
| 367 | { | ||
| 368 | struct ptd *ptd = &ep->ptd; | ||
| 369 | int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length; | ||
| 370 | |||
| 371 | _BUG_ON(ep->ptd_offset < 0); | ||
| 372 | |||
| 373 | prefetch(ptd); | ||
| 374 | isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); | ||
| 375 | if (len) | ||
| 376 | isp1362_write_buffer(isp1362_hcd, ep->data, | ||
| 377 | ep->ptd_offset + PTD_HEADER_SIZE, len); | ||
| 378 | |||
| 379 | dump_ptd(ptd); | ||
| 380 | dump_ptd_out_data(ptd, ep->data); | ||
| 381 | } | ||
| 382 | |||
| 383 | static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, | ||
| 384 | struct isp1362_ep_queue *epq) | ||
| 385 | { | ||
| 386 | struct ptd *ptd = &ep->ptd; | ||
| 387 | int act_len; | ||
| 388 | |||
| 389 | WARN_ON(list_empty(&ep->active)); | ||
| 390 | BUG_ON(ep->ptd_offset < 0); | ||
| 391 | |||
| 392 | list_del_init(&ep->active); | ||
| 393 | DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active); | ||
| 394 | |||
| 395 | prefetchw(ptd); | ||
| 396 | isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); | ||
| 397 | dump_ptd(ptd); | ||
| 398 | act_len = PTD_GET_COUNT(ptd); | ||
| 399 | if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0) | ||
| 400 | return; | ||
| 401 | if (act_len > ep->length) | ||
| 402 | pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep, | ||
| 403 | ep->ptd_offset, act_len, ep->length); | ||
| 404 | BUG_ON(act_len > ep->length); | ||
| 405 | /* Only transfer the amount of data that has actually been overwritten | ||
| 406 | * in the chip buffer. We don't want any data that doesn't belong to the | ||
| 407 | * transfer to leak out of the chip to the caller's transfer buffer! | ||
| 408 | */ | ||
| 409 | prefetchw(ep->data); | ||
| 410 | isp1362_read_buffer(isp1362_hcd, ep->data, | ||
| 411 | ep->ptd_offset + PTD_HEADER_SIZE, act_len); | ||
| 412 | dump_ptd_in_data(ptd, ep->data); | ||
| 413 | } | ||
| 414 | |||
| 415 | /* | ||
| 416 | * INT PTDs will stay in the chip until data is available. | ||
| 417 | * This function will remove a PTD from the chip when the URB is dequeued. | ||
| 418 | * Must be called with the spinlock held and IRQs disabled | ||
| 419 | */ | ||
| 420 | static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) | ||
| 421 | |||
| 422 | { | ||
| 423 | int index; | ||
| 424 | struct isp1362_ep_queue *epq; | ||
| 425 | |||
| 426 | DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset); | ||
| 427 | BUG_ON(ep->ptd_offset < 0); | ||
| 428 | |||
| 429 | epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset); | ||
| 430 | BUG_ON(!epq); | ||
| 431 | |||
| 432 | /* put ep in remove_list for cleanup */ | ||
| 433 | WARN_ON(!list_empty(&ep->remove_list)); | ||
| 434 | list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list); | ||
| 435 | /* let SOF interrupt handle the cleanup */ | ||
| 436 | isp1362_enable_int(isp1362_hcd, HCuPINT_SOF); | ||
| 437 | |||
| 438 | index = ep->ptd_index; | ||
| 439 | if (index < 0) | ||
| 440 | /* ISO queues don't have SKIP registers */ | ||
| 441 | return; | ||
| 442 | |||
| 443 | DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__, | ||
| 444 | index, ep->ptd_offset, epq->skip_map, 1 << index); | ||
| 445 | |||
| 446 | /* prevent further processing of PTD (will be effective after next SOF) */ | ||
| 447 | epq->skip_map |= 1 << index; | ||
| 448 | if (epq == &isp1362_hcd->atl_queue) { | ||
| 449 | DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__, | ||
| 450 | isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map); | ||
| 451 | isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map); | ||
| 452 | if (~epq->skip_map == 0) | ||
| 453 | isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); | ||
| 454 | } else if (epq == &isp1362_hcd->intl_queue) { | ||
| 455 | DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__, | ||
| 456 | isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map); | ||
| 457 | isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map); | ||
| 458 | if (~epq->skip_map == 0) | ||
| 459 | isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); | ||
| 460 | } | ||
| 461 | } | ||
| 462 | |||
| 463 | /* | ||
| 464 | Take done or failed requests out of the schedule. Give back | ||
| 465 | processed urbs. | ||
| 466 | */ | ||
| 467 | static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, | ||
| 468 | struct urb *urb, int status) | ||
| 469 | __releases(isp1362_hcd->lock) | ||
| 470 | __acquires(isp1362_hcd->lock) | ||
| 471 | { | ||
| 472 | urb->hcpriv = NULL; | ||
| 473 | ep->error_count = 0; | ||
| 474 | |||
| 475 | if (usb_pipecontrol(urb->pipe)) | ||
| 476 | ep->nextpid = USB_PID_SETUP; | ||
| 477 | |||
| 478 | URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__, | ||
| 479 | ep->num_req, usb_pipedevice(urb->pipe), | ||
| 480 | usb_pipeendpoint(urb->pipe), | ||
| 481 | !usb_pipein(urb->pipe) ? "out" : "in", | ||
| 482 | usb_pipecontrol(urb->pipe) ? "ctrl" : | ||
| 483 | usb_pipeint(urb->pipe) ? "int" : | ||
| 484 | usb_pipebulk(urb->pipe) ? "bulk" : | ||
| 485 | "iso", | ||
| 486 | urb->actual_length, urb->transfer_buffer_length, | ||
| 487 | !(urb->transfer_flags & URB_SHORT_NOT_OK) ? | ||
| 488 | "short_ok" : "", urb->status); | ||
| 489 | |||
| 490 | |||
| 491 | usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb); | ||
| 492 | spin_unlock(&isp1362_hcd->lock); | ||
| 493 | usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status); | ||
| 494 | spin_lock(&isp1362_hcd->lock); | ||
| 495 | |||
| 496 | /* take idle endpoints out of the schedule right away */ | ||
| 497 | if (!list_empty(&ep->hep->urb_list)) | ||
| 498 | return; | ||
| 499 | |||
| 500 | /* async deschedule */ | ||
| 501 | if (!list_empty(&ep->schedule)) { | ||
| 502 | list_del_init(&ep->schedule); | ||
| 503 | return; | ||
| 504 | } | ||
| 505 | |||
| 506 | |||
| 507 | if (ep->interval) { | ||
| 508 | /* periodic deschedule */ | ||
| 509 | DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval, | ||
| 510 | ep, ep->branch, ep->load, | ||
| 511 | isp1362_hcd->load[ep->branch], | ||
| 512 | isp1362_hcd->load[ep->branch] - ep->load); | ||
| 513 | isp1362_hcd->load[ep->branch] -= ep->load; | ||
| 514 | ep->branch = PERIODIC_SIZE; | ||
| 515 | } | ||
| 516 | } | ||
| 517 | |||
| 518 | /* | ||
| 519 | * Analyze transfer results, handle partial transfers and errors | ||
| 520 | */ | ||
| 521 | static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) | ||
| 522 | { | ||
| 523 | struct urb *urb = get_urb(ep); | ||
| 524 | struct usb_device *udev; | ||
| 525 | struct ptd *ptd; | ||
| 526 | int short_ok; | ||
| 527 | u16 len; | ||
| 528 | int urbstat = -EINPROGRESS; | ||
| 529 | u8 cc; | ||
| 530 | |||
| 531 | DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req); | ||
| 532 | |||
| 533 | udev = urb->dev; | ||
| 534 | ptd = &ep->ptd; | ||
| 535 | cc = PTD_GET_CC(ptd); | ||
| 536 | if (cc == PTD_NOTACCESSED) { | ||
| 537 | pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__, | ||
| 538 | ep->num_req, ptd); | ||
| 539 | cc = PTD_DEVNOTRESP; | ||
| 540 | } | ||
| 541 | |||
| 542 | short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK); | ||
| 543 | len = urb->transfer_buffer_length - urb->actual_length; | ||
| 544 | |||
| 545 | /* Data underrun is special. For an allowed underrun | ||
| 546 | we clear the error and continue as normal. For a | ||
| 547 | forbidden underrun we finish the DATA stage | ||
| 548 | immediately, while for a control transfer | ||
| 549 | we do a STATUS stage. | ||
| 550 | */ | ||
| 551 | if (cc == PTD_DATAUNDERRUN) { | ||
| 552 | if (short_ok) { | ||
| 553 | DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n", | ||
| 554 | __func__, ep->num_req, short_ok ? "" : "not_", | ||
| 555 | PTD_GET_COUNT(ptd), ep->maxpacket, len); | ||
| 556 | cc = PTD_CC_NOERROR; | ||
| 557 | urbstat = 0; | ||
| 558 | } else { | ||
| 559 | DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n", | ||
| 560 | __func__, ep->num_req, | ||
| 561 | usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid, | ||
| 562 | short_ok ? "" : "not_", | ||
| 563 | PTD_GET_COUNT(ptd), ep->maxpacket, len); | ||
| 564 | if (usb_pipecontrol(urb->pipe)) { | ||
| 565 | ep->nextpid = USB_PID_ACK; | ||
| 566 | /* save the data underrun error code for later and | ||
| 567 | * proceed with the status stage | ||
| 568 | */ | ||
| 569 | urb->actual_length += PTD_GET_COUNT(ptd); | ||
| 570 | BUG_ON(urb->actual_length > urb->transfer_buffer_length); | ||
| 571 | |||
| 572 | if (urb->status == -EINPROGRESS) | ||
| 573 | urb->status = cc_to_error[PTD_DATAUNDERRUN]; | ||
| 574 | } else { | ||
| 575 | usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT, | ||
| 576 | PTD_GET_TOGGLE(ptd)); | ||
| 577 | urbstat = cc_to_error[PTD_DATAUNDERRUN]; | ||
| 578 | } | ||
| 579 | goto out; | ||
| 580 | } | ||
| 581 | } | ||
| 582 | |||
| 583 | if (cc != PTD_CC_NOERROR) { | ||
| 584 | if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) { | ||
| 585 | urbstat = cc_to_error[cc]; | ||
| 586 | DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n", | ||
| 587 | __func__, ep->num_req, ep->nextpid, urbstat, cc, | ||
| 588 | ep->error_count); | ||
| 589 | } | ||
| 590 | goto out; | ||
| 591 | } | ||
| 592 | |||
| 593 | switch (ep->nextpid) { | ||
| 594 | case USB_PID_OUT: | ||
| 595 | if (PTD_GET_COUNT(ptd) != ep->length) | ||
| 596 | pr_err("%s: count=%d len=%d\n", __func__, | ||
| 597 | PTD_GET_COUNT(ptd), ep->length); | ||
| 598 | BUG_ON(PTD_GET_COUNT(ptd) != ep->length); | ||
| 599 | urb->actual_length += ep->length; | ||
| 600 | BUG_ON(urb->actual_length > urb->transfer_buffer_length); | ||
| 601 | usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd)); | ||
| 602 | if (urb->actual_length == urb->transfer_buffer_length) { | ||
| 603 | DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__, | ||
| 604 | ep->num_req, len, ep->maxpacket, urbstat); | ||
| 605 | if (usb_pipecontrol(urb->pipe)) { | ||
| 606 | DBG(3, "%s: req %d %s Wait for ACK\n", __func__, | ||
| 607 | ep->num_req, | ||
| 608 | usb_pipein(urb->pipe) ? "IN" : "OUT"); | ||
| 609 | ep->nextpid = USB_PID_ACK; | ||
| 610 | } else { | ||
| 611 | if (len % ep->maxpacket || | ||
| 612 | !(urb->transfer_flags & URB_ZERO_PACKET)) { | ||
| 613 | urbstat = 0; | ||
| 614 | DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n", | ||
| 615 | __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", | ||
| 616 | urbstat, len, ep->maxpacket, urb->actual_length); | ||
| 617 | } | ||
| 618 | } | ||
| 619 | } | ||
| 620 | break; | ||
| 621 | case USB_PID_IN: | ||
| 622 | len = PTD_GET_COUNT(ptd); | ||
| 623 | BUG_ON(len > ep->length); | ||
| 624 | urb->actual_length += len; | ||
| 625 | BUG_ON(urb->actual_length > urb->transfer_buffer_length); | ||
| 626 | usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd)); | ||
| 627 | /* if transfer completed or (allowed) data underrun */ | ||
| 628 | if ((urb->transfer_buffer_length == urb->actual_length) || | ||
| 629 | len % ep->maxpacket) { | ||
| 630 | DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__, | ||
| 631 | ep->num_req, len, ep->maxpacket, urbstat); | ||
| 632 | if (usb_pipecontrol(urb->pipe)) { | ||
| 633 | DBG(3, "%s: req %d %s Wait for ACK\n", __func__, | ||
| 634 | ep->num_req, | ||
| 635 | usb_pipein(urb->pipe) ? "IN" : "OUT"); | ||
| 636 | ep->nextpid = USB_PID_ACK; | ||
| 637 | } else { | ||
| 638 | urbstat = 0; | ||
| 639 | DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n", | ||
| 640 | __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", | ||
| 641 | urbstat, len, ep->maxpacket, urb->actual_length); | ||
| 642 | } | ||
| 643 | } | ||
| 644 | break; | ||
| 645 | case USB_PID_SETUP: | ||
| 646 | if (urb->transfer_buffer_length == urb->actual_length) { | ||
| 647 | ep->nextpid = USB_PID_ACK; | ||
| 648 | } else if (usb_pipeout(urb->pipe)) { | ||
| 649 | usb_settoggle(udev, 0, 1, 1); | ||
| 650 | ep->nextpid = USB_PID_OUT; | ||
| 651 | } else { | ||
| 652 | usb_settoggle(udev, 0, 0, 1); | ||
| 653 | ep->nextpid = USB_PID_IN; | ||
| 654 | } | ||
| 655 | break; | ||
| 656 | case USB_PID_ACK: | ||
| 657 | DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req, | ||
| 658 | urbstat); | ||
| 659 | WARN_ON(urbstat != -EINPROGRESS); | ||
| 660 | urbstat = 0; | ||
| 661 | ep->nextpid = 0; | ||
| 662 | break; | ||
| 663 | default: | ||
| 664 | BUG_ON(1); | ||
| 665 | } | ||
| 666 | |||
| 667 | out: | ||
| 668 | if (urbstat != -EINPROGRESS) { | ||
| 669 | DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__, | ||
| 670 | ep, ep->num_req, urb, urbstat); | ||
| 671 | finish_request(isp1362_hcd, ep, urb, urbstat); | ||
| 672 | } | ||
| 673 | } | ||
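postproc_ep() above turns the PTD's hardware completion code into a URB status via the driver's cc_to_error[] table, with data underrun handled specially depending on URB_SHORT_NOT_OK. As a hedged illustration only, a table of that general shape, modeled on the OHCI-style completion codes; the driver's actual entries are defined elsewhere in the file and may differ:

#include <linux/errno.h>

/* Illustrative completion-code to errno mapping; not the driver's table. */
static const int sketch_cc_to_error[16] = {
	[0x0] = 0,		/* no error */
	[0x1] = -EILSEQ,	/* CRC error */
	[0x2] = -EPROTO,	/* bit stuffing */
	[0x4] = -EPIPE,		/* stall */
	[0x5] = -ETIME,		/* device not responding */
	[0x8] = -EOVERFLOW,	/* data overrun */
	[0x9] = -EREMOTEIO,	/* data underrun; only an error when short reads are forbidden */
};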
| 674 | |||
| 675 | static void finish_unlinks(struct isp1362_hcd *isp1362_hcd) | ||
| 676 | { | ||
| 677 | struct isp1362_ep *ep; | ||
| 678 | struct isp1362_ep *tmp; | ||
| 679 | |||
| 680 | list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) { | ||
| 681 | struct isp1362_ep_queue *epq = | ||
| 682 | get_ptd_queue(isp1362_hcd, ep->ptd_offset); | ||
| 683 | int index = ep->ptd_index; | ||
| 684 | |||
| 685 | BUG_ON(epq == NULL); | ||
| 686 | if (index >= 0) { | ||
| 687 | DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset); | ||
| 688 | BUG_ON(ep->num_ptds == 0); | ||
| 689 | release_ptd_buffers(epq, ep); | ||
| 690 | } | ||
| 691 | if (!list_empty(&ep->hep->urb_list)) { | ||
| 692 | struct urb *urb = get_urb(ep); | ||
| 693 | |||
| 694 | DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__, | ||
| 695 | ep->num_req, ep); | ||
| 696 | finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN); | ||
| 697 | } | ||
| 698 | WARN_ON(list_empty(&ep->active)); | ||
| 699 | if (!list_empty(&ep->active)) { | ||
| 700 | list_del_init(&ep->active); | ||
| 701 | DBG(1, "%s: ep %p removed from active list\n", __func__, ep); | ||
| 702 | } | ||
| 703 | list_del_init(&ep->remove_list); | ||
| 704 | DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep); | ||
| 705 | } | ||
| 706 | DBG(1, "%s: Done\n", __func__); | ||
| 707 | } | ||
| 708 | |||
| 709 | static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count) | ||
| 710 | { | ||
| 711 | if (count > 0) { | ||
| 712 | if (count < isp1362_hcd->atl_queue.ptd_count) | ||
| 713 | isp1362_write_reg16(isp1362_hcd, HCATLDTC, count); | ||
| 714 | isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); | ||
| 715 | isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map); | ||
| 716 | isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); | ||
| 717 | } else | ||
| 718 | isp1362_enable_int(isp1362_hcd, HCuPINT_SOF); | ||
| 719 | } | ||
| 720 | |||
| 721 | static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd) | ||
| 722 | { | ||
| 723 | isp1362_enable_int(isp1362_hcd, HCuPINT_INTL); | ||
| 724 | isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); | ||
| 725 | isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map); | ||
| 726 | } | ||
| 727 | |||
| 728 | static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip) | ||
| 729 | { | ||
| 730 | isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0); | ||
| 731 | isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ? | ||
| 732 | HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL); | ||
| 733 | } | ||
| 734 | |||
| 735 | static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb, | ||
| 736 | struct isp1362_ep *ep, struct isp1362_ep_queue *epq) | ||
| 737 | { | ||
| 738 | int index = epq->free_ptd; | ||
| 739 | |||
| 740 | prepare_ptd(isp1362_hcd, urb, ep, epq, 0); | ||
| 741 | index = claim_ptd_buffers(epq, ep, ep->length); | ||
| 742 | if (index == -ENOMEM) { | ||
| 743 | DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__, | ||
| 744 | ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map); | ||
| 745 | return index; | ||
| 746 | } else if (index == -EOVERFLOW) { | ||
| 747 | DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n", | ||
| 748 | __func__, ep->num_req, ep->length, epq->name, ep->num_ptds, | ||
| 749 | epq->buf_map, epq->skip_map); | ||
| 750 | return index; | ||
| 751 | } else | ||
| 752 | BUG_ON(index < 0); | ||
| 753 | list_add_tail(&ep->active, &epq->active); | ||
| 754 | DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__, | ||
| 755 | ep, ep->num_req, ep->length, &epq->active); | ||
| 756 | DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name, | ||
| 757 | ep->ptd_offset, ep, ep->num_req); | ||
| 758 | isp1362_write_ptd(isp1362_hcd, ep, epq); | ||
| 759 | __clear_bit(ep->ptd_index, &epq->skip_map); | ||
| 760 | |||
| 761 | return 0; | ||
| 762 | } | ||
| 763 | |||
| 764 | static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd) | ||
| 765 | { | ||
| 766 | int ptd_count = 0; | ||
| 767 | struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue; | ||
| 768 | struct isp1362_ep *ep; | ||
| 769 | int defer = 0; | ||
| 770 | |||
| 771 | if (atomic_read(&epq->finishing)) { | ||
| 772 | DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); | ||
| 773 | return; | ||
| 774 | } | ||
| 775 | |||
| 776 | list_for_each_entry(ep, &isp1362_hcd->async, schedule) { | ||
| 777 | struct urb *urb = get_urb(ep); | ||
| 778 | int ret; | ||
| 779 | |||
| 780 | if (!list_empty(&ep->active)) { | ||
| 781 | DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep); | ||
| 782 | continue; | ||
| 783 | } | ||
| 784 | |||
| 785 | DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name, | ||
| 786 | ep, ep->num_req); | ||
| 787 | |||
| 788 | ret = submit_req(isp1362_hcd, urb, ep, epq); | ||
| 789 | if (ret == -ENOMEM) { | ||
| 790 | defer = 1; | ||
| 791 | break; | ||
| 792 | } else if (ret == -EOVERFLOW) { | ||
| 793 | defer = 1; | ||
| 794 | continue; | ||
| 795 | } | ||
| 796 | #ifdef BUGGY_PXA2XX_UDC_USBTEST | ||
| 797 | defer = ep->nextpid == USB_PID_SETUP; | ||
| 798 | #endif | ||
| 799 | ptd_count++; | ||
| 800 | } | ||
| 801 | |||
| 802 | /* Avoid starvation of endpoints */ | ||
| 803 | if (isp1362_hcd->async.next != isp1362_hcd->async.prev) { | ||
| 804 | DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count); | ||
| 805 | list_move(&isp1362_hcd->async, isp1362_hcd->async.next); | ||
| 806 | } | ||
| 807 | if (ptd_count || defer) | ||
| 808 | enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count); | ||
| 809 | |||
| 810 | epq->ptd_count += ptd_count; | ||
| 811 | if (epq->ptd_count > epq->stat_maxptds) { | ||
| 812 | epq->stat_maxptds = epq->ptd_count; | ||
| 813 | DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds); | ||
| 814 | } | ||
| 815 | } | ||
| 816 | |||
| 817 | static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd) | ||
| 818 | { | ||
| 819 | int ptd_count = 0; | ||
| 820 | struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue; | ||
| 821 | struct isp1362_ep *ep; | ||
| 822 | |||
| 823 | if (atomic_read(&epq->finishing)) { | ||
| 824 | DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); | ||
| 825 | return; | ||
| 826 | } | ||
| 827 | |||
| 828 | list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { | ||
| 829 | struct urb *urb = get_urb(ep); | ||
| 830 | int ret; | ||
| 831 | |||
| 832 | if (!list_empty(&ep->active)) { | ||
| 833 | DBG(1, "%s: Skipping active %s ep %p\n", __func__, | ||
| 834 | epq->name, ep); | ||
| 835 | continue; | ||
| 836 | } | ||
| 837 | |||
| 838 | DBG(1, "%s: Processing %s ep %p req %d\n", __func__, | ||
| 839 | epq->name, ep, ep->num_req); | ||
| 840 | ret = submit_req(isp1362_hcd, urb, ep, epq); | ||
| 841 | if (ret == -ENOMEM) | ||
| 842 | break; | ||
| 843 | else if (ret == -EOVERFLOW) | ||
| 844 | continue; | ||
| 845 | ptd_count++; | ||
| 846 | } | ||
| 847 | |||
| 848 | if (ptd_count) { | ||
| 849 | static int last_count; | ||
| 850 | |||
| 851 | if (ptd_count != last_count) { | ||
| 852 | DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count); | ||
| 853 | last_count = ptd_count; | ||
| 854 | } | ||
| 855 | enable_intl_transfers(isp1362_hcd); | ||
| 856 | } | ||
| 857 | |||
| 858 | epq->ptd_count += ptd_count; | ||
| 859 | if (epq->ptd_count > epq->stat_maxptds) | ||
| 860 | epq->stat_maxptds = epq->ptd_count; | ||
| 861 | } | ||
| 862 | |||
| 863 | static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) | ||
| 864 | { | ||
| 865 | u16 ptd_offset = ep->ptd_offset; | ||
| 866 | int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size; | ||
| 867 | |||
| 868 | DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset, | ||
| 869 | ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size); | ||
| 870 | |||
| 871 | ptd_offset += num_ptds * epq->blk_size; | ||
| 872 | if (ptd_offset < epq->buf_start + epq->buf_size) | ||
| 873 | return ptd_offset; | ||
| 874 | else | ||
| 875 | return -ENOMEM; | ||
| 876 | } | ||
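next_ptd() advances by the number of whole blocks the current PTD occupies: payload plus PTD header, rounded up to the queue's block size. For example, with an 8-byte header and a 64-byte block size, a 100-byte transfer needs ceil(108/64) = 2 blocks. A standalone sketch of that arithmetic; all parameters come from the caller rather than being hard-coded driver constants:

#include <linux/errno.h>

/* Hedged sketch of the block round-up used to find the next PTD offset. */
static int sketch_next_ptd_offset(int offset, int payload_len, int hdr_size,
				  int blk_size, int buf_start, int buf_size)
{
	int blocks = (payload_len + hdr_size + blk_size - 1) / blk_size; /* ceil */

	offset += blocks * blk_size;
	if (offset < buf_start + buf_size)
		return offset;
	return -ENOMEM;	/* ran past the end of the queue's buffer */
}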
| 877 | |||
| 878 | static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd) | ||
| 879 | { | ||
| 880 | int ptd_count = 0; | ||
| 881 | int flip = isp1362_hcd->istl_flip; | ||
| 882 | struct isp1362_ep_queue *epq; | ||
| 883 | int ptd_offset; | ||
| 884 | struct isp1362_ep *ep; | ||
| 885 | struct isp1362_ep *tmp; | ||
| 886 | u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM); | ||
| 887 | |||
| 888 | fill2: | ||
| 889 | epq = &isp1362_hcd->istl_queue[flip]; | ||
| 890 | if (atomic_read(&epq->finishing)) { | ||
| 891 | DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); | ||
| 892 | return; | ||
| 893 | } | ||
| 894 | |||
| 895 | if (!list_empty(&epq->active)) | ||
| 896 | return; | ||
| 897 | |||
| 898 | ptd_offset = epq->buf_start; | ||
| 899 | list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) { | ||
| 900 | struct urb *urb = get_urb(ep); | ||
| 901 | s16 diff = fno - (u16)urb->start_frame; | ||
| 902 | |||
| 903 | DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep); | ||
| 904 | |||
| 905 | if (diff > urb->number_of_packets) { | ||
| 906 | /* time frame for this URB has elapsed */ | ||
| 907 | finish_request(isp1362_hcd, ep, urb, -EOVERFLOW); | ||
| 908 | continue; | ||
| 909 | } else if (diff < -1) { | ||
| 910 | /* URB is not due in this frame or the next one. | ||
| 911 | * Comparing with '-1' instead of '0' accounts for double | ||
| 912 | * buffering in the ISP1362, which enables us to queue the PTD | ||
| 913 | * one frame ahead of time. | ||
| 914 | */ | ||
| 915 | } else if (diff == -1) { | ||
| 916 | /* submit PTDs that are due in the next frame */ | ||
| 917 | prepare_ptd(isp1362_hcd, urb, ep, epq, fno); | ||
| 918 | if (ptd_offset + PTD_HEADER_SIZE + ep->length > | ||
| 919 | epq->buf_start + epq->buf_size) { | ||
| 920 | pr_err("%s: Not enough ISO buffer space for %d byte PTD\n", | ||
| 921 | __func__, ep->length); | ||
| 922 | continue; | ||
| 923 | } | ||
| 924 | ep->ptd_offset = ptd_offset; | ||
| 925 | list_add_tail(&ep->active, &epq->active); | ||
| 926 | |||
| 927 | ptd_offset = next_ptd(epq, ep); | ||
| 928 | if (ptd_offset < 0) { | ||
| 929 | pr_warning("%s: req %d No more %s PTD buffers available\n", __func__, | ||
| 930 | ep->num_req, epq->name); | ||
| 931 | break; | ||
| 932 | } | ||
| 933 | } | ||
| 934 | } | ||
| 935 | list_for_each_entry(ep, &epq->active, active) { | ||
| 936 | if (epq->active.next == &ep->active) | ||
| 937 | ep->ptd.mps |= PTD_LAST_MSK; | ||
| 938 | isp1362_write_ptd(isp1362_hcd, ep, epq); | ||
| 939 | ptd_count++; | ||
| 940 | } | ||
| 941 | |||
| 942 | if (ptd_count) | ||
| 943 | enable_istl_transfers(isp1362_hcd, flip); | ||
| 944 | |||
| 945 | epq->ptd_count += ptd_count; | ||
| 946 | if (epq->ptd_count > epq->stat_maxptds) | ||
| 947 | epq->stat_maxptds = epq->ptd_count; | ||
| 948 | |||
| 949 | /* check whether the second ISTL buffer may also be filled */ | ||
| 950 | if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & | ||
| 951 | (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) { | ||
| 952 | fno++; | ||
| 953 | ptd_count = 0; | ||
| 954 | flip = 1 - flip; | ||
| 955 | goto fill2; | ||
| 956 | } | ||
| 957 | } | ||
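The "s16 diff = fno - (u16)urb->start_frame" computation above relies on 16-bit wraparound: subtracting in unsigned 16-bit arithmetic and reinterpreting the result as signed gives the correct distance even across a frame-counter rollover, as long as the two frames are less than 32768 apart. A small userspace demonstration of the same trick, illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t fno = 0x0002;		/* current frame, just after rollover */
	uint16_t start = 0xfffe;	/* start_frame, just before rollover */
	int16_t diff = (int16_t)(fno - start);

	printf("diff = %d\n", diff);	/* prints 4, not -65532 */
	return 0;
}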
| 958 | |||
| 959 | static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map, | ||
| 960 | struct isp1362_ep_queue *epq) | ||
| 961 | { | ||
| 962 | struct isp1362_ep *ep; | ||
| 963 | struct isp1362_ep *tmp; | ||
| 964 | |||
| 965 | if (list_empty(&epq->active)) { | ||
| 966 | DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name); | ||
| 967 | return; | ||
| 968 | } | ||
| 969 | |||
| 970 | DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map); | ||
| 971 | |||
| 972 | atomic_inc(&epq->finishing); | ||
| 973 | list_for_each_entry_safe(ep, tmp, &epq->active, active) { | ||
| 974 | int index = ep->ptd_index; | ||
| 975 | |||
| 976 | DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name, | ||
| 977 | index, ep->ptd_offset); | ||
| 978 | |||
| 979 | BUG_ON(index < 0); | ||
| 980 | if (__test_and_clear_bit(index, &done_map)) { | ||
| 981 | isp1362_read_ptd(isp1362_hcd, ep, epq); | ||
| 982 | epq->free_ptd = index; | ||
| 983 | BUG_ON(ep->num_ptds == 0); | ||
| 984 | release_ptd_buffers(epq, ep); | ||
| 985 | |||
| 986 | DBG(1, "%s: ep %p req %d removed from active list\n", __func__, | ||
| 987 | ep, ep->num_req); | ||
| 988 | if (!list_empty(&ep->remove_list)) { | ||
| 989 | list_del_init(&ep->remove_list); | ||
| 990 | DBG(1, "%s: ep %p removed from remove list\n", __func__, ep); | ||
| 991 | } | ||
| 992 | DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name, | ||
| 993 | ep, ep->num_req); | ||
| 994 | postproc_ep(isp1362_hcd, ep); | ||
| 995 | } | ||
| 996 | if (!done_map) | ||
| 997 | break; | ||
| 998 | } | ||
| 999 | if (done_map) | ||
| 1000 | pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map, | ||
| 1001 | epq->skip_map); | ||
| 1002 | atomic_dec(&epq->finishing); | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq) | ||
| 1006 | { | ||
| 1007 | struct isp1362_ep *ep; | ||
| 1008 | struct isp1362_ep *tmp; | ||
| 1009 | |||
| 1010 | if (list_empty(&epq->active)) { | ||
| 1011 | DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name); | ||
| 1012 | return; | ||
| 1013 | } | ||
| 1014 | |||
| 1015 | DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name); | ||
| 1016 | |||
| 1017 | atomic_inc(&epq->finishing); | ||
| 1018 | list_for_each_entry_safe(ep, tmp, &epq->active, active) { | ||
| 1019 | DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset); | ||
| 1020 | |||
| 1021 | isp1362_read_ptd(isp1362_hcd, ep, epq); | ||
| 1022 | DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep); | ||
| 1023 | postproc_ep(isp1362_hcd, ep); | ||
| 1024 | } | ||
| 1025 | WARN_ON(epq->blk_size != 0); | ||
| 1026 | atomic_dec(&epq->finishing); | ||
| 1027 | } | ||
| 1028 | |||
| 1029 | static irqreturn_t isp1362_irq(struct usb_hcd *hcd) | ||
| 1030 | { | ||
| 1031 | int handled = 0; | ||
| 1032 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1033 | u16 irqstat; | ||
| 1034 | u16 svc_mask; | ||
| 1035 | |||
| 1036 | spin_lock(&isp1362_hcd->lock); | ||
| 1037 | |||
| 1038 | BUG_ON(isp1362_hcd->irq_active++); | ||
| 1039 | |||
| 1040 | isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); | ||
| 1041 | |||
| 1042 | irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT); | ||
| 1043 | DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb); | ||
| 1044 | |||
| 1045 | /* only handle interrupts that are currently enabled */ | ||
| 1046 | irqstat &= isp1362_hcd->irqenb; | ||
| 1047 | isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat); | ||
| 1048 | svc_mask = irqstat; | ||
| 1049 | |||
| 1050 | if (irqstat & HCuPINT_SOF) { | ||
| 1051 | isp1362_hcd->irqenb &= ~HCuPINT_SOF; | ||
| 1052 | isp1362_hcd->irq_stat[ISP1362_INT_SOF]++; | ||
| 1053 | handled = 1; | ||
| 1054 | svc_mask &= ~HCuPINT_SOF; | ||
| 1055 | DBG(3, "%s: SOF\n", __func__); | ||
| 1056 | isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM); | ||
| 1057 | if (!list_empty(&isp1362_hcd->remove_list)) | ||
| 1058 | finish_unlinks(isp1362_hcd); | ||
| 1059 | if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) { | ||
| 1060 | if (list_empty(&isp1362_hcd->atl_queue.active)) { | ||
| 1061 | start_atl_transfers(isp1362_hcd); | ||
| 1062 | } else { | ||
| 1063 | isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); | ||
| 1064 | isp1362_write_reg32(isp1362_hcd, HCATLSKIP, | ||
| 1065 | isp1362_hcd->atl_queue.skip_map); | ||
| 1066 | isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); | ||
| 1067 | } | ||
| 1068 | } | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | if (irqstat & HCuPINT_ISTL0) { | ||
| 1072 | isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++; | ||
| 1073 | handled = 1; | ||
| 1074 | svc_mask &= ~HCuPINT_ISTL0; | ||
| 1075 | isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL); | ||
| 1076 | DBG(1, "%s: ISTL0\n", __func__); | ||
| 1077 | WARN_ON((int)!!isp1362_hcd->istl_flip); | ||
| 1078 | WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & | ||
| 1079 | HCBUFSTAT_ISTL0_ACTIVE); | ||
| 1080 | WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & | ||
| 1081 | HCBUFSTAT_ISTL0_DONE)); | ||
| 1082 | isp1362_hcd->irqenb &= ~HCuPINT_ISTL0; | ||
| 1083 | } | ||
| 1084 | |||
| 1085 | if (irqstat & HCuPINT_ISTL1) { | ||
| 1086 | isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++; | ||
| 1087 | handled = 1; | ||
| 1088 | svc_mask &= ~HCuPINT_ISTL1; | ||
| 1089 | isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL); | ||
| 1090 | DBG(1, "%s: ISTL1\n", __func__); | ||
| 1091 | WARN_ON(!(int)isp1362_hcd->istl_flip); | ||
| 1092 | WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & | ||
| 1093 | HCBUFSTAT_ISTL1_ACTIVE); | ||
| 1094 | WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & | ||
| 1095 | HCBUFSTAT_ISTL1_DONE)); | ||
| 1096 | isp1362_hcd->irqenb &= ~HCuPINT_ISTL1; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) { | ||
| 1100 | WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) == | ||
| 1101 | (HCuPINT_ISTL0 | HCuPINT_ISTL1)); | ||
| 1102 | finish_iso_transfers(isp1362_hcd, | ||
| 1103 | &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]); | ||
| 1104 | start_iso_transfers(isp1362_hcd); | ||
| 1105 | isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip; | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | if (irqstat & HCuPINT_INTL) { | ||
| 1109 | u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE); | ||
| 1110 | u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP); | ||
| 1111 | isp1362_hcd->irq_stat[ISP1362_INT_INTL]++; | ||
| 1112 | |||
| 1113 | DBG(2, "%s: INTL\n", __func__); | ||
| 1114 | |||
| 1115 | svc_mask &= ~HCuPINT_INTL; | ||
| 1116 | |||
| 1117 | isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map); | ||
| 1118 | if (~(done_map | skip_map) == 0) | ||
| 1119 | /* All PTDs are finished, disable INTL processing entirely */ | ||
| 1120 | isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); | ||
| 1121 | |||
| 1122 | handled = 1; | ||
| 1123 | WARN_ON(!done_map); | ||
| 1124 | if (done_map) { | ||
| 1125 | DBG(3, "%s: INTL done_map %08x\n", __func__, done_map); | ||
| 1126 | finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue); | ||
| 1127 | start_intl_transfers(isp1362_hcd); | ||
| 1128 | } | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | if (irqstat & HCuPINT_ATL) { | ||
| 1132 | u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE); | ||
| 1133 | u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP); | ||
| 1134 | isp1362_hcd->irq_stat[ISP1362_INT_ATL]++; | ||
| 1135 | |||
| 1136 | DBG(2, "%s: ATL\n", __func__); | ||
| 1137 | |||
| 1138 | svc_mask &= ~HCuPINT_ATL; | ||
| 1139 | |||
| 1140 | isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map); | ||
| 1141 | if (~(done_map | skip_map) == 0) | ||
| 1142 | isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); | ||
| 1143 | if (done_map) { | ||
| 1144 | DBG(3, "%s: ATL done_map %08x\n", __func__, done_map); | ||
| 1145 | finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue); | ||
| 1146 | start_atl_transfers(isp1362_hcd); | ||
| 1147 | } | ||
| 1148 | handled = 1; | ||
| 1149 | } | ||
| 1150 | |||
| 1151 | if (irqstat & HCuPINT_OPR) { | ||
| 1152 | u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT); | ||
| 1153 | isp1362_hcd->irq_stat[ISP1362_INT_OPR]++; | ||
| 1154 | |||
| 1155 | svc_mask &= ~HCuPINT_OPR; | ||
| 1156 | DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb); | ||
| 1157 | intstat &= isp1362_hcd->intenb; | ||
| 1158 | if (intstat & OHCI_INTR_UE) { | ||
| 1159 | pr_err("Unrecoverable error\n"); | ||
| 1160 | /* FIXME: perform a reset or cleanup here */ | ||
| 1161 | } | ||
| 1162 | if (intstat & OHCI_INTR_RHSC) { | ||
| 1163 | isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS); | ||
| 1164 | isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1); | ||
| 1165 | isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2); | ||
| 1166 | } | ||
| 1167 | if (intstat & OHCI_INTR_RD) { | ||
| 1168 | pr_info("%s: RESUME DETECTED\n", __func__); | ||
| 1169 | isp1362_show_reg(isp1362_hcd, HCCONTROL); | ||
| 1170 | usb_hcd_resume_root_hub(hcd); | ||
| 1171 | } | ||
| 1172 | isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat); | ||
| 1173 | irqstat &= ~HCuPINT_OPR; | ||
| 1174 | handled = 1; | ||
| 1175 | } | ||
| 1176 | |||
| 1177 | if (irqstat & HCuPINT_SUSP) { | ||
| 1178 | isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++; | ||
| 1179 | handled = 1; | ||
| 1180 | svc_mask &= ~HCuPINT_SUSP; | ||
| 1181 | |||
| 1182 | pr_info("%s: SUSPEND IRQ\n", __func__); | ||
| 1183 | } | ||
| 1184 | |||
| 1185 | if (irqstat & HCuPINT_CLKRDY) { | ||
| 1186 | isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++; | ||
| 1187 | handled = 1; | ||
| 1188 | isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY; | ||
| 1189 | svc_mask &= ~HCuPINT_CLKRDY; | ||
| 1190 | pr_info("%s: CLKRDY IRQ\n", __func__); | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | if (svc_mask) | ||
| 1194 | pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask); | ||
| 1195 | |||
| 1196 | isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); | ||
| 1197 | isp1362_hcd->irq_active--; | ||
| 1198 | spin_unlock(&isp1362_hcd->lock); | ||
| 1199 | |||
| 1200 | return IRQ_RETVAL(handled); | ||
| 1201 | } | ||
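For the ATL and INTL queues the handler above ORs the done map into the skip map so the controller stops reprocessing finished PTDs, and drops the buffer's ACTIVE bit once every PTD is either done or skipped. A compact sketch of just that bookkeeping; the parameters stand in for the driver's register accessors and are not its real helpers:

#include <stdint.h>
#include <stdbool.h>

/* Hedged sketch of the per-queue done/skip handling in the IRQ path. */
static void sketch_ack_done_ptds(uint32_t done_map, uint32_t *skip_map,
				 bool *buffer_active)
{
	*skip_map |= done_map;		/* don't let the HC re-run finished PTDs */
	if (~*skip_map == 0)		/* every PTD is now done or skipped */
		*buffer_active = false;	/* corresponds to clearing the ACTIVE bit */
}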
| 1202 | |||
| 1203 | /*-------------------------------------------------------------------------*/ | ||
| 1204 | |||
| 1205 | #define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */ | ||
| 1206 | static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load) | ||
| 1207 | { | ||
| 1208 | int i, branch = -ENOSPC; | ||
| 1209 | |||
| 1210 | /* search for the least loaded schedule branch of that interval | ||
| 1211 | * which has enough bandwidth left unreserved. | ||
| 1212 | */ | ||
| 1213 | for (i = 0; i < interval; i++) { | ||
| 1214 | if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) { | ||
| 1215 | int j; | ||
| 1216 | |||
| 1217 | for (j = i; j < PERIODIC_SIZE; j += interval) { | ||
| 1218 | if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) { | ||
| 1219 | pr_err("%s: new load %d load[%02x] %d max %d\n", __func__, | ||
| 1220 | load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD); | ||
| 1221 | break; | ||
| 1222 | } | ||
| 1223 | } | ||
| 1224 | if (j < PERIODIC_SIZE) | ||
| 1225 | continue; | ||
| 1226 | branch = i; | ||
| 1227 | } | ||
| 1228 | } | ||
| 1229 | return branch; | ||
| 1230 | } | ||
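balance() keeps track of the least-loaded candidate slot in 0..interval-1 and accepts it only if adding the new load keeps every frame that slot touches under MAX_PERIODIC_LOAD. The same search as a self-contained sketch; the array size and limit are illustrative values, not necessarily the driver's:

#define SKETCH_PERIODIC_SIZE	32
#define SKETCH_MAX_LOAD		900	/* out of 1000 usec per frame */

static int sketch_balance(const int load[SKETCH_PERIODIC_SIZE], int interval,
			  int new_load)
{
	int i, branch = -1;

	for (i = 0; i < interval; i++) {
		if (branch < 0 || load[branch] > load[i]) {
			int j;

			/* every frame this slot touches must stay under the limit */
			for (j = i; j < SKETCH_PERIODIC_SIZE; j += interval)
				if (load[j] + new_load > SKETCH_MAX_LOAD)
					break;
			if (j < SKETCH_PERIODIC_SIZE)
				continue;	/* over budget somewhere; try the next slot */
			branch = i;
		}
	}
	return branch;	/* -1 if no slot has enough headroom */
}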
| 1231 | |||
| 1232 | /* NB! ALL the code above this point runs with isp1362_hcd->lock | ||
| 1233 | held, irqs off | ||
| 1234 | */ | ||
| 1235 | |||
| 1236 | /*-------------------------------------------------------------------------*/ | ||
| 1237 | |||
| 1238 | static int isp1362_urb_enqueue(struct usb_hcd *hcd, | ||
| 1239 | struct urb *urb, | ||
| 1240 | gfp_t mem_flags) | ||
| 1241 | { | ||
| 1242 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1243 | struct usb_device *udev = urb->dev; | ||
| 1244 | unsigned int pipe = urb->pipe; | ||
| 1245 | int is_out = !usb_pipein(pipe); | ||
| 1246 | int type = usb_pipetype(pipe); | ||
| 1247 | int epnum = usb_pipeendpoint(pipe); | ||
| 1248 | struct usb_host_endpoint *hep = urb->ep; | ||
| 1249 | struct isp1362_ep *ep = NULL; | ||
| 1250 | unsigned long flags; | ||
| 1251 | int retval = 0; | ||
| 1252 | |||
| 1253 | DBG(3, "%s: urb %p\n", __func__, urb); | ||
| 1254 | |||
| 1255 | if (type == PIPE_ISOCHRONOUS) { | ||
| 1256 | pr_err("Isochronous transfers not supported\n"); | ||
| 1257 | return -ENOSPC; | ||
| 1258 | } | ||
| 1259 | |||
| 1260 | URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__, | ||
| 1261 | usb_pipedevice(pipe), epnum, | ||
| 1262 | is_out ? "out" : "in", | ||
| 1263 | usb_pipecontrol(pipe) ? "ctrl" : | ||
| 1264 | usb_pipeint(pipe) ? "int" : | ||
| 1265 | usb_pipebulk(pipe) ? "bulk" : | ||
| 1266 | "iso", | ||
| 1267 | urb->transfer_buffer_length, | ||
| 1268 | (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "", | ||
| 1269 | !(urb->transfer_flags & URB_SHORT_NOT_OK) ? | ||
| 1270 | "short_ok" : ""); | ||
| 1271 | |||
| 1272 | /* avoid all allocations within spinlocks: request or endpoint */ | ||
| 1273 | if (!hep->hcpriv) { | ||
| 1274 | ep = kcalloc(1, sizeof *ep, mem_flags); | ||
| 1275 | if (!ep) | ||
| 1276 | return -ENOMEM; | ||
| 1277 | } | ||
| 1278 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1279 | |||
| 1280 | /* don't submit to a dead or disabled port */ | ||
| 1281 | if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) & | ||
| 1282 | (1 << USB_PORT_FEAT_ENABLE)) || | ||
| 1283 | !HC_IS_RUNNING(hcd->state)) { | ||
| 1284 | kfree(ep); | ||
| 1285 | retval = -ENODEV; | ||
| 1286 | goto fail_not_linked; | ||
| 1287 | } | ||
| 1288 | |||
| 1289 | retval = usb_hcd_link_urb_to_ep(hcd, urb); | ||
| 1290 | if (retval) { | ||
| 1291 | kfree(ep); | ||
| 1292 | goto fail_not_linked; | ||
| 1293 | } | ||
| 1294 | |||
| 1295 | if (hep->hcpriv) { | ||
| 1296 | ep = hep->hcpriv; | ||
| 1297 | } else { | ||
| 1298 | INIT_LIST_HEAD(&ep->schedule); | ||
| 1299 | INIT_LIST_HEAD(&ep->active); | ||
| 1300 | INIT_LIST_HEAD(&ep->remove_list); | ||
| 1301 | ep->udev = usb_get_dev(udev); | ||
| 1302 | ep->hep = hep; | ||
| 1303 | ep->epnum = epnum; | ||
| 1304 | ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out); | ||
| 1305 | ep->ptd_offset = -EINVAL; | ||
| 1306 | ep->ptd_index = -EINVAL; | ||
| 1307 | usb_settoggle(udev, epnum, is_out, 0); | ||
| 1308 | |||
| 1309 | if (type == PIPE_CONTROL) | ||
| 1310 | ep->nextpid = USB_PID_SETUP; | ||
| 1311 | else if (is_out) | ||
| 1312 | ep->nextpid = USB_PID_OUT; | ||
| 1313 | else | ||
| 1314 | ep->nextpid = USB_PID_IN; | ||
| 1315 | |||
| 1316 | switch (type) { | ||
| 1317 | case PIPE_ISOCHRONOUS: | ||
| 1318 | case PIPE_INTERRUPT: | ||
| 1319 | if (urb->interval > PERIODIC_SIZE) | ||
| 1320 | urb->interval = PERIODIC_SIZE; | ||
| 1321 | ep->interval = urb->interval; | ||
| 1322 | ep->branch = PERIODIC_SIZE; | ||
| 1323 | ep->load = usb_calc_bus_time(udev->speed, !is_out, | ||
| 1324 | (type == PIPE_ISOCHRONOUS), | ||
| 1325 | usb_maxpacket(udev, pipe, is_out)) / 1000; | ||
| 1326 | break; | ||
| 1327 | } | ||
| 1328 | hep->hcpriv = ep; | ||
| 1329 | } | ||
| 1330 | ep->num_req = isp1362_hcd->req_serial++; | ||
| 1331 | |||
| 1332 | /* maybe put endpoint into schedule */ | ||
| 1333 | switch (type) { | ||
| 1334 | case PIPE_CONTROL: | ||
| 1335 | case PIPE_BULK: | ||
| 1336 | if (list_empty(&ep->schedule)) { | ||
| 1337 | DBG(1, "%s: Adding ep %p req %d to async schedule\n", | ||
| 1338 | __func__, ep, ep->num_req); | ||
| 1339 | list_add_tail(&ep->schedule, &isp1362_hcd->async); | ||
| 1340 | } | ||
| 1341 | break; | ||
| 1342 | case PIPE_ISOCHRONOUS: | ||
| 1343 | case PIPE_INTERRUPT: | ||
| 1344 | urb->interval = ep->interval; | ||
| 1345 | |||
| 1346 | /* urb submitted for already existing EP */ | ||
| 1347 | if (ep->branch < PERIODIC_SIZE) | ||
| 1348 | break; | ||
| 1349 | |||
| 1350 | retval = balance(isp1362_hcd, ep->interval, ep->load); | ||
| 1351 | if (retval < 0) { | ||
| 1352 | pr_err("%s: balance returned %d\n", __func__, retval); | ||
| 1353 | goto fail; | ||
| 1354 | } | ||
| 1355 | ep->branch = retval; | ||
| 1356 | retval = 0; | ||
| 1357 | isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM); | ||
| 1358 | DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n", | ||
| 1359 | __func__, isp1362_hcd->fmindex, ep->branch, | ||
| 1360 | ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) & | ||
| 1361 | ~(PERIODIC_SIZE - 1)) + ep->branch, | ||
| 1362 | (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch); | ||
| 1363 | |||
| 1364 | if (list_empty(&ep->schedule)) { | ||
| 1365 | if (type == PIPE_ISOCHRONOUS) { | ||
| 1366 | u16 frame = isp1362_hcd->fmindex; | ||
| 1367 | |||
| 1368 | frame += max_t(u16, 8, ep->interval); | ||
| 1369 | frame &= ~(ep->interval - 1); | ||
| 1370 | frame |= ep->branch; | ||
| 1371 | if (frame_before(frame, isp1362_hcd->fmindex)) | ||
| 1372 | frame += ep->interval; | ||
| 1373 | urb->start_frame = frame; | ||
| 1374 | |||
| 1375 | DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep); | ||
| 1376 | list_add_tail(&ep->schedule, &isp1362_hcd->isoc); | ||
| 1377 | } else { | ||
| 1378 | DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep); | ||
| 1379 | list_add_tail(&ep->schedule, &isp1362_hcd->periodic); | ||
| 1380 | } | ||
| 1381 | } else | ||
| 1382 | DBG(1, "%s: ep %p already scheduled\n", __func__, ep); | ||
| 1383 | |||
| 1384 | DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__, | ||
| 1385 | ep->load / ep->interval, isp1362_hcd->load[ep->branch], | ||
| 1386 | isp1362_hcd->load[ep->branch] + ep->load); | ||
| 1387 | isp1362_hcd->load[ep->branch] += ep->load; | ||
| 1388 | } | ||
| 1389 | |||
| 1390 | urb->hcpriv = hep; | ||
| 1391 | ALIGNSTAT(isp1362_hcd, urb->transfer_buffer); | ||
| 1392 | |||
| 1393 | switch (type) { | ||
| 1394 | case PIPE_CONTROL: | ||
| 1395 | case PIPE_BULK: | ||
| 1396 | start_atl_transfers(isp1362_hcd); | ||
| 1397 | break; | ||
| 1398 | case PIPE_INTERRUPT: | ||
| 1399 | start_intl_transfers(isp1362_hcd); | ||
| 1400 | break; | ||
| 1401 | case PIPE_ISOCHRONOUS: | ||
| 1402 | start_iso_transfers(isp1362_hcd); | ||
| 1403 | break; | ||
| 1404 | default: | ||
| 1405 | BUG(); | ||
| 1406 | } | ||
| 1407 | fail: | ||
| 1408 | if (retval) | ||
| 1409 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
| 1410 | |||
| 1411 | |||
| 1412 | fail_not_linked: | ||
| 1413 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1414 | if (retval) | ||
| 1415 | DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval); | ||
| 1416 | return retval; | ||
| 1417 | } | ||
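The isochronous branch of the enqueue path above picks a start frame at least eight frames (or one interval) ahead, aligns it down to the endpoint's interval, offsets it into the chosen branch, and bumps it by one interval if the result already lies in the past. A standalone sketch of that arithmetic, assuming a power-of-two interval:

#include <stdint.h>

/* Hedged sketch of the start-frame calculation for periodic/iso endpoints. */
static uint16_t sketch_start_frame(uint16_t current_frame, uint16_t interval,
				   uint16_t branch)
{
	uint16_t frame = current_frame + (interval > 8 ? interval : 8);

	frame &= ~(interval - 1);			/* align down to the interval */
	frame |= branch;				/* slot within the interval */
	if ((int16_t)(frame - current_frame) < 0)	/* landed behind "now"? */
		frame += interval;
	return frame;
}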
| 1418 | |||
| 1419 | static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | ||
| 1420 | { | ||
| 1421 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1422 | struct usb_host_endpoint *hep; | ||
| 1423 | unsigned long flags; | ||
| 1424 | struct isp1362_ep *ep; | ||
| 1425 | int retval = 0; | ||
| 1426 | |||
| 1427 | DBG(3, "%s: urb %p\n", __func__, urb); | ||
| 1428 | |||
| 1429 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1430 | retval = usb_hcd_check_unlink_urb(hcd, urb, status); | ||
| 1431 | if (retval) | ||
| 1432 | goto done; | ||
| 1433 | |||
| 1434 | hep = urb->hcpriv; | ||
| 1435 | |||
| 1436 | if (!hep) { | ||
| 1437 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1438 | return -EIDRM; | ||
| 1439 | } | ||
| 1440 | |||
| 1441 | ep = hep->hcpriv; | ||
| 1442 | if (ep) { | ||
| 1443 | /* In front of queue? */ | ||
| 1444 | if (ep->hep->urb_list.next == &urb->urb_list) { | ||
| 1445 | if (!list_empty(&ep->active)) { | ||
| 1446 | DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__, | ||
| 1447 | urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset); | ||
| 1448 | /* disable processing and queue PTD for removal */ | ||
| 1449 | remove_ptd(isp1362_hcd, ep); | ||
| 1450 | urb = NULL; | ||
| 1451 | } | ||
| 1452 | } | ||
| 1453 | if (urb) { | ||
| 1454 | DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep, | ||
| 1455 | ep->num_req); | ||
| 1456 | finish_request(isp1362_hcd, ep, urb, status); | ||
| 1457 | } else | ||
| 1458 | DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb); | ||
| 1459 | } else { | ||
| 1460 | pr_warning("%s: No EP in URB %p\n", __func__, urb); | ||
| 1461 | retval = -EINVAL; | ||
| 1462 | } | ||
| 1463 | done: | ||
| 1464 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1465 | |||
| 1466 | DBG(3, "%s: exit\n", __func__); | ||
| 1467 | |||
| 1468 | return retval; | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) | ||
| 1472 | { | ||
| 1473 | struct isp1362_ep *ep = hep->hcpriv; | ||
| 1474 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1475 | unsigned long flags; | ||
| 1476 | |||
| 1477 | DBG(1, "%s: ep %p\n", __func__, ep); | ||
| 1478 | if (!ep) | ||
| 1479 | return; | ||
| 1480 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1481 | if (!list_empty(&hep->urb_list)) { | ||
| 1482 | if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) { | ||
| 1483 | DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__, | ||
| 1484 | ep, ep->num_req, ep->ptd_index, ep->ptd_offset); | ||
| 1485 | remove_ptd(isp1362_hcd, ep); | ||
| 1486 | pr_info("%s: Waiting for Interrupt to clean up\n", __func__); | ||
| 1487 | } | ||
| 1488 | } | ||
| 1489 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1490 | /* Wait for interrupt to clear out active list */ | ||
| 1491 | while (!list_empty(&ep->active)) | ||
| 1492 | msleep(1); | ||
| 1493 | |||
| 1494 | DBG(1, "%s: Freeing EP %p\n", __func__, ep); | ||
| 1495 | |||
| 1496 | usb_put_dev(ep->udev); | ||
| 1497 | kfree(ep); | ||
| 1498 | hep->hcpriv = NULL; | ||
| 1499 | } | ||
| 1500 | |||
| 1501 | static int isp1362_get_frame(struct usb_hcd *hcd) | ||
| 1502 | { | ||
| 1503 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1504 | u32 fmnum; | ||
| 1505 | unsigned long flags; | ||
| 1506 | |||
| 1507 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1508 | fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM); | ||
| 1509 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1510 | |||
| 1511 | return (int)fmnum; | ||
| 1512 | } | ||
| 1513 | |||
| 1514 | /*-------------------------------------------------------------------------*/ | ||
| 1515 | |||
| 1516 | /* Adapted from ohci-hub.c */ | ||
| 1517 | static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf) | ||
| 1518 | { | ||
| 1519 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1520 | int ports, i, changed = 0; | ||
| 1521 | unsigned long flags; | ||
| 1522 | |||
| 1523 | if (!HC_IS_RUNNING(hcd->state)) | ||
| 1524 | return -ESHUTDOWN; | ||
| 1525 | |||
| 1526 | /* Report no status change now if we are scheduled to be | ||
| 1527 | called later */ | ||
| 1528 | if (timer_pending(&hcd->rh_timer)) | ||
| 1529 | return 0; | ||
| 1530 | |||
| 1531 | ports = isp1362_hcd->rhdesca & RH_A_NDP; | ||
| 1532 | BUG_ON(ports > 2); | ||
| 1533 | |||
| 1534 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1535 | /* init status */ | ||
| 1536 | if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC)) | ||
| 1537 | buf[0] = changed = 1; | ||
| 1538 | else | ||
| 1539 | buf[0] = 0; | ||
| 1540 | |||
| 1541 | for (i = 0; i < ports; i++) { | ||
| 1542 | u32 status = isp1362_hcd->rhport[i]; | ||
| 1543 | |||
| 1544 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | | ||
| 1545 | RH_PS_OCIC | RH_PS_PRSC)) { | ||
| 1546 | changed = 1; | ||
| 1547 | buf[0] |= 1 << (i + 1); | ||
| 1548 | continue; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | if (!(status & RH_PS_CCS)) | ||
| 1552 | continue; | ||
| 1553 | } | ||
| 1554 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1555 | return changed; | ||
| 1556 | } | ||
| 1557 | |||
| 1558 | static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd, | ||
| 1559 | struct usb_hub_descriptor *desc) | ||
| 1560 | { | ||
| 1561 | u32 reg = isp1362_hcd->rhdesca; | ||
| 1562 | |||
| 1563 | DBG(3, "%s: enter\n", __func__); | ||
| 1564 | |||
| 1565 | desc->bDescriptorType = 0x29; | ||
| 1566 | desc->bDescLength = 9; | ||
| 1567 | desc->bHubContrCurrent = 0; | ||
| 1568 | desc->bNbrPorts = reg & 0x3; | ||
| 1569 | /* Power switching, device type, overcurrent. */ | ||
| 1570 | desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f); | ||
| 1571 | DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f)); | ||
| 1572 | desc->bPwrOn2PwrGood = (reg >> 24) & 0xff; | ||
| 1573 | /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */ | ||
| 1574 | desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1; | ||
| 1575 | desc->bitmap[1] = ~0; | ||
| 1576 | |||
| 1577 | DBG(3, "%s: exit\n", __func__); | ||
| 1578 | } | ||
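isp1362_hub_descriptor() carves its fields straight out of the cached HCRHDESCA value: the port count from the low bits, the hub characteristics from bits 8..12, and the power-on-to-power-good time from the top byte. An illustrative decoder of the same layout; the field positions follow the OHCI root-hub register format, and this sample code is not part of the driver:

#include <stdint.h>

struct sketch_rh_fields {
	uint8_t  nbr_ports;		/* NDP */
	uint16_t characteristics;	/* power switching, overcurrent, ... */
	uint8_t  potpgt;		/* power-on to power-good, 2 ms units */
};

static struct sketch_rh_fields sketch_decode_rhdesca(uint32_t reg)
{
	struct sketch_rh_fields f = {
		.nbr_ports	 = reg & 0x3,
		.characteristics = (reg >> 8) & 0x1f,
		.potpgt		 = (reg >> 24) & 0xff,
	};

	return f;
}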
| 1579 | |||
| 1580 | /* Adapted from ohci-hub.c */ | ||
| 1581 | static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | ||
| 1582 | u16 wIndex, char *buf, u16 wLength) | ||
| 1583 | { | ||
| 1584 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1585 | int retval = 0; | ||
| 1586 | unsigned long flags; | ||
| 1587 | unsigned long t1; | ||
| 1588 | int ports = isp1362_hcd->rhdesca & RH_A_NDP; | ||
| 1589 | u32 tmp = 0; | ||
| 1590 | |||
| 1591 | switch (typeReq) { | ||
| 1592 | case ClearHubFeature: | ||
| 1593 | DBG(0, "ClearHubFeature: "); | ||
| 1594 | switch (wValue) { | ||
| 1595 | case C_HUB_OVER_CURRENT: | ||
| 1596 | _DBG(0, "C_HUB_OVER_CURRENT\n"); | ||
| 1597 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1598 | isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC); | ||
| 1599 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1600 | case C_HUB_LOCAL_POWER: | ||
| 1601 | _DBG(0, "C_HUB_LOCAL_POWER\n"); | ||
| 1602 | break; | ||
| 1603 | default: | ||
| 1604 | goto error; | ||
| 1605 | } | ||
| 1606 | break; | ||
| 1607 | case SetHubFeature: | ||
| 1608 | DBG(0, "SetHubFeature: "); | ||
| 1609 | switch (wValue) { | ||
| 1610 | case C_HUB_OVER_CURRENT: | ||
| 1611 | case C_HUB_LOCAL_POWER: | ||
| 1612 | _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n"); | ||
| 1613 | break; | ||
| 1614 | default: | ||
| 1615 | goto error; | ||
| 1616 | } | ||
| 1617 | break; | ||
| 1618 | case GetHubDescriptor: | ||
| 1619 | DBG(0, "GetHubDescriptor\n"); | ||
| 1620 | isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf); | ||
| 1621 | break; | ||
| 1622 | case GetHubStatus: | ||
| 1623 | DBG(0, "GetHubStatus\n"); | ||
| 1624 | put_unaligned(cpu_to_le32(0), (__le32 *) buf); | ||
| 1625 | break; | ||
| 1626 | case GetPortStatus: | ||
| 1627 | #ifndef VERBOSE | ||
| 1628 | DBG(0, "GetPortStatus\n"); | ||
| 1629 | #endif | ||
| 1630 | if (!wIndex || wIndex > ports) | ||
| 1631 | goto error; | ||
| 1632 | tmp = isp1362_hcd->rhport[--wIndex]; | ||
| 1633 | put_unaligned(cpu_to_le32(tmp), (__le32 *) buf); | ||
| 1634 | break; | ||
| 1635 | case ClearPortFeature: | ||
| 1636 | DBG(0, "ClearPortFeature: "); | ||
| 1637 | if (!wIndex || wIndex > ports) | ||
| 1638 | goto error; | ||
| 1639 | wIndex--; | ||
| 1640 | |||
| 1641 | switch (wValue) { | ||
| 1642 | case USB_PORT_FEAT_ENABLE: | ||
| 1643 | _DBG(0, "USB_PORT_FEAT_ENABLE\n"); | ||
| 1644 | tmp = RH_PS_CCS; | ||
| 1645 | break; | ||
| 1646 | case USB_PORT_FEAT_C_ENABLE: | ||
| 1647 | _DBG(0, "USB_PORT_FEAT_C_ENABLE\n"); | ||
| 1648 | tmp = RH_PS_PESC; | ||
| 1649 | break; | ||
| 1650 | case USB_PORT_FEAT_SUSPEND: | ||
| 1651 | _DBG(0, "USB_PORT_FEAT_SUSPEND\n"); | ||
| 1652 | tmp = RH_PS_POCI; | ||
| 1653 | break; | ||
| 1654 | case USB_PORT_FEAT_C_SUSPEND: | ||
| 1655 | _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n"); | ||
| 1656 | tmp = RH_PS_PSSC; | ||
| 1657 | break; | ||
| 1658 | case USB_PORT_FEAT_POWER: | ||
| 1659 | _DBG(0, "USB_PORT_FEAT_POWER\n"); | ||
| 1660 | tmp = RH_PS_LSDA; | ||
| 1661 | |||
| 1662 | break; | ||
| 1663 | case USB_PORT_FEAT_C_CONNECTION: | ||
| 1664 | _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n"); | ||
| 1665 | tmp = RH_PS_CSC; | ||
| 1666 | break; | ||
| 1667 | case USB_PORT_FEAT_C_OVER_CURRENT: | ||
| 1668 | _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n"); | ||
| 1669 | tmp = RH_PS_OCIC; | ||
| 1670 | break; | ||
| 1671 | case USB_PORT_FEAT_C_RESET: | ||
| 1672 | _DBG(0, "USB_PORT_FEAT_C_RESET\n"); | ||
| 1673 | tmp = RH_PS_PRSC; | ||
| 1674 | break; | ||
| 1675 | default: | ||
| 1676 | goto error; | ||
| 1677 | } | ||
| 1678 | |||
| 1679 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1680 | isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp); | ||
| 1681 | isp1362_hcd->rhport[wIndex] = | ||
| 1682 | isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); | ||
| 1683 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1684 | break; | ||
| 1685 | case SetPortFeature: | ||
| 1686 | DBG(0, "SetPortFeature: "); | ||
| 1687 | if (!wIndex || wIndex > ports) | ||
| 1688 | goto error; | ||
| 1689 | wIndex--; | ||
| 1690 | switch (wValue) { | ||
| 1691 | case USB_PORT_FEAT_SUSPEND: | ||
| 1692 | _DBG(0, "USB_PORT_FEAT_SUSPEND\n"); | ||
| 1693 | #ifdef CONFIG_USB_OTG | ||
| 1694 | if (ohci->hcd.self.otg_port == (wIndex + 1) && | ||
| 1695 | ohci->hcd.self.b_hnp_enable) { | ||
| 1696 | start_hnp(ohci); | ||
| 1697 | break; | ||
| 1698 | } | ||
| 1699 | #endif | ||
| 1700 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1701 | isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS); | ||
| 1702 | isp1362_hcd->rhport[wIndex] = | ||
| 1703 | isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); | ||
| 1704 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1705 | break; | ||
| 1706 | case USB_PORT_FEAT_POWER: | ||
| 1707 | _DBG(0, "USB_PORT_FEAT_POWER\n"); | ||
| 1708 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1709 | isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS); | ||
| 1710 | isp1362_hcd->rhport[wIndex] = | ||
| 1711 | isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); | ||
| 1712 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1713 | break; | ||
| 1714 | case USB_PORT_FEAT_RESET: | ||
| 1715 | _DBG(0, "USB_PORT_FEAT_RESET\n"); | ||
| 1716 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1717 | |||
| 1718 | t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH); | ||
| 1719 | while (time_before(jiffies, t1)) { | ||
| 1720 | /* spin until any current reset finishes */ | ||
| 1721 | for (;;) { | ||
| 1722 | tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); | ||
| 1723 | if (!(tmp & RH_PS_PRS)) | ||
| 1724 | break; | ||
| 1725 | udelay(500); | ||
| 1726 | } | ||
| 1727 | if (!(tmp & RH_PS_CCS)) | ||
| 1728 | break; | ||
| 1729 | /* Reset lasts 10ms (per the datasheet) */ | ||
| 1730 | isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS)); | ||
| 1731 | |||
| 1732 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1733 | msleep(10); | ||
| 1734 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1735 | } | ||
| 1736 | |||
| 1737 | isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, | ||
| 1738 | HCRHPORT1 + wIndex); | ||
| 1739 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1740 | break; | ||
| 1741 | default: | ||
| 1742 | goto error; | ||
| 1743 | } | ||
| 1744 | break; | ||
| 1745 | |||
| 1746 | default: | ||
| 1747 | error: | ||
| 1748 | /* "protocol stall" on error */ | ||
| 1749 | _DBG(0, "PROTOCOL STALL\n"); | ||
| 1750 | retval = -EPIPE; | ||
| 1751 | } | ||
| 1752 | |||
| 1753 | return retval; | ||
| 1754 | } | ||
| 1755 | |||
| 1756 | #ifdef CONFIG_PM | ||
| 1757 | static int isp1362_bus_suspend(struct usb_hcd *hcd) | ||
| 1758 | { | ||
| 1759 | int status = 0; | ||
| 1760 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1761 | unsigned long flags; | ||
| 1762 | |||
| 1763 | if (time_before(jiffies, isp1362_hcd->next_statechange)) | ||
| 1764 | msleep(5); | ||
| 1765 | |||
| 1766 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1767 | |||
| 1768 | isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); | ||
| 1769 | switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { | ||
| 1770 | case OHCI_USB_RESUME: | ||
| 1771 | DBG(0, "%s: resume/suspend?\n", __func__); | ||
| 1772 | isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS; | ||
| 1773 | isp1362_hcd->hc_control |= OHCI_USB_RESET; | ||
| 1774 | isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); | ||
| 1775 | /* FALL THROUGH */ | ||
| 1776 | case OHCI_USB_RESET: | ||
| 1777 | status = -EBUSY; | ||
| 1778 | pr_warning("%s: needs reinit!\n", __func__); | ||
| 1779 | goto done; | ||
| 1780 | case OHCI_USB_SUSPEND: | ||
| 1781 | pr_warning("%s: already suspended?\n", __func__); | ||
| 1782 | goto done; | ||
| 1783 | } | ||
| 1784 | DBG(0, "%s: suspend root hub\n", __func__); | ||
| 1785 | |||
| 1786 | /* First stop any processing */ | ||
| 1787 | hcd->state = HC_STATE_QUIESCING; | ||
| 1788 | if (!list_empty(&isp1362_hcd->atl_queue.active) || | ||
| 1789 | !list_empty(&isp1362_hcd->intl_queue.active) || | ||
| 1790 | !list_empty(&isp1362_hcd->istl_queue[0].active) || | ||
| 1791 | !list_empty(&isp1362_hcd->istl_queue[1].active)) { | ||
| 1792 | int limit; | ||
| 1793 | |||
| 1794 | isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0); | ||
| 1795 | isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0); | ||
| 1796 | isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0); | ||
| 1797 | isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); | ||
| 1798 | isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF); | ||
| 1799 | |||
| 1800 | DBG(0, "%s: stopping schedules ...\n", __func__); | ||
| 1801 | limit = 2000; | ||
| 1802 | while (limit > 0) { | ||
| 1803 | udelay(250); | ||
| 1804 | limit -= 250; | ||
| 1805 | if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF) | ||
| 1806 | break; | ||
| 1807 | } | ||
| 1808 | mdelay(7); | ||
| 1809 | if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) { | ||
| 1810 | u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE); | ||
| 1811 | finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue); | ||
| 1812 | } | ||
| 1813 | if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) { | ||
| 1814 | u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE); | ||
| 1815 | finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue); | ||
| 1816 | } | ||
| 1817 | if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0) | ||
| 1818 | finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]); | ||
| 1819 | if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1) | ||
| 1820 | finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]); | ||
| 1821 | } | ||
| 1822 | DBG(0, "%s: HCINTSTAT: %08x\n", __func__, | ||
| 1823 | isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); | ||
| 1824 | isp1362_write_reg32(isp1362_hcd, HCINTSTAT, | ||
| 1825 | isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); | ||
| 1826 | |||
| 1827 | /* Suspend hub */ | ||
| 1828 | isp1362_hcd->hc_control = OHCI_USB_SUSPEND; | ||
| 1829 | isp1362_show_reg(isp1362_hcd, HCCONTROL); | ||
| 1830 | isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); | ||
| 1831 | isp1362_show_reg(isp1362_hcd, HCCONTROL); | ||
| 1832 | |||
| 1833 | #if 1 | ||
| 1834 | isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); | ||
| 1835 | if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) { | ||
| 1836 | pr_err("%s: controller won't suspend %08x\n", __func__, | ||
| 1837 | isp1362_hcd->hc_control); | ||
| 1838 | status = -EBUSY; | ||
| 1839 | } else | ||
| 1840 | #endif | ||
| 1841 | { | ||
| 1842 | /* no resumes until devices finish suspending */ | ||
| 1843 | isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5); | ||
| 1844 | } | ||
| 1845 | done: | ||
| 1846 | if (status == 0) { | ||
| 1847 | hcd->state = HC_STATE_SUSPENDED; | ||
| 1848 | DBG(0, "%s: HCD suspended: %08x\n", __func__, | ||
| 1849 | isp1362_read_reg32(isp1362_hcd, HCCONTROL)); | ||
| 1850 | } | ||
| 1851 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1852 | return status; | ||
| 1853 | } | ||
| 1854 | |||
| 1855 | static int isp1362_bus_resume(struct usb_hcd *hcd) | ||
| 1856 | { | ||
| 1857 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 1858 | u32 port; | ||
| 1859 | unsigned long flags; | ||
| 1860 | int status = -EINPROGRESS; | ||
| 1861 | |||
| 1862 | if (time_before(jiffies, isp1362_hcd->next_statechange)) | ||
| 1863 | msleep(5); | ||
| 1864 | |||
| 1865 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1866 | isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); | ||
| 1867 | pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control); | ||
| 1868 | if (hcd->state == HC_STATE_RESUMING) { | ||
| 1869 | pr_warning("%s: duplicate resume\n", __func__); | ||
| 1870 | status = 0; | ||
| 1871 | } else | ||
| 1872 | switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { | ||
| 1873 | case OHCI_USB_SUSPEND: | ||
| 1874 | DBG(0, "%s: resume root hub\n", __func__); | ||
| 1875 | isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS; | ||
| 1876 | isp1362_hcd->hc_control |= OHCI_USB_RESUME; | ||
| 1877 | isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); | ||
| 1878 | break; | ||
| 1879 | case OHCI_USB_RESUME: | ||
| 1880 | /* HCFS changes sometime after INTR_RD */ | ||
| 1881 | DBG(0, "%s: remote wakeup\n", __func__); | ||
| 1882 | break; | ||
| 1883 | case OHCI_USB_OPER: | ||
| 1884 | DBG(0, "%s: odd resume\n", __func__); | ||
| 1885 | status = 0; | ||
| 1886 | hcd->self.root_hub->dev.power.power_state = PMSG_ON; | ||
| 1887 | break; | ||
| 1888 | default: /* RESET, we lost power */ | ||
| 1889 | DBG(0, "%s: root hub hardware reset\n", __func__); | ||
| 1890 | status = -EBUSY; | ||
| 1891 | } | ||
| 1892 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1893 | if (status == -EBUSY) { | ||
| 1894 | DBG(0, "%s: Restarting HC\n", __func__); | ||
| 1895 | isp1362_hc_stop(hcd); | ||
| 1896 | return isp1362_hc_start(hcd); | ||
| 1897 | } | ||
| 1898 | if (status != -EINPROGRESS) | ||
| 1899 | return status; | ||
| 1900 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1901 | port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP; | ||
| 1902 | while (port--) { | ||
| 1903 | u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port); | ||
| 1904 | |||
| 1905 | /* force global, not selective, resume */ | ||
| 1906 | if (!(stat & RH_PS_PSS)) { | ||
| 1907 | DBG(0, "%s: Not Resuming RH port %d\n", __func__, port); | ||
| 1908 | continue; | ||
| 1909 | } | ||
| 1910 | DBG(0, "%s: Resuming RH port %d\n", __func__, port); | ||
| 1911 | isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI); | ||
| 1912 | } | ||
| 1913 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1914 | |||
| 1915 | /* Some controllers (Lucent) need extra-long delays */ | ||
| 1916 | hcd->state = HC_STATE_RESUMING; | ||
| 1917 | mdelay(20 /* usb 11.5.1.10 */ + 15); | ||
| 1918 | |||
| 1919 | isp1362_hcd->hc_control = OHCI_USB_OPER; | ||
| 1920 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 1921 | isp1362_show_reg(isp1362_hcd, HCCONTROL); | ||
| 1922 | isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); | ||
| 1923 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 1924 | /* TRSMRCY */ | ||
| 1925 | msleep(10); | ||
| 1926 | |||
| 1927 | /* keep it alive for ~5x suspend + resume costs */ | ||
| 1928 | isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250); | ||
| 1929 | |||
| 1930 | hcd->self.root_hub->dev.power.power_state = PMSG_ON; | ||
| 1931 | hcd->state = HC_STATE_RUNNING; | ||
| 1932 | return 0; | ||
| 1933 | } | ||
| 1934 | #else | ||
| 1935 | #define isp1362_bus_suspend NULL | ||
| 1936 | #define isp1362_bus_resume NULL | ||
| 1937 | #endif | ||
| 1938 | |||
| 1939 | /*-------------------------------------------------------------------------*/ | ||
| 1940 | |||
| 1941 | #ifdef STUB_DEBUG_FILE | ||
| 1942 | |||
| 1943 | static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd) | ||
| 1944 | { | ||
| 1945 | } | ||
| 1946 | static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd) | ||
| 1947 | { | ||
| 1948 | } | ||
| 1949 | |||
| 1950 | #else | ||
| 1951 | |||
| 1952 | #include <linux/proc_fs.h> | ||
| 1953 | #include <linux/seq_file.h> | ||
| 1954 | |||
| 1955 | static void dump_irq(struct seq_file *s, char *label, u16 mask) | ||
| 1956 | { | ||
| 1957 | seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask, | ||
| 1958 | mask & HCuPINT_CLKRDY ? " clkrdy" : "", | ||
| 1959 | mask & HCuPINT_SUSP ? " susp" : "", | ||
| 1960 | mask & HCuPINT_OPR ? " opr" : "", | ||
| 1961 | mask & HCuPINT_EOT ? " eot" : "", | ||
| 1962 | mask & HCuPINT_ATL ? " atl" : "", | ||
| 1963 | mask & HCuPINT_SOF ? " sof" : ""); | ||
| 1964 | } | ||
| 1965 | |||
| 1966 | static void dump_int(struct seq_file *s, char *label, u32 mask) | ||
| 1967 | { | ||
| 1968 | seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask, | ||
| 1969 | mask & OHCI_INTR_MIE ? " MIE" : "", | ||
| 1970 | mask & OHCI_INTR_RHSC ? " rhsc" : "", | ||
| 1971 | mask & OHCI_INTR_FNO ? " fno" : "", | ||
| 1972 | mask & OHCI_INTR_UE ? " ue" : "", | ||
| 1973 | mask & OHCI_INTR_RD ? " rd" : "", | ||
| 1974 | mask & OHCI_INTR_SF ? " sof" : "", | ||
| 1975 | mask & OHCI_INTR_SO ? " so" : ""); | ||
| 1976 | } | ||
| 1977 | |||
| 1978 | static void dump_ctrl(struct seq_file *s, char *label, u32 mask) | ||
| 1979 | { | ||
| 1980 | seq_printf(s, "%-15s %08x%s%s%s\n", label, mask, | ||
| 1981 | mask & OHCI_CTRL_RWC ? " rwc" : "", | ||
| 1982 | mask & OHCI_CTRL_RWE ? " rwe" : "", | ||
| 1983 | ({ | ||
| 1984 | char *hcfs; | ||
| 1985 | switch (mask & OHCI_CTRL_HCFS) { | ||
| 1986 | case OHCI_USB_OPER: | ||
| 1987 | hcfs = " oper"; | ||
| 1988 | break; | ||
| 1989 | case OHCI_USB_RESET: | ||
| 1990 | hcfs = " reset"; | ||
| 1991 | break; | ||
| 1992 | case OHCI_USB_RESUME: | ||
| 1993 | hcfs = " resume"; | ||
| 1994 | break; | ||
| 1995 | case OHCI_USB_SUSPEND: | ||
| 1996 | hcfs = " suspend"; | ||
| 1997 | break; | ||
| 1998 | default: | ||
| 1999 | hcfs = " ?"; | ||
| 2000 | } | ||
| 2001 | hcfs; | ||
| 2002 | })); | ||
| 2003 | } | ||
| 2004 | |||
| 2005 | static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd) | ||
| 2006 | { | ||
| 2007 | seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION), | ||
| 2008 | isp1362_read_reg32(isp1362_hcd, HCREVISION)); | ||
| 2009 | seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL), | ||
| 2010 | isp1362_read_reg32(isp1362_hcd, HCCONTROL)); | ||
| 2011 | seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT), | ||
| 2012 | isp1362_read_reg32(isp1362_hcd, HCCMDSTAT)); | ||
| 2013 | seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT), | ||
| 2014 | isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); | ||
| 2015 | seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB), | ||
| 2016 | isp1362_read_reg32(isp1362_hcd, HCINTENB)); | ||
| 2017 | seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL), | ||
| 2018 | isp1362_read_reg32(isp1362_hcd, HCFMINTVL)); | ||
| 2019 | seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM), | ||
| 2020 | isp1362_read_reg32(isp1362_hcd, HCFMREM)); | ||
| 2021 | seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM), | ||
| 2022 | isp1362_read_reg32(isp1362_hcd, HCFMNUM)); | ||
| 2023 | seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH), | ||
| 2024 | isp1362_read_reg32(isp1362_hcd, HCLSTHRESH)); | ||
| 2025 | seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA), | ||
| 2026 | isp1362_read_reg32(isp1362_hcd, HCRHDESCA)); | ||
| 2027 | seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB), | ||
| 2028 | isp1362_read_reg32(isp1362_hcd, HCRHDESCB)); | ||
| 2029 | seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS), | ||
| 2030 | isp1362_read_reg32(isp1362_hcd, HCRHSTATUS)); | ||
| 2031 | seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1), | ||
| 2032 | isp1362_read_reg32(isp1362_hcd, HCRHPORT1)); | ||
| 2033 | seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2), | ||
| 2034 | isp1362_read_reg32(isp1362_hcd, HCRHPORT2)); | ||
| 2035 | seq_printf(s, "\n"); | ||
| 2036 | seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG), | ||
| 2037 | isp1362_read_reg16(isp1362_hcd, HCHWCFG)); | ||
| 2038 | seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG), | ||
| 2039 | isp1362_read_reg16(isp1362_hcd, HCDMACFG)); | ||
| 2040 | seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR), | ||
| 2041 | isp1362_read_reg16(isp1362_hcd, HCXFERCTR)); | ||
| 2042 | seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT), | ||
| 2043 | isp1362_read_reg16(isp1362_hcd, HCuPINT)); | ||
| 2044 | seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), | ||
| 2045 | isp1362_read_reg16(isp1362_hcd, HCuPINTENB)); | ||
| 2046 | seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID), | ||
| 2047 | isp1362_read_reg16(isp1362_hcd, HCCHIPID)); | ||
| 2048 | seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH), | ||
| 2049 | isp1362_read_reg16(isp1362_hcd, HCSCRATCH)); | ||
| 2050 | seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT), | ||
| 2051 | isp1362_read_reg16(isp1362_hcd, HCBUFSTAT)); | ||
| 2052 | seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR), | ||
| 2053 | isp1362_read_reg32(isp1362_hcd, HCDIRADDR)); | ||
| 2054 | #if 0 | ||
| 2055 | seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA), | ||
| 2056 | isp1362_read_reg16(isp1362_hcd, HCDIRDATA)); | ||
| 2057 | #endif | ||
| 2058 | seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ), | ||
| 2059 | isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ)); | ||
| 2060 | seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE), | ||
| 2061 | isp1362_read_reg16(isp1362_hcd, HCISTLRATE)); | ||
| 2062 | seq_printf(s, "\n"); | ||
| 2063 | seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ), | ||
| 2064 | isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ)); | ||
| 2065 | seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ), | ||
| 2066 | isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ)); | ||
| 2067 | seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE), | ||
| 2068 | isp1362_read_reg32(isp1362_hcd, HCINTLDONE)); | ||
| 2069 | seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP), | ||
| 2070 | isp1362_read_reg32(isp1362_hcd, HCINTLSKIP)); | ||
| 2071 | seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST), | ||
| 2072 | isp1362_read_reg32(isp1362_hcd, HCINTLLAST)); | ||
| 2073 | seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR), | ||
| 2074 | isp1362_read_reg16(isp1362_hcd, HCINTLCURR)); | ||
| 2075 | seq_printf(s, "\n"); | ||
| 2076 | seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ), | ||
| 2077 | isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ)); | ||
| 2078 | seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ), | ||
| 2079 | isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ)); | ||
| 2080 | #if 0 | ||
| 2081 | seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE), | ||
| 2082 | isp1362_read_reg32(isp1362_hcd, HCATLDONE)); | ||
| 2083 | #endif | ||
| 2084 | seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP), | ||
| 2085 | isp1362_read_reg32(isp1362_hcd, HCATLSKIP)); | ||
| 2086 | seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST), | ||
| 2087 | isp1362_read_reg32(isp1362_hcd, HCATLLAST)); | ||
| 2088 | seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR), | ||
| 2089 | isp1362_read_reg16(isp1362_hcd, HCATLCURR)); | ||
| 2090 | seq_printf(s, "\n"); | ||
| 2091 | seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC), | ||
| 2092 | isp1362_read_reg16(isp1362_hcd, HCATLDTC)); | ||
| 2093 | seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO), | ||
| 2094 | isp1362_read_reg16(isp1362_hcd, HCATLDTCTO)); | ||
| 2095 | } | ||
| 2096 | |||
| 2097 | static int proc_isp1362_show(struct seq_file *s, void *unused) | ||
| 2098 | { | ||
| 2099 | struct isp1362_hcd *isp1362_hcd = s->private; | ||
| 2100 | struct isp1362_ep *ep; | ||
| 2101 | int i; | ||
| 2102 | |||
| 2103 | seq_printf(s, "%s\n%s version %s\n", | ||
| 2104 | isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION); | ||
| 2105 | |||
| 2106 | /* collect statistics to help estimate potential win for | ||
| 2107 | * DMA engines that care about alignment (PXA) | ||
| 2108 | */ | ||
| 2109 | seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n", | ||
| 2110 | isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4, | ||
| 2111 | isp1362_hcd->stat2, isp1362_hcd->stat1); | ||
| 2112 | seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds); | ||
| 2113 | seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds); | ||
| 2114 | seq_printf(s, "max # ptds in ISTL fifo: %d\n", | ||
| 2115 | max(isp1362_hcd->istl_queue[0].stat_maxptds, | ||
| 2116 | isp1362_hcd->istl_queue[1].stat_maxptds)); | ||
| 2117 | |||
| 2118 | /* FIXME: don't show the following in suspended state */ | ||
| 2119 | spin_lock_irq(&isp1362_hcd->lock); | ||
| 2120 | |||
| 2121 | dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB)); | ||
| 2122 | dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT)); | ||
| 2123 | dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB)); | ||
| 2124 | dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); | ||
| 2125 | dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL)); | ||
| 2126 | |||
| 2127 | for (i = 0; i < NUM_ISP1362_IRQS; i++) | ||
| 2128 | if (isp1362_hcd->irq_stat[i]) | ||
| 2129 | seq_printf(s, "%-15s: %d\n", | ||
| 2130 | ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]); | ||
| 2131 | |||
| 2132 | dump_regs(s, isp1362_hcd); | ||
| 2133 | list_for_each_entry(ep, &isp1362_hcd->async, schedule) { | ||
| 2134 | struct urb *urb; | ||
| 2135 | |||
| 2136 | seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum, | ||
| 2137 | ({ | ||
| 2138 | char *s; | ||
| 2139 | switch (ep->nextpid) { | ||
| 2140 | case USB_PID_IN: | ||
| 2141 | s = "in"; | ||
| 2142 | break; | ||
| 2143 | case USB_PID_OUT: | ||
| 2144 | s = "out"; | ||
| 2145 | break; | ||
| 2146 | case USB_PID_SETUP: | ||
| 2147 | s = "setup"; | ||
| 2148 | break; | ||
| 2149 | case USB_PID_ACK: | ||
| 2150 | s = "status"; | ||
| 2151 | break; | ||
| 2152 | default: | ||
| 2153 | s = "?"; | ||
| 2154 | break; | ||
| 2155 | } | ||
| 2156 | s;}), ep->maxpacket); | ||
| 2157 | list_for_each_entry(urb, &ep->hep->urb_list, urb_list) { | ||
| 2158 | seq_printf(s, " urb%p, %d/%d\n", urb, | ||
| 2159 | urb->actual_length, | ||
| 2160 | urb->transfer_buffer_length); | ||
| 2161 | } | ||
| 2162 | } | ||
| 2163 | if (!list_empty(&isp1362_hcd->async)) | ||
| 2164 | seq_printf(s, "\n"); | ||
| 2165 | dump_ptd_queue(&isp1362_hcd->atl_queue); | ||
| 2166 | |||
| 2167 | seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE); | ||
| 2168 | |||
| 2169 | list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { | ||
| 2170 | seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch, | ||
| 2171 | isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset); | ||
| 2172 | |||
| 2173 | seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", | ||
| 2174 | ep->interval, ep, | ||
| 2175 | (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", | ||
| 2176 | ep->udev->devnum, ep->epnum, | ||
| 2177 | (ep->epnum == 0) ? "" : | ||
| 2178 | ((ep->nextpid == USB_PID_IN) ? | ||
| 2179 | "in" : "out"), ep->maxpacket); | ||
| 2180 | } | ||
| 2181 | dump_ptd_queue(&isp1362_hcd->intl_queue); | ||
| 2182 | |||
| 2183 | seq_printf(s, "ISO:\n"); | ||
| 2184 | |||
| 2185 | list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) { | ||
| 2186 | seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", | ||
| 2187 | ep->interval, ep, | ||
| 2188 | (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", | ||
| 2189 | ep->udev->devnum, ep->epnum, | ||
| 2190 | (ep->epnum == 0) ? "" : | ||
| 2191 | ((ep->nextpid == USB_PID_IN) ? | ||
| 2192 | "in" : "out"), ep->maxpacket); | ||
| 2193 | } | ||
| 2194 | |||
| 2195 | spin_unlock_irq(&isp1362_hcd->lock); | ||
| 2196 | seq_printf(s, "\n"); | ||
| 2197 | |||
| 2198 | return 0; | ||
| 2199 | } | ||
| 2200 | |||
| 2201 | static int proc_isp1362_open(struct inode *inode, struct file *file) | ||
| 2202 | { | ||
| 2203 | return single_open(file, proc_isp1362_show, PDE(inode)->data); | ||
| 2204 | } | ||
| 2205 | |||
| 2206 | static const struct file_operations proc_ops = { | ||
| 2207 | .open = proc_isp1362_open, | ||
| 2208 | .read = seq_read, | ||
| 2209 | .llseek = seq_lseek, | ||
| 2210 | .release = single_release, | ||
| 2211 | }; | ||
| 2212 | |||
| 2213 | /* expect just one isp1362_hcd per system */ | ||
| 2214 | static const char proc_filename[] = "driver/isp1362"; | ||
| 2215 | |||
| 2216 | static void create_debug_file(struct isp1362_hcd *isp1362_hcd) | ||
| 2217 | { | ||
| 2218 | struct proc_dir_entry *pde; | ||
| 2219 | |||
| 2220 | pde = create_proc_entry(proc_filename, 0, NULL); | ||
| 2221 | if (pde == NULL) { | ||
| 2222 | pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename); | ||
| 2223 | return; | ||
| 2224 | } | ||
| 2225 | |||
| 2226 | pde->proc_fops = &proc_ops; | ||
| 2227 | pde->data = isp1362_hcd; | ||
| 2228 | isp1362_hcd->pde = pde; | ||
| 2229 | } | ||
| 2230 | |||
| 2231 | static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) | ||
| 2232 | { | ||
| 2233 | if (isp1362_hcd->pde) | ||
| 2234 | remove_proc_entry(proc_filename, 0); | ||
| 2235 | } | ||
| 2236 | |||
| 2237 | #endif | ||
| 2238 | |||
| 2239 | /*-------------------------------------------------------------------------*/ | ||
| 2240 | |||
| 2241 | static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) | ||
| 2242 | { | ||
| 2243 | int tmp = 20; | ||
| 2244 | unsigned long flags; | ||
| 2245 | |||
| 2246 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2247 | |||
| 2248 | isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC); | ||
| 2249 | isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR); | ||
| 2250 | while (--tmp) { | ||
| 2251 | mdelay(1); | ||
| 2252 | if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR)) | ||
| 2253 | break; | ||
| 2254 | } | ||
| 2255 | if (!tmp) | ||
| 2256 | pr_err("Software reset timeout\n"); | ||
| 2257 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2258 | } | ||
| 2259 | |||
| 2260 | static int isp1362_mem_config(struct usb_hcd *hcd) | ||
| 2261 | { | ||
| 2262 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 2263 | unsigned long flags; | ||
| 2264 | u32 total; | ||
| 2265 | u16 istl_size = ISP1362_ISTL_BUFSIZE; | ||
| 2266 | u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE; | ||
| 2267 | u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize; | ||
| 2268 | u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE; | ||
| 2269 | u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize; | ||
| 2270 | u16 atl_size; | ||
| 2271 | int i; | ||
| 2272 | |||
| 2273 | WARN_ON(istl_size & 3); | ||
| 2274 | WARN_ON(atl_blksize & 3); | ||
| 2275 | WARN_ON(intl_blksize & 3); | ||
| 2276 | WARN_ON(atl_blksize < PTD_HEADER_SIZE); | ||
| 2277 | WARN_ON(intl_blksize < PTD_HEADER_SIZE); | ||
| 2278 | |||
| 2279 | BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32); | ||
| 2280 | if (atl_buffers > 32) | ||
| 2281 | atl_buffers = 32; | ||
| 2282 | atl_size = atl_buffers * atl_blksize; | ||
| 2283 | total = atl_size + intl_size + istl_size; | ||
| 2284 | dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); | ||
| 2285 | dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", | ||
| 2286 | istl_size / 2, istl_size, 0, istl_size / 2); | ||
| 2287 | dev_info(hcd->self.controller, " INTL: %4d * (%3u+8): %4d @ $%04x\n", | ||
| 2288 | ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE, | ||
| 2289 | intl_size, istl_size); | ||
| 2290 | dev_info(hcd->self.controller, " ATL : %4d * (%3u+8): %4d @ $%04x\n", | ||
| 2291 | atl_buffers, atl_blksize - PTD_HEADER_SIZE, | ||
| 2292 | atl_size, istl_size + intl_size); | ||
| 2293 | dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, | ||
| 2294 | ISP1362_BUF_SIZE - total); | ||
| 2295 | |||
| 2296 | if (total > ISP1362_BUF_SIZE) { | ||
| 2297 | dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n", | ||
| 2298 | __func__, total, ISP1362_BUF_SIZE); | ||
| 2299 | return -ENOMEM; | ||
| 2300 | } | ||
| 2301 | |||
| 2302 | total = istl_size + intl_size + atl_size; | ||
| 2303 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2304 | |||
| 2305 | for (i = 0; i < 2; i++) { | ||
| 2306 | isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2; | ||
| 2307 | isp1362_hcd->istl_queue[i].buf_size = istl_size / 2; | ||
| 2308 | isp1362_hcd->istl_queue[i].blk_size = 4; | ||
| 2309 | INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active); | ||
| 2310 | snprintf(isp1362_hcd->istl_queue[i].name, | ||
| 2311 | sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i); | ||
| 2312 | DBG(3, "%s: %5s buf $%04x %d\n", __func__, | ||
| 2313 | isp1362_hcd->istl_queue[i].name, | ||
| 2314 | isp1362_hcd->istl_queue[i].buf_start, | ||
| 2315 | isp1362_hcd->istl_queue[i].buf_size); | ||
| 2316 | } | ||
| 2317 | isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2); | ||
| 2318 | |||
| 2319 | isp1362_hcd->intl_queue.buf_start = istl_size; | ||
| 2320 | isp1362_hcd->intl_queue.buf_size = intl_size; | ||
| 2321 | isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS; | ||
| 2322 | isp1362_hcd->intl_queue.blk_size = intl_blksize; | ||
| 2323 | isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count; | ||
| 2324 | isp1362_hcd->intl_queue.skip_map = ~0; | ||
| 2325 | INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active); | ||
| 2326 | |||
| 2327 | isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ, | ||
| 2328 | isp1362_hcd->intl_queue.buf_size); | ||
| 2329 | isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ, | ||
| 2330 | isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE); | ||
| 2331 | isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0); | ||
| 2332 | isp1362_write_reg32(isp1362_hcd, HCINTLLAST, | ||
| 2333 | 1 << (ISP1362_INTL_BUFFERS - 1)); | ||
| 2334 | |||
| 2335 | isp1362_hcd->atl_queue.buf_start = istl_size + intl_size; | ||
| 2336 | isp1362_hcd->atl_queue.buf_size = atl_size; | ||
| 2337 | isp1362_hcd->atl_queue.buf_count = atl_buffers; | ||
| 2338 | isp1362_hcd->atl_queue.blk_size = atl_blksize; | ||
| 2339 | isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count; | ||
| 2340 | isp1362_hcd->atl_queue.skip_map = ~0; | ||
| 2341 | INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active); | ||
| 2342 | |||
| 2343 | isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ, | ||
| 2344 | isp1362_hcd->atl_queue.buf_size); | ||
| 2345 | isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ, | ||
| 2346 | isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE); | ||
| 2347 | isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0); | ||
| 2348 | isp1362_write_reg32(isp1362_hcd, HCATLLAST, | ||
| 2349 | 1 << (atl_buffers - 1)); | ||
| 2350 | |||
| 2351 | snprintf(isp1362_hcd->atl_queue.name, | ||
| 2352 | sizeof(isp1362_hcd->atl_queue.name), "ATL"); | ||
| 2353 | snprintf(isp1362_hcd->intl_queue.name, | ||
| 2354 | sizeof(isp1362_hcd->intl_queue.name), "INTL"); | ||
| 2355 | DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__, | ||
| 2356 | isp1362_hcd->intl_queue.name, | ||
| 2357 | isp1362_hcd->intl_queue.buf_start, | ||
| 2358 | ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size, | ||
| 2359 | isp1362_hcd->intl_queue.buf_size); | ||
| 2360 | DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__, | ||
| 2361 | isp1362_hcd->atl_queue.name, | ||
| 2362 | isp1362_hcd->atl_queue.buf_start, | ||
| 2363 | atl_buffers, isp1362_hcd->atl_queue.blk_size, | ||
| 2364 | isp1362_hcd->atl_queue.buf_size); | ||
| 2365 | |||
| 2366 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2367 | |||
| 2368 | return 0; | ||
| 2369 | } | ||
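For reference, a worked sizing sketch of the split isp1362_mem_config() computes, using the default constants from isp1362.h further down in this patch (ISP1362_BUF_SIZE 4096, ISP1362_ISTL_BUFSIZE 512, ISP1362_INTL_BLKSIZE/ISP1362_ATL_BLKSIZE 64, ISP1362_INTL_BUFFERS 16, PTD_HEADER_SIZE == sizeof(struct ptd) == 8). The helper below is a standalone illustration, not driver code:

	/* Standalone sketch: recompute the on-chip buffer partitioning */
	#include <stdio.h>

	#define BUF_SIZE     4096	/* ISP1362_BUF_SIZE */
	#define ISTL_BUFSIZE  512	/* ISP1362_ISTL_BUFSIZE (two 256-byte halves) */
	#define INTL_BLKSIZE   64	/* ISP1362_INTL_BLKSIZE */
	#define INTL_BUFFERS   16	/* ISP1362_INTL_BUFFERS */
	#define ATL_BLKSIZE    64	/* ISP1362_ATL_BLKSIZE */
	#define PTD_HDR         8	/* PTD_HEADER_SIZE */

	int main(void)
	{
		unsigned int intl_blk  = INTL_BLKSIZE + PTD_HDR;	/* 72 */
		unsigned int intl_size = INTL_BUFFERS * intl_blk;	/* 1152 */
		unsigned int atl_blk   = ATL_BLKSIZE + PTD_HDR;		/* 72 */
		unsigned int atl_bufs  =
			(BUF_SIZE - (ISTL_BUFSIZE + intl_size)) / atl_blk;	/* 33 */

		if (atl_bufs > 32)	/* HCATLSKIP/HCATLLAST hold one bit per buffer */
			atl_bufs = 32;
		printf("ISTL %u  INTL %u  ATL %u  free %u\n",
		       ISTL_BUFSIZE, intl_size, atl_bufs * atl_blk,
		       BUF_SIZE - (ISTL_BUFSIZE + intl_size + atl_bufs * atl_blk));
		/* prints: ISTL 512  INTL 1152  ATL 2304  free 128 */
		return 0;
	}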
| 2370 | |||
| 2371 | static int isp1362_hc_reset(struct usb_hcd *hcd) | ||
| 2372 | { | ||
| 2373 | int ret = 0; | ||
| 2374 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 2375 | unsigned long t; | ||
| 2376 | unsigned long timeout = 100; | ||
| 2377 | unsigned long flags; | ||
| 2378 | int clkrdy = 0; | ||
| 2379 | |||
| 2380 | pr_info("%s:\n", __func__); | ||
| 2381 | |||
| 2382 | if (isp1362_hcd->board && isp1362_hcd->board->reset) { | ||
| 2383 | isp1362_hcd->board->reset(hcd->self.controller, 1); | ||
| 2384 | msleep(20); | ||
| 2385 | if (isp1362_hcd->board->clock) | ||
| 2386 | isp1362_hcd->board->clock(hcd->self.controller, 1); | ||
| 2387 | isp1362_hcd->board->reset(hcd->self.controller, 0); | ||
| 2388 | } else | ||
| 2389 | isp1362_sw_reset(isp1362_hcd); | ||
| 2390 | |||
| 2391 | /* chip has been reset. First we need to see a clock */ | ||
| 2392 | t = jiffies + msecs_to_jiffies(timeout); | ||
| 2393 | while (!clkrdy && time_before_eq(jiffies, t)) { | ||
| 2394 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2395 | clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY; | ||
| 2396 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2397 | if (!clkrdy) | ||
| 2398 | msleep(4); | ||
| 2399 | } | ||
| 2400 | |||
| 2401 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2402 | isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY); | ||
| 2403 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2404 | if (!clkrdy) { | ||
| 2405 | pr_err("Clock not ready after %lums\n", timeout); | ||
| 2406 | ret = -ENODEV; | ||
| 2407 | } | ||
| 2408 | return ret; | ||
| 2409 | } | ||
| 2410 | |||
| 2411 | static void isp1362_hc_stop(struct usb_hcd *hcd) | ||
| 2412 | { | ||
| 2413 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 2414 | unsigned long flags; | ||
| 2415 | u32 tmp; | ||
| 2416 | |||
| 2417 | pr_info("%s:\n", __func__); | ||
| 2418 | |||
| 2419 | del_timer_sync(&hcd->rh_timer); | ||
| 2420 | |||
| 2421 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2422 | |||
| 2423 | isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); | ||
| 2424 | |||
| 2425 | /* Switch off power for all ports */ | ||
| 2426 | tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA); | ||
| 2427 | tmp &= ~(RH_A_NPS | RH_A_PSM); | ||
| 2428 | isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp); | ||
| 2429 | isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS); | ||
| 2430 | |||
| 2431 | /* Reset the chip */ | ||
| 2432 | if (isp1362_hcd->board && isp1362_hcd->board->reset) | ||
| 2433 | isp1362_hcd->board->reset(hcd->self.controller, 1); | ||
| 2434 | else | ||
| 2435 | isp1362_sw_reset(isp1362_hcd); | ||
| 2436 | |||
| 2437 | if (isp1362_hcd->board && isp1362_hcd->board->clock) | ||
| 2438 | isp1362_hcd->board->clock(hcd->self.controller, 0); | ||
| 2439 | |||
| 2440 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2441 | } | ||
| 2442 | |||
| 2443 | #ifdef CHIP_BUFFER_TEST | ||
| 2444 | static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd) | ||
| 2445 | { | ||
| 2446 | int ret = 0; | ||
| 2447 | u16 *ref; | ||
| 2448 | unsigned long flags; | ||
| 2449 | |||
| 2450 | ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL); | ||
| 2451 | if (ref) { | ||
| 2452 | int offset; | ||
| 2453 | u16 *tst = &ref[ISP1362_BUF_SIZE / 2]; | ||
| 2454 | |||
| 2455 | for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) { | ||
| 2456 | ref[offset] = ~offset; | ||
| 2457 | tst[offset] = offset; | ||
| 2458 | } | ||
| 2459 | |||
| 2460 | for (offset = 0; offset < 4; offset++) { | ||
| 2461 | int j; | ||
| 2462 | |||
| 2463 | for (j = 0; j < 8; j++) { | ||
| 2464 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2465 | isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j); | ||
| 2466 | isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j); | ||
| 2467 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2468 | |||
| 2469 | if (memcmp(ref, tst, j)) { | ||
| 2470 | ret = -ENODEV; | ||
| 2471 | pr_err("%s: memory check with %d byte offset %d failed\n", | ||
| 2472 | __func__, j, offset); | ||
| 2473 | dump_data((u8 *)ref + offset, j); | ||
| 2474 | dump_data((u8 *)tst + offset, j); | ||
| 2475 | } | ||
| 2476 | } | ||
| 2477 | } | ||
| 2478 | |||
| 2479 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2480 | isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE); | ||
| 2481 | isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); | ||
| 2482 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2483 | |||
| 2484 | if (memcmp(ref, tst, ISP1362_BUF_SIZE)) { | ||
| 2485 | ret = -ENODEV; | ||
| 2486 | pr_err("%s: memory check failed\n", __func__); | ||
| 2487 | dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2); | ||
| 2488 | } | ||
| 2489 | |||
| 2490 | for (offset = 0; offset < 256; offset++) { | ||
| 2491 | int test_size = 0; | ||
| 2492 | |||
| 2493 | yield(); | ||
| 2494 | |||
| 2495 | memset(tst, 0, ISP1362_BUF_SIZE); | ||
| 2496 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2497 | isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); | ||
| 2498 | isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); | ||
| 2499 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2500 | if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))), | ||
| 2501 | ISP1362_BUF_SIZE / 2)) { | ||
| 2502 | pr_err("%s: Failed to clear buffer\n", __func__); | ||
| 2503 | dump_data((u8 *)tst, ISP1362_BUF_SIZE); | ||
| 2504 | break; | ||
| 2505 | } | ||
| 2506 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2507 | isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE); | ||
| 2508 | isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref), | ||
| 2509 | offset * 2 + PTD_HEADER_SIZE, test_size); | ||
| 2510 | isp1362_read_buffer(isp1362_hcd, tst, offset * 2, | ||
| 2511 | PTD_HEADER_SIZE + test_size); | ||
| 2512 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2513 | if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) { | ||
| 2514 | dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size); | ||
| 2515 | dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size); | ||
| 2516 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2517 | isp1362_read_buffer(isp1362_hcd, tst, offset * 2, | ||
| 2518 | PTD_HEADER_SIZE + test_size); | ||
| 2519 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2520 | if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) { | ||
| 2521 | ret = -ENODEV; | ||
| 2522 | pr_err("%s: memory check with offset %02x failed\n", | ||
| 2523 | __func__, offset); | ||
| 2524 | break; | ||
| 2525 | } | ||
| 2526 | pr_warning("%s: memory check with offset %02x ok after second read\n", | ||
| 2527 | __func__, offset); | ||
| 2528 | } | ||
| 2529 | } | ||
| 2530 | kfree(ref); | ||
| 2531 | } | ||
| 2532 | return ret; | ||
| 2533 | } | ||
| 2534 | #endif | ||
| 2535 | |||
| 2536 | static int isp1362_hc_start(struct usb_hcd *hcd) | ||
| 2537 | { | ||
| 2538 | int ret; | ||
| 2539 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 2540 | struct isp1362_platform_data *board = isp1362_hcd->board; | ||
| 2541 | u16 hwcfg; | ||
| 2542 | u16 chipid; | ||
| 2543 | unsigned long flags; | ||
| 2544 | |||
| 2545 | pr_info("%s:\n", __func__); | ||
| 2546 | |||
| 2547 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2548 | chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID); | ||
| 2549 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2550 | |||
| 2551 | if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) { | ||
| 2552 | pr_err("%s: Invalid chip ID %04x\n", __func__, chipid); | ||
| 2553 | return -ENODEV; | ||
| 2554 | } | ||
| 2555 | |||
| 2556 | #ifdef CHIP_BUFFER_TEST | ||
| 2557 | ret = isp1362_chip_test(isp1362_hcd); | ||
| 2558 | if (ret) | ||
| 2559 | return -ENODEV; | ||
| 2560 | #endif | ||
| 2561 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2562 | /* clear interrupt status and disable all interrupt sources */ | ||
| 2563 | isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff); | ||
| 2564 | isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); | ||
| 2565 | |||
| 2566 | /* HW conf */ | ||
| 2567 | hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1); | ||
| 2568 | if (board->sel15Kres) | ||
| 2569 | hwcfg |= HCHWCFG_PULLDOWN_DS2 | | ||
| 2570 | ((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0); | ||
| 2571 | if (board->clknotstop) | ||
| 2572 | hwcfg |= HCHWCFG_CLKNOTSTOP; | ||
| 2573 | if (board->oc_enable) | ||
| 2574 | hwcfg |= HCHWCFG_ANALOG_OC; | ||
| 2575 | if (board->int_act_high) | ||
| 2576 | hwcfg |= HCHWCFG_INT_POL; | ||
| 2577 | if (board->int_edge_triggered) | ||
| 2578 | hwcfg |= HCHWCFG_INT_TRIGGER; | ||
| 2579 | if (board->dreq_act_high) | ||
| 2580 | hwcfg |= HCHWCFG_DREQ_POL; | ||
| 2581 | if (board->dack_act_high) | ||
| 2582 | hwcfg |= HCHWCFG_DACK_POL; | ||
| 2583 | isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg); | ||
| 2584 | isp1362_show_reg(isp1362_hcd, HCHWCFG); | ||
| 2585 | isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0); | ||
| 2586 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2587 | |||
| 2588 | ret = isp1362_mem_config(hcd); | ||
| 2589 | if (ret) | ||
| 2590 | return ret; | ||
| 2591 | |||
| 2592 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2593 | |||
| 2594 | /* Root hub conf */ | ||
| 2595 | isp1362_hcd->rhdesca = 0; | ||
| 2596 | if (board->no_power_switching) | ||
| 2597 | isp1362_hcd->rhdesca |= RH_A_NPS; | ||
| 2598 | if (board->power_switching_mode) | ||
| 2599 | isp1362_hcd->rhdesca |= RH_A_PSM; | ||
| 2600 | if (board->potpg) | ||
| 2601 | isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT; | ||
| 2602 | else | ||
| 2603 | isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT; | ||
| 2604 | |||
| 2605 | isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM); | ||
| 2606 | isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM); | ||
| 2607 | isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA); | ||
| 2608 | |||
| 2609 | isp1362_hcd->rhdescb = RH_B_PPCM; | ||
| 2610 | isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb); | ||
| 2611 | isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB); | ||
| 2612 | |||
| 2613 | isp1362_read_reg32(isp1362_hcd, HCFMINTVL); | ||
| 2614 | isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI); | ||
| 2615 | isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH); | ||
| 2616 | |||
| 2617 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2618 | |||
| 2619 | isp1362_hcd->hc_control = OHCI_USB_OPER; | ||
| 2620 | hcd->state = HC_STATE_RUNNING; | ||
| 2621 | |||
| 2622 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2623 | /* Set up interrupts */ | ||
| 2624 | isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE; | ||
| 2625 | isp1362_hcd->intenb |= OHCI_INTR_RD; | ||
| 2626 | isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP; | ||
| 2627 | isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb); | ||
| 2628 | isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); | ||
| 2629 | |||
| 2630 | /* Go operational */ | ||
| 2631 | isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); | ||
| 2632 | /* enable global power */ | ||
| 2633 | isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE); | ||
| 2634 | |||
| 2635 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2636 | |||
| 2637 | return 0; | ||
| 2638 | } | ||
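To show where the HCHWCFG and root-hub bits above come from, here is a hypothetical isp1362_platform_data initializer; the field names are the ones consumed by isp1362_hc_start()/isp1362_hc_reset() above, but the struct layout lives in the platform header and every value below is only an example:

	/* Hypothetical board description -- example values only */
	static struct isp1362_platform_data example_isp1362_data = {
		.sel15Kres		= 1,	/* internal 15K pull-downs -> HCHWCFG_PULLDOWN_DS1/DS2 */
		.clknotstop		= 0,	/* clock may stop during suspend */
		.oc_enable		= 1,	/* analog overcurrent sensing -> HCHWCFG_ANALOG_OC */
		.int_act_high		= 0,	/* INT pin active low -> HCHWCFG_INT_POL clear */
		.int_edge_triggered	= 0,	/* level-triggered interrupt */
		.no_power_switching	= 0,	/* root hub switches port power (RH_A_NPS clear) */
		.power_switching_mode	= 0,	/* ganged switching (RH_A_PSM clear) */
		.potpg			= 10,	/* written into the RH_A_POTPGT field of HCRHDESCA */
		/* optional hooks: .reset(), .clock() and, with USE_PLATFORM_DELAY, .delay() */
	};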
| 2639 | |||
| 2640 | /*-------------------------------------------------------------------------*/ | ||
| 2641 | |||
| 2642 | static struct hc_driver isp1362_hc_driver = { | ||
| 2643 | .description = hcd_name, | ||
| 2644 | .product_desc = "ISP1362 Host Controller", | ||
| 2645 | .hcd_priv_size = sizeof(struct isp1362_hcd), | ||
| 2646 | |||
| 2647 | .irq = isp1362_irq, | ||
| 2648 | .flags = HCD_USB11 | HCD_MEMORY, | ||
| 2649 | |||
| 2650 | .reset = isp1362_hc_reset, | ||
| 2651 | .start = isp1362_hc_start, | ||
| 2652 | .stop = isp1362_hc_stop, | ||
| 2653 | |||
| 2654 | .urb_enqueue = isp1362_urb_enqueue, | ||
| 2655 | .urb_dequeue = isp1362_urb_dequeue, | ||
| 2656 | .endpoint_disable = isp1362_endpoint_disable, | ||
| 2657 | |||
| 2658 | .get_frame_number = isp1362_get_frame, | ||
| 2659 | |||
| 2660 | .hub_status_data = isp1362_hub_status_data, | ||
| 2661 | .hub_control = isp1362_hub_control, | ||
| 2662 | .bus_suspend = isp1362_bus_suspend, | ||
| 2663 | .bus_resume = isp1362_bus_resume, | ||
| 2664 | }; | ||
| 2665 | |||
| 2666 | /*-------------------------------------------------------------------------*/ | ||
| 2667 | |||
| 2668 | #define resource_len(r) (((r)->end - (r)->start) + 1) | ||
| 2669 | |||
| 2670 | static int __devexit isp1362_remove(struct platform_device *pdev) | ||
| 2671 | { | ||
| 2672 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||
| 2673 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 2674 | struct resource *res; | ||
| 2675 | |||
| 2676 | remove_debug_file(isp1362_hcd); | ||
| 2677 | DBG(0, "%s: Removing HCD\n", __func__); | ||
| 2678 | usb_remove_hcd(hcd); | ||
| 2679 | |||
| 2680 | DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, | ||
| 2681 | (u32)isp1362_hcd->data_reg); | ||
| 2682 | iounmap(isp1362_hcd->data_reg); | ||
| 2683 | |||
| 2684 | DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, | ||
| 2685 | (u32)isp1362_hcd->addr_reg); | ||
| 2686 | iounmap(isp1362_hcd->addr_reg); | ||
| 2687 | |||
| 2688 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 2689 | if (res) { | ||
| 2690 | DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start); | ||
| 2691 | release_mem_region(res->start, resource_len(res)); | ||
| 2692 | } | ||
| 2693 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 2694 | if (res) { | ||
| 2695 | DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start); | ||
| 2696 | release_mem_region(res->start, resource_len(res)); | ||
| 2697 | } | ||
| 2698 | DBG(0, "%s: put_hcd\n", __func__); | ||
| 2699 | usb_put_hcd(hcd); | ||
| 2700 | DBG(0, "%s: Done\n", __func__); | ||
| 2701 | |||
| 2702 | return 0; | ||
| 2703 | } | ||
| 2704 | |||
| 2705 | static int __init isp1362_probe(struct platform_device *pdev) | ||
| 2706 | { | ||
| 2707 | struct usb_hcd *hcd; | ||
| 2708 | struct isp1362_hcd *isp1362_hcd; | ||
| 2709 | struct resource *addr, *data; | ||
| 2710 | void __iomem *addr_reg; | ||
| 2711 | void __iomem *data_reg; | ||
| 2712 | int irq; | ||
| 2713 | int retval = 0; | ||
| 2714 | |||
| 2715 | /* basic sanity checks first. board-specific init logic should | ||
| 2716 | * have initialized these three resources and probably board- | ||
| 2717 | * specific platform_data. we don't probe for IRQs and do only | ||
| 2718 | * minimal sanity checking. | ||
| 2719 | */ | ||
| 2720 | if (pdev->num_resources < 3) { | ||
| 2721 | retval = -ENODEV; | ||
| 2722 | goto err1; | ||
| 2723 | } | ||
| 2724 | |||
| 2725 | data = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 2726 | addr = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 2727 | irq = platform_get_irq(pdev, 0); | ||
| 2728 | if (!addr || !data || irq < 0) { | ||
| 2729 | retval = -ENODEV; | ||
| 2730 | goto err1; | ||
| 2731 | } | ||
| 2732 | |||
| 2733 | #ifdef CONFIG_USB_HCD_DMA | ||
| 2734 | if (pdev->dev.dma_mask) { | ||
| 2735 | struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
| 2736 | |||
| 2737 | if (!dma_res) { | ||
| 2738 | retval = -ENODEV; | ||
| 2739 | goto err1; | ||
| 2740 | } | ||
| 2741 | isp1362_hcd->data_dma = dma_res->start; | ||
| 2742 | isp1362_hcd->max_dma_size = resource_len(dma_res); | ||
| 2743 | } | ||
| 2744 | #else | ||
| 2745 | if (pdev->dev.dma_mask) { | ||
| 2746 | DBG(1, "won't do DMA"); | ||
| 2747 | retval = -ENODEV; | ||
| 2748 | goto err1; | ||
| 2749 | } | ||
| 2750 | #endif | ||
| 2751 | |||
| 2752 | if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { | ||
| 2753 | retval = -EBUSY; | ||
| 2754 | goto err1; | ||
| 2755 | } | ||
| 2756 | addr_reg = ioremap(addr->start, resource_len(addr)); | ||
| 2757 | if (addr_reg == NULL) { | ||
| 2758 | retval = -ENOMEM; | ||
| 2759 | goto err2; | ||
| 2760 | } | ||
| 2761 | |||
| 2762 | if (!request_mem_region(data->start, resource_len(data), hcd_name)) { | ||
| 2763 | retval = -EBUSY; | ||
| 2764 | goto err3; | ||
| 2765 | } | ||
| 2766 | data_reg = ioremap(data->start, resource_len(data)); | ||
| 2767 | if (data_reg == NULL) { | ||
| 2768 | retval = -ENOMEM; | ||
| 2769 | goto err4; | ||
| 2770 | } | ||
| 2771 | |||
| 2772 | /* allocate and initialize hcd */ | ||
| 2773 | hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev)); | ||
| 2774 | if (!hcd) { | ||
| 2775 | retval = -ENOMEM; | ||
| 2776 | goto err5; | ||
| 2777 | } | ||
| 2778 | hcd->rsrc_start = data->start; | ||
| 2779 | isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 2780 | isp1362_hcd->data_reg = data_reg; | ||
| 2781 | isp1362_hcd->addr_reg = addr_reg; | ||
| 2782 | |||
| 2783 | isp1362_hcd->next_statechange = jiffies; | ||
| 2784 | spin_lock_init(&isp1362_hcd->lock); | ||
| 2785 | INIT_LIST_HEAD(&isp1362_hcd->async); | ||
| 2786 | INIT_LIST_HEAD(&isp1362_hcd->periodic); | ||
| 2787 | INIT_LIST_HEAD(&isp1362_hcd->isoc); | ||
| 2788 | INIT_LIST_HEAD(&isp1362_hcd->remove_list); | ||
| 2789 | isp1362_hcd->board = pdev->dev.platform_data; | ||
| 2790 | #if USE_PLATFORM_DELAY | ||
| 2791 | if (!isp1362_hcd->board->delay) { | ||
| 2792 | dev_err(hcd->self.controller, "No platform delay function given\n"); | ||
| 2793 | retval = -ENODEV; | ||
| 2794 | goto err6; | ||
| 2795 | } | ||
| 2796 | #endif | ||
| 2797 | |||
| 2798 | #ifdef CONFIG_ARM | ||
| 2799 | if (isp1362_hcd->board) | ||
| 2800 | set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING); | ||
| 2801 | #endif | ||
| 2802 | |||
| 2803 | retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED); | ||
| 2804 | if (retval != 0) | ||
| 2805 | goto err6; | ||
| 2806 | pr_info("%s, irq %d\n", hcd->product_desc, irq); | ||
| 2807 | |||
| 2808 | create_debug_file(isp1362_hcd); | ||
| 2809 | |||
| 2810 | return 0; | ||
| 2811 | |||
| 2812 | err6: | ||
| 2813 | DBG(0, "%s: Freeing dev %08x\n", __func__, (u32)isp1362_hcd); | ||
| 2814 | usb_put_hcd(hcd); | ||
| 2815 | err5: | ||
| 2816 | DBG(0, "%s: Unmapping data_reg @ %08x\n", __func__, (u32)data_reg); | ||
| 2817 | iounmap(data_reg); | ||
| 2818 | err4: | ||
| 2819 | DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start); | ||
| 2820 | release_mem_region(data->start, resource_len(data)); | ||
| 2821 | err3: | ||
| 2822 | DBG(0, "%s: Unmapping addr_reg @ %08x\n", __func__, (u32)addr_reg); | ||
| 2823 | iounmap(addr_reg); | ||
| 2824 | err2: | ||
| 2825 | DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start); | ||
| 2826 | release_mem_region(addr->start, resource_len(addr)); | ||
| 2827 | err1: | ||
| 2828 | pr_err("%s: init error, %d\n", __func__, retval); | ||
| 2829 | |||
| 2830 | return retval; | ||
| 2831 | } | ||
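The resource ordering that isp1362_probe() expects is fixed: MEM resource 0 is the data port, MEM resource 1 the address/command port, and IRQ resource 0 the interrupt line. A hypothetical board file could therefore register the device roughly as below; the base addresses and IRQ number are placeholders, and the device name is assumed to match the driver's hcd_name (defined earlier in isp1362-hcd.c, not visible in this hunk):

	/* Hypothetical registration -- addresses, IRQ and name are placeholders */
	static struct resource example_isp1362_resources[] = {
		[0] = {	/* data register window (MEM 0) */
			.start	= 0x08000000,
			.end	= 0x08000001,
			.flags	= IORESOURCE_MEM,
		},
		[1] = {	/* address/command register window (MEM 1) */
			.start	= 0x08000002,
			.end	= 0x08000003,
			.flags	= IORESOURCE_MEM,
		},
		[2] = {	/* interrupt line (IRQ 0) */
			.start	= 13,
			.end	= 13,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_isp1362_device = {
		.name		= "isp1362-hcd",	/* assumed; must match hcd_name */
		.id		= -1,
		.dev		= {
			.platform_data = &example_isp1362_data,	/* see the sketch above */
		},
		.num_resources	= ARRAY_SIZE(example_isp1362_resources),
		.resource	= example_isp1362_resources,
	};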
| 2832 | |||
| 2833 | #ifdef CONFIG_PM | ||
| 2834 | static int isp1362_suspend(struct platform_device *pdev, pm_message_t state) | ||
| 2835 | { | ||
| 2836 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||
| 2837 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 2838 | unsigned long flags; | ||
| 2839 | int retval = 0; | ||
| 2840 | |||
| 2841 | DBG(0, "%s: Suspending device\n", __func__); | ||
| 2842 | |||
| 2843 | if (state.event == PM_EVENT_FREEZE) { | ||
| 2844 | DBG(0, "%s: Suspending root hub\n", __func__); | ||
| 2845 | retval = isp1362_bus_suspend(hcd); | ||
| 2846 | } else { | ||
| 2847 | DBG(0, "%s: Suspending RH ports\n", __func__); | ||
| 2848 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2849 | isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS); | ||
| 2850 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2851 | } | ||
| 2852 | if (retval == 0) | ||
| 2853 | pdev->dev.power.power_state = state; | ||
| 2854 | return retval; | ||
| 2855 | } | ||
| 2856 | |||
| 2857 | static int isp1362_resume(struct platform_device *pdev) | ||
| 2858 | { | ||
| 2859 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||
| 2860 | struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); | ||
| 2861 | unsigned long flags; | ||
| 2862 | |||
| 2863 | DBG(0, "%s: Resuming\n", __func__); | ||
| 2864 | |||
| 2865 | if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { | ||
| 2866 | DBG(0, "%s: Resume RH ports\n", __func__); | ||
| 2867 | spin_lock_irqsave(&isp1362_hcd->lock, flags); | ||
| 2868 | isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC); | ||
| 2869 | spin_unlock_irqrestore(&isp1362_hcd->lock, flags); | ||
| 2870 | return 0; | ||
| 2871 | } | ||
| 2872 | |||
| 2873 | pdev->dev.power.power_state = PMSG_ON; | ||
| 2874 | |||
| 2875 | return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd)); | ||
| 2876 | } | ||
| 2877 | #else | ||
| 2878 | #define isp1362_suspend NULL | ||
| 2879 | #define isp1362_resume NULL | ||
| 2880 | #endif | ||
| 2881 | |||
| 2882 | static struct platform_driver isp1362_driver = { | ||
| 2883 | .probe = isp1362_probe, | ||
| 2884 | .remove = __devexit_p(isp1362_remove), | ||
| 2885 | |||
| 2886 | .suspend = isp1362_suspend, | ||
| 2887 | .resume = isp1362_resume, | ||
| 2888 | .driver = { | ||
| 2889 | .name = (char *)hcd_name, | ||
| 2890 | .owner = THIS_MODULE, | ||
| 2891 | }, | ||
| 2892 | }; | ||
| 2893 | |||
| 2894 | /*-------------------------------------------------------------------------*/ | ||
| 2895 | |||
| 2896 | static int __init isp1362_init(void) | ||
| 2897 | { | ||
| 2898 | if (usb_disabled()) | ||
| 2899 | return -ENODEV; | ||
| 2900 | pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION); | ||
| 2901 | return platform_driver_register(&isp1362_driver); | ||
| 2902 | } | ||
| 2903 | module_init(isp1362_init); | ||
| 2904 | |||
| 2905 | static void __exit isp1362_cleanup(void) | ||
| 2906 | { | ||
| 2907 | platform_driver_unregister(&isp1362_driver); | ||
| 2908 | } | ||
| 2909 | module_exit(isp1362_cleanup); | ||
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h new file mode 100644 index 000000000000..fe60f62a32f3 --- /dev/null +++ b/drivers/usb/host/isp1362.h | |||
| @@ -0,0 +1,1079 @@ | |||
| 1 | /* | ||
| 2 | * ISP1362 HCD (Host Controller Driver) for USB. | ||
| 3 | * | ||
| 4 | * COPYRIGHT (C) by L. Wassmann <LW@KARO-electronics.de> | ||
| 5 | */ | ||
| 6 | |||
| 7 | /* ------------------------------------------------------------------------- */ | ||
| 8 | /* | ||
| 9 | * Platform specific compile time options | ||
| 10 | */ | ||
| 11 | #if defined(CONFIG_ARCH_KARO) | ||
| 12 | #include <asm/arch/hardware.h> | ||
| 13 | #include <asm/arch/pxa-regs.h> | ||
| 14 | #include <asm/arch/karo.h> | ||
| 15 | |||
| 16 | #define USE_32BIT 1 | ||
| 17 | |||
| 18 | |||
| 19 | /* These options are mutually exclusive */ | ||
| 20 | #define USE_PLATFORM_DELAY 1 | ||
| 21 | #define USE_NDELAY 0 | ||
| 22 | /* | ||
| 23 | * MAX_ROOT_PORTS: Number of downstream ports | ||
| 24 | * | ||
| 25 | * The chip has two USB ports, one of which can be configured as | ||
| 26 | * a USB device port, so the value of this constant is implementation | ||
| 27 | * specific. | ||
| 28 | */ | ||
| 29 | #define MAX_ROOT_PORTS 2 | ||
| 30 | #define DUMMY_DELAY_ACCESS do {} while (0) | ||
| 31 | |||
| 32 | /* insert platform specific definitions for other machines here */ | ||
| 33 | #elif defined(CONFIG_BLACKFIN) | ||
| 34 | |||
| 35 | #include <linux/io.h> | ||
| 36 | #define USE_32BIT 0 | ||
| 37 | #define MAX_ROOT_PORTS 2 | ||
| 38 | #define USE_PLATFORM_DELAY 0 | ||
| 39 | #define USE_NDELAY 1 | ||
| 40 | |||
| 41 | #define DUMMY_DELAY_ACCESS \ | ||
| 42 | do { \ | ||
| 43 | bfin_read16(ASYNC_BANK0_BASE); \ | ||
| 44 | bfin_read16(ASYNC_BANK0_BASE); \ | ||
| 45 | bfin_read16(ASYNC_BANK0_BASE); \ | ||
| 46 | } while (0) | ||
| 47 | |||
| 48 | #undef insw | ||
| 49 | #undef outsw | ||
| 50 | |||
| 51 | #define insw delayed_insw | ||
| 52 | #define outsw delayed_outsw | ||
| 53 | |||
| 54 | static inline void delayed_outsw(unsigned int addr, void *buf, int len) | ||
| 55 | { | ||
| 56 | unsigned short *bp = (unsigned short *)buf; | ||
| 57 | while (len--) { | ||
| 58 | DUMMY_DELAY_ACCESS; | ||
| 59 | outw(*bp++, addr); | ||
| 60 | } | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline void delayed_insw(unsigned int addr, void *buf, int len) | ||
| 64 | { | ||
| 65 | unsigned short *bp = (unsigned short *)buf; | ||
| 66 | while (len--) { | ||
| 67 | DUMMY_DELAY_ACCESS; | ||
| 68 | *bp++ = inw((void *)addr); | ||
| 69 | } | ||
| 70 | } | ||
| 71 | |||
| 72 | #else | ||
| 73 | |||
| 74 | #define MAX_ROOT_PORTS 2 | ||
| 75 | |||
| 76 | #define USE_32BIT 0 | ||
| 77 | |||
| 78 | /* These options are mutually exclusive */ | ||
| 79 | #define USE_PLATFORM_DELAY 0 | ||
| 80 | #define USE_NDELAY 0 | ||
| 81 | |||
| 82 | #define DUMMY_DELAY_ACCESS do {} while (0) | ||
| 83 | |||
| 84 | #endif | ||
| 85 | |||
| 86 | |||
| 87 | /* ------------------------------------------------------------------------- */ | ||
| 88 | |||
| 89 | #define USB_RESET_WIDTH 50 | ||
| 90 | #define MAX_XFER_SIZE 1023 | ||
| 91 | |||
| 92 | /* Buffer sizes */ | ||
| 93 | #define ISP1362_BUF_SIZE 4096 | ||
| 94 | #define ISP1362_ISTL_BUFSIZE 512 | ||
| 95 | #define ISP1362_INTL_BLKSIZE 64 | ||
| 96 | #define ISP1362_INTL_BUFFERS 16 | ||
| 97 | #define ISP1362_ATL_BLKSIZE 64 | ||
| 98 | |||
| 99 | #define ISP1362_REG_WRITE_OFFSET 0x80 | ||
| 100 | |||
| 101 | #ifdef ISP1362_DEBUG | ||
| 102 | typedef const unsigned int isp1362_reg_t; | ||
| 103 | |||
| 104 | #define REG_WIDTH_16 0x000 | ||
| 105 | #define REG_WIDTH_32 0x100 | ||
| 106 | #define REG_WIDTH_MASK 0x100 | ||
| 107 | #define REG_NO_MASK 0x0ff | ||
| 108 | |||
| 109 | #define REG_ACCESS_R 0x200 | ||
| 110 | #define REG_ACCESS_W 0x400 | ||
| 111 | #define REG_ACCESS_RW 0x600 | ||
| 112 | #define REG_ACCESS_MASK 0x600 | ||
| 113 | |||
| 114 | #define ISP1362_REG_NO(r) ((r) & REG_NO_MASK) | ||
| 115 | |||
| 116 | #define _BUG_ON(x) BUG_ON(x) | ||
| 117 | #define _WARN_ON(x) WARN_ON(x) | ||
| 118 | |||
| 119 | #define ISP1362_REG(name, addr, width, rw) \ | ||
| 120 | static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw)) | ||
| 121 | |||
| 122 | #define REG_ACCESS_TEST(r) BUG_ON(((r) & ISP1362_REG_WRITE_OFFSET) && !((r) & REG_ACCESS_W)) | ||
| 123 | #define REG_WIDTH_TEST(r, w) BUG_ON(((r) & REG_WIDTH_MASK) != (w)) | ||
| 124 | #else | ||
| 125 | typedef const unsigned char isp1362_reg_t; | ||
| 126 | #define ISP1362_REG_NO(r) (r) | ||
| 127 | #define _BUG_ON(x) do {} while (0) | ||
| 128 | #define _WARN_ON(x) do {} while (0) | ||
| 129 | |||
| 130 | #define ISP1362_REG(name, addr, width, rw) \ | ||
| 131 | static isp1362_reg_t ISP1362_REG_##name = addr | ||
| 132 | |||
| 133 | #define REG_ACCESS_TEST(r) do {} while (0) | ||
| 134 | #define REG_WIDTH_TEST(r, w) do {} while (0) | ||
| 135 | #endif | ||
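A quick illustration of the debug encoding above: with ISP1362_DEBUG defined, each register constant packs its index, bus width and access rights into one integer so the accessors can check themselves at run time; without it, the constant collapses to the bare register number and the checks compile away.

	/* Example: ISP1362_REG(HCCONTROL, 0x01, REG_WIDTH_32, REG_ACCESS_RW) gives
	 * ISP1362_REG_HCCONTROL == 0x01 | 0x100 | 0x600 == 0x701, so that
	 *
	 *	ISP1362_REG_NO(ISP1362_REG_HCCONTROL)   == 0x01 (register index)
	 *	ISP1362_REG_HCCONTROL & REG_WIDTH_MASK  == REG_WIDTH_32
	 *	ISP1362_REG_HCCONTROL & REG_ACCESS_MASK == REG_ACCESS_RW
	 *
	 * REG_WIDTH_TEST() then catches a 16-bit accessor used on a 32-bit
	 * register, and REG_ACCESS_TEST() catches a write to a read-only register.
	 */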
| 136 | |||
| 137 | /* OHCI compatible registers */ | ||
| 138 | /* | ||
| 139 | * Note: Some of the ISP1362 'OHCI' registers implement only | ||
| 140 | * a subset of the bits defined in the OHCI spec. | ||
| 141 | * | ||
| 142 | * Bitmasks for the individual bits of these registers are defined in "ohci.h" | ||
| 143 | */ | ||
| 144 | ISP1362_REG(HCREVISION, 0x00, REG_WIDTH_32, REG_ACCESS_R); | ||
| 145 | ISP1362_REG(HCCONTROL, 0x01, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 146 | ISP1362_REG(HCCMDSTAT, 0x02, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 147 | ISP1362_REG(HCINTSTAT, 0x03, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 148 | ISP1362_REG(HCINTENB, 0x04, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 149 | ISP1362_REG(HCINTDIS, 0x05, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 150 | ISP1362_REG(HCFMINTVL, 0x0d, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 151 | ISP1362_REG(HCFMREM, 0x0e, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 152 | ISP1362_REG(HCFMNUM, 0x0f, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 153 | ISP1362_REG(HCLSTHRESH, 0x11, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 154 | ISP1362_REG(HCRHDESCA, 0x12, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 155 | ISP1362_REG(HCRHDESCB, 0x13, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 156 | ISP1362_REG(HCRHSTATUS, 0x14, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 157 | ISP1362_REG(HCRHPORT1, 0x15, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 158 | ISP1362_REG(HCRHPORT2, 0x16, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 159 | |||
| 160 | /* Philips ISP1362 specific registers */ | ||
| 161 | ISP1362_REG(HCHWCFG, 0x20, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 162 | #define HCHWCFG_DISABLE_SUSPEND (1 << 15) | ||
| 163 | #define HCHWCFG_GLOBAL_PWRDOWN (1 << 14) | ||
| 164 | #define HCHWCFG_PULLDOWN_DS2 (1 << 13) | ||
| 165 | #define HCHWCFG_PULLDOWN_DS1 (1 << 12) | ||
| 166 | #define HCHWCFG_CLKNOTSTOP (1 << 11) | ||
| 167 | #define HCHWCFG_ANALOG_OC (1 << 10) | ||
| 168 | #define HCHWCFG_ONEINT (1 << 9) | ||
| 169 | #define HCHWCFG_DACK_MODE (1 << 8) | ||
| 170 | #define HCHWCFG_ONEDMA (1 << 7) | ||
| 171 | #define HCHWCFG_DACK_POL (1 << 6) | ||
| 172 | #define HCHWCFG_DREQ_POL (1 << 5) | ||
| 173 | #define HCHWCFG_DBWIDTH_MASK (0x03 << 3) | ||
| 174 | #define HCHWCFG_DBWIDTH(n) (((n) << 3) & HCHWCFG_DBWIDTH_MASK) | ||
| 175 | #define HCHWCFG_INT_POL (1 << 2) | ||
| 176 | #define HCHWCFG_INT_TRIGGER (1 << 1) | ||
| 177 | #define HCHWCFG_INT_ENABLE (1 << 0) | ||
| 178 | |||
| 179 | ISP1362_REG(HCDMACFG, 0x21, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 180 | #define HCDMACFG_CTR_ENABLE (1 << 7) | ||
| 181 | #define HCDMACFG_BURST_LEN_MASK (0x03 << 5) | ||
| 182 | #define HCDMACFG_BURST_LEN(n) (((n) << 5) & HCDMACFG_BURST_LEN_MASK) | ||
| 183 | #define HCDMACFG_BURST_LEN_1 HCDMACFG_BURST_LEN(0) | ||
| 184 | #define HCDMACFG_BURST_LEN_4 HCDMACFG_BURST_LEN(1) | ||
| 185 | #define HCDMACFG_BURST_LEN_8 HCDMACFG_BURST_LEN(2) | ||
| 186 | #define HCDMACFG_DMA_ENABLE (1 << 4) | ||
| 187 | #define HCDMACFG_BUF_TYPE_MASK (0x07 << 1) | ||
| 188 | #define HCDMACFG_BUF_TYPE(n) (((n) << 1) & HCDMACFG_BUF_TYPE_MASK) | ||
| 189 | #define HCDMACFG_BUF_ISTL0 HCDMACFG_BUF_TYPE(0) | ||
| 190 | #define HCDMACFG_BUF_ISTL1 HCDMACFG_BUF_TYPE(1) | ||
| 191 | #define HCDMACFG_BUF_INTL HCDMACFG_BUF_TYPE(2) | ||
| 192 | #define HCDMACFG_BUF_ATL HCDMACFG_BUF_TYPE(3) | ||
| 193 | #define HCDMACFG_BUF_DIRECT HCDMACFG_BUF_TYPE(4) | ||
| 194 | #define HCDMACFG_DMA_RW_SELECT (1 << 0) | ||
| 195 | |||
| 196 | ISP1362_REG(HCXFERCTR, 0x22, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 197 | |||
| 198 | ISP1362_REG(HCuPINT, 0x24, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 199 | #define HCuPINT_SOF (1 << 0) | ||
| 200 | #define HCuPINT_ISTL0 (1 << 1) | ||
| 201 | #define HCuPINT_ISTL1 (1 << 2) | ||
| 202 | #define HCuPINT_EOT (1 << 3) | ||
| 203 | #define HCuPINT_OPR (1 << 4) | ||
| 204 | #define HCuPINT_SUSP (1 << 5) | ||
| 205 | #define HCuPINT_CLKRDY (1 << 6) | ||
| 206 | #define HCuPINT_INTL (1 << 7) | ||
| 207 | #define HCuPINT_ATL (1 << 8) | ||
| 208 | #define HCuPINT_OTG (1 << 9) | ||
| 209 | |||
| 210 | ISP1362_REG(HCuPINTENB, 0x25, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 211 | /* same bit definitions apply as for HCuPINT */ | ||
| 212 | |||
| 213 | ISP1362_REG(HCCHIPID, 0x27, REG_WIDTH_16, REG_ACCESS_R); | ||
| 214 | #define HCCHIPID_MASK 0xff00 | ||
| 215 | #define HCCHIPID_MAGIC 0x3600 | ||
| 216 | |||
| 217 | ISP1362_REG(HCSCRATCH, 0x28, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 218 | |||
| 219 | ISP1362_REG(HCSWRES, 0x29, REG_WIDTH_16, REG_ACCESS_W); | ||
| 220 | #define HCSWRES_MAGIC 0x00f6 | ||
| 221 | |||
| 222 | ISP1362_REG(HCBUFSTAT, 0x2c, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 223 | #define HCBUFSTAT_ISTL0_FULL (1 << 0) | ||
| 224 | #define HCBUFSTAT_ISTL1_FULL (1 << 1) | ||
| 225 | #define HCBUFSTAT_INTL_ACTIVE (1 << 2) | ||
| 226 | #define HCBUFSTAT_ATL_ACTIVE (1 << 3) | ||
| 227 | #define HCBUFSTAT_RESET_HWPP (1 << 4) | ||
| 228 | #define HCBUFSTAT_ISTL0_ACTIVE (1 << 5) | ||
| 229 | #define HCBUFSTAT_ISTL1_ACTIVE (1 << 6) | ||
| 230 | #define HCBUFSTAT_ISTL0_DONE (1 << 8) | ||
| 231 | #define HCBUFSTAT_ISTL1_DONE (1 << 9) | ||
| 232 | #define HCBUFSTAT_PAIRED_PTDPP (1 << 10) | ||
| 233 | |||
| 234 | ISP1362_REG(HCDIRADDR, 0x32, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 235 | #define HCDIRADDR_ADDR_MASK 0x0000ffff | ||
| 236 | #define HCDIRADDR_ADDR(n) (((n) << 0) & HCDIRADDR_ADDR_MASK) | ||
| 237 | #define HCDIRADDR_COUNT_MASK 0xffff0000 | ||
| 238 | #define HCDIRADDR_COUNT(n) (((n) << 16) & HCDIRADDR_COUNT_MASK) | ||
| 239 | ISP1362_REG(HCDIRDATA, 0x45, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 240 | |||
| 241 | ISP1362_REG(HCISTLBUFSZ, 0x30, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 242 | ISP1362_REG(HCISTL0PORT, 0x40, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 243 | ISP1362_REG(HCISTL1PORT, 0x42, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 244 | ISP1362_REG(HCISTLRATE, 0x47, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 245 | |||
| 246 | ISP1362_REG(HCINTLBUFSZ, 0x33, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 247 | ISP1362_REG(HCINTLPORT, 0x43, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 248 | ISP1362_REG(HCINTLBLKSZ, 0x53, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 249 | ISP1362_REG(HCINTLDONE, 0x17, REG_WIDTH_32, REG_ACCESS_R); | ||
| 250 | ISP1362_REG(HCINTLSKIP, 0x18, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 251 | ISP1362_REG(HCINTLLAST, 0x19, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 252 | ISP1362_REG(HCINTLCURR, 0x1a, REG_WIDTH_16, REG_ACCESS_R); | ||
| 253 | |||
| 254 | ISP1362_REG(HCATLBUFSZ, 0x34, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 255 | ISP1362_REG(HCATLPORT, 0x44, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 256 | ISP1362_REG(HCATLBLKSZ, 0x54, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 257 | ISP1362_REG(HCATLDONE, 0x1b, REG_WIDTH_32, REG_ACCESS_R); | ||
| 258 | ISP1362_REG(HCATLSKIP, 0x1c, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 259 | ISP1362_REG(HCATLLAST, 0x1d, REG_WIDTH_32, REG_ACCESS_RW); | ||
| 260 | ISP1362_REG(HCATLCURR, 0x1e, REG_WIDTH_16, REG_ACCESS_R); | ||
| 261 | |||
| 262 | ISP1362_REG(HCATLDTC, 0x51, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 263 | ISP1362_REG(HCATLDTCTO, 0x52, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 264 | |||
| 265 | |||
| 266 | ISP1362_REG(OTGCONTROL, 0x62, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 267 | ISP1362_REG(OTGSTATUS, 0x67, REG_WIDTH_16, REG_ACCESS_R); | ||
| 268 | ISP1362_REG(OTGINT, 0x68, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 269 | ISP1362_REG(OTGINTENB, 0x69, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 270 | ISP1362_REG(OTGTIMER, 0x6A, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 271 | ISP1362_REG(OTGALTTMR, 0x6C, REG_WIDTH_16, REG_ACCESS_RW); | ||
| 272 | |||
| 273 | /* Philips transfer descriptor, cpu-endian */ | ||
| 274 | struct ptd { | ||
| 275 | u16 count; | ||
| 276 | #define PTD_COUNT_MSK (0x3ff << 0) | ||
| 277 | #define PTD_TOGGLE_MSK (1 << 10) | ||
| 278 | #define PTD_ACTIVE_MSK (1 << 11) | ||
| 279 | #define PTD_CC_MSK (0xf << 12) | ||
| 280 | u16 mps; | ||
| 281 | #define PTD_MPS_MSK (0x3ff << 0) | ||
| 282 | #define PTD_SPD_MSK (1 << 10) | ||
| 283 | #define PTD_LAST_MSK (1 << 11) | ||
| 284 | #define PTD_EP_MSK (0xf << 12) | ||
| 285 | u16 len; | ||
| 286 | #define PTD_LEN_MSK (0x3ff << 0) | ||
| 287 | #define PTD_DIR_MSK (3 << 10) | ||
| 288 | #define PTD_DIR_SETUP (0) | ||
| 289 | #define PTD_DIR_OUT (1) | ||
| 290 | #define PTD_DIR_IN (2) | ||
| 291 | u16 faddr; | ||
| 292 | #define PTD_FA_MSK (0x7f << 0) | ||
| 293 | /* PTD Byte 7: [StartingFrame (if ISO PTD) | StartingFrame[0..4], PollingRate[0..2] (if INT PTD)] */ | ||
| 294 | #define PTD_SF_ISO_MSK (0xff << 8) | ||
| 295 | #define PTD_SF_INT_MSK (0x1f << 8) | ||
| 296 | #define PTD_PR_MSK (0x07 << 13) | ||
| 297 | } __attribute__ ((packed, aligned(2))); | ||
| 298 | #define PTD_HEADER_SIZE sizeof(struct ptd) | ||
| 299 | |||
| 300 | /* ------------------------------------------------------------------------- */ | ||
| 301 | /* Copied from ohci.h: */ | ||
| 302 | /* | ||
| 303 | * Hardware transfer status codes -- CC from PTD | ||
| 304 | */ | ||
| 305 | #define PTD_CC_NOERROR 0x00 | ||
| 306 | #define PTD_CC_CRC 0x01 | ||
| 307 | #define PTD_CC_BITSTUFFING 0x02 | ||
| 308 | #define PTD_CC_DATATOGGLEM 0x03 | ||
| 309 | #define PTD_CC_STALL 0x04 | ||
| 310 | #define PTD_DEVNOTRESP 0x05 | ||
| 311 | #define PTD_PIDCHECKFAIL 0x06 | ||
| 312 | #define PTD_UNEXPECTEDPID 0x07 | ||
| 313 | #define PTD_DATAOVERRUN 0x08 | ||
| 314 | #define PTD_DATAUNDERRUN 0x09 | ||
| 315 | /* 0x0A, 0x0B reserved for hardware */ | ||
| 316 | #define PTD_BUFFEROVERRUN 0x0C | ||
| 317 | #define PTD_BUFFERUNDERRUN 0x0D | ||
| 318 | /* 0x0E, 0x0F reserved for HCD */ | ||
| 319 | #define PTD_NOTACCESSED 0x0F | ||
| 320 | |||
| 321 | |||
| 322 | /* map OHCI TD status codes (CC) to errno values */ | ||
| 323 | static const int cc_to_error[16] = { | ||
| 324 | /* No Error */ 0, | ||
| 325 | /* CRC Error */ -EILSEQ, | ||
| 326 | /* Bit Stuff */ -EPROTO, | ||
| 327 | /* Data Togg */ -EILSEQ, | ||
| 328 | /* Stall */ -EPIPE, | ||
| 329 | /* DevNotResp */ -ETIMEDOUT, | ||
| 330 | /* PIDCheck */ -EPROTO, | ||
| 331 | /* UnExpPID */ -EPROTO, | ||
| 332 | /* DataOver */ -EOVERFLOW, | ||
| 333 | /* DataUnder */ -EREMOTEIO, | ||
| 334 | /* (for hw) */ -EIO, | ||
| 335 | /* (for hw) */ -EIO, | ||
| 336 | /* BufferOver */ -ECOMM, | ||
| 337 | /* BuffUnder */ -ENOSR, | ||
| 338 | /* (for HCD) */ -EALREADY, | ||
| 339 | /* (for HCD) */ -EALREADY | ||
| 340 | }; | ||
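The cc_to_error[] table is indexed by the 4-bit condition code carried in the first PTD word. A minimal lookup sketch (illustrative only, not part of this patch; the PTD_GET_CC() accessor defined further down performs the same shift-and-mask):

    static inline int ptd_cc_to_urb_status(const struct ptd *ptd)
    {
        /* extract the 4-bit CC field from the first PTD word */
        unsigned int cc = (ptd->count & PTD_CC_MSK) >> 12;

        return cc_to_error[cc];    /* e.g. PTD_CC_STALL -> -EPIPE */
    }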
| 341 | |||
| 342 | |||
| 343 | /* | ||
| 344 | * HcControl (control) register masks | ||
| 345 | */ | ||
| 346 | #define OHCI_CTRL_HCFS (3 << 6) /* host controller functional state */ | ||
| 347 | #define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ | ||
| 348 | #define OHCI_CTRL_RWE (1 << 10) /* remote wakeup enable */ | ||
| 349 | |||
| 350 | /* pre-shifted values for HCFS */ | ||
| 351 | # define OHCI_USB_RESET (0 << 6) | ||
| 352 | # define OHCI_USB_RESUME (1 << 6) | ||
| 353 | # define OHCI_USB_OPER (2 << 6) | ||
| 354 | # define OHCI_USB_SUSPEND (3 << 6) | ||
| 355 | |||
| 356 | /* | ||
| 357 | * HcCommandStatus (cmdstatus) register masks | ||
| 358 | */ | ||
| 359 | #define OHCI_HCR (1 << 0) /* host controller reset */ | ||
| 360 | #define OHCI_SOC (3 << 16) /* scheduling overrun count */ | ||
| 361 | |||
| 362 | /* | ||
| 363 | * masks used with interrupt registers: | ||
| 364 | * HcInterruptStatus (intrstatus) | ||
| 365 | * HcInterruptEnable (intrenable) | ||
| 366 | * HcInterruptDisable (intrdisable) | ||
| 367 | */ | ||
| 368 | #define OHCI_INTR_SO (1 << 0) /* scheduling overrun */ | ||
| 369 | #define OHCI_INTR_WDH (1 << 1) /* writeback of done_head */ | ||
| 370 | #define OHCI_INTR_SF (1 << 2) /* start frame */ | ||
| 371 | #define OHCI_INTR_RD (1 << 3) /* resume detect */ | ||
| 372 | #define OHCI_INTR_UE (1 << 4) /* unrecoverable error */ | ||
| 373 | #define OHCI_INTR_FNO (1 << 5) /* frame number overflow */ | ||
| 374 | #define OHCI_INTR_RHSC (1 << 6) /* root hub status change */ | ||
| 375 | #define OHCI_INTR_OC (1 << 30) /* ownership change */ | ||
| 376 | #define OHCI_INTR_MIE (1 << 31) /* master interrupt enable */ | ||
| 377 | |||
| 378 | /* roothub.portstatus [i] bits */ | ||
| 379 | #define RH_PS_CCS 0x00000001 /* current connect status */ | ||
| 380 | #define RH_PS_PES 0x00000002 /* port enable status*/ | ||
| 381 | #define RH_PS_PSS 0x00000004 /* port suspend status */ | ||
| 382 | #define RH_PS_POCI 0x00000008 /* port over current indicator */ | ||
| 383 | #define RH_PS_PRS 0x00000010 /* port reset status */ | ||
| 384 | #define RH_PS_PPS 0x00000100 /* port power status */ | ||
| 385 | #define RH_PS_LSDA 0x00000200 /* low speed device attached */ | ||
| 386 | #define RH_PS_CSC 0x00010000 /* connect status change */ | ||
| 387 | #define RH_PS_PESC 0x00020000 /* port enable status change */ | ||
| 388 | #define RH_PS_PSSC 0x00040000 /* port suspend status change */ | ||
| 389 | #define RH_PS_OCIC 0x00080000 /* over current indicator change */ | ||
| 390 | #define RH_PS_PRSC 0x00100000 /* port reset status change */ | ||
| 391 | |||
| 392 | /* roothub.status bits */ | ||
| 393 | #define RH_HS_LPS 0x00000001 /* local power status */ | ||
| 394 | #define RH_HS_OCI 0x00000002 /* over current indicator */ | ||
| 395 | #define RH_HS_DRWE 0x00008000 /* device remote wakeup enable */ | ||
| 396 | #define RH_HS_LPSC 0x00010000 /* local power status change */ | ||
| 397 | #define RH_HS_OCIC 0x00020000 /* over current indicator change */ | ||
| 398 | #define RH_HS_CRWE 0x80000000 /* clear remote wakeup enable */ | ||
| 399 | |||
| 400 | /* roothub.b masks */ | ||
| 401 | #define RH_B_DR 0x0000ffff /* device removable flags */ | ||
| 402 | #define RH_B_PPCM 0xffff0000 /* port power control mask */ | ||
| 403 | |||
| 404 | /* roothub.a masks */ | ||
| 405 | #define RH_A_NDP (0xff << 0) /* number of downstream ports */ | ||
| 406 | #define RH_A_PSM (1 << 8) /* power switching mode */ | ||
| 407 | #define RH_A_NPS (1 << 9) /* no power switching */ | ||
| 408 | #define RH_A_DT (1 << 10) /* device type (mbz) */ | ||
| 409 | #define RH_A_OCPM (1 << 11) /* over current protection mode */ | ||
| 410 | #define RH_A_NOCP (1 << 12) /* no over current protection */ | ||
| 411 | #define RH_A_POTPGT (0xff << 24) /* power on to power good time */ | ||
| 412 | |||
| 413 | #define FI 0x2edf /* 12000 bits per frame (-1) */ | ||
| 414 | #define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7)) | ||
| 415 | #define LSTHRESH 0x628 /* lowspeed bit threshold */ | ||
| 416 | |||
| 417 | /* ------------------------------------------------------------------------- */ | ||
| 418 | |||
| 419 | /* PTD accessor macros. */ | ||
| 420 | #define PTD_GET_COUNT(p) (((p)->count & PTD_COUNT_MSK) >> 0) | ||
| 421 | #define PTD_COUNT(v) (((v) << 0) & PTD_COUNT_MSK) | ||
| 422 | #define PTD_GET_TOGGLE(p) (((p)->count & PTD_TOGGLE_MSK) >> 10) | ||
| 423 | #define PTD_TOGGLE(v) (((v) << 10) & PTD_TOGGLE_MSK) | ||
| 424 | #define PTD_GET_ACTIVE(p) (((p)->count & PTD_ACTIVE_MSK) >> 11) | ||
| 425 | #define PTD_ACTIVE(v) (((v) << 11) & PTD_ACTIVE_MSK) | ||
| 426 | #define PTD_GET_CC(p) (((p)->count & PTD_CC_MSK) >> 12) | ||
| 427 | #define PTD_CC(v) (((v) << 12) & PTD_CC_MSK) | ||
| 428 | #define PTD_GET_MPS(p) (((p)->mps & PTD_MPS_MSK) >> 0) | ||
| 429 | #define PTD_MPS(v) (((v) << 0) & PTD_MPS_MSK) | ||
| 430 | #define PTD_GET_SPD(p) (((p)->mps & PTD_SPD_MSK) >> 10) | ||
| 431 | #define PTD_SPD(v) (((v) << 10) & PTD_SPD_MSK) | ||
| 432 | #define PTD_GET_LAST(p) (((p)->mps & PTD_LAST_MSK) >> 11) | ||
| 433 | #define PTD_LAST(v) (((v) << 11) & PTD_LAST_MSK) | ||
| 434 | #define PTD_GET_EP(p) (((p)->mps & PTD_EP_MSK) >> 12) | ||
| 435 | #define PTD_EP(v) (((v) << 12) & PTD_EP_MSK) | ||
| 436 | #define PTD_GET_LEN(p) (((p)->len & PTD_LEN_MSK) >> 0) | ||
| 437 | #define PTD_LEN(v) (((v) << 0) & PTD_LEN_MSK) | ||
| 438 | #define PTD_GET_DIR(p) (((p)->len & PTD_DIR_MSK) >> 10) | ||
| 439 | #define PTD_DIR(v) (((v) << 10) & PTD_DIR_MSK) | ||
| 440 | #define PTD_GET_FA(p) (((p)->faddr & PTD_FA_MSK) >> 0) | ||
| 441 | #define PTD_FA(v) (((v) << 0) & PTD_FA_MSK) | ||
| 442 | #define PTD_GET_SF_INT(p) (((p)->faddr & PTD_SF_INT_MSK) >> 8) | ||
| 443 | #define PTD_SF_INT(v) (((v) << 8) & PTD_SF_INT_MSK) | ||
| 444 | #define PTD_GET_SF_ISO(p) (((p)->faddr & PTD_SF_ISO_MSK) >> 8) | ||
| 445 | #define PTD_SF_ISO(v) (((v) << 8) & PTD_SF_ISO_MSK) | ||
| 446 | #define PTD_GET_PR(p) (((p)->faddr & PTD_PR_MSK) >> 13) | ||
| 447 | #define PTD_PR(v) (((v) << 13) & PTD_PR_MSK) | ||
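Taken together, the setter macros build the four 16-bit PTD words and the getters decode them again. A minimal sketch of composing a bulk OUT descriptor (illustrative only, not part of this patch; devnum, epnum, maxpacket, buf_len and toggle are placeholder parameters, and the exact initialisation used by the driver itself may differ):

    static inline void example_fill_bulk_out_ptd(struct ptd *ptd, u8 devnum,
                                                 u8 epnum, u16 maxpacket,
                                                 u16 buf_len, int toggle)
    {
        /* word 0: completion code, active bit, data toggle, byte count */
        ptd->count = PTD_CC(PTD_NOTACCESSED) | PTD_ACTIVE(1) |
                     PTD_TOGGLE(toggle) | PTD_COUNT(0);
        /* word 1: max packet size, endpoint number, LAST bit (SPD left clear) */
        ptd->mps   = PTD_MPS(maxpacket) | PTD_LAST(1) | PTD_EP(epnum);
        /* word 2: transfer length and direction */
        ptd->len   = PTD_LEN(buf_len) | PTD_DIR(PTD_DIR_OUT);
        /* word 3: function (device) address */
        ptd->faddr = PTD_FA(devnum);

        /* the getters recover the fields, e.g. PTD_GET_EP(ptd) == epnum */
    }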
| 448 | |||
| 449 | #define LOG2_PERIODIC_SIZE 5 /* arbitrary; this matches OHCI */ | ||
| 450 | #define PERIODIC_SIZE (1 << LOG2_PERIODIC_SIZE) | ||
| 451 | |||
| 452 | struct isp1362_ep { | ||
| 453 | struct usb_host_endpoint *hep; | ||
| 454 | struct usb_device *udev; | ||
| 455 | |||
| 456 | /* philips transfer descriptor */ | ||
| 457 | struct ptd ptd; | ||
| 458 | |||
| 459 | u8 maxpacket; | ||
| 460 | u8 epnum; | ||
| 461 | u8 nextpid; | ||
| 462 | u16 error_count; | ||
| 463 | u16 length; /* of current packet */ | ||
| 464 | s16 ptd_offset; /* buffer offset in ISP1362 where | ||
| 465 | PTD has been stored | ||
| 466 | (for access thru HCDIRDATA) */ | ||
| 467 | int ptd_index; | ||
| 468 | int num_ptds; | ||
| 469 | void *data; /* to databuf */ | ||
| 470 | /* queue of active EPs (the ones transmitted to the chip) */ | ||
| 471 | struct list_head active; | ||
| 472 | |||
| 473 | /* periodic schedule */ | ||
| 474 | u8 branch; | ||
| 475 | u16 interval; | ||
| 476 | u16 load; | ||
| 477 | u16 last_iso; | ||
| 478 | |||
| 479 | /* async schedule */ | ||
| 480 | struct list_head schedule; /* list of all EPs that need processing */ | ||
| 481 | struct list_head remove_list; | ||
| 482 | int num_req; | ||
| 483 | }; | ||
| 484 | |||
| 485 | struct isp1362_ep_queue { | ||
| 486 | struct list_head active; /* list of PTDs currently processed by HC */ | ||
| 487 | atomic_t finishing; | ||
| 488 | unsigned long buf_map; | ||
| 489 | unsigned long skip_map; | ||
| 490 | int free_ptd; | ||
| 491 | u16 buf_start; | ||
| 492 | u16 buf_size; | ||
| 493 | u16 blk_size; /* PTD buffer block size for ATL and INTL */ | ||
| 494 | u8 buf_count; | ||
| 495 | u8 buf_avail; | ||
| 496 | char name[16]; | ||
| 497 | |||
| 498 | /* for statistical tracking */ | ||
| 499 | u8 stat_maxptds; /* Max # of ptds seen simultaneously in fifo */ | ||
| 500 | u8 ptd_count; /* number of ptds submitted to this queue */ | ||
| 501 | }; | ||
| 502 | |||
| 503 | struct isp1362_hcd { | ||
| 504 | spinlock_t lock; | ||
| 505 | void __iomem *addr_reg; | ||
| 506 | void __iomem *data_reg; | ||
| 507 | |||
| 508 | struct isp1362_platform_data *board; | ||
| 509 | |||
| 510 | struct proc_dir_entry *pde; | ||
| 511 | unsigned long stat1, stat2, stat4, stat8, stat16; | ||
| 512 | |||
| 513 | /* HC registers */ | ||
| 514 | u32 intenb; /* "OHCI" interrupts */ | ||
| 515 | u16 irqenb; /* uP interrupts */ | ||
| 516 | |||
| 517 | /* Root hub registers */ | ||
| 518 | u32 rhdesca; | ||
| 519 | u32 rhdescb; | ||
| 520 | u32 rhstatus; | ||
| 521 | u32 rhport[MAX_ROOT_PORTS]; | ||
| 522 | unsigned long next_statechange; | ||
| 523 | |||
| 524 | /* HC control reg shadow copy */ | ||
| 525 | u32 hc_control; | ||
| 526 | |||
| 527 | /* async schedule: control, bulk */ | ||
| 528 | struct list_head async; | ||
| 529 | |||
| 530 | /* periodic schedule: int */ | ||
| 531 | u16 load[PERIODIC_SIZE]; | ||
| 532 | struct list_head periodic; | ||
| 533 | u16 fmindex; | ||
| 534 | |||
| 535 | /* periodic schedule: isochronous */ | ||
| 536 | struct list_head isoc; | ||
| 537 | int istl_flip:1; | ||
| 538 | int irq_active:1; | ||
| 539 | |||
| 540 | /* Schedules for the current frame */ | ||
| 541 | struct isp1362_ep_queue atl_queue; | ||
| 542 | struct isp1362_ep_queue intl_queue; | ||
| 543 | struct isp1362_ep_queue istl_queue[2]; | ||
| 544 | |||
| 545 | /* list of PTDs retrieved from HC */ | ||
| 546 | struct list_head remove_list; | ||
| 547 | enum { | ||
| 548 | ISP1362_INT_SOF, | ||
| 549 | ISP1362_INT_ISTL0, | ||
| 550 | ISP1362_INT_ISTL1, | ||
| 551 | ISP1362_INT_EOT, | ||
| 552 | ISP1362_INT_OPR, | ||
| 553 | ISP1362_INT_SUSP, | ||
| 554 | ISP1362_INT_CLKRDY, | ||
| 555 | ISP1362_INT_INTL, | ||
| 556 | ISP1362_INT_ATL, | ||
| 557 | ISP1362_INT_OTG, | ||
| 558 | NUM_ISP1362_IRQS | ||
| 559 | } IRQ_NAMES; | ||
| 560 | unsigned int irq_stat[NUM_ISP1362_IRQS]; | ||
| 561 | int req_serial; | ||
| 562 | }; | ||
| 563 | |||
| 564 | static inline const char *ISP1362_INT_NAME(int n) | ||
| 565 | { | ||
| 566 | switch (n) { | ||
| 567 | case ISP1362_INT_SOF: return "SOF"; | ||
| 568 | case ISP1362_INT_ISTL0: return "ISTL0"; | ||
| 569 | case ISP1362_INT_ISTL1: return "ISTL1"; | ||
| 570 | case ISP1362_INT_EOT: return "EOT"; | ||
| 571 | case ISP1362_INT_OPR: return "OPR"; | ||
| 572 | case ISP1362_INT_SUSP: return "SUSP"; | ||
| 573 | case ISP1362_INT_CLKRDY: return "CLKRDY"; | ||
| 574 | case ISP1362_INT_INTL: return "INTL"; | ||
| 575 | case ISP1362_INT_ATL: return "ATL"; | ||
| 576 | case ISP1362_INT_OTG: return "OTG"; | ||
| 577 | default: return "unknown"; | ||
| 578 | } | ||
| 579 | } | ||
| 580 | |||
| 581 | static inline void ALIGNSTAT(struct isp1362_hcd *isp1362_hcd, void *ptr) | ||
| 582 | { | ||
| 583 | unsigned p = (unsigned)ptr; | ||
| 584 | if (!(p & 0xf)) | ||
| 585 | isp1362_hcd->stat16++; | ||
| 586 | else if (!(p & 0x7)) | ||
| 587 | isp1362_hcd->stat8++; | ||
| 588 | else if (!(p & 0x3)) | ||
| 589 | isp1362_hcd->stat4++; | ||
| 590 | else if (!(p & 0x1)) | ||
| 591 | isp1362_hcd->stat2++; | ||
| 592 | else | ||
| 593 | isp1362_hcd->stat1++; | ||
| 594 | } | ||
| 595 | |||
| 596 | static inline struct isp1362_hcd *hcd_to_isp1362_hcd(struct usb_hcd *hcd) | ||
| 597 | { | ||
| 598 | return (struct isp1362_hcd *) (hcd->hcd_priv); | ||
| 599 | } | ||
| 600 | |||
| 601 | static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd) | ||
| 602 | { | ||
| 603 | return container_of((void *)isp1362_hcd, struct usb_hcd, hcd_priv); | ||
| 604 | } | ||
| 605 | |||
| 606 | #define frame_before(f1, f2) ((s16)((u16)f1 - (u16)f2) < 0) | ||
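frame_before() compares 16-bit frame numbers modulo 2^16, so the result stays correct across the frame-counter wrap. A quick worked check (illustrative only, not part of this patch):

    frame_before(0xfffe, 0x0002)   /* (s16)0xfffc == -4 -> 1: 0xfffe precedes 0x0002 across the wrap */
    frame_before(0x0002, 0xfffe)   /* (s16)0x0004 ==  4 -> 0 */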
| 607 | |||
| 608 | /* | ||
| 609 | * ISP1362 HW Interface | ||
| 610 | */ | ||
| 611 | |||
| 612 | #ifdef ISP1362_DEBUG | ||
| 613 | #define DBG(level, fmt...) \ | ||
| 614 | do { \ | ||
| 615 | if (dbg_level > level) \ | ||
| 616 | pr_debug(fmt); \ | ||
| 617 | } while (0) | ||
| 618 | #define _DBG(level, fmt...) \ | ||
| 619 | do { \ | ||
| 620 | if (dbg_level > level) \ | ||
| 621 | printk(fmt); \ | ||
| 622 | } while (0) | ||
| 623 | #else | ||
| 624 | #define DBG(fmt...) do {} while (0) | ||
| 625 | #define _DBG DBG | ||
| 626 | #endif | ||
| 627 | |||
| 628 | #ifdef VERBOSE | ||
| 629 | # define VDBG(fmt...) DBG(3, fmt) | ||
| 630 | #else | ||
| 631 | # define VDBG(fmt...) do {} while (0) | ||
| 632 | #endif | ||
| 633 | |||
| 634 | #ifdef REGISTERS | ||
| 635 | # define RDBG(fmt...) DBG(1, fmt) | ||
| 636 | #else | ||
| 637 | # define RDBG(fmt...) do {} while (0) | ||
| 638 | #endif | ||
| 639 | |||
| 640 | #ifdef URB_TRACE | ||
| 641 | #define URB_DBG(fmt...) DBG(0, fmt) | ||
| 642 | #else | ||
| 643 | #define URB_DBG(fmt...) do {} while (0) | ||
| 644 | #endif | ||
| 645 | |||
| 646 | |||
| 647 | #if USE_PLATFORM_DELAY | ||
| 648 | #if USE_NDELAY | ||
| 649 | #error USE_PLATFORM_DELAY and USE_NDELAY defined simultaneously. | ||
| 650 | #endif | ||
| 651 | #define isp1362_delay(h, d) (h)->board->delay(isp1362_hcd_to_hcd(h)->self.controller, d) | ||
| 652 | #elif USE_NDELAY | ||
| 653 | #define isp1362_delay(h, d) ndelay(d) | ||
| 654 | #else | ||
| 655 | #define isp1362_delay(h, d) do {} while (0) | ||
| 656 | #endif | ||
| 657 | |||
| 658 | #define get_urb(ep) ({ \ | ||
| 659 | BUG_ON(list_empty(&ep->hep->urb_list)); \ | ||
| 660 | container_of(ep->hep->urb_list.next, struct urb, urb_list); \ | ||
| 661 | }) | ||
| 662 | |||
| 663 | /* basic access functions for ISP1362 chip registers */ | ||
| 664 | /* NOTE: The contents of the address pointer register cannot be read back! The driver must ensure | ||
| 665 | * that all register accesses are performed with interrupts disabled, since the interrupt | ||
| 666 | * handler has no way of restoring the previous state. | ||
| 667 | */ | ||
| 668 | static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg) | ||
| 669 | { | ||
| 670 | /*_BUG_ON((reg & ISP1362_REG_WRITE_OFFSET) && !(reg & REG_ACCESS_W));*/ | ||
| 671 | REG_ACCESS_TEST(reg); | ||
| 672 | _BUG_ON(!irqs_disabled()); | ||
| 673 | DUMMY_DELAY_ACCESS; | ||
| 674 | writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg); | ||
| 675 | DUMMY_DELAY_ACCESS; | ||
| 676 | isp1362_delay(isp1362_hcd, 1); | ||
| 677 | } | ||
| 678 | |||
| 679 | static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val) | ||
| 680 | { | ||
| 681 | _BUG_ON(!irqs_disabled()); | ||
| 682 | DUMMY_DELAY_ACCESS; | ||
| 683 | writew(val, isp1362_hcd->data_reg); | ||
| 684 | } | ||
| 685 | |||
| 686 | static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd) | ||
| 687 | { | ||
| 688 | u16 val; | ||
| 689 | |||
| 690 | _BUG_ON(!irqs_disabled()); | ||
| 691 | DUMMY_DELAY_ACCESS; | ||
| 692 | val = readw(isp1362_hcd->data_reg); | ||
| 693 | |||
| 694 | return val; | ||
| 695 | } | ||
| 696 | |||
| 697 | static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val) | ||
| 698 | { | ||
| 699 | _BUG_ON(!irqs_disabled()); | ||
| 700 | #if USE_32BIT | ||
| 701 | DUMMY_DELAY_ACCESS; | ||
| 702 | writel(val, isp1362_hcd->data_reg); | ||
| 703 | #else | ||
| 704 | DUMMY_DELAY_ACCESS; | ||
| 705 | writew((u16)val, isp1362_hcd->data_reg); | ||
| 706 | DUMMY_DELAY_ACCESS; | ||
| 707 | writew(val >> 16, isp1362_hcd->data_reg); | ||
| 708 | #endif | ||
| 709 | } | ||
| 710 | |||
| 711 | static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd) | ||
| 712 | { | ||
| 713 | u32 val; | ||
| 714 | |||
| 715 | _BUG_ON(!irqs_disabled()); | ||
| 716 | #if USE_32BIT | ||
| 717 | DUMMY_DELAY_ACCESS; | ||
| 718 | val = readl(isp1362_hcd->data_reg); | ||
| 719 | #else | ||
| 720 | DUMMY_DELAY_ACCESS; | ||
| 721 | val = (u32)readw(isp1362_hcd->data_reg); | ||
| 722 | DUMMY_DELAY_ACCESS; | ||
| 723 | val |= (u32)readw(isp1362_hcd->data_reg) << 16; | ||
| 724 | #endif | ||
| 725 | return val; | ||
| 726 | } | ||
| 727 | |||
| 728 | /* use readsw/writesw to access the fifo whenever possible */ | ||
| 729 | /* assume HCDIRDATA or XFERCTR & addr_reg have been set up */ | ||
| 730 | static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len) | ||
| 731 | { | ||
| 732 | u8 *dp = buf; | ||
| 733 | u16 data; | ||
| 734 | |||
| 735 | if (!len) | ||
| 736 | return; | ||
| 737 | |||
| 738 | _BUG_ON(!irqs_disabled()); | ||
| 739 | |||
| 740 | RDBG("%s: Reading %d bytes from fifo to mem @ %p\n", __func__, len, buf); | ||
| 741 | #if USE_32BIT | ||
| 742 | if (len >= 4) { | ||
| 743 | RDBG("%s: Using readsl for %d dwords\n", __func__, len >> 2); | ||
| 744 | readsl(isp1362_hcd->data_reg, dp, len >> 2); | ||
| 745 | dp += len & ~3; | ||
| 746 | len &= 3; | ||
| 747 | } | ||
| 748 | #endif | ||
| 749 | if (len >= 2) { | ||
| 750 | RDBG("%s: Using readsw for %d words\n", __func__, len >> 1); | ||
| 751 | insw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1); | ||
| 752 | dp += len & ~1; | ||
| 753 | len &= 1; | ||
| 754 | } | ||
| 755 | |||
| 756 | BUG_ON(len & ~1); | ||
| 757 | if (len > 0) { | ||
| 758 | data = isp1362_read_data16(isp1362_hcd); | ||
| 759 | RDBG("%s: Reading trailing byte %02x to mem @ %08x\n", __func__, | ||
| 760 | (u8)data, (u32)dp); | ||
| 761 | *dp = (u8)data; | ||
| 762 | } | ||
| 763 | } | ||
| 764 | |||
| 765 | static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 len) | ||
| 766 | { | ||
| 767 | u8 *dp = buf; | ||
| 768 | u16 data; | ||
| 769 | |||
| 770 | if (!len) | ||
| 771 | return; | ||
| 772 | |||
| 773 | if ((unsigned)dp & 0x1) { | ||
| 774 | /* not aligned */ | ||
| 775 | for (; len > 1; len -= 2) { | ||
| 776 | data = *dp++; | ||
| 777 | data |= *dp++ << 8; | ||
| 778 | isp1362_write_data16(isp1362_hcd, data); | ||
| 779 | } | ||
| 780 | if (len) | ||
| 781 | isp1362_write_data16(isp1362_hcd, *dp); | ||
| 782 | return; | ||
| 783 | } | ||
| 784 | |||
| 785 | _BUG_ON(!irqs_disabled()); | ||
| 786 | |||
| 787 | RDBG("%s: Writing %d bytes to fifo from memory @%p\n", __func__, len, buf); | ||
| 788 | #if USE_32BIT | ||
| 789 | if (len >= 4) { | ||
| 790 | RDBG("%s: Using writesl for %d dwords\n", __func__, len >> 2); | ||
| 791 | writesl(isp1362_hcd->data_reg, dp, len >> 2); | ||
| 792 | dp += len & ~3; | ||
| 793 | len &= 3; | ||
| 794 | } | ||
| 795 | #endif | ||
| 796 | if (len >= 2) { | ||
| 797 | RDBG("%s: Using writesw for %d words\n", __func__, len >> 1); | ||
| 798 | outsw((unsigned long)isp1362_hcd->data_reg, dp, len >> 1); | ||
| 799 | dp += len & ~1; | ||
| 800 | len &= 1; | ||
| 801 | } | ||
| 802 | |||
| 803 | BUG_ON(len & ~1); | ||
| 804 | if (len > 0) { | ||
| 805 | /* finally write any trailing byte; we don't need to care | ||
| 806 | * about the high byte of the last word written | ||
| 807 | */ | ||
| 808 | data = (u16)*dp; | ||
| 809 | RDBG("%s: Sending trailing byte %02x from mem @ %08x\n", __func__, | ||
| 810 | data, (u32)dp); | ||
| 811 | isp1362_write_data16(isp1362_hcd, data); | ||
| 812 | } | ||
| 813 | } | ||
| 814 | |||
| 815 | #define isp1362_read_reg16(d, r) ({ \ | ||
| 816 | u16 __v; \ | ||
| 817 | REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \ | ||
| 818 | isp1362_write_addr(d, ISP1362_REG_##r); \ | ||
| 819 | __v = isp1362_read_data16(d); \ | ||
| 820 | RDBG("%s: Read %04x from %s[%02x]\n", __func__, __v, #r, \ | ||
| 821 | ISP1362_REG_NO(ISP1362_REG_##r)); \ | ||
| 822 | __v; \ | ||
| 823 | }) | ||
| 824 | |||
| 825 | #define isp1362_read_reg32(d, r) ({ \ | ||
| 826 | u32 __v; \ | ||
| 827 | REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \ | ||
| 828 | isp1362_write_addr(d, ISP1362_REG_##r); \ | ||
| 829 | __v = isp1362_read_data32(d); \ | ||
| 830 | RDBG("%s: Read %08x from %s[%02x]\n", __func__, __v, #r, \ | ||
| 831 | ISP1362_REG_NO(ISP1362_REG_##r)); \ | ||
| 832 | __v; \ | ||
| 833 | }) | ||
| 834 | |||
| 835 | #define isp1362_write_reg16(d, r, v) { \ | ||
| 836 | REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_16); \ | ||
| 837 | isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \ | ||
| 838 | isp1362_write_data16(d, (u16)(v)); \ | ||
| 839 | RDBG("%s: Wrote %04x to %s[%02x]\n", __func__, (u16)(v), #r, \ | ||
| 840 | ISP1362_REG_NO(ISP1362_REG_##r)); \ | ||
| 841 | } | ||
| 842 | |||
| 843 | #define isp1362_write_reg32(d, r, v) { \ | ||
| 844 | REG_WIDTH_TEST(ISP1362_REG_##r, REG_WIDTH_32); \ | ||
| 845 | isp1362_write_addr(d, (ISP1362_REG_##r) | ISP1362_REG_WRITE_OFFSET); \ | ||
| 846 | isp1362_write_data32(d, (u32)(v)); \ | ||
| 847 | RDBG("%s: Wrote %08x to %s[%02x]\n", __func__, (u32)(v), #r, \ | ||
| 848 | ISP1362_REG_NO(ISP1362_REG_##r)); \ | ||
| 849 | } | ||
| 850 | |||
| 851 | #define isp1362_set_mask16(d, r, m) { \ | ||
| 852 | u16 __v; \ | ||
| 853 | __v = isp1362_read_reg16(d, r); \ | ||
| 854 | if ((__v | m) != __v) \ | ||
| 855 | isp1362_write_reg16(d, r, __v | m); \ | ||
| 856 | } | ||
| 857 | |||
| 858 | #define isp1362_clr_mask16(d, r, m) { \ | ||
| 859 | u16 __v; \ | ||
| 860 | __v = isp1362_read_reg16(d, r); \ | ||
| 861 | if ((__v & ~m) != __v) \ | ||
| 862 | isp1362_write_reg16(d, r, __v & ~m); \ | ||
| 863 | } | ||
| 864 | |||
| 865 | #define isp1362_set_mask32(d, r, m) { \ | ||
| 866 | u32 __v; \ | ||
| 867 | __v = isp1362_read_reg32(d, r); \ | ||
| 868 | if ((__v | m) != __v) \ | ||
| 869 | isp1362_write_reg32(d, r, __v | m); \ | ||
| 870 | } | ||
| 871 | |||
| 872 | #define isp1362_clr_mask32(d, r, m) { \ | ||
| 873 | u32 __v; \ | ||
| 874 | __v = isp1362_read_reg32(d, r); \ | ||
| 875 | if ((__v & ~m) != __v) \ | ||
| 876 | isp1362_write_reg32(d, r, __v & ~m); \ | ||
| 877 | } | ||
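The set/clr mask helpers above read the register first and only issue the write when the masked bits would actually change, presumably to spare a cycle on the external register interface. Typical use, as seen further down in this file (shown here only to illustrate the calling convention):

    isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE);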
| 878 | |||
| 879 | #ifdef ISP1362_DEBUG | ||
| 880 | #define isp1362_show_reg(d, r) { \ | ||
| 881 | if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32) \ | ||
| 882 | DBG(0, "%-12s[%02x]: %08x\n", #r, \ | ||
| 883 | ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg32(d, r)); \ | ||
| 884 | else \ | ||
| 885 | DBG(0, "%-12s[%02x]: %04x\n", #r, \ | ||
| 886 | ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r)); \ | ||
| 887 | } | ||
| 888 | #else | ||
| 889 | #define isp1362_show_reg(d, r) do {} while (0) | ||
| 890 | #endif | ||
| 891 | |||
| 892 | static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd) | ||
| 893 | { | ||
| 894 | isp1362_show_reg(isp1362_hcd, HCREVISION); | ||
| 895 | isp1362_show_reg(isp1362_hcd, HCCONTROL); | ||
| 896 | isp1362_show_reg(isp1362_hcd, HCCMDSTAT); | ||
| 897 | isp1362_show_reg(isp1362_hcd, HCINTSTAT); | ||
| 898 | isp1362_show_reg(isp1362_hcd, HCINTENB); | ||
| 899 | isp1362_show_reg(isp1362_hcd, HCFMINTVL); | ||
| 900 | isp1362_show_reg(isp1362_hcd, HCFMREM); | ||
| 901 | isp1362_show_reg(isp1362_hcd, HCFMNUM); | ||
| 902 | isp1362_show_reg(isp1362_hcd, HCLSTHRESH); | ||
| 903 | isp1362_show_reg(isp1362_hcd, HCRHDESCA); | ||
| 904 | isp1362_show_reg(isp1362_hcd, HCRHDESCB); | ||
| 905 | isp1362_show_reg(isp1362_hcd, HCRHSTATUS); | ||
| 906 | isp1362_show_reg(isp1362_hcd, HCRHPORT1); | ||
| 907 | isp1362_show_reg(isp1362_hcd, HCRHPORT2); | ||
| 908 | |||
| 909 | isp1362_show_reg(isp1362_hcd, HCHWCFG); | ||
| 910 | isp1362_show_reg(isp1362_hcd, HCDMACFG); | ||
| 911 | isp1362_show_reg(isp1362_hcd, HCXFERCTR); | ||
| 912 | isp1362_show_reg(isp1362_hcd, HCuPINT); | ||
| 913 | |||
| 914 | if (in_interrupt()) | ||
| 915 | DBG(0, "%-12s[%02x]: %04x\n", "HCuPINTENB", | ||
| 916 | ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), isp1362_hcd->irqenb); | ||
| 917 | else | ||
| 918 | isp1362_show_reg(isp1362_hcd, HCuPINTENB); | ||
| 919 | isp1362_show_reg(isp1362_hcd, HCCHIPID); | ||
| 920 | isp1362_show_reg(isp1362_hcd, HCSCRATCH); | ||
| 921 | isp1362_show_reg(isp1362_hcd, HCBUFSTAT); | ||
| 922 | isp1362_show_reg(isp1362_hcd, HCDIRADDR); | ||
| 923 | /* Access would advance fifo | ||
| 924 | * isp1362_show_reg(isp1362_hcd, HCDIRDATA); | ||
| 925 | */ | ||
| 926 | isp1362_show_reg(isp1362_hcd, HCISTLBUFSZ); | ||
| 927 | isp1362_show_reg(isp1362_hcd, HCISTLRATE); | ||
| 928 | isp1362_show_reg(isp1362_hcd, HCINTLBUFSZ); | ||
| 929 | isp1362_show_reg(isp1362_hcd, HCINTLBLKSZ); | ||
| 930 | isp1362_show_reg(isp1362_hcd, HCINTLDONE); | ||
| 931 | isp1362_show_reg(isp1362_hcd, HCINTLSKIP); | ||
| 932 | isp1362_show_reg(isp1362_hcd, HCINTLLAST); | ||
| 933 | isp1362_show_reg(isp1362_hcd, HCINTLCURR); | ||
| 934 | isp1362_show_reg(isp1362_hcd, HCATLBUFSZ); | ||
| 935 | isp1362_show_reg(isp1362_hcd, HCATLBLKSZ); | ||
| 936 | /* only valid after ATL_DONE interrupt | ||
| 937 | * isp1362_show_reg(isp1362_hcd, HCATLDONE); | ||
| 938 | */ | ||
| 939 | isp1362_show_reg(isp1362_hcd, HCATLSKIP); | ||
| 940 | isp1362_show_reg(isp1362_hcd, HCATLLAST); | ||
| 941 | isp1362_show_reg(isp1362_hcd, HCATLCURR); | ||
| 942 | isp1362_show_reg(isp1362_hcd, HCATLDTC); | ||
| 943 | isp1362_show_reg(isp1362_hcd, HCATLDTCTO); | ||
| 944 | } | ||
| 945 | |||
| 946 | static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len) | ||
| 947 | { | ||
| 948 | _BUG_ON(offset & 1); | ||
| 949 | _BUG_ON(offset >= ISP1362_BUF_SIZE); | ||
| 950 | _BUG_ON(len > ISP1362_BUF_SIZE); | ||
| 951 | _BUG_ON(offset + len > ISP1362_BUF_SIZE); | ||
| 952 | len = (len + 1) & ~1; | ||
| 953 | |||
| 954 | isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE); | ||
| 955 | isp1362_write_reg32(isp1362_hcd, HCDIRADDR, | ||
| 956 | HCDIRADDR_ADDR(offset) | HCDIRADDR_COUNT(len)); | ||
| 957 | } | ||
| 958 | |||
| 959 | static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len) | ||
| 960 | { | ||
| 961 | _BUG_ON(offset & 1); | ||
| 962 | |||
| 963 | isp1362_write_diraddr(isp1362_hcd, offset, len); | ||
| 964 | |||
| 965 | DBG(3, "%s: Reading %d bytes from buffer @%04x to memory @ %08x\n", __func__, | ||
| 966 | len, offset, (u32)buf); | ||
| 967 | |||
| 968 | isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); | ||
| 969 | _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); | ||
| 970 | |||
| 971 | isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA); | ||
| 972 | |||
| 973 | isp1362_read_fifo(isp1362_hcd, buf, len); | ||
| 974 | _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); | ||
| 975 | isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); | ||
| 976 | _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); | ||
| 977 | } | ||
| 978 | |||
| 979 | static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len) | ||
| 980 | { | ||
| 981 | _BUG_ON(offset & 1); | ||
| 982 | |||
| 983 | isp1362_write_diraddr(isp1362_hcd, offset, len); | ||
| 984 | |||
| 985 | DBG(3, "%s: Writing %d bytes to buffer @%04x from memory @ %08x\n", __func__, | ||
| 986 | len, offset, (u32)buf); | ||
| 987 | |||
| 988 | isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); | ||
| 989 | _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); | ||
| 990 | |||
| 991 | isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET); | ||
| 992 | isp1362_write_fifo(isp1362_hcd, buf, len); | ||
| 993 | |||
| 994 | _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); | ||
| 995 | isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT); | ||
| 996 | _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT)); | ||
| 997 | } | ||
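isp1362_read_buffer()/isp1362_write_buffer() are the building blocks for moving a PTD and its payload between host memory and the chip's internal buffer. A minimal sketch (illustrative only, not part of this patch; it assumes the payload is placed directly after the 8-byte PTD header at ep->ptd_offset and that the caller holds isp1362_hcd->lock with interrupts disabled, per the NOTE above the register accessors):

    static void example_submit_ptd(struct isp1362_hcd *isp1362_hcd,
                                   struct isp1362_ep *ep)
    {
        /* copy the PTD header into chip buffer memory ... */
        isp1362_write_buffer(isp1362_hcd, &ep->ptd, ep->ptd_offset,
                             PTD_HEADER_SIZE);
        /* ... followed by the OUT payload, if any (assumed layout) */
        if (ep->length)
            isp1362_write_buffer(isp1362_hcd, ep->data,
                                 ep->ptd_offset + PTD_HEADER_SIZE,
                                 ep->length);
    }

    static void example_fetch_ptd(struct isp1362_hcd *isp1362_hcd,
                                  struct isp1362_ep *ep)
    {
        /* read the (hardware-updated) PTD header back after completion */
        isp1362_read_buffer(isp1362_hcd, &ep->ptd, ep->ptd_offset,
                            PTD_HEADER_SIZE);
    }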
| 998 | |||
| 999 | static void __attribute__((unused)) dump_data(char *buf, int len) | ||
| 1000 | { | ||
| 1001 | if (dbg_level > 0) { | ||
| 1002 | int k; | ||
| 1003 | int lf = 0; | ||
| 1004 | |||
| 1005 | for (k = 0; k < len; ++k) { | ||
| 1006 | if (!lf) | ||
| 1007 | DBG(0, "%04x:", k); | ||
| 1008 | printk(" %02x", ((u8 *) buf)[k]); | ||
| 1009 | lf = 1; | ||
| 1010 | if (!k) | ||
| 1011 | continue; | ||
| 1012 | if (k % 16 == 15) { | ||
| 1013 | printk("\n"); | ||
| 1014 | lf = 0; | ||
| 1015 | continue; | ||
| 1016 | } | ||
| 1017 | if (k % 8 == 7) | ||
| 1018 | printk(" "); | ||
| 1019 | if (k % 4 == 3) | ||
| 1020 | printk(" "); | ||
| 1021 | } | ||
| 1022 | if (lf) | ||
| 1023 | printk("\n"); | ||
| 1024 | } | ||
| 1025 | } | ||
| 1026 | |||
| 1027 | #if defined(ISP1362_DEBUG) && defined(PTD_TRACE) | ||
| 1028 | |||
| 1029 | static void dump_ptd(struct ptd *ptd) | ||
| 1030 | { | ||
| 1031 | DBG(0, "EP %p: CC=%x EP=%d DIR=%x CNT=%d LEN=%d MPS=%d TGL=%x ACT=%x FA=%d SPD=%x SF=%x PR=%x LST=%x\n", | ||
| 1032 | container_of(ptd, struct isp1362_ep, ptd), | ||
| 1033 | PTD_GET_CC(ptd), PTD_GET_EP(ptd), PTD_GET_DIR(ptd), | ||
| 1034 | PTD_GET_COUNT(ptd), PTD_GET_LEN(ptd), PTD_GET_MPS(ptd), | ||
| 1035 | PTD_GET_TOGGLE(ptd), PTD_GET_ACTIVE(ptd), PTD_GET_FA(ptd), | ||
| 1036 | PTD_GET_SPD(ptd), PTD_GET_SF_INT(ptd), PTD_GET_PR(ptd), PTD_GET_LAST(ptd)); | ||
| 1037 | DBG(0, " %04x %04x %04x %04x\n", ptd->count, ptd->mps, ptd->len, ptd->faddr); | ||
| 1038 | } | ||
| 1039 | |||
| 1040 | static void dump_ptd_out_data(struct ptd *ptd, u8 *buf) | ||
| 1041 | { | ||
| 1042 | if (dbg_level > 0) { | ||
| 1043 | if (PTD_GET_DIR(ptd) != PTD_DIR_IN && PTD_GET_LEN(ptd)) { | ||
| 1044 | DBG(0, "--out->\n"); | ||
| 1045 | dump_data(buf, PTD_GET_LEN(ptd)); | ||
| 1046 | } | ||
| 1047 | } | ||
| 1048 | } | ||
| 1049 | |||
| 1050 | static void dump_ptd_in_data(struct ptd *ptd, u8 *buf) | ||
| 1051 | { | ||
| 1052 | if (dbg_level > 0) { | ||
| 1053 | if (PTD_GET_DIR(ptd) == PTD_DIR_IN && PTD_GET_COUNT(ptd)) { | ||
| 1054 | DBG(0, "<--in--\n"); | ||
| 1055 | dump_data(buf, PTD_GET_COUNT(ptd)); | ||
| 1056 | } | ||
| 1057 | DBG(0, "-----\n"); | ||
| 1058 | } | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | static void dump_ptd_queue(struct isp1362_ep_queue *epq) | ||
| 1062 | { | ||
| 1063 | struct isp1362_ep *ep; | ||
| 1064 | int dbg = dbg_level; | ||
| 1065 | |||
| 1066 | dbg_level = 1; | ||
| 1067 | list_for_each_entry(ep, &epq->active, active) { | ||
| 1068 | dump_ptd(&ep->ptd); | ||
| 1069 | dump_data(ep->data, ep->length); | ||
| 1070 | } | ||
| 1071 | dbg_level = dbg; | ||
| 1072 | } | ||
| 1073 | #else | ||
| 1074 | #define dump_ptd(ptd) do {} while (0) | ||
| 1075 | #define dump_ptd_in_data(ptd, buf) do {} while (0) | ||
| 1076 | #define dump_ptd_out_data(ptd, buf) do {} while (0) | ||
| 1077 | #define dump_ptd_data(ptd, buf) do {} while (0) | ||
| 1078 | #define dump_ptd_queue(epq) do {} while (0) | ||
| 1079 | #endif | ||
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 15438469f21a..9600a58299db 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c | |||
| @@ -386,6 +386,10 @@ static int isp1760_hc_setup(struct usb_hcd *hcd) | |||
| 386 | hwmode |= HW_DACK_POL_HIGH; | 386 | hwmode |= HW_DACK_POL_HIGH; |
| 387 | if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH) | 387 | if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH) |
| 388 | hwmode |= HW_DREQ_POL_HIGH; | 388 | hwmode |= HW_DREQ_POL_HIGH; |
| 389 | if (priv->devflags & ISP1760_FLAG_INTR_POL_HIGH) | ||
| 390 | hwmode |= HW_INTR_HIGH_ACT; | ||
| 391 | if (priv->devflags & ISP1760_FLAG_INTR_EDGE_TRIG) | ||
| 392 | hwmode |= HW_INTR_EDGE_TRIG; | ||
| 389 | 393 | ||
| 390 | /* | 394 | /* |
| 391 | * We have to set this first in case we're in 16-bit mode. | 395 | * We have to set this first in case we're in 16-bit mode. |
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h index 462f4943cb1b..6931ef5c9650 100644 --- a/drivers/usb/host/isp1760-hcd.h +++ b/drivers/usb/host/isp1760-hcd.h | |||
| @@ -142,6 +142,8 @@ typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, | |||
| 142 | #define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */ | 142 | #define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */ |
| 143 | #define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */ | 143 | #define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */ |
| 144 | #define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */ | 144 | #define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */ |
| 145 | #define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */ | ||
| 146 | #define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */ | ||
| 145 | 147 | ||
| 146 | /* chip memory management */ | 148 | /* chip memory management */ |
| 147 | struct memory_chunk { | 149 | struct memory_chunk { |
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c index d4feebfc63bd..1c9f977a5c9c 100644 --- a/drivers/usb/host/isp1760-if.c +++ b/drivers/usb/host/isp1760-if.c | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | * Currently there is support for | 3 | * Currently there is support for |
| 4 | * - OpenFirmware | 4 | * - OpenFirmware |
| 5 | * - PCI | 5 | * - PCI |
| 6 | * - PDEV (generic platform device centralized driver model) | ||
| 6 | * | 7 | * |
| 7 | * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de> | 8 | * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de> |
| 8 | * | 9 | * |
| @@ -11,6 +12,7 @@ | |||
| 11 | #include <linux/usb.h> | 12 | #include <linux/usb.h> |
| 12 | #include <linux/io.h> | 13 | #include <linux/io.h> |
| 13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/usb/isp1760.h> | ||
| 14 | 16 | ||
| 15 | #include "../core/hcd.h" | 17 | #include "../core/hcd.h" |
| 16 | #include "isp1760-hcd.h" | 18 | #include "isp1760-hcd.h" |
| @@ -308,6 +310,8 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev) | |||
| 308 | struct resource *mem_res; | 310 | struct resource *mem_res; |
| 309 | struct resource *irq_res; | 311 | struct resource *irq_res; |
| 310 | resource_size_t mem_size; | 312 | resource_size_t mem_size; |
| 313 | struct isp1760_platform_data *priv = pdev->dev.platform_data; | ||
| 314 | unsigned int devflags = 0; | ||
| 311 | unsigned long irqflags = IRQF_SHARED | IRQF_DISABLED; | 315 | unsigned long irqflags = IRQF_SHARED | IRQF_DISABLED; |
| 312 | 316 | ||
| 313 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 317 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| @@ -330,8 +334,23 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev) | |||
| 330 | } | 334 | } |
| 331 | irqflags |= irq_res->flags & IRQF_TRIGGER_MASK; | 335 | irqflags |= irq_res->flags & IRQF_TRIGGER_MASK; |
| 332 | 336 | ||
| 337 | if (priv) { | ||
| 338 | if (priv->is_isp1761) | ||
| 339 | devflags |= ISP1760_FLAG_ISP1761; | ||
| 340 | if (priv->bus_width_16) | ||
| 341 | devflags |= ISP1760_FLAG_BUS_WIDTH_16; | ||
| 342 | if (priv->port1_otg) | ||
| 343 | devflags |= ISP1760_FLAG_OTG_EN; | ||
| 344 | if (priv->analog_oc) | ||
| 345 | devflags |= ISP1760_FLAG_ANALOG_OC; | ||
| 346 | if (priv->dack_polarity_high) | ||
| 347 | devflags |= ISP1760_FLAG_DACK_POL_HIGH; | ||
| 348 | if (priv->dreq_polarity_high) | ||
| 349 | devflags |= ISP1760_FLAG_DREQ_POL_HIGH; | ||
| 350 | } | ||
| 351 | |||
| 333 | hcd = isp1760_register(mem_res->start, mem_size, irq_res->start, | 352 | hcd = isp1760_register(mem_res->start, mem_size, irq_res->start, |
| 334 | irqflags, &pdev->dev, dev_name(&pdev->dev), 0); | 353 | irqflags, &pdev->dev, dev_name(&pdev->dev), devflags); |
| 335 | if (IS_ERR(hcd)) { | 354 | if (IS_ERR(hcd)) { |
| 336 | pr_warning("isp1760: Failed to register the HCD device\n"); | 355 | pr_warning("isp1760: Failed to register the HCD device\n"); |
| 337 | ret = -ENODEV; | 356 | ret = -ENODEV; |
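The new platform path above pulls a struct isp1760_platform_data from pdev->dev.platform_data and translates it into devflags for isp1760_register(). A minimal board-file sketch (illustrative only, not part of this patch; the device name, base address, IRQ number and flag values are placeholders — only the platform-data field names are taken from the probe code above):

    #include <linux/kernel.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>
    #include <linux/usb/isp1760.h>

    static struct isp1760_platform_data example_isp1760_pdata = {
        .is_isp1761         = 0,
        .bus_width_16       = 1,    /* 16-bit local bus (placeholder choice) */
        .port1_otg          = 0,
        .analog_oc          = 0,
        .dack_polarity_high = 0,
        .dreq_polarity_high = 0,
    };

    static struct resource example_isp1760_resources[] = {
        [0] = {
            .start = 0x08000000,            /* placeholder base address */
            .end   = 0x08000000 + 0xffff,
            .flags = IORESOURCE_MEM,
        },
        [1] = {
            .start = 42,                    /* placeholder IRQ number */
            .end   = 42,
            /* trigger flags are picked up via irq_res->flags in the probe */
            .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
        },
    };

    static struct platform_device example_isp1760_device = {
        .name          = "isp1760",         /* assumed platform driver name */
        .id            = -1,
        .dev           = {
            .platform_data = &example_isp1760_pdata,
        },
        .resource      = example_isp1760_resources,
        .num_resources = ARRAY_SIZE(example_isp1760_resources),
    };

Registering this with platform_device_register(&example_isp1760_device) would then hand the flags to isp1760_plat_probe() above.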
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index bb5e6f671578..7ccffcbe7b6f 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c | |||
| @@ -148,7 +148,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, | |||
| 148 | at91_start_hc(pdev); | 148 | at91_start_hc(pdev); |
| 149 | ohci_hcd_init(hcd_to_ohci(hcd)); | 149 | ohci_hcd_init(hcd_to_ohci(hcd)); |
| 150 | 150 | ||
| 151 | retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED); | 151 | retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); |
| 152 | if (retval == 0) | 152 | if (retval == 0) |
| 153 | return retval; | 153 | return retval; |
| 154 | 154 | ||
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index 2ac4e022a13f..e4380082ebb1 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c | |||
| @@ -248,10 +248,9 @@ static int ohci_hcd_au1xxx_drv_remove(struct platform_device *pdev) | |||
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | #ifdef CONFIG_PM | 250 | #ifdef CONFIG_PM |
| 251 | static int ohci_hcd_au1xxx_drv_suspend(struct platform_device *pdev, | 251 | static int ohci_hcd_au1xxx_drv_suspend(struct device *dev) |
| 252 | pm_message_t message) | ||
| 253 | { | 252 | { |
| 254 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 253 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 255 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | 254 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
| 256 | unsigned long flags; | 255 | unsigned long flags; |
| 257 | int rc; | 256 | int rc; |
| @@ -274,10 +273,6 @@ static int ohci_hcd_au1xxx_drv_suspend(struct platform_device *pdev, | |||
| 274 | ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); | 273 | ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); |
| 275 | (void)ohci_readl(ohci, &ohci->regs->intrdisable); | 274 | (void)ohci_readl(ohci, &ohci->regs->intrdisable); |
| 276 | 275 | ||
| 277 | /* make sure snapshot being resumed re-enumerates everything */ | ||
| 278 | if (message.event == PM_EVENT_PRETHAW) | ||
| 279 | ohci_usb_reset(ohci); | ||
| 280 | |||
| 281 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 276 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
| 282 | 277 | ||
| 283 | au1xxx_stop_ohc(); | 278 | au1xxx_stop_ohc(); |
| @@ -287,9 +282,9 @@ bail: | |||
| 287 | return rc; | 282 | return rc; |
| 288 | } | 283 | } |
| 289 | 284 | ||
| 290 | static int ohci_hcd_au1xxx_drv_resume(struct platform_device *pdev) | 285 | static int ohci_hcd_au1xxx_drv_resume(struct device *dev) |
| 291 | { | 286 | { |
| 292 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 287 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 293 | 288 | ||
| 294 | au1xxx_start_ohc(); | 289 | au1xxx_start_ohc(); |
| 295 | 290 | ||
| @@ -298,20 +293,26 @@ static int ohci_hcd_au1xxx_drv_resume(struct platform_device *pdev) | |||
| 298 | 293 | ||
| 299 | return 0; | 294 | return 0; |
| 300 | } | 295 | } |
| 296 | |||
| 297 | static struct dev_pm_ops au1xxx_ohci_pmops = { | ||
| 298 | .suspend = ohci_hcd_au1xxx_drv_suspend, | ||
| 299 | .resume = ohci_hcd_au1xxx_drv_resume, | ||
| 300 | }; | ||
| 301 | |||
| 302 | #define AU1XXX_OHCI_PMOPS &au1xxx_ohci_pmops | ||
| 303 | |||
| 301 | #else | 304 | #else |
| 302 | #define ohci_hcd_au1xxx_drv_suspend NULL | 305 | #define AU1XXX_OHCI_PMOPS NULL |
| 303 | #define ohci_hcd_au1xxx_drv_resume NULL | ||
| 304 | #endif | 306 | #endif |
| 305 | 307 | ||
| 306 | static struct platform_driver ohci_hcd_au1xxx_driver = { | 308 | static struct platform_driver ohci_hcd_au1xxx_driver = { |
| 307 | .probe = ohci_hcd_au1xxx_drv_probe, | 309 | .probe = ohci_hcd_au1xxx_drv_probe, |
| 308 | .remove = ohci_hcd_au1xxx_drv_remove, | 310 | .remove = ohci_hcd_au1xxx_drv_remove, |
| 309 | .shutdown = usb_hcd_platform_shutdown, | 311 | .shutdown = usb_hcd_platform_shutdown, |
| 310 | .suspend = ohci_hcd_au1xxx_drv_suspend, | ||
| 311 | .resume = ohci_hcd_au1xxx_drv_resume, | ||
| 312 | .driver = { | 312 | .driver = { |
| 313 | .name = "au1xxx-ohci", | 313 | .name = "au1xxx-ohci", |
| 314 | .owner = THIS_MODULE, | 314 | .owner = THIS_MODULE, |
| 315 | .pm = AU1XXX_OHCI_PMOPS, | ||
| 315 | }, | 316 | }, |
| 316 | }; | 317 | }; |
| 317 | 318 | ||
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c index b0dbf4157d29..4e681613e7ae 100644 --- a/drivers/usb/host/ohci-ep93xx.c +++ b/drivers/usb/host/ohci-ep93xx.c | |||
| @@ -188,7 +188,6 @@ static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev) | |||
| 188 | { | 188 | { |
| 189 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 189 | struct usb_hcd *hcd = platform_get_drvdata(pdev); |
| 190 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | 190 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
| 191 | int status; | ||
| 192 | 191 | ||
| 193 | if (time_before(jiffies, ohci->next_statechange)) | 192 | if (time_before(jiffies, ohci->next_statechange)) |
| 194 | msleep(5); | 193 | msleep(5); |
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 58151687d351..78bb7710f36d 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
| @@ -34,7 +34,6 @@ | |||
| 34 | #include <linux/usb/otg.h> | 34 | #include <linux/usb/otg.h> |
| 35 | #include <linux/dma-mapping.h> | 35 | #include <linux/dma-mapping.h> |
| 36 | #include <linux/dmapool.h> | 36 | #include <linux/dmapool.h> |
| 37 | #include <linux/reboot.h> | ||
| 38 | #include <linux/workqueue.h> | 37 | #include <linux/workqueue.h> |
| 39 | #include <linux/debugfs.h> | 38 | #include <linux/debugfs.h> |
| 40 | 39 | ||
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index e44dc2cbca24..f1c06202fdf2 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c | |||
| @@ -177,9 +177,13 @@ static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci, | |||
| 177 | 177 | ||
| 178 | if (inf->flags & NO_OC_PROTECTION) | 178 | if (inf->flags & NO_OC_PROTECTION) |
| 179 | uhcrhda |= UHCRHDA_NOCP; | 179 | uhcrhda |= UHCRHDA_NOCP; |
| 180 | else | ||
| 181 | uhcrhda &= ~UHCRHDA_NOCP; | ||
| 180 | 182 | ||
| 181 | if (inf->flags & OC_MODE_PERPORT) | 183 | if (inf->flags & OC_MODE_PERPORT) |
| 182 | uhcrhda |= UHCRHDA_OCPM; | 184 | uhcrhda |= UHCRHDA_OCPM; |
| 185 | else | ||
| 186 | uhcrhda &= ~UHCRHDA_OCPM; | ||
| 183 | 187 | ||
| 184 | if (inf->power_on_delay) { | 188 | if (inf->power_on_delay) { |
| 185 | uhcrhda &= ~UHCRHDA_POTPGT(0xff); | 189 | uhcrhda &= ~UHCRHDA_POTPGT(0xff); |
| @@ -477,38 +481,47 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev) | |||
| 477 | return 0; | 481 | return 0; |
| 478 | } | 482 | } |
| 479 | 483 | ||
| 480 | #ifdef CONFIG_PM | 484 | #ifdef CONFIG_PM |
| 481 | static int ohci_hcd_pxa27x_drv_suspend(struct platform_device *pdev, pm_message_t state) | 485 | static int ohci_hcd_pxa27x_drv_suspend(struct device *dev) |
| 482 | { | 486 | { |
| 483 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 487 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 484 | struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); | 488 | struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); |
| 485 | 489 | ||
| 486 | if (time_before(jiffies, ohci->ohci.next_statechange)) | 490 | if (time_before(jiffies, ohci->ohci.next_statechange)) |
| 487 | msleep(5); | 491 | msleep(5); |
| 488 | ohci->ohci.next_statechange = jiffies; | 492 | ohci->ohci.next_statechange = jiffies; |
| 489 | 493 | ||
| 490 | pxa27x_stop_hc(ohci, &pdev->dev); | 494 | pxa27x_stop_hc(ohci, dev); |
| 491 | hcd->state = HC_STATE_SUSPENDED; | 495 | hcd->state = HC_STATE_SUSPENDED; |
| 492 | 496 | ||
| 493 | return 0; | 497 | return 0; |
| 494 | } | 498 | } |
| 495 | 499 | ||
| 496 | static int ohci_hcd_pxa27x_drv_resume(struct platform_device *pdev) | 500 | static int ohci_hcd_pxa27x_drv_resume(struct device *dev) |
| 497 | { | 501 | { |
| 498 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 502 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 499 | struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); | 503 | struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); |
| 504 | struct pxaohci_platform_data *inf = dev->platform_data; | ||
| 500 | int status; | 505 | int status; |
| 501 | 506 | ||
| 502 | if (time_before(jiffies, ohci->ohci.next_statechange)) | 507 | if (time_before(jiffies, ohci->ohci.next_statechange)) |
| 503 | msleep(5); | 508 | msleep(5); |
| 504 | ohci->ohci.next_statechange = jiffies; | 509 | ohci->ohci.next_statechange = jiffies; |
| 505 | 510 | ||
| 506 | if ((status = pxa27x_start_hc(ohci, &pdev->dev)) < 0) | 511 | if ((status = pxa27x_start_hc(ohci, dev)) < 0) |
| 507 | return status; | 512 | return status; |
| 508 | 513 | ||
| 514 | /* Select Power Management Mode */ | ||
| 515 | pxa27x_ohci_select_pmm(ohci, inf->port_mode); | ||
| 516 | |||
| 509 | ohci_finish_controller_resume(hcd); | 517 | ohci_finish_controller_resume(hcd); |
| 510 | return 0; | 518 | return 0; |
| 511 | } | 519 | } |
| 520 | |||
| 521 | static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { | ||
| 522 | .suspend = ohci_hcd_pxa27x_drv_suspend, | ||
| 523 | .resume = ohci_hcd_pxa27x_drv_resume, | ||
| 524 | }; | ||
| 512 | #endif | 525 | #endif |
| 513 | 526 | ||
| 514 | /* work with hotplug and coldplug */ | 527 | /* work with hotplug and coldplug */ |
| @@ -518,13 +531,12 @@ static struct platform_driver ohci_hcd_pxa27x_driver = { | |||
| 518 | .probe = ohci_hcd_pxa27x_drv_probe, | 531 | .probe = ohci_hcd_pxa27x_drv_probe, |
| 519 | .remove = ohci_hcd_pxa27x_drv_remove, | 532 | .remove = ohci_hcd_pxa27x_drv_remove, |
| 520 | .shutdown = usb_hcd_platform_shutdown, | 533 | .shutdown = usb_hcd_platform_shutdown, |
| 521 | #ifdef CONFIG_PM | ||
| 522 | .suspend = ohci_hcd_pxa27x_drv_suspend, | ||
| 523 | .resume = ohci_hcd_pxa27x_drv_resume, | ||
| 524 | #endif | ||
| 525 | .driver = { | 534 | .driver = { |
| 526 | .name = "pxa27x-ohci", | 535 | .name = "pxa27x-ohci", |
| 527 | .owner = THIS_MODULE, | 536 | .owner = THIS_MODULE, |
| 537 | #ifdef CONFIG_PM | ||
| 538 | .pm = &ohci_hcd_pxa27x_pm_ops, | ||
| 539 | #endif | ||
| 528 | }, | 540 | }, |
| 529 | }; | 541 | }; |
| 530 | 542 | ||
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index c2d80f80448b..16fecb8ecc39 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
| @@ -418,7 +418,7 @@ static struct ed *ed_get ( | |||
| 418 | is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN); | 418 | is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN); |
| 419 | 419 | ||
| 420 | /* FIXME usbcore changes dev->devnum before SET_ADDRESS | 420 | /* FIXME usbcore changes dev->devnum before SET_ADDRESS |
| 421 | * suceeds ... otherwise we wouldn't need "pipe". | 421 | * succeeds ... otherwise we wouldn't need "pipe". |
| 422 | */ | 422 | */ |
| 423 | info = usb_pipedevice (pipe); | 423 | info = usb_pipedevice (pipe); |
| 424 | ed->type = usb_pipetype(pipe); | 424 | ed->type = usb_pipetype(pipe); |
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 5ac489ee3dab..50f57f468836 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c | |||
| @@ -33,7 +33,6 @@ | |||
| 33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
| 34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
| 35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
| 36 | #include <linux/reboot.h> | ||
| 37 | #include <linux/usb.h> | 36 | #include <linux/usb.h> |
| 38 | #include <linux/moduleparam.h> | 37 | #include <linux/moduleparam.h> |
| 39 | #include <linux/dma-mapping.h> | 38 | #include <linux/dma-mapping.h> |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 83b5f9cea85a..23cf3bde4762 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
| @@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) | |||
| 475 | else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) | 475 | else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) |
| 476 | quirk_usb_handoff_xhci(pdev); | 476 | quirk_usb_handoff_xhci(pdev); |
| 477 | } | 477 | } |
| 478 | DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); | 478 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); |
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index e18f74946e68..749b53742828 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c | |||
| @@ -91,43 +91,43 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597) | |||
| 91 | u16 tmp; | 91 | u16 tmp; |
| 92 | int i = 0; | 92 | int i = 0; |
| 93 | 93 | ||
| 94 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | 94 | if (r8a66597->pdata->on_chip) { |
| 95 | #if defined(CONFIG_HAVE_CLK) | 95 | #ifdef CONFIG_HAVE_CLK |
| 96 | clk_enable(r8a66597->clk); | 96 | clk_enable(r8a66597->clk); |
| 97 | #endif | 97 | #endif |
| 98 | do { | 98 | do { |
| 99 | r8a66597_write(r8a66597, SCKE, SYSCFG0); | 99 | r8a66597_write(r8a66597, SCKE, SYSCFG0); |
| 100 | tmp = r8a66597_read(r8a66597, SYSCFG0); | 100 | tmp = r8a66597_read(r8a66597, SYSCFG0); |
| 101 | if (i++ > 1000) { | 101 | if (i++ > 1000) { |
| 102 | printk(KERN_ERR "r8a66597: register access fail.\n"); | 102 | printk(KERN_ERR "r8a66597: reg access fail.\n"); |
| 103 | return -ENXIO; | 103 | return -ENXIO; |
| 104 | } | 104 | } |
| 105 | } while ((tmp & SCKE) != SCKE); | 105 | } while ((tmp & SCKE) != SCKE); |
| 106 | r8a66597_write(r8a66597, 0x04, 0x02); | 106 | r8a66597_write(r8a66597, 0x04, 0x02); |
| 107 | #else | 107 | } else { |
| 108 | do { | 108 | do { |
| 109 | r8a66597_write(r8a66597, USBE, SYSCFG0); | 109 | r8a66597_write(r8a66597, USBE, SYSCFG0); |
| 110 | tmp = r8a66597_read(r8a66597, SYSCFG0); | 110 | tmp = r8a66597_read(r8a66597, SYSCFG0); |
| 111 | if (i++ > 1000) { | 111 | if (i++ > 1000) { |
| 112 | printk(KERN_ERR "r8a66597: register access fail.\n"); | 112 | printk(KERN_ERR "r8a66597: reg access fail.\n"); |
| 113 | return -ENXIO; | 113 | return -ENXIO; |
| 114 | } | 114 | } |
| 115 | } while ((tmp & USBE) != USBE); | 115 | } while ((tmp & USBE) != USBE); |
| 116 | r8a66597_bclr(r8a66597, USBE, SYSCFG0); | 116 | r8a66597_bclr(r8a66597, USBE, SYSCFG0); |
| 117 | r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL, | 117 | r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), |
| 118 | SYSCFG0); | 118 | XTAL, SYSCFG0); |
| 119 | 119 | ||
| 120 | i = 0; | 120 | i = 0; |
| 121 | r8a66597_bset(r8a66597, XCKE, SYSCFG0); | 121 | r8a66597_bset(r8a66597, XCKE, SYSCFG0); |
| 122 | do { | 122 | do { |
| 123 | msleep(1); | 123 | msleep(1); |
| 124 | tmp = r8a66597_read(r8a66597, SYSCFG0); | 124 | tmp = r8a66597_read(r8a66597, SYSCFG0); |
| 125 | if (i++ > 500) { | 125 | if (i++ > 500) { |
| 126 | printk(KERN_ERR "r8a66597: register access fail.\n"); | 126 | printk(KERN_ERR "r8a66597: reg access fail.\n"); |
| 127 | return -ENXIO; | 127 | return -ENXIO; |
| 128 | } | 128 | } |
| 129 | } while ((tmp & SCKE) != SCKE); | 129 | } while ((tmp & SCKE) != SCKE); |
| 130 | #endif /* #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) */ | 130 | } |
| 131 | 131 | ||
| 132 | return 0; | 132 | return 0; |
| 133 | } | 133 | } |
| @@ -136,15 +136,16 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597) | |||
| 136 | { | 136 | { |
| 137 | r8a66597_bclr(r8a66597, SCKE, SYSCFG0); | 137 | r8a66597_bclr(r8a66597, SCKE, SYSCFG0); |
| 138 | udelay(1); | 138 | udelay(1); |
| 139 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | 139 | |
| 140 | #if defined(CONFIG_HAVE_CLK) | 140 | if (r8a66597->pdata->on_chip) { |
| 141 | clk_disable(r8a66597->clk); | 141 | #ifdef CONFIG_HAVE_CLK |
| 142 | #endif | 142 | clk_disable(r8a66597->clk); |
| 143 | #else | ||
| 144 | r8a66597_bclr(r8a66597, PLLC, SYSCFG0); | ||
| 145 | r8a66597_bclr(r8a66597, XCKE, SYSCFG0); | ||
| 146 | r8a66597_bclr(r8a66597, USBE, SYSCFG0); | ||
| 147 | #endif | 143 | #endif |
| 144 | } else { | ||
| 145 | r8a66597_bclr(r8a66597, PLLC, SYSCFG0); | ||
| 146 | r8a66597_bclr(r8a66597, XCKE, SYSCFG0); | ||
| 147 | r8a66597_bclr(r8a66597, USBE, SYSCFG0); | ||
| 148 | } | ||
| 148 | } | 149 | } |
| 149 | 150 | ||
| 150 | static void r8a66597_enable_port(struct r8a66597 *r8a66597, int port) | 151 | static void r8a66597_enable_port(struct r8a66597 *r8a66597, int port) |
| @@ -205,7 +206,7 @@ static int enable_controller(struct r8a66597 *r8a66597) | |||
| 205 | 206 | ||
| 206 | r8a66597_bset(r8a66597, SIGNE | SACKE, INTENB1); | 207 | r8a66597_bset(r8a66597, SIGNE | SACKE, INTENB1); |
| 207 | 208 | ||
| 208 | for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) | 209 | for (port = 0; port < r8a66597->max_root_hub; port++) |
| 209 | r8a66597_enable_port(r8a66597, port); | 210 | r8a66597_enable_port(r8a66597, port); |
| 210 | 211 | ||
| 211 | return 0; | 212 | return 0; |
| @@ -218,7 +219,7 @@ static void disable_controller(struct r8a66597 *r8a66597) | |||
| 218 | r8a66597_write(r8a66597, 0, INTENB0); | 219 | r8a66597_write(r8a66597, 0, INTENB0); |
| 219 | r8a66597_write(r8a66597, 0, INTSTS0); | 220 | r8a66597_write(r8a66597, 0, INTSTS0); |
| 220 | 221 | ||
| 221 | for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) | 222 | for (port = 0; port < r8a66597->max_root_hub; port++) |
| 222 | r8a66597_disable_port(r8a66597, port); | 223 | r8a66597_disable_port(r8a66597, port); |
| 223 | 224 | ||
| 224 | r8a66597_clock_disable(r8a66597); | 225 | r8a66597_clock_disable(r8a66597); |
| @@ -249,11 +250,12 @@ static int is_hub_limit(char *devpath) | |||
| 249 | return ((strlen(devpath) >= 4) ? 1 : 0); | 250 | return ((strlen(devpath) >= 4) ? 1 : 0); |
| 250 | } | 251 | } |
| 251 | 252 | ||
| 252 | static void get_port_number(char *devpath, u16 *root_port, u16 *hub_port) | 253 | static void get_port_number(struct r8a66597 *r8a66597, |
| 254 | char *devpath, u16 *root_port, u16 *hub_port) | ||
| 253 | { | 255 | { |
| 254 | if (root_port) { | 256 | if (root_port) { |
| 255 | *root_port = (devpath[0] & 0x0F) - 1; | 257 | *root_port = (devpath[0] & 0x0F) - 1; |
| 256 | if (*root_port >= R8A66597_MAX_ROOT_HUB) | 258 | if (*root_port >= r8a66597->max_root_hub) |
| 257 | printk(KERN_ERR "r8a66597: Illegal root port number.\n"); | 259 | printk(KERN_ERR "r8a66597: Illegal root port number.\n"); |
| 258 | } | 260 | } |
| 259 | if (hub_port) | 261 | if (hub_port) |
| @@ -355,7 +357,8 @@ static int make_r8a66597_device(struct r8a66597 *r8a66597, | |||
| 355 | INIT_LIST_HEAD(&dev->device_list); | 357 | INIT_LIST_HEAD(&dev->device_list); |
| 356 | list_add_tail(&dev->device_list, &r8a66597->child_device); | 358 | list_add_tail(&dev->device_list, &r8a66597->child_device); |
| 357 | 359 | ||
| 358 | get_port_number(urb->dev->devpath, &dev->root_port, &dev->hub_port); | 360 | get_port_number(r8a66597, urb->dev->devpath, |
| 361 | &dev->root_port, &dev->hub_port); | ||
| 359 | if (!is_child_device(urb->dev->devpath)) | 362 | if (!is_child_device(urb->dev->devpath)) |
| 360 | r8a66597->root_hub[dev->root_port].dev = dev; | 363 | r8a66597->root_hub[dev->root_port].dev = dev; |
| 361 | 364 | ||
| @@ -420,7 +423,7 @@ static void free_usb_address(struct r8a66597 *r8a66597, | |||
| 420 | list_del(&dev->device_list); | 423 | list_del(&dev->device_list); |
| 421 | kfree(dev); | 424 | kfree(dev); |
| 422 | 425 | ||
| 423 | for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { | 426 | for (port = 0; port < r8a66597->max_root_hub; port++) { |
| 424 | if (r8a66597->root_hub[port].dev == dev) { | 427 | if (r8a66597->root_hub[port].dev == dev) { |
| 425 | r8a66597->root_hub[port].dev = NULL; | 428 | r8a66597->root_hub[port].dev = NULL; |
| 426 | break; | 429 | break; |
| @@ -495,10 +498,20 @@ static void r8a66597_pipe_toggle(struct r8a66597 *r8a66597, | |||
| 495 | r8a66597_bset(r8a66597, SQCLR, pipe->pipectr); | 498 | r8a66597_bset(r8a66597, SQCLR, pipe->pipectr); |
| 496 | } | 499 | } |
| 497 | 500 | ||
| 501 | static inline unsigned short mbw_value(struct r8a66597 *r8a66597) | ||
| 502 | { | ||
| 503 | if (r8a66597->pdata->on_chip) | ||
| 504 | return MBW_32; | ||
| 505 | else | ||
| 506 | return MBW_16; | ||
| 507 | } | ||
| 508 | |||
| 498 | /* this function must be called with interrupt disabled */ | 509 | /* this function must be called with interrupt disabled */ |
| 499 | static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum) | 510 | static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum) |
| 500 | { | 511 | { |
| 501 | r8a66597_mdfy(r8a66597, MBW | pipenum, MBW | CURPIPE, CFIFOSEL); | 512 | unsigned short mbw = mbw_value(r8a66597); |
| 513 | |||
| 514 | r8a66597_mdfy(r8a66597, mbw | pipenum, mbw | CURPIPE, CFIFOSEL); | ||
| 502 | r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, pipenum); | 515 | r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, pipenum); |
| 503 | } | 516 | } |
| 504 | 517 | ||
| @@ -506,11 +519,13 @@ static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum) | |||
| 506 | static inline void fifo_change_from_pipe(struct r8a66597 *r8a66597, | 519 | static inline void fifo_change_from_pipe(struct r8a66597 *r8a66597, |
| 507 | struct r8a66597_pipe *pipe) | 520 | struct r8a66597_pipe *pipe) |
| 508 | { | 521 | { |
| 522 | unsigned short mbw = mbw_value(r8a66597); | ||
| 523 | |||
| 509 | cfifo_change(r8a66597, 0); | 524 | cfifo_change(r8a66597, 0); |
| 510 | r8a66597_mdfy(r8a66597, MBW | 0, MBW | CURPIPE, D0FIFOSEL); | 525 | r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D0FIFOSEL); |
| 511 | r8a66597_mdfy(r8a66597, MBW | 0, MBW | CURPIPE, D1FIFOSEL); | 526 | r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D1FIFOSEL); |
| 512 | 527 | ||
| 513 | r8a66597_mdfy(r8a66597, MBW | pipe->info.pipenum, MBW | CURPIPE, | 528 | r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum, mbw | CURPIPE, |
| 514 | pipe->fifosel); | 529 | pipe->fifosel); |
| 515 | r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum); | 530 | r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum); |
| 516 | } | 531 | } |
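The mbw_value() helper introduced above replaces the build-time MBW define: the FIFO select registers now get their bus-width bits from the platform data at run time, so one kernel image can drive both the on-chip (32-bit) and external (16-bit) variants. A minimal sketch of the idea; MBW_32 is assumed to live next to MBW_16 in the shared platform header, and the r8a66597 types are the driver's own:

/* Pick the FIFOSEL bus-width bits from the platform data instead of a
 * compile-time constant; MBW_32 is assumed to be the 32-bit counterpart
 * of MBW_16 in <linux/usb/r8a66597.h>. */
static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
	return r8a66597->pdata->on_chip ? MBW_32 : MBW_16;
}

/* every FIFO select then masks and sets the width it just computed */
static inline void cfifo_select(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned short mbw = mbw_value(r8a66597);

	r8a66597_mdfy(r8a66597, mbw | pipenum, mbw | CURPIPE, CFIFOSEL);
}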
| @@ -742,9 +757,13 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, | |||
| 742 | struct r8a66597_pipe *pipe, | 757 | struct r8a66597_pipe *pipe, |
| 743 | struct urb *urb) | 758 | struct urb *urb) |
| 744 | { | 759 | { |
| 745 | #if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | ||
| 746 | int i; | 760 | int i; |
| 747 | struct r8a66597_pipe_info *info = &pipe->info; | 761 | struct r8a66597_pipe_info *info = &pipe->info; |
| 762 | unsigned short mbw = mbw_value(r8a66597); | ||
| 763 | |||
| 764 | /* pipe dma is only for external controllers */ | ||
| 765 | if (r8a66597->pdata->on_chip) | ||
| 766 | return; | ||
| 748 | 767 | ||
| 749 | if ((pipe->info.pipenum != 0) && (info->type != R8A66597_INT)) { | 768 | if ((pipe->info.pipenum != 0) && (info->type != R8A66597_INT)) { |
| 750 | for (i = 0; i < R8A66597_MAX_DMA_CHANNEL; i++) { | 769 | for (i = 0; i < R8A66597_MAX_DMA_CHANNEL; i++) { |
| @@ -763,8 +782,8 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, | |||
| 763 | set_pipe_reg_addr(pipe, i); | 782 | set_pipe_reg_addr(pipe, i); |
| 764 | 783 | ||
| 765 | cfifo_change(r8a66597, 0); | 784 | cfifo_change(r8a66597, 0); |
| 766 | r8a66597_mdfy(r8a66597, MBW | pipe->info.pipenum, | 785 | r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum, |
| 767 | MBW | CURPIPE, pipe->fifosel); | 786 | mbw | CURPIPE, pipe->fifosel); |
| 768 | 787 | ||
| 769 | r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, | 788 | r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, |
| 770 | pipe->info.pipenum); | 789 | pipe->info.pipenum); |
| @@ -772,7 +791,6 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, | |||
| 772 | break; | 791 | break; |
| 773 | } | 792 | } |
| 774 | } | 793 | } |
| 775 | #endif /* #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) */ | ||
| 776 | } | 794 | } |
| 777 | 795 | ||
| 778 | /* this function must be called with interrupt disabled */ | 796 | /* this function must be called with interrupt disabled */ |
| @@ -1769,7 +1787,7 @@ static void r8a66597_timer(unsigned long _r8a66597) | |||
| 1769 | 1787 | ||
| 1770 | spin_lock_irqsave(&r8a66597->lock, flags); | 1788 | spin_lock_irqsave(&r8a66597->lock, flags); |
| 1771 | 1789 | ||
| 1772 | for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) | 1790 | for (port = 0; port < r8a66597->max_root_hub; port++) |
| 1773 | r8a66597_root_hub_control(r8a66597, port); | 1791 | r8a66597_root_hub_control(r8a66597, port); |
| 1774 | 1792 | ||
| 1775 | spin_unlock_irqrestore(&r8a66597->lock, flags); | 1793 | spin_unlock_irqrestore(&r8a66597->lock, flags); |
| @@ -1807,7 +1825,7 @@ static void set_address_zero(struct r8a66597 *r8a66597, struct urb *urb) | |||
| 1807 | u16 root_port, hub_port; | 1825 | u16 root_port, hub_port; |
| 1808 | 1826 | ||
| 1809 | if (usb_address == 0) { | 1827 | if (usb_address == 0) { |
| 1810 | get_port_number(urb->dev->devpath, | 1828 | get_port_number(r8a66597, urb->dev->devpath, |
| 1811 | &root_port, &hub_port); | 1829 | &root_port, &hub_port); |
| 1812 | set_devadd_reg(r8a66597, 0, | 1830 | set_devadd_reg(r8a66597, 0, |
| 1813 | get_r8a66597_usb_speed(urb->dev->speed), | 1831 | get_r8a66597_usb_speed(urb->dev->speed), |
| @@ -2082,7 +2100,7 @@ static int r8a66597_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
| 2082 | 2100 | ||
| 2083 | *buf = 0; /* initialize (no change) */ | 2101 | *buf = 0; /* initialize (no change) */ |
| 2084 | 2102 | ||
| 2085 | for (i = 0; i < R8A66597_MAX_ROOT_HUB; i++) { | 2103 | for (i = 0; i < r8a66597->max_root_hub; i++) { |
| 2086 | if (r8a66597->root_hub[i].port & 0xffff0000) | 2104 | if (r8a66597->root_hub[i].port & 0xffff0000) |
| 2087 | *buf |= 1 << (i + 1); | 2105 | *buf |= 1 << (i + 1); |
| 2088 | } | 2106 | } |
| @@ -2097,11 +2115,11 @@ static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597, | |||
| 2097 | { | 2115 | { |
| 2098 | desc->bDescriptorType = 0x29; | 2116 | desc->bDescriptorType = 0x29; |
| 2099 | desc->bHubContrCurrent = 0; | 2117 | desc->bHubContrCurrent = 0; |
| 2100 | desc->bNbrPorts = R8A66597_MAX_ROOT_HUB; | 2118 | desc->bNbrPorts = r8a66597->max_root_hub; |
| 2101 | desc->bDescLength = 9; | 2119 | desc->bDescLength = 9; |
| 2102 | desc->bPwrOn2PwrGood = 0; | 2120 | desc->bPwrOn2PwrGood = 0; |
| 2103 | desc->wHubCharacteristics = cpu_to_le16(0x0011); | 2121 | desc->wHubCharacteristics = cpu_to_le16(0x0011); |
| 2104 | desc->bitmap[0] = ((1 << R8A66597_MAX_ROOT_HUB) - 1) << 1; | 2122 | desc->bitmap[0] = ((1 << r8a66597->max_root_hub) - 1) << 1; |
| 2105 | desc->bitmap[1] = ~0; | 2123 | desc->bitmap[1] = ~0; |
| 2106 | } | 2124 | } |
| 2107 | 2125 | ||
| @@ -2129,7 +2147,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
| 2129 | } | 2147 | } |
| 2130 | break; | 2148 | break; |
| 2131 | case ClearPortFeature: | 2149 | case ClearPortFeature: |
| 2132 | if (wIndex > R8A66597_MAX_ROOT_HUB) | 2150 | if (wIndex > r8a66597->max_root_hub) |
| 2133 | goto error; | 2151 | goto error; |
| 2134 | if (wLength != 0) | 2152 | if (wLength != 0) |
| 2135 | goto error; | 2153 | goto error; |
| @@ -2162,12 +2180,12 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
| 2162 | *buf = 0x00; | 2180 | *buf = 0x00; |
| 2163 | break; | 2181 | break; |
| 2164 | case GetPortStatus: | 2182 | case GetPortStatus: |
| 2165 | if (wIndex > R8A66597_MAX_ROOT_HUB) | 2183 | if (wIndex > r8a66597->max_root_hub) |
| 2166 | goto error; | 2184 | goto error; |
| 2167 | *(__le32 *)buf = cpu_to_le32(rh->port); | 2185 | *(__le32 *)buf = cpu_to_le32(rh->port); |
| 2168 | break; | 2186 | break; |
| 2169 | case SetPortFeature: | 2187 | case SetPortFeature: |
| 2170 | if (wIndex > R8A66597_MAX_ROOT_HUB) | 2188 | if (wIndex > r8a66597->max_root_hub) |
| 2171 | goto error; | 2189 | goto error; |
| 2172 | if (wLength != 0) | 2190 | if (wLength != 0) |
| 2173 | goto error; | 2191 | goto error; |
| @@ -2216,7 +2234,7 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd) | |||
| 2216 | 2234 | ||
| 2217 | dbg("%s", __func__); | 2235 | dbg("%s", __func__); |
| 2218 | 2236 | ||
| 2219 | for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { | 2237 | for (port = 0; port < r8a66597->max_root_hub; port++) { |
| 2220 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; | 2238 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; |
| 2221 | unsigned long dvstctr_reg = get_dvstctr_reg(port); | 2239 | unsigned long dvstctr_reg = get_dvstctr_reg(port); |
| 2222 | 2240 | ||
| @@ -2247,7 +2265,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd) | |||
| 2247 | 2265 | ||
| 2248 | dbg("%s", __func__); | 2266 | dbg("%s", __func__); |
| 2249 | 2267 | ||
| 2250 | for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { | 2268 | for (port = 0; port < r8a66597->max_root_hub; port++) { |
| 2251 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; | 2269 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; |
| 2252 | unsigned long dvstctr_reg = get_dvstctr_reg(port); | 2270 | unsigned long dvstctr_reg = get_dvstctr_reg(port); |
| 2253 | 2271 | ||
| @@ -2305,16 +2323,16 @@ static struct hc_driver r8a66597_hc_driver = { | |||
| 2305 | }; | 2323 | }; |
| 2306 | 2324 | ||
| 2307 | #if defined(CONFIG_PM) | 2325 | #if defined(CONFIG_PM) |
| 2308 | static int r8a66597_suspend(struct platform_device *pdev, pm_message_t state) | 2326 | static int r8a66597_suspend(struct device *dev) |
| 2309 | { | 2327 | { |
| 2310 | struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev); | 2328 | struct r8a66597 *r8a66597 = dev_get_drvdata(dev); |
| 2311 | int port; | 2329 | int port; |
| 2312 | 2330 | ||
| 2313 | dbg("%s", __func__); | 2331 | dbg("%s", __func__); |
| 2314 | 2332 | ||
| 2315 | disable_controller(r8a66597); | 2333 | disable_controller(r8a66597); |
| 2316 | 2334 | ||
| 2317 | for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { | 2335 | for (port = 0; port < r8a66597->max_root_hub; port++) { |
| 2318 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; | 2336 | struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; |
| 2319 | 2337 | ||
| 2320 | rh->port = 0x00000000; | 2338 | rh->port = 0x00000000; |
| @@ -2323,9 +2341,9 @@ static int r8a66597_suspend(struct platform_device *pdev, pm_message_t state) | |||
| 2323 | return 0; | 2341 | return 0; |
| 2324 | } | 2342 | } |
| 2325 | 2343 | ||
| 2326 | static int r8a66597_resume(struct platform_device *pdev) | 2344 | static int r8a66597_resume(struct device *dev) |
| 2327 | { | 2345 | { |
| 2328 | struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev); | 2346 | struct r8a66597 *r8a66597 = dev_get_drvdata(dev); |
| 2329 | struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); | 2347 | struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); |
| 2330 | 2348 | ||
| 2331 | dbg("%s", __func__); | 2349 | dbg("%s", __func__); |
| @@ -2335,9 +2353,17 @@ static int r8a66597_resume(struct platform_device *pdev) | |||
| 2335 | 2353 | ||
| 2336 | return 0; | 2354 | return 0; |
| 2337 | } | 2355 | } |
| 2356 | |||
| 2357 | static struct dev_pm_ops r8a66597_dev_pm_ops = { | ||
| 2358 | .suspend = r8a66597_suspend, | ||
| 2359 | .resume = r8a66597_resume, | ||
| 2360 | .poweroff = r8a66597_suspend, | ||
| 2361 | .restore = r8a66597_resume, | ||
| 2362 | }; | ||
| 2363 | |||
| 2364 | #define R8A66597_DEV_PM_OPS (&r8a66597_dev_pm_ops) | ||
| 2338 | #else /* if defined(CONFIG_PM) */ | 2365 | #else /* if defined(CONFIG_PM) */ |
| 2339 | #define r8a66597_suspend NULL | 2366 | #define R8A66597_DEV_PM_OPS NULL |
| 2340 | #define r8a66597_resume NULL | ||
| 2341 | #endif | 2367 | #endif |
| 2342 | 2368 | ||
| 2343 | static int __init_or_module r8a66597_remove(struct platform_device *pdev) | 2369 | static int __init_or_module r8a66597_remove(struct platform_device *pdev) |
| @@ -2348,8 +2374,9 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) | |||
| 2348 | del_timer_sync(&r8a66597->rh_timer); | 2374 | del_timer_sync(&r8a66597->rh_timer); |
| 2349 | usb_remove_hcd(hcd); | 2375 | usb_remove_hcd(hcd); |
| 2350 | iounmap((void *)r8a66597->reg); | 2376 | iounmap((void *)r8a66597->reg); |
| 2351 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | 2377 | #ifdef CONFIG_HAVE_CLK |
| 2352 | clk_put(r8a66597->clk); | 2378 | if (r8a66597->pdata->on_chip) |
| 2379 | clk_put(r8a66597->clk); | ||
| 2353 | #endif | 2380 | #endif |
| 2354 | usb_put_hcd(hcd); | 2381 | usb_put_hcd(hcd); |
| 2355 | return 0; | 2382 | return 0; |
| @@ -2357,7 +2384,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) | |||
| 2357 | 2384 | ||
| 2358 | static int __devinit r8a66597_probe(struct platform_device *pdev) | 2385 | static int __devinit r8a66597_probe(struct platform_device *pdev) |
| 2359 | { | 2386 | { |
| 2360 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | 2387 | #ifdef CONFIG_HAVE_CLK |
| 2361 | char clk_name[8]; | 2388 | char clk_name[8]; |
| 2362 | #endif | 2389 | #endif |
| 2363 | struct resource *res = NULL, *ires; | 2390 | struct resource *res = NULL, *ires; |
| @@ -2419,15 +2446,20 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) | |||
| 2419 | r8a66597->pdata = pdev->dev.platform_data; | 2446 | r8a66597->pdata = pdev->dev.platform_data; |
| 2420 | r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; | 2447 | r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; |
| 2421 | 2448 | ||
| 2422 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | 2449 | if (r8a66597->pdata->on_chip) { |
| 2423 | snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); | 2450 | #ifdef CONFIG_HAVE_CLK |
| 2424 | r8a66597->clk = clk_get(&pdev->dev, clk_name); | 2451 | snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); |
| 2425 | if (IS_ERR(r8a66597->clk)) { | 2452 | r8a66597->clk = clk_get(&pdev->dev, clk_name); |
| 2426 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | 2453 | if (IS_ERR(r8a66597->clk)) { |
| 2427 | ret = PTR_ERR(r8a66597->clk); | 2454 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", |
| 2428 | goto clean_up2; | 2455 | clk_name); |
| 2429 | } | 2456 | ret = PTR_ERR(r8a66597->clk); |
| 2457 | goto clean_up2; | ||
| 2458 | } | ||
| 2430 | #endif | 2459 | #endif |
| 2460 | r8a66597->max_root_hub = 1; | ||
| 2461 | } else | ||
| 2462 | r8a66597->max_root_hub = 2; | ||
| 2431 | 2463 | ||
| 2432 | spin_lock_init(&r8a66597->lock); | 2464 | spin_lock_init(&r8a66597->lock); |
| 2433 | init_timer(&r8a66597->rh_timer); | 2465 | init_timer(&r8a66597->rh_timer); |
| @@ -2457,8 +2489,9 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) | |||
| 2457 | return 0; | 2489 | return 0; |
| 2458 | 2490 | ||
| 2459 | clean_up3: | 2491 | clean_up3: |
| 2460 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | 2492 | #ifdef CONFIG_HAVE_CLK |
| 2461 | clk_put(r8a66597->clk); | 2493 | if (r8a66597->pdata->on_chip) |
| 2494 | clk_put(r8a66597->clk); | ||
| 2462 | clean_up2: | 2495 | clean_up2: |
| 2463 | #endif | 2496 | #endif |
| 2464 | usb_put_hcd(hcd); | 2497 | usb_put_hcd(hcd); |
| @@ -2473,11 +2506,10 @@ clean_up: | |||
| 2473 | static struct platform_driver r8a66597_driver = { | 2506 | static struct platform_driver r8a66597_driver = { |
| 2474 | .probe = r8a66597_probe, | 2507 | .probe = r8a66597_probe, |
| 2475 | .remove = r8a66597_remove, | 2508 | .remove = r8a66597_remove, |
| 2476 | .suspend = r8a66597_suspend, | ||
| 2477 | .resume = r8a66597_resume, | ||
| 2478 | .driver = { | 2509 | .driver = { |
| 2479 | .name = (char *) hcd_name, | 2510 | .name = (char *) hcd_name, |
| 2480 | .owner = THIS_MODULE, | 2511 | .owner = THIS_MODULE, |
| 2512 | .pm = R8A66597_DEV_PM_OPS, | ||
| 2481 | }, | 2513 | }, |
| 2482 | }; | 2514 | }; |
| 2483 | 2515 | ||
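The suspend/resume rework above is the standard migration from platform_driver-level callbacks to dev_pm_ops: the handlers take a bare struct device (so the pm_message_t argument disappears), they are reused for the hibernation poweroff/restore phases, and the driver advertises them through .driver.pm instead of .suspend/.resume. A stripped-down sketch of that wiring; the example_* names are placeholders, not part of the driver:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int example_suspend(struct device *dev)
{
	/* dev_get_drvdata(dev) replaces dev_get_drvdata(&pdev->dev) */
	return 0;
}

static int example_resume(struct device *dev)
{
	return 0;
}

static struct dev_pm_ops example_pm_ops = {
	.suspend  = example_suspend,
	.resume   = example_resume,
	.poweroff = example_suspend,	/* hibernation reuses the same hooks */
	.restore  = example_resume,
};
#define EXAMPLE_PM_OPS	(&example_pm_ops)
#else
#define EXAMPLE_PM_OPS	NULL
#endif

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example-hcd",
		.pm	= EXAMPLE_PM_OPS,	/* no more .suspend/.resume here */
	},
};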
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index d72680b433f9..228e3fb23854 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h | |||
| @@ -26,390 +26,16 @@ | |||
| 26 | #ifndef __R8A66597_H__ | 26 | #ifndef __R8A66597_H__ |
| 27 | #define __R8A66597_H__ | 27 | #define __R8A66597_H__ |
| 28 | 28 | ||
| 29 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | 29 | #ifdef CONFIG_HAVE_CLK |
| 30 | #include <linux/clk.h> | 30 | #include <linux/clk.h> |
| 31 | #endif | 31 | #endif |
| 32 | 32 | ||
| 33 | #include <linux/usb/r8a66597.h> | 33 | #include <linux/usb/r8a66597.h> |
| 34 | 34 | ||
| 35 | #define SYSCFG0 0x00 | ||
| 36 | #define SYSCFG1 0x02 | ||
| 37 | #define SYSSTS0 0x04 | ||
| 38 | #define SYSSTS1 0x06 | ||
| 39 | #define DVSTCTR0 0x08 | ||
| 40 | #define DVSTCTR1 0x0A | ||
| 41 | #define TESTMODE 0x0C | ||
| 42 | #define PINCFG 0x0E | ||
| 43 | #define DMA0CFG 0x10 | ||
| 44 | #define DMA1CFG 0x12 | ||
| 45 | #define CFIFO 0x14 | ||
| 46 | #define D0FIFO 0x18 | ||
| 47 | #define D1FIFO 0x1C | ||
| 48 | #define CFIFOSEL 0x20 | ||
| 49 | #define CFIFOCTR 0x22 | ||
| 50 | #define CFIFOSIE 0x24 | ||
| 51 | #define D0FIFOSEL 0x28 | ||
| 52 | #define D0FIFOCTR 0x2A | ||
| 53 | #define D1FIFOSEL 0x2C | ||
| 54 | #define D1FIFOCTR 0x2E | ||
| 55 | #define INTENB0 0x30 | ||
| 56 | #define INTENB1 0x32 | ||
| 57 | #define INTENB2 0x34 | ||
| 58 | #define BRDYENB 0x36 | ||
| 59 | #define NRDYENB 0x38 | ||
| 60 | #define BEMPENB 0x3A | ||
| 61 | #define SOFCFG 0x3C | ||
| 62 | #define INTSTS0 0x40 | ||
| 63 | #define INTSTS1 0x42 | ||
| 64 | #define INTSTS2 0x44 | ||
| 65 | #define BRDYSTS 0x46 | ||
| 66 | #define NRDYSTS 0x48 | ||
| 67 | #define BEMPSTS 0x4A | ||
| 68 | #define FRMNUM 0x4C | ||
| 69 | #define UFRMNUM 0x4E | ||
| 70 | #define USBADDR 0x50 | ||
| 71 | #define USBREQ 0x54 | ||
| 72 | #define USBVAL 0x56 | ||
| 73 | #define USBINDX 0x58 | ||
| 74 | #define USBLENG 0x5A | ||
| 75 | #define DCPCFG 0x5C | ||
| 76 | #define DCPMAXP 0x5E | ||
| 77 | #define DCPCTR 0x60 | ||
| 78 | #define PIPESEL 0x64 | ||
| 79 | #define PIPECFG 0x68 | ||
| 80 | #define PIPEBUF 0x6A | ||
| 81 | #define PIPEMAXP 0x6C | ||
| 82 | #define PIPEPERI 0x6E | ||
| 83 | #define PIPE1CTR 0x70 | ||
| 84 | #define PIPE2CTR 0x72 | ||
| 85 | #define PIPE3CTR 0x74 | ||
| 86 | #define PIPE4CTR 0x76 | ||
| 87 | #define PIPE5CTR 0x78 | ||
| 88 | #define PIPE6CTR 0x7A | ||
| 89 | #define PIPE7CTR 0x7C | ||
| 90 | #define PIPE8CTR 0x7E | ||
| 91 | #define PIPE9CTR 0x80 | ||
| 92 | #define PIPE1TRE 0x90 | ||
| 93 | #define PIPE1TRN 0x92 | ||
| 94 | #define PIPE2TRE 0x94 | ||
| 95 | #define PIPE2TRN 0x96 | ||
| 96 | #define PIPE3TRE 0x98 | ||
| 97 | #define PIPE3TRN 0x9A | ||
| 98 | #define PIPE4TRE 0x9C | ||
| 99 | #define PIPE4TRN 0x9E | ||
| 100 | #define PIPE5TRE 0xA0 | ||
| 101 | #define PIPE5TRN 0xA2 | ||
| 102 | #define DEVADD0 0xD0 | ||
| 103 | #define DEVADD1 0xD2 | ||
| 104 | #define DEVADD2 0xD4 | ||
| 105 | #define DEVADD3 0xD6 | ||
| 106 | #define DEVADD4 0xD8 | ||
| 107 | #define DEVADD5 0xDA | ||
| 108 | #define DEVADD6 0xDC | ||
| 109 | #define DEVADD7 0xDE | ||
| 110 | #define DEVADD8 0xE0 | ||
| 111 | #define DEVADD9 0xE2 | ||
| 112 | #define DEVADDA 0xE4 | ||
| 113 | |||
| 114 | /* System Configuration Control Register */ | ||
| 115 | #define XTAL 0xC000 /* b15-14: Crystal selection */ | ||
| 116 | #define XTAL48 0x8000 /* 48MHz */ | ||
| 117 | #define XTAL24 0x4000 /* 24MHz */ | ||
| 118 | #define XTAL12 0x0000 /* 12MHz */ | ||
| 119 | #define XCKE 0x2000 /* b13: External clock enable */ | ||
| 120 | #define PLLC 0x0800 /* b11: PLL control */ | ||
| 121 | #define SCKE 0x0400 /* b10: USB clock enable */ | ||
| 122 | #define PCSDIS 0x0200 /* b9: not CS wakeup */ | ||
| 123 | #define LPSME 0x0100 /* b8: Low power sleep mode */ | ||
| 124 | #define HSE 0x0080 /* b7: Hi-speed enable */ | ||
| 125 | #define DCFM 0x0040 /* b6: Controller function select */ | ||
| 126 | #define DRPD 0x0020 /* b5: D+/- pull down control */ | ||
| 127 | #define DPRPU 0x0010 /* b4: D+ pull up control */ | ||
| 128 | #define USBE 0x0001 /* b0: USB module operation enable */ | ||
| 129 | |||
| 130 | /* System Configuration Status Register */ | ||
| 131 | #define OVCBIT 0x8000 /* b15-14: Over-current bit */ | ||
| 132 | #define OVCMON 0xC000 /* b15-14: Over-current monitor */ | ||
| 133 | #define SOFEA 0x0020 /* b5: SOF monitor */ | ||
| 134 | #define IDMON 0x0004 /* b3: ID-pin monitor */ | ||
| 135 | #define LNST 0x0003 /* b1-0: D+, D- line status */ | ||
| 136 | #define SE1 0x0003 /* SE1 */ | ||
| 137 | #define FS_KSTS 0x0002 /* Full-Speed K State */ | ||
| 138 | #define FS_JSTS 0x0001 /* Full-Speed J State */ | ||
| 139 | #define LS_JSTS 0x0002 /* Low-Speed J State */ | ||
| 140 | #define LS_KSTS 0x0001 /* Low-Speed K State */ | ||
| 141 | #define SE0 0x0000 /* SE0 */ | ||
| 142 | |||
| 143 | /* Device State Control Register */ | ||
| 144 | #define EXTLP0 0x0400 /* b10: External port */ | ||
| 145 | #define VBOUT 0x0200 /* b9: VBUS output */ | ||
| 146 | #define WKUP 0x0100 /* b8: Remote wakeup */ | ||
| 147 | #define RWUPE 0x0080 /* b7: Remote wakeup sense */ | ||
| 148 | #define USBRST 0x0040 /* b6: USB reset enable */ | ||
| 149 | #define RESUME 0x0020 /* b5: Resume enable */ | ||
| 150 | #define UACT 0x0010 /* b4: USB bus enable */ | ||
| 151 | #define RHST 0x0007 /* b1-0: Reset handshake status */ | ||
| 152 | #define HSPROC 0x0004 /* HS handshake is processing */ | ||
| 153 | #define HSMODE 0x0003 /* Hi-Speed mode */ | ||
| 154 | #define FSMODE 0x0002 /* Full-Speed mode */ | ||
| 155 | #define LSMODE 0x0001 /* Low-Speed mode */ | ||
| 156 | #define UNDECID 0x0000 /* Undecided */ | ||
| 157 | |||
| 158 | /* Test Mode Register */ | ||
| 159 | #define UTST 0x000F /* b3-0: Test select */ | ||
| 160 | #define H_TST_PACKET 0x000C /* HOST TEST Packet */ | ||
| 161 | #define H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ | ||
| 162 | #define H_TST_K 0x000A /* HOST TEST K */ | ||
| 163 | #define H_TST_J 0x0009 /* HOST TEST J */ | ||
| 164 | #define H_TST_NORMAL 0x0000 /* HOST Normal Mode */ | ||
| 165 | #define P_TST_PACKET 0x0004 /* PERI TEST Packet */ | ||
| 166 | #define P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ | ||
| 167 | #define P_TST_K 0x0002 /* PERI TEST K */ | ||
| 168 | #define P_TST_J 0x0001 /* PERI TEST J */ | ||
| 169 | #define P_TST_NORMAL 0x0000 /* PERI Normal Mode */ | ||
| 170 | |||
| 171 | /* Data Pin Configuration Register */ | ||
| 172 | #define LDRV 0x8000 /* b15: Drive Current Adjust */ | ||
| 173 | #define VIF1 0x0000 /* VIF = 1.8V */ | ||
| 174 | #define VIF3 0x8000 /* VIF = 3.3V */ | ||
| 175 | #define INTA 0x0001 /* b1: USB INT-pin active */ | ||
| 176 | |||
| 177 | /* DMAx Pin Configuration Register */ | ||
| 178 | #define DREQA 0x4000 /* b14: Dreq active select */ | ||
| 179 | #define BURST 0x2000 /* b13: Burst mode */ | ||
| 180 | #define DACKA 0x0400 /* b10: Dack active select */ | ||
| 181 | #define DFORM 0x0380 /* b9-7: DMA mode select */ | ||
| 182 | #define CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ | ||
| 183 | #define CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ | ||
| 184 | #define CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ | ||
| 185 | #define SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ | ||
| 186 | #define DENDA 0x0040 /* b6: Dend active select */ | ||
| 187 | #define PKTM 0x0020 /* b5: Packet mode */ | ||
| 188 | #define DENDE 0x0010 /* b4: Dend enable */ | ||
| 189 | #define OBUS 0x0004 /* b2: OUTbus mode */ | ||
| 190 | |||
| 191 | /* CFIFO/DxFIFO Port Select Register */ | ||
| 192 | #define RCNT 0x8000 /* b15: Read count mode */ | ||
| 193 | #define REW 0x4000 /* b14: Buffer rewind */ | ||
| 194 | #define DCLRM 0x2000 /* b13: DMA buffer clear mode */ | ||
| 195 | #define DREQE 0x1000 /* b12: DREQ output enable */ | ||
| 196 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | ||
| 197 | #define MBW 0x0800 | ||
| 198 | #else | ||
| 199 | #define MBW 0x0400 /* b10: Maximum bit width for FIFO access */ | ||
| 200 | #endif | ||
| 201 | #define MBW_8 0x0000 /* 8bit */ | ||
| 202 | #define MBW_16 0x0400 /* 16bit */ | ||
| 203 | #define BIGEND 0x0100 /* b8: Big endian mode */ | ||
| 204 | #define BYTE_LITTLE 0x0000 /* little dendian */ | ||
| 205 | #define BYTE_BIG 0x0100 /* big endifan */ | ||
| 206 | #define ISEL 0x0020 /* b5: DCP FIFO port direction select */ | ||
| 207 | #define CURPIPE 0x000F /* b2-0: PIPE select */ | ||
| 208 | |||
| 209 | /* CFIFO/DxFIFO Port Control Register */ | ||
| 210 | #define BVAL 0x8000 /* b15: Buffer valid flag */ | ||
| 211 | #define BCLR 0x4000 /* b14: Buffer clear */ | ||
| 212 | #define FRDY 0x2000 /* b13: FIFO ready */ | ||
| 213 | #define DTLN 0x0FFF /* b11-0: FIFO received data length */ | ||
| 214 | |||
| 215 | /* Interrupt Enable Register 0 */ | ||
| 216 | #define VBSE 0x8000 /* b15: VBUS interrupt */ | ||
| 217 | #define RSME 0x4000 /* b14: Resume interrupt */ | ||
| 218 | #define SOFE 0x2000 /* b13: Frame update interrupt */ | ||
| 219 | #define DVSE 0x1000 /* b12: Device state transition interrupt */ | ||
| 220 | #define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ | ||
| 221 | #define BEMPE 0x0400 /* b10: Buffer empty interrupt */ | ||
| 222 | #define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ | ||
| 223 | #define BRDYE 0x0100 /* b8: Buffer ready interrupt */ | ||
| 224 | |||
| 225 | /* Interrupt Enable Register 1 */ | ||
| 226 | #define OVRCRE 0x8000 /* b15: Over-current interrupt */ | ||
| 227 | #define BCHGE 0x4000 /* b14: USB us chenge interrupt */ | ||
| 228 | #define DTCHE 0x1000 /* b12: Detach sense interrupt */ | ||
| 229 | #define ATTCHE 0x0800 /* b11: Attach sense interrupt */ | ||
| 230 | #define EOFERRE 0x0040 /* b6: EOF error interrupt */ | ||
| 231 | #define SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ | ||
| 232 | #define SACKE 0x0010 /* b4: SETUP ACK interrupt */ | ||
| 233 | |||
| 234 | /* BRDY Interrupt Enable/Status Register */ | ||
| 235 | #define BRDY9 0x0200 /* b9: PIPE9 */ | ||
| 236 | #define BRDY8 0x0100 /* b8: PIPE8 */ | ||
| 237 | #define BRDY7 0x0080 /* b7: PIPE7 */ | ||
| 238 | #define BRDY6 0x0040 /* b6: PIPE6 */ | ||
| 239 | #define BRDY5 0x0020 /* b5: PIPE5 */ | ||
| 240 | #define BRDY4 0x0010 /* b4: PIPE4 */ | ||
| 241 | #define BRDY3 0x0008 /* b3: PIPE3 */ | ||
| 242 | #define BRDY2 0x0004 /* b2: PIPE2 */ | ||
| 243 | #define BRDY1 0x0002 /* b1: PIPE1 */ | ||
| 244 | #define BRDY0 0x0001 /* b1: PIPE0 */ | ||
| 245 | |||
| 246 | /* NRDY Interrupt Enable/Status Register */ | ||
| 247 | #define NRDY9 0x0200 /* b9: PIPE9 */ | ||
| 248 | #define NRDY8 0x0100 /* b8: PIPE8 */ | ||
| 249 | #define NRDY7 0x0080 /* b7: PIPE7 */ | ||
| 250 | #define NRDY6 0x0040 /* b6: PIPE6 */ | ||
| 251 | #define NRDY5 0x0020 /* b5: PIPE5 */ | ||
| 252 | #define NRDY4 0x0010 /* b4: PIPE4 */ | ||
| 253 | #define NRDY3 0x0008 /* b3: PIPE3 */ | ||
| 254 | #define NRDY2 0x0004 /* b2: PIPE2 */ | ||
| 255 | #define NRDY1 0x0002 /* b1: PIPE1 */ | ||
| 256 | #define NRDY0 0x0001 /* b1: PIPE0 */ | ||
| 257 | |||
| 258 | /* BEMP Interrupt Enable/Status Register */ | ||
| 259 | #define BEMP9 0x0200 /* b9: PIPE9 */ | ||
| 260 | #define BEMP8 0x0100 /* b8: PIPE8 */ | ||
| 261 | #define BEMP7 0x0080 /* b7: PIPE7 */ | ||
| 262 | #define BEMP6 0x0040 /* b6: PIPE6 */ | ||
| 263 | #define BEMP5 0x0020 /* b5: PIPE5 */ | ||
| 264 | #define BEMP4 0x0010 /* b4: PIPE4 */ | ||
| 265 | #define BEMP3 0x0008 /* b3: PIPE3 */ | ||
| 266 | #define BEMP2 0x0004 /* b2: PIPE2 */ | ||
| 267 | #define BEMP1 0x0002 /* b1: PIPE1 */ | ||
| 268 | #define BEMP0 0x0001 /* b0: PIPE0 */ | ||
| 269 | |||
| 270 | /* SOF Pin Configuration Register */ | ||
| 271 | #define TRNENSEL 0x0100 /* b8: Select transaction enable period */ | ||
| 272 | #define BRDYM 0x0040 /* b6: BRDY clear timing */ | ||
| 273 | #define INTL 0x0020 /* b5: Interrupt sense select */ | ||
| 274 | #define EDGESTS 0x0010 /* b4: */ | ||
| 275 | #define SOFMODE 0x000C /* b3-2: SOF pin select */ | ||
| 276 | #define SOF_125US 0x0008 /* SOF OUT 125us Frame Signal */ | ||
| 277 | #define SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ | ||
| 278 | #define SOF_DISABLE 0x0000 /* SOF OUT Disable */ | ||
| 279 | |||
| 280 | /* Interrupt Status Register 0 */ | ||
| 281 | #define VBINT 0x8000 /* b15: VBUS interrupt */ | ||
| 282 | #define RESM 0x4000 /* b14: Resume interrupt */ | ||
| 283 | #define SOFR 0x2000 /* b13: SOF frame update interrupt */ | ||
| 284 | #define DVST 0x1000 /* b12: Device state transition interrupt */ | ||
| 285 | #define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ | ||
| 286 | #define BEMP 0x0400 /* b10: Buffer empty interrupt */ | ||
| 287 | #define NRDY 0x0200 /* b9: Buffer not ready interrupt */ | ||
| 288 | #define BRDY 0x0100 /* b8: Buffer ready interrupt */ | ||
| 289 | #define VBSTS 0x0080 /* b7: VBUS input port */ | ||
| 290 | #define DVSQ 0x0070 /* b6-4: Device state */ | ||
| 291 | #define DS_SPD_CNFG 0x0070 /* Suspend Configured */ | ||
| 292 | #define DS_SPD_ADDR 0x0060 /* Suspend Address */ | ||
| 293 | #define DS_SPD_DFLT 0x0050 /* Suspend Default */ | ||
| 294 | #define DS_SPD_POWR 0x0040 /* Suspend Powered */ | ||
| 295 | #define DS_SUSP 0x0040 /* Suspend */ | ||
| 296 | #define DS_CNFG 0x0030 /* Configured */ | ||
| 297 | #define DS_ADDS 0x0020 /* Address */ | ||
| 298 | #define DS_DFLT 0x0010 /* Default */ | ||
| 299 | #define DS_POWR 0x0000 /* Powered */ | ||
| 300 | #define DVSQS 0x0030 /* b5-4: Device state */ | ||
| 301 | #define VALID 0x0008 /* b3: Setup packet detected flag */ | ||
| 302 | #define CTSQ 0x0007 /* b2-0: Control transfer stage */ | ||
| 303 | #define CS_SQER 0x0006 /* Sequence error */ | ||
| 304 | #define CS_WRND 0x0005 /* Control write nodata status stage */ | ||
| 305 | #define CS_WRSS 0x0004 /* Control write status stage */ | ||
| 306 | #define CS_WRDS 0x0003 /* Control write data stage */ | ||
| 307 | #define CS_RDSS 0x0002 /* Control read status stage */ | ||
| 308 | #define CS_RDDS 0x0001 /* Control read data stage */ | ||
| 309 | #define CS_IDST 0x0000 /* Idle or setup stage */ | ||
| 310 | |||
| 311 | /* Interrupt Status Register 1 */ | ||
| 312 | #define OVRCR 0x8000 /* b15: Over-current interrupt */ | ||
| 313 | #define BCHG 0x4000 /* b14: USB bus chenge interrupt */ | ||
| 314 | #define DTCH 0x1000 /* b12: Detach sense interrupt */ | ||
| 315 | #define ATTCH 0x0800 /* b11: Attach sense interrupt */ | ||
| 316 | #define EOFERR 0x0040 /* b6: EOF-error interrupt */ | ||
| 317 | #define SIGN 0x0020 /* b5: Setup ignore interrupt */ | ||
| 318 | #define SACK 0x0010 /* b4: Setup acknowledge interrupt */ | ||
| 319 | |||
| 320 | /* Frame Number Register */ | ||
| 321 | #define OVRN 0x8000 /* b15: Overrun error */ | ||
| 322 | #define CRCE 0x4000 /* b14: Received data error */ | ||
| 323 | #define FRNM 0x07FF /* b10-0: Frame number */ | ||
| 324 | |||
| 325 | /* Micro Frame Number Register */ | ||
| 326 | #define UFRNM 0x0007 /* b2-0: Micro frame number */ | ||
| 327 | |||
| 328 | /* Default Control Pipe Maxpacket Size Register */ | ||
| 329 | /* Pipe Maxpacket Size Register */ | ||
| 330 | #define DEVSEL 0xF000 /* b15-14: Device address select */ | ||
| 331 | #define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ | ||
| 332 | |||
| 333 | /* Default Control Pipe Control Register */ | ||
| 334 | #define BSTS 0x8000 /* b15: Buffer status */ | ||
| 335 | #define SUREQ 0x4000 /* b14: Send USB request */ | ||
| 336 | #define CSCLR 0x2000 /* b13: complete-split status clear */ | ||
| 337 | #define CSSTS 0x1000 /* b12: complete-split status */ | ||
| 338 | #define SUREQCLR 0x0800 /* b11: stop setup request */ | ||
| 339 | #define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ | ||
| 340 | #define SQSET 0x0080 /* b7: Sequence toggle bit set */ | ||
| 341 | #define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ | ||
| 342 | #define PBUSY 0x0020 /* b5: pipe busy */ | ||
| 343 | #define PINGE 0x0010 /* b4: ping enable */ | ||
| 344 | #define CCPL 0x0004 /* b2: Enable control transfer complete */ | ||
| 345 | #define PID 0x0003 /* b1-0: Response PID */ | ||
| 346 | #define PID_STALL11 0x0003 /* STALL */ | ||
| 347 | #define PID_STALL 0x0002 /* STALL */ | ||
| 348 | #define PID_BUF 0x0001 /* BUF */ | ||
| 349 | #define PID_NAK 0x0000 /* NAK */ | ||
| 350 | |||
| 351 | /* Pipe Window Select Register */ | ||
| 352 | #define PIPENM 0x0007 /* b2-0: Pipe select */ | ||
| 353 | |||
| 354 | /* Pipe Configuration Register */ | ||
| 355 | #define R8A66597_TYP 0xC000 /* b15-14: Transfer type */ | ||
| 356 | #define R8A66597_ISO 0xC000 /* Isochronous */ | ||
| 357 | #define R8A66597_INT 0x8000 /* Interrupt */ | ||
| 358 | #define R8A66597_BULK 0x4000 /* Bulk */ | ||
| 359 | #define R8A66597_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */ | ||
| 360 | #define R8A66597_DBLB 0x0200 /* b9: Double buffer mode select */ | ||
| 361 | #define R8A66597_CNTMD 0x0100 /* b8: Continuous transfer mode select */ | ||
| 362 | #define R8A66597_SHTNAK 0x0080 /* b7: Transfer end NAK */ | ||
| 363 | #define R8A66597_DIR 0x0010 /* b4: Transfer direction select */ | ||
| 364 | #define R8A66597_EPNUM 0x000F /* b3-0: Eendpoint number select */ | ||
| 365 | |||
| 366 | /* Pipe Buffer Configuration Register */ | ||
| 367 | #define BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ | ||
| 368 | #define BUFNMB 0x007F /* b6-0: Pipe buffer number */ | ||
| 369 | #define PIPE0BUF 256 | ||
| 370 | #define PIPExBUF 64 | ||
| 371 | |||
| 372 | /* Pipe Maxpacket Size Register */ | ||
| 373 | #define MXPS 0x07FF /* b10-0: Maxpacket size */ | ||
| 374 | |||
| 375 | /* Pipe Cycle Configuration Register */ | ||
| 376 | #define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ | ||
| 377 | #define IITV 0x0007 /* b2-0: Isochronous interval */ | ||
| 378 | |||
| 379 | /* Pipex Control Register */ | ||
| 380 | #define BSTS 0x8000 /* b15: Buffer status */ | ||
| 381 | #define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ | ||
| 382 | #define CSCLR 0x2000 /* b13: complete-split status clear */ | ||
| 383 | #define CSSTS 0x1000 /* b12: complete-split status */ | ||
| 384 | #define ATREPM 0x0400 /* b10: Auto repeat mode */ | ||
| 385 | #define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ | ||
| 386 | #define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ | ||
| 387 | #define SQSET 0x0080 /* b7: Sequence toggle bit set */ | ||
| 388 | #define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ | ||
| 389 | #define PBUSY 0x0020 /* b5: pipe busy */ | ||
| 390 | #define PID 0x0003 /* b1-0: Response PID */ | ||
| 391 | |||
| 392 | /* PIPExTRE */ | ||
| 393 | #define TRENB 0x0200 /* b9: Transaction counter enable */ | ||
| 394 | #define TRCLR 0x0100 /* b8: Transaction counter clear */ | ||
| 395 | |||
| 396 | /* PIPExTRN */ | ||
| 397 | #define TRNCNT 0xFFFF /* b15-0: Transaction counter */ | ||
| 398 | |||
| 399 | /* DEVADDx */ | ||
| 400 | #define UPPHUB 0x7800 | ||
| 401 | #define HUBPORT 0x0700 | ||
| 402 | #define USBSPD 0x00C0 | ||
| 403 | #define RTPORT 0x0001 | ||
| 404 | |||
| 405 | #define R8A66597_MAX_NUM_PIPE 10 | 35 | #define R8A66597_MAX_NUM_PIPE 10 |
| 406 | #define R8A66597_BUF_BSIZE 8 | 36 | #define R8A66597_BUF_BSIZE 8 |
| 407 | #define R8A66597_MAX_DEVICE 10 | 37 | #define R8A66597_MAX_DEVICE 10 |
| 408 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | ||
| 409 | #define R8A66597_MAX_ROOT_HUB 1 | ||
| 410 | #else | ||
| 411 | #define R8A66597_MAX_ROOT_HUB 2 | 38 | #define R8A66597_MAX_ROOT_HUB 2 |
| 412 | #endif | ||
| 413 | #define R8A66597_MAX_SAMPLING 5 | 39 | #define R8A66597_MAX_SAMPLING 5 |
| 414 | #define R8A66597_RH_POLL_TIME 10 | 40 | #define R8A66597_RH_POLL_TIME 10 |
| 415 | #define R8A66597_MAX_DMA_CHANNEL 2 | 41 | #define R8A66597_MAX_DMA_CHANNEL 2 |
| @@ -487,7 +113,7 @@ struct r8a66597_root_hub { | |||
| 487 | struct r8a66597 { | 113 | struct r8a66597 { |
| 488 | spinlock_t lock; | 114 | spinlock_t lock; |
| 489 | unsigned long reg; | 115 | unsigned long reg; |
| 490 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) | 116 | #ifdef CONFIG_HAVE_CLK |
| 491 | struct clk *clk; | 117 | struct clk *clk; |
| 492 | #endif | 118 | #endif |
| 493 | struct r8a66597_platdata *pdata; | 119 | struct r8a66597_platdata *pdata; |
| @@ -504,6 +130,7 @@ struct r8a66597 { | |||
| 504 | unsigned short interval_map; | 130 | unsigned short interval_map; |
| 505 | unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE]; | 131 | unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE]; |
| 506 | unsigned char dma_map; | 132 | unsigned char dma_map; |
| 133 | unsigned int max_root_hub; | ||
| 507 | 134 | ||
| 508 | struct list_head child_device; | 135 | struct list_head child_device; |
| 509 | unsigned long child_connect_map[4]; | 136 | unsigned long child_connect_map[4]; |
| @@ -550,21 +177,22 @@ static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597, | |||
| 550 | unsigned long offset, u16 *buf, | 177 | unsigned long offset, u16 *buf, |
| 551 | int len) | 178 | int len) |
| 552 | { | 179 | { |
| 553 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | ||
| 554 | unsigned long fifoaddr = r8a66597->reg + offset; | 180 | unsigned long fifoaddr = r8a66597->reg + offset; |
| 555 | unsigned long count; | 181 | unsigned long count; |
| 556 | 182 | ||
| 557 | count = len / 4; | 183 | if (r8a66597->pdata->on_chip) { |
| 558 | insl(fifoaddr, buf, count); | 184 | count = len / 4; |
| 185 | insl(fifoaddr, buf, count); | ||
| 559 | 186 | ||
| 560 | if (len & 0x00000003) { | 187 | if (len & 0x00000003) { |
| 561 | unsigned long tmp = inl(fifoaddr); | 188 | unsigned long tmp = inl(fifoaddr); |
| 562 | memcpy((unsigned char *)buf + count * 4, &tmp, len & 0x03); | 189 | memcpy((unsigned char *)buf + count * 4, &tmp, |
| 190 | len & 0x03); | ||
| 191 | } | ||
| 192 | } else { | ||
| 193 | len = (len + 1) / 2; | ||
| 194 | insw(fifoaddr, buf, len); | ||
| 563 | } | 195 | } |
| 564 | #else | ||
| 565 | len = (len + 1) / 2; | ||
| 566 | insw(r8a66597->reg + offset, buf, len); | ||
| 567 | #endif | ||
| 568 | } | 196 | } |
| 569 | 197 | ||
| 570 | static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val, | 198 | static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val, |
| @@ -578,33 +206,33 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597, | |||
| 578 | int len) | 206 | int len) |
| 579 | { | 207 | { |
| 580 | unsigned long fifoaddr = r8a66597->reg + offset; | 208 | unsigned long fifoaddr = r8a66597->reg + offset; |
| 581 | #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) | ||
| 582 | unsigned long count; | 209 | unsigned long count; |
| 583 | unsigned char *pb; | 210 | unsigned char *pb; |
| 584 | int i; | 211 | int i; |
| 585 | 212 | ||
| 586 | count = len / 4; | 213 | if (r8a66597->pdata->on_chip) { |
| 587 | outsl(fifoaddr, buf, count); | 214 | count = len / 4; |
| 215 | outsl(fifoaddr, buf, count); | ||
| 216 | |||
| 217 | if (len & 0x00000003) { | ||
| 218 | pb = (unsigned char *)buf + count * 4; | ||
| 219 | for (i = 0; i < (len & 0x00000003); i++) { | ||
| 220 | if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND) | ||
| 221 | outb(pb[i], fifoaddr + i); | ||
| 222 | else | ||
| 223 | outb(pb[i], fifoaddr + 3 - i); | ||
| 224 | } | ||
| 225 | } | ||
| 226 | } else { | ||
| 227 | int odd = len & 0x0001; | ||
| 588 | 228 | ||
| 589 | if (len & 0x00000003) { | 229 | len = len / 2; |
| 590 | pb = (unsigned char *)buf + count * 4; | 230 | outsw(fifoaddr, buf, len); |
| 591 | for (i = 0; i < (len & 0x00000003); i++) { | 231 | if (unlikely(odd)) { |
| 592 | if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND) | 232 | buf = &buf[len]; |
| 593 | outb(pb[i], fifoaddr + i); | 233 | outb((unsigned char)*buf, fifoaddr); |
| 594 | else | ||
| 595 | outb(pb[i], fifoaddr + 3 - i); | ||
| 596 | } | 234 | } |
| 597 | } | 235 | } |
| 598 | #else | ||
| 599 | int odd = len & 0x0001; | ||
| 600 | |||
| 601 | len = len / 2; | ||
| 602 | outsw(fifoaddr, buf, len); | ||
| 603 | if (unlikely(odd)) { | ||
| 604 | buf = &buf[len]; | ||
| 605 | outb((unsigned char)*buf, fifoaddr); | ||
| 606 | } | ||
| 607 | #endif | ||
| 608 | } | 236 | } |
| 609 | 237 | ||
| 610 | static inline void r8a66597_mdfy(struct r8a66597 *r8a66597, | 238 | static inline void r8a66597_mdfy(struct r8a66597 *r8a66597, |
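With the register block moved out of the local header and the CONFIG_SUPERH_ON_CHIP_R8A66597 symbol gone, the FIFO accessors pick their access width from pdata->on_chip at run time: 32-bit insl/outsl for the on-chip controller, 16-bit insw/outsw for the external chip, with the odd tail bytes handled separately in each case. A simplified sketch of the read side (not the exact helper; the write path additionally honours the BIGEND bit for its tail bytes, as the hunk above shows):

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

static void fifo_read(unsigned long fifoaddr, int on_chip, u16 *buf, int len)
{
	if (on_chip) {
		/* on-chip controller: 32-bit FIFO accesses */
		unsigned long count = len / 4;

		insl(fifoaddr, buf, count);
		if (len & 0x3) {
			/* copy the trailing 1-3 bytes out of one last read */
			unsigned long tmp = inl(fifoaddr);

			memcpy((unsigned char *)buf + count * 4, &tmp,
			       len & 0x3);
		}
	} else {
		/* external controller: 16-bit FIFO accesses only */
		insw(fifoaddr, buf, (len + 1) / 2);
	}
}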
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index a949259f18b9..5b22a4d1c9e4 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c | |||
| @@ -719,8 +719,12 @@ retry: | |||
| 719 | /* port status seems weird until after reset, so | 719 | /* port status seems weird until after reset, so |
| 720 | * force the reset and make khubd clean up later. | 720 | * force the reset and make khubd clean up later. |
| 721 | */ | 721 | */ |
| 722 | sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION) | 722 | if (sl811->stat_insrmv & 1) |
| 723 | | (1 << USB_PORT_FEAT_CONNECTION); | 723 | sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION; |
| 724 | else | ||
| 725 | sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION); | ||
| 726 | |||
| 727 | sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION; | ||
| 724 | 728 | ||
| 725 | } else if (irqstat & SL11H_INTMASK_RD) { | 729 | } else if (irqstat & SL11H_INTMASK_RD) { |
| 726 | if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) { | 730 | if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) { |
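The sl811 change stops reporting a phantom connection on detach: instead of unconditionally setting the connection bit on every insert/remove interrupt, the bit now follows the controller's insert/remove status, and only the change bit is always raised so khubd re-reads the port. A hedged sketch of that logic, with local constants standing in for the kernel's USB_PORT_FEAT_* selectors:

#include <linux/types.h>

#define PORT_FEAT_CONNECTION	0	/* wPortStatus: device present */
#define PORT_FEAT_C_CONNECTION	16	/* wPortChange: connect changed */

static void update_port_status(u32 *port1, u8 stat_insrmv)
{
	if (stat_insrmv & 1)			/* device inserted */
		*port1 |= 1 << PORT_FEAT_CONNECTION;
	else					/* device removed */
		*port1 &= ~(1 << PORT_FEAT_CONNECTION);

	/* always flag a change so khubd rescans and cleans up */
	*port1 |= 1 << PORT_FEAT_C_CONNECTION;
}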
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 64e57bfe236b..acd582c02802 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c | |||
| @@ -1422,7 +1422,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd, | |||
| 1422 | goto err_submit_failed; | 1422 | goto err_submit_failed; |
| 1423 | 1423 | ||
| 1424 | /* Add this URB to the QH */ | 1424 | /* Add this URB to the QH */ |
| 1425 | urbp->qh = qh; | ||
| 1426 | list_add_tail(&urbp->node, &qh->queue); | 1425 | list_add_tail(&urbp->node, &qh->queue); |
| 1427 | 1426 | ||
| 1428 | /* If the new URB is the first and only one on this QH then either | 1427 | /* If the new URB is the first and only one on this QH then either |
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c index c2050785a819..c632437c7649 100644 --- a/drivers/usb/host/whci/asl.c +++ b/drivers/usb/host/whci/asl.c | |||
| @@ -227,11 +227,21 @@ void scan_async_work(struct work_struct *work) | |||
| 227 | /* | 227 | /* |
| 228 | * Now that the ASL is updated, complete the removal of any | 228 | * Now that the ASL is updated, complete the removal of any |
| 229 | * removed qsets. | 229 | * removed qsets. |
| 230 | * | ||
| 231 | * If the qset was to be reset, do so and reinsert it into the | ||
| 232 | * ASL if it has pending transfers. | ||
| 230 | */ | 233 | */ |
| 231 | spin_lock_irq(&whc->lock); | 234 | spin_lock_irq(&whc->lock); |
| 232 | 235 | ||
| 233 | list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) { | 236 | list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) { |
| 234 | qset_remove_complete(whc, qset); | 237 | qset_remove_complete(whc, qset); |
| 238 | if (qset->reset) { | ||
| 239 | qset_reset(whc, qset); | ||
| 240 | if (!list_empty(&qset->stds)) { | ||
| 241 | asl_qset_insert_begin(whc, qset); | ||
| 242 | queue_work(whc->workqueue, &whc->async_work); | ||
| 243 | } | ||
| 244 | } | ||
| 235 | } | 245 | } |
| 236 | 246 | ||
| 237 | spin_unlock_irq(&whc->lock); | 247 | spin_unlock_irq(&whc->lock); |
| @@ -267,7 +277,7 @@ int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) | |||
| 267 | else | 277 | else |
| 268 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); | 278 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); |
| 269 | if (!err) { | 279 | if (!err) { |
| 270 | if (!qset->in_sw_list) | 280 | if (!qset->in_sw_list && !qset->remove) |
| 271 | asl_qset_insert_begin(whc, qset); | 281 | asl_qset_insert_begin(whc, qset); |
| 272 | } else | 282 | } else |
| 273 | usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); | 283 | usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); |
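On the work-queue side, the removal pass now doubles as the reset point: once the ASL update has taken effect, each removed qset is completed and, if it was flagged for a reset, re-armed and relinked when URBs are still queued on it. An outline of that flow (the finish_async_removals() name is a stand-in; in the driver this lives inside scan_async_work()):

static void finish_async_removals(struct whc *whc)
{
	struct whc_qset *qset, *t;

	spin_lock_irq(&whc->lock);
	list_for_each_entry_safe(qset, t, &whc->async_removed_list,
				 list_node) {
		qset_remove_complete(whc, qset);	/* clears qset->remove */
		if (qset->reset) {
			qset_reset(whc, qset);		/* clears qset->reset */
			if (!list_empty(&qset->stds)) {
				/* transfers still pending: put it back on
				 * the ASL and rescan */
				asl_qset_insert_begin(whc, qset);
				queue_work(whc->workqueue, &whc->async_work);
			}
		}
	}
	spin_unlock_irq(&whc->lock);
}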
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c index e019a5058ab8..687b622a1612 100644 --- a/drivers/usb/host/whci/hcd.c +++ b/drivers/usb/host/whci/hcd.c | |||
| @@ -192,19 +192,23 @@ static void whc_endpoint_reset(struct usb_hcd *usb_hcd, | |||
| 192 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | 192 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); |
| 193 | struct whc *whc = wusbhc_to_whc(wusbhc); | 193 | struct whc *whc = wusbhc_to_whc(wusbhc); |
| 194 | struct whc_qset *qset; | 194 | struct whc_qset *qset; |
| 195 | unsigned long flags; | ||
| 196 | |||
| 197 | spin_lock_irqsave(&whc->lock, flags); | ||
| 195 | 198 | ||
| 196 | qset = ep->hcpriv; | 199 | qset = ep->hcpriv; |
| 197 | if (qset) { | 200 | if (qset) { |
| 198 | qset->remove = 1; | 201 | qset->remove = 1; |
| 202 | qset->reset = 1; | ||
| 199 | 203 | ||
| 200 | if (usb_endpoint_xfer_bulk(&ep->desc) | 204 | if (usb_endpoint_xfer_bulk(&ep->desc) |
| 201 | || usb_endpoint_xfer_control(&ep->desc)) | 205 | || usb_endpoint_xfer_control(&ep->desc)) |
| 202 | queue_work(whc->workqueue, &whc->async_work); | 206 | queue_work(whc->workqueue, &whc->async_work); |
| 203 | else | 207 | else |
| 204 | queue_work(whc->workqueue, &whc->periodic_work); | 208 | queue_work(whc->workqueue, &whc->periodic_work); |
| 205 | |||
| 206 | qset_reset(whc, qset); | ||
| 207 | } | 209 | } |
| 210 | |||
| 211 | spin_unlock_irqrestore(&whc->lock, flags); | ||
| 208 | } | 212 | } |
| 209 | 213 | ||
| 210 | 214 | ||
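Previously whc_endpoint_reset() called qset_reset() directly, which waited on remove_complete (see the qset.c hunk below) and touched the qset without whc->lock held; resetting before the qset had actually left the hardware list could also race with the controller. The new shape only marks the qset under the lock and lets the scan work perform the reset once removal has finished. A rough sketch (the function name is a stand-in for whc_endpoint_reset()):

static void endpoint_reset_sketch(struct whc *whc, struct usb_host_endpoint *ep)
{
	struct whc_qset *qset;
	unsigned long flags;

	spin_lock_irqsave(&whc->lock, flags);
	qset = ep->hcpriv;
	if (qset) {
		qset->remove = 1;	/* take it off the ASL/PZL first ... */
		qset->reset = 1;	/* ... then reset it from the work item */
		if (usb_endpoint_xfer_bulk(&ep->desc) ||
		    usb_endpoint_xfer_control(&ep->desc))
			queue_work(whc->workqueue, &whc->async_work);
		else
			queue_work(whc->workqueue, &whc->periodic_work);
	}
	spin_unlock_irqrestore(&whc->lock, flags);
}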
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c index ff4ef9e910d9..a9e05bac6646 100644 --- a/drivers/usb/host/whci/pzl.c +++ b/drivers/usb/host/whci/pzl.c | |||
| @@ -255,11 +255,21 @@ void scan_periodic_work(struct work_struct *work) | |||
| 255 | /* | 255 | /* |
| 256 | * Now that the PZL is updated, complete the removal of any | 256 | * Now that the PZL is updated, complete the removal of any |
| 257 | * removed qsets. | 257 | * removed qsets. |
| 258 | * | ||
| 259 | * If the qset was to be reset, do so and reinsert it into the | ||
| 260 | * PZL if it has pending transfers. | ||
| 258 | */ | 261 | */ |
| 259 | spin_lock_irq(&whc->lock); | 262 | spin_lock_irq(&whc->lock); |
| 260 | 263 | ||
| 261 | list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) { | 264 | list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) { |
| 262 | qset_remove_complete(whc, qset); | 265 | qset_remove_complete(whc, qset); |
| 266 | if (qset->reset) { | ||
| 267 | qset_reset(whc, qset); | ||
| 268 | if (!list_empty(&qset->stds)) { | ||
| 269 | qset_insert_in_sw_list(whc, qset); | ||
| 270 | queue_work(whc->workqueue, &whc->periodic_work); | ||
| 271 | } | ||
| 272 | } | ||
| 263 | } | 273 | } |
| 264 | 274 | ||
| 265 | spin_unlock_irq(&whc->lock); | 275 | spin_unlock_irq(&whc->lock); |
| @@ -295,7 +305,7 @@ int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags) | |||
| 295 | else | 305 | else |
| 296 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); | 306 | err = qset_add_urb(whc, qset, urb, GFP_ATOMIC); |
| 297 | if (!err) { | 307 | if (!err) { |
| 298 | if (!qset->in_sw_list) | 308 | if (!qset->in_sw_list && !qset->remove) |
| 299 | qset_insert_in_sw_list(whc, qset); | 309 | qset_insert_in_sw_list(whc, qset); |
| 300 | } else | 310 | } else |
| 301 | usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); | 311 | usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb); |
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index 640b38fbd051..1b9dc1571570 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c | |||
| @@ -103,7 +103,6 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) | |||
| 103 | void qset_clear(struct whc *whc, struct whc_qset *qset) | 103 | void qset_clear(struct whc *whc, struct whc_qset *qset) |
| 104 | { | 104 | { |
| 105 | qset->td_start = qset->td_end = qset->ntds = 0; | 105 | qset->td_start = qset->td_end = qset->ntds = 0; |
| 106 | qset->remove = 0; | ||
| 107 | 106 | ||
| 108 | qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); | 107 | qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); |
| 109 | qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; | 108 | qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; |
| @@ -125,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset) | |||
| 125 | */ | 124 | */ |
| 126 | void qset_reset(struct whc *whc, struct whc_qset *qset) | 125 | void qset_reset(struct whc *whc, struct whc_qset *qset) |
| 127 | { | 126 | { |
| 128 | wait_for_completion(&qset->remove_complete); | 127 | qset->reset = 0; |
| 129 | 128 | ||
| 130 | qset->qh.status &= ~QH_STATUS_SEQ_MASK; | 129 | qset->qh.status &= ~QH_STATUS_SEQ_MASK; |
| 131 | qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); | 130 | qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); |
| @@ -156,6 +155,7 @@ struct whc_qset *get_qset(struct whc *whc, struct urb *urb, | |||
| 156 | 155 | ||
| 157 | void qset_remove_complete(struct whc *whc, struct whc_qset *qset) | 156 | void qset_remove_complete(struct whc *whc, struct whc_qset *qset) |
| 158 | { | 157 | { |
| 158 | qset->remove = 0; | ||
| 159 | list_del_init(&qset->list_node); | 159 | list_del_init(&qset->list_node); |
| 160 | complete(&qset->remove_complete); | 160 | complete(&qset->remove_complete); |
| 161 | } | 161 | } |
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h index 794dba0d0f0a..e8d0001605be 100644 --- a/drivers/usb/host/whci/whci-hc.h +++ b/drivers/usb/host/whci/whci-hc.h | |||
| @@ -264,6 +264,7 @@ struct whc_qset { | |||
| 264 | unsigned in_sw_list:1; | 264 | unsigned in_sw_list:1; |
| 265 | unsigned in_hw_list:1; | 265 | unsigned in_hw_list:1; |
| 266 | unsigned remove:1; | 266 | unsigned remove:1; |
| 267 | unsigned reset:1; | ||
| 267 | struct urb *pause_after_urb; | 268 | struct urb *pause_after_urb; |
| 268 | struct completion remove_complete; | 269 | struct completion remove_complete; |
| 269 | int max_burst; | 270 | int max_burst; |
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 705e34324156..33128d52f212 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
| @@ -413,7 +413,8 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) | |||
| 413 | int i; | 413 | int i; |
| 414 | 414 | ||
| 415 | struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); | 415 | struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); |
| 416 | dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx); | 416 | dma_addr_t dma = ctx->dma + |
| 417 | ((unsigned long)slot_ctx - (unsigned long)ctx->bytes); | ||
| 417 | int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); | 418 | int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); |
| 418 | 419 | ||
| 419 | xhci_dbg(xhci, "Slot Context:\n"); | 420 | xhci_dbg(xhci, "Slot Context:\n"); |
| @@ -459,7 +460,7 @@ void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, | |||
| 459 | for (i = 0; i < last_ep_ctx; ++i) { | 460 | for (i = 0; i < last_ep_ctx; ++i) { |
| 460 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i); | 461 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i); |
| 461 | dma_addr_t dma = ctx->dma + | 462 | dma_addr_t dma = ctx->dma + |
| 462 | ((unsigned long)ep_ctx - (unsigned long)ctx); | 463 | ((unsigned long)ep_ctx - (unsigned long)ctx->bytes); |
| 463 | 464 | ||
| 464 | xhci_dbg(xhci, "Endpoint %02d Context:\n", i); | 465 | xhci_dbg(xhci, "Endpoint %02d Context:\n", i); |
| 465 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", | 466 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", |
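The xhci-dbg fix is about which base address the offset is taken from: slot and endpoint contexts live inside the DMA buffer pointed to by ctx->bytes, not at the start of the struct xhci_container_ctx wrapper, so subtracting the wrapper's own address printed DMA values shifted by the size of its bookkeeping fields. A small sketch with a simplified container type (field names mirror the xHCI driver's):

#include <linux/types.h>

struct container_sketch {
	int		type;
	int		size;
	u8		*bytes;	/* CPU address of the DMA-able context buffer */
	dma_addr_t	dma;	/* bus address of that same buffer */
};

static dma_addr_t ctx_field_dma(struct container_sketch *ctx, void *field)
{
	/* wrong: (field - ctx) also counts type/size/bytes/dma themselves */
	return ctx->dma +
		((unsigned long)field - (unsigned long)ctx->bytes);
}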
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index 816c39caca1c..99911e727e0b 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c | |||
| @@ -22,12 +22,18 @@ | |||
| 22 | 22 | ||
| 23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
| 24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 25 | #include <linux/moduleparam.h> | ||
| 25 | 26 | ||
| 26 | #include "xhci.h" | 27 | #include "xhci.h" |
| 27 | 28 | ||
| 28 | #define DRIVER_AUTHOR "Sarah Sharp" | 29 | #define DRIVER_AUTHOR "Sarah Sharp" |
| 29 | #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" | 30 | #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" |
| 30 | 31 | ||
| 32 | /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ | ||
| 33 | static int link_quirk; | ||
| 34 | module_param(link_quirk, int, S_IRUGO | S_IWUSR); | ||
| 35 | MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); | ||
| 36 | |||
| 31 | /* TODO: copied from ehci-hcd.c - can this be refactored? */ | 37 | /* TODO: copied from ehci-hcd.c - can this be refactored? */ |
| 32 | /* | 38 | /* |
| 33 | * handshake - spin reading hc until handshake completes or fails | 39 | * handshake - spin reading hc until handshake completes or fails |
| @@ -214,6 +220,12 @@ int xhci_init(struct usb_hcd *hcd) | |||
| 214 | 220 | ||
| 215 | xhci_dbg(xhci, "xhci_init\n"); | 221 | xhci_dbg(xhci, "xhci_init\n"); |
| 216 | spin_lock_init(&xhci->lock); | 222 | spin_lock_init(&xhci->lock); |
| 223 | if (link_quirk) { | ||
| 224 | xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n"); | ||
| 225 | xhci->quirks |= XHCI_LINK_TRB_QUIRK; | ||
| 226 | } else { | ||
| 227 | xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n"); | ||
| 228 | } | ||
| 217 | retval = xhci_mem_init(xhci, GFP_KERNEL); | 229 | retval = xhci_mem_init(xhci, GFP_KERNEL); |
| 218 | xhci_dbg(xhci, "Finished xhci_init\n"); | 230 | xhci_dbg(xhci, "Finished xhci_init\n"); |
| 219 | 231 | ||
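The link_quirk knob follows the usual module-parameter-to-quirk-flag pattern: a writable integer parameter is folded into the host controller's quirks bitmask at init time, and because the mode is S_IRUGO | S_IWUSR it can also be flipped through sysfs after load, with later controller initialisations seeing the new value. A stand-alone sketch of the pattern; the quirk bit value here is illustrative, not the driver's definition:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

#define SKETCH_LINK_TRB_QUIRK	(1 << 0)	/* illustrative bit value */

static unsigned int quirks;

static int __init quirk_sketch_init(void)
{
	if (link_quirk)
		quirks |= SKETCH_LINK_TRB_QUIRK;	/* checked on the hot path */
	return 0;
}
module_init(quirk_sketch_init);

MODULE_LICENSE("GPL");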
| @@ -339,13 +351,14 @@ void xhci_event_ring_work(unsigned long arg) | |||
| 339 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | 351 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); |
| 340 | xhci_dbg_cmd_ptrs(xhci); | 352 | xhci_dbg_cmd_ptrs(xhci); |
| 341 | for (i = 0; i < MAX_HC_SLOTS; ++i) { | 353 | for (i = 0; i < MAX_HC_SLOTS; ++i) { |
| 342 | if (xhci->devs[i]) { | 354 | if (!xhci->devs[i]) |
| 343 | for (j = 0; j < 31; ++j) { | 355 | continue; |
| 344 | if (xhci->devs[i]->ep_rings[j]) { | 356 | for (j = 0; j < 31; ++j) { |
| 345 | xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); | 357 | struct xhci_ring *ring = xhci->devs[i]->eps[j].ring; |
| 346 | xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg); | 358 | if (!ring) |
| 347 | } | 359 | continue; |
| 348 | } | 360 | xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); |
| 361 | xhci_debug_segment(xhci, ring->deq_seg); | ||
| 349 | } | 362 | } |
| 350 | } | 363 | } |
| 351 | 364 | ||
| @@ -555,13 +568,22 @@ unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) | |||
| 555 | return 1 << (xhci_get_endpoint_index(desc) + 1); | 568 | return 1 << (xhci_get_endpoint_index(desc) + 1); |
| 556 | } | 569 | } |
| 557 | 570 | ||
| 571 | /* Find the flag for this endpoint (for use in the control context). Use the | ||
| 572 | * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is | ||
| 573 | * bit 1, etc. | ||
| 574 | */ | ||
| 575 | unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) | ||
| 576 | { | ||
| 577 | return 1 << (ep_index + 1); | ||
| 578 | } | ||
| 579 | |||
| 558 | /* Compute the last valid endpoint context index. Basically, this is the | 580 | /* Compute the last valid endpoint context index. Basically, this is the |
| 559 | * endpoint index plus one. For slot contexts with more than one valid endpoint, | 581 | * endpoint index plus one. For slot contexts with more than one valid endpoint, |
| 560 | * we find the most significant bit set in the added contexts flags. | 582 | * we find the most significant bit set in the added contexts flags. |
| 561 | * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000 | 583 | * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000 |
| 562 | * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. | 584 | * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. |
| 563 | */ | 585 | */ |
| 564 | static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs) | 586 | unsigned int xhci_last_valid_endpoint(u32 added_ctxs) |
| 565 | { | 587 | { |
| 566 | return fls(added_ctxs) - 1; | 588 | return fls(added_ctxs) - 1; |
| 567 | } | 589 | } |
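The context-flag arithmetic is easy to sanity-check by hand: the slot context occupies bit 0 of the add/drop masks, so endpoint index i maps to bit i + 1, and fls() of the mask minus one recovers the last valid endpoint context index. A tiny worked example using the ep 1 IN case from the comment above:

#include <linux/bitops.h>	/* fls() */

static unsigned int ep_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);	/* bit 0 is the slot context */
}

static unsigned int last_valid_ep(unsigned int added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/*
 * ep 1 IN (epnum 0x81) has endpoint index 2, so:
 *   ep_flag_from_index(2) == 0b1000
 *   last_valid_ep(0b1000) == fls(0b1000) - 1 == 4 - 1 == 3
 */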
| @@ -589,6 +611,71 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, | |||
| 589 | return 1; | 611 | return 1; |
| 590 | } | 612 | } |
| 591 | 613 | ||
| 614 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, | ||
| 615 | struct usb_device *udev, struct xhci_command *command, | ||
| 616 | bool ctx_change, bool must_succeed); | ||
| 617 | |||
| 618 | /* | ||
| 619 | * Full speed devices may have a max packet size greater than 8 bytes, but the | ||
| 620 | * USB core doesn't know that until it reads the first 8 bytes of the | ||
| 621 | * descriptor. If the usb_device's max packet size changes after that point, | ||
| 622 | * we need to issue an evaluate context command and wait on it. | ||
| 623 | */ | ||
| 624 | static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, | ||
| 625 | unsigned int ep_index, struct urb *urb) | ||
| 626 | { | ||
| 627 | struct xhci_container_ctx *in_ctx; | ||
| 628 | struct xhci_container_ctx *out_ctx; | ||
| 629 | struct xhci_input_control_ctx *ctrl_ctx; | ||
| 630 | struct xhci_ep_ctx *ep_ctx; | ||
| 631 | int max_packet_size; | ||
| 632 | int hw_max_packet_size; | ||
| 633 | int ret = 0; | ||
| 634 | |||
| 635 | out_ctx = xhci->devs[slot_id]->out_ctx; | ||
| 636 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); | ||
| 637 | hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2); | ||
| 638 | max_packet_size = urb->dev->ep0.desc.wMaxPacketSize; | ||
| 639 | if (hw_max_packet_size != max_packet_size) { | ||
| 640 | xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); | ||
| 641 | xhci_dbg(xhci, "Max packet size in usb_device = %d\n", | ||
| 642 | max_packet_size); | ||
| 643 | xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n", | ||
| 644 | hw_max_packet_size); | ||
| 645 | xhci_dbg(xhci, "Issuing evaluate context command.\n"); | ||
| 646 | |||
| 647 | /* Set up the modified control endpoint 0 */ | ||
| 648 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, | ||
| 649 | xhci->devs[slot_id]->out_ctx, ep_index); | ||
| 650 | in_ctx = xhci->devs[slot_id]->in_ctx; | ||
| 651 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | ||
| 652 | ep_ctx->ep_info2 &= ~MAX_PACKET_MASK; | ||
| 653 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size); | ||
| 654 | |||
| 655 | /* Set up the input context flags for the command */ | ||
| 656 | /* FIXME: This won't work if a non-default control endpoint | ||
| 657 | * changes max packet sizes. | ||
| 658 | */ | ||
| 659 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
| 660 | ctrl_ctx->add_flags = EP0_FLAG; | ||
| 661 | ctrl_ctx->drop_flags = 0; | ||
| 662 | |||
| 663 | xhci_dbg(xhci, "Slot %d input context\n", slot_id); | ||
| 664 | xhci_dbg_ctx(xhci, in_ctx, ep_index); | ||
| 665 | xhci_dbg(xhci, "Slot %d output context\n", slot_id); | ||
| 666 | xhci_dbg_ctx(xhci, out_ctx, ep_index); | ||
| 667 | |||
| 668 | ret = xhci_configure_endpoint(xhci, urb->dev, NULL, | ||
| 669 | true, false); | ||
| 670 | |||
| 671 | /* Clean up the input context for later use by bandwidth | ||
| 672 | * functions. | ||
| 673 | */ | ||
| 674 | ctrl_ctx->add_flags = SLOT_FLAG; | ||
| 675 | } | ||
| 676 | return ret; | ||
| 677 | } | ||
| 678 | |||
| 592 | /* | 679 | /* |
| 593 | * non-error returns are a promise to giveback() the urb later | 680 | * non-error returns are a promise to giveback() the urb later |
| 594 | * we drop ownership so next owner (or urb unlink) can get it | 681 | * we drop ownership so next owner (or urb unlink) can get it |
| @@ -600,13 +687,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
| 600 | int ret = 0; | 687 | int ret = 0; |
| 601 | unsigned int slot_id, ep_index; | 688 | unsigned int slot_id, ep_index; |
| 602 | 689 | ||
| 690 | |||
| 603 | if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0) | 691 | if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0) |
| 604 | return -EINVAL; | 692 | return -EINVAL; |
| 605 | 693 | ||
| 606 | slot_id = urb->dev->slot_id; | 694 | slot_id = urb->dev->slot_id; |
| 607 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); | 695 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
| 608 | 696 | ||
| 609 | spin_lock_irqsave(&xhci->lock, flags); | ||
| 610 | if (!xhci->devs || !xhci->devs[slot_id]) { | 697 | if (!xhci->devs || !xhci->devs[slot_id]) { |
| 611 | if (!in_interrupt()) | 698 | if (!in_interrupt()) |
| 612 | dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n"); | 699 | dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n"); |
| @@ -619,19 +706,38 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
| 619 | ret = -ESHUTDOWN; | 706 | ret = -ESHUTDOWN; |
| 620 | goto exit; | 707 | goto exit; |
| 621 | } | 708 | } |
| 622 | if (usb_endpoint_xfer_control(&urb->ep->desc)) | 709 | if (usb_endpoint_xfer_control(&urb->ep->desc)) { |
| 710 | /* Check to see if the max packet size for the default control | ||
| 711 | * endpoint changed during FS device enumeration | ||
| 712 | */ | ||
| 713 | if (urb->dev->speed == USB_SPEED_FULL) { | ||
| 714 | ret = xhci_check_maxpacket(xhci, slot_id, | ||
| 715 | ep_index, urb); | ||
| 716 | if (ret < 0) | ||
| 717 | return ret; | ||
| 718 | } | ||
| 719 | |||
| 623 | /* We have a spinlock and interrupts disabled, so we must pass | 720 | /* We have a spinlock and interrupts disabled, so we must pass |
| 624 | * atomic context to this function, which may allocate memory. | 721 | * atomic context to this function, which may allocate memory. |
| 625 | */ | 722 | */ |
| 723 | spin_lock_irqsave(&xhci->lock, flags); | ||
| 626 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, | 724 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
| 627 | slot_id, ep_index); | 725 | slot_id, ep_index); |
| 628 | else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) | 726 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 727 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { | ||
| 728 | spin_lock_irqsave(&xhci->lock, flags); | ||
| 629 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, | 729 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
| 630 | slot_id, ep_index); | 730 | slot_id, ep_index); |
| 631 | else | 731 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 732 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { | ||
| 733 | spin_lock_irqsave(&xhci->lock, flags); | ||
| 734 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, | ||
| 735 | slot_id, ep_index); | ||
| 736 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 737 | } else { | ||
| 632 | ret = -EINVAL; | 738 | ret = -EINVAL; |
| 739 | } | ||
| 633 | exit: | 740 | exit: |
| 634 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 635 | return ret; | 741 | return ret; |
| 636 | } | 742 | } |
| 637 | 743 | ||
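The restructuring above pushes the spin_lock_irqsave()/spin_unlock_irqrestore() pair into each transfer-type branch. The reason is visible in the control path: xhci_check_maxpacket() can issue an Evaluate Context command and sleep on its completion, which is not allowed while holding a spinlock with interrupts disabled, so the possibly-sleeping work runs before the lock is taken and only the GFP_ATOMIC ring work runs under it. Below is a minimal userspace analogue of that ordering, with a pthread mutex standing in for the spinlock and hypothetical helper names; it is not the xHCI code itself.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for xhci_check_maxpacket(): may block waiting on a
     * command completion, so it must not run under the lock.
     */
    static int check_maxpacket_may_sleep(void)
    {
            usleep(1000);           /* pretend to wait on a completion */
            return 0;
    }

    /* Stand-in for xhci_queue_ctrl_tx(): quick, non-blocking work on
     * shared ring state, done with the lock held.
     */
    static int queue_transfer_atomic(void)
    {
            return 0;
    }

    static int enqueue(int is_control)
    {
            int ret = 0;

            if (is_control) {
                    ret = check_maxpacket_may_sleep();  /* before the lock */
                    if (ret < 0)
                            return ret;
            }
            pthread_mutex_lock(&lock);                  /* spin_lock_irqsave() */
            ret = queue_transfer_atomic();              /* GFP_ATOMIC-style work */
            pthread_mutex_unlock(&lock);                /* spin_unlock_irqrestore() */
            return ret;
    }

    int main(void)
    {
            printf("enqueue returned %d\n", enqueue(1));
            return 0;
    }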
| @@ -674,6 +780,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
| 674 | struct xhci_td *td; | 780 | struct xhci_td *td; |
| 675 | unsigned int ep_index; | 781 | unsigned int ep_index; |
| 676 | struct xhci_ring *ep_ring; | 782 | struct xhci_ring *ep_ring; |
| 783 | struct xhci_virt_ep *ep; | ||
| 677 | 784 | ||
| 678 | xhci = hcd_to_xhci(hcd); | 785 | xhci = hcd_to_xhci(hcd); |
| 679 | spin_lock_irqsave(&xhci->lock, flags); | 786 | spin_lock_irqsave(&xhci->lock, flags); |
| @@ -686,17 +793,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
| 686 | xhci_dbg(xhci, "Event ring:\n"); | 793 | xhci_dbg(xhci, "Event ring:\n"); |
| 687 | xhci_debug_ring(xhci, xhci->event_ring); | 794 | xhci_debug_ring(xhci, xhci->event_ring); |
| 688 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); | 795 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
| 689 | ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; | 796 | ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; |
| 797 | ep_ring = ep->ring; | ||
| 690 | xhci_dbg(xhci, "Endpoint ring:\n"); | 798 | xhci_dbg(xhci, "Endpoint ring:\n"); |
| 691 | xhci_debug_ring(xhci, ep_ring); | 799 | xhci_debug_ring(xhci, ep_ring); |
| 692 | td = (struct xhci_td *) urb->hcpriv; | 800 | td = (struct xhci_td *) urb->hcpriv; |
| 693 | 801 | ||
| 694 | ep_ring->cancels_pending++; | 802 | ep->cancels_pending++; |
| 695 | list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list); | 803 | list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); |
| 696 | /* Queue a stop endpoint command, but only if this is | 804 | /* Queue a stop endpoint command, but only if this is |
| 697 | * the first cancellation to be handled. | 805 | * the first cancellation to be handled. |
| 698 | */ | 806 | */ |
| 699 | if (ep_ring->cancels_pending == 1) { | 807 | if (ep->cancels_pending == 1) { |
| 700 | xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); | 808 | xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); |
| 701 | xhci_ring_cmd_db(xhci); | 809 | xhci_ring_cmd_db(xhci); |
| 702 | } | 810 | } |
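Several hunks in this patch replace xhci->devs[slot]->ep_rings[i] with xhci->devs[slot]->eps[i], moving per-endpoint bookkeeping out of struct xhci_ring into a per-device struct xhci_virt_ep. The header change itself is not part of these hunks, so the sketch below is reconstructed purely from the field accesses seen throughout the patch; the real definition in xhci.h may order the members differently or carry additional ones.

    /* Reconstructed from usage in this patch -- illustrative only. */
    struct xhci_virt_ep {
            struct xhci_ring        *ring;          /* current transfer ring */
            struct xhci_ring        *new_ring;      /* staged by xhci_endpoint_init() */
            unsigned int            ep_state;       /* SET_DEQ_PENDING, EP_HALTED, ... */
            /* Cancellation bookkeeping formerly kept on the ring: */
            struct list_head        cancelled_td_list;
            unsigned int            cancels_pending;
            /* TD/TRB the endpoint stopped on, used for stall recovery: */
            struct xhci_td          *stopped_td;
            union xhci_trb          *stopped_trb;
    };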
| @@ -930,6 +1038,141 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir | |||
| 930 | } | 1038 | } |
| 931 | } | 1039 | } |
| 932 | 1040 | ||
| 1041 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, | ||
| 1042 | struct usb_device *udev, int *cmd_status) | ||
| 1043 | { | ||
| 1044 | int ret; | ||
| 1045 | |||
| 1046 | switch (*cmd_status) { | ||
| 1047 | case COMP_ENOMEM: | ||
| 1048 | dev_warn(&udev->dev, "Not enough host controller resources " | ||
| 1049 | "for new device state.\n"); | ||
| 1050 | ret = -ENOMEM; | ||
| 1051 | /* FIXME: can we allocate more resources for the HC? */ | ||
| 1052 | break; | ||
| 1053 | case COMP_BW_ERR: | ||
| 1054 | dev_warn(&udev->dev, "Not enough bandwidth " | ||
| 1055 | "for new device state.\n"); | ||
| 1056 | ret = -ENOSPC; | ||
| 1057 | /* FIXME: can we go back to the old state? */ | ||
| 1058 | break; | ||
| 1059 | case COMP_TRB_ERR: | ||
| 1060 | /* the HCD set up something wrong */ | ||
| 1061 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " | ||
| 1062 | "add flag = 1, " | ||
| 1063 | "and endpoint is not disabled.\n"); | ||
| 1064 | ret = -EINVAL; | ||
| 1065 | break; | ||
| 1066 | case COMP_SUCCESS: | ||
| 1067 | dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); | ||
| 1068 | ret = 0; | ||
| 1069 | break; | ||
| 1070 | default: | ||
| 1071 | xhci_err(xhci, "ERROR: unexpected command completion " | ||
| 1072 | "code 0x%x.\n", *cmd_status); | ||
| 1073 | ret = -EINVAL; | ||
| 1074 | break; | ||
| 1075 | } | ||
| 1076 | return ret; | ||
| 1077 | } | ||
| 1078 | |||
| 1079 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | ||
| 1080 | struct usb_device *udev, int *cmd_status) | ||
| 1081 | { | ||
| 1082 | int ret; | ||
| 1083 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; | ||
| 1084 | |||
| 1085 | switch (*cmd_status) { | ||
| 1086 | case COMP_EINVAL: | ||
| 1087 | dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " | ||
| 1088 | "context command.\n"); | ||
| 1089 | ret = -EINVAL; | ||
| 1090 | break; | ||
| 1091 | case COMP_EBADSLT: | ||
| 1092 | dev_warn(&udev->dev, "WARN: slot not enabled for " | ||
| 1093 | "evaluate context command.\n"); | ||
| 1094 | case COMP_CTX_STATE: | ||
| 1095 | dev_warn(&udev->dev, "WARN: invalid context state for " | ||
| 1096 | "evaluate context command.\n"); | ||
| 1097 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); | ||
| 1098 | ret = -EINVAL; | ||
| 1099 | break; | ||
| 1100 | case COMP_SUCCESS: | ||
| 1101 | dev_dbg(&udev->dev, "Successful evaluate context command\n"); | ||
| 1102 | ret = 0; | ||
| 1103 | break; | ||
| 1104 | default: | ||
| 1105 | xhci_err(xhci, "ERROR: unexpected command completion " | ||
| 1106 | "code 0x%x.\n", *cmd_status); | ||
| 1107 | ret = -EINVAL; | ||
| 1108 | break; | ||
| 1109 | } | ||
| 1110 | return ret; | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | /* Issue a configure endpoint command or evaluate context command | ||
| 1114 | * and wait for it to finish. | ||
| 1115 | */ | ||
| 1116 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, | ||
| 1117 | struct usb_device *udev, | ||
| 1118 | struct xhci_command *command, | ||
| 1119 | bool ctx_change, bool must_succeed) | ||
| 1120 | { | ||
| 1121 | int ret; | ||
| 1122 | int timeleft; | ||
| 1123 | unsigned long flags; | ||
| 1124 | struct xhci_container_ctx *in_ctx; | ||
| 1125 | struct completion *cmd_completion; | ||
| 1126 | int *cmd_status; | ||
| 1127 | struct xhci_virt_device *virt_dev; | ||
| 1128 | |||
| 1129 | spin_lock_irqsave(&xhci->lock, flags); | ||
| 1130 | virt_dev = xhci->devs[udev->slot_id]; | ||
| 1131 | if (command) { | ||
| 1132 | in_ctx = command->in_ctx; | ||
| 1133 | cmd_completion = command->completion; | ||
| 1134 | cmd_status = &command->status; | ||
| 1135 | command->command_trb = xhci->cmd_ring->enqueue; | ||
| 1136 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); | ||
| 1137 | } else { | ||
| 1138 | in_ctx = virt_dev->in_ctx; | ||
| 1139 | cmd_completion = &virt_dev->cmd_completion; | ||
| 1140 | cmd_status = &virt_dev->cmd_status; | ||
| 1141 | } | ||
| 1142 | |||
| 1143 | if (!ctx_change) | ||
| 1144 | ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, | ||
| 1145 | udev->slot_id, must_succeed); | ||
| 1146 | else | ||
| 1147 | ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, | ||
| 1148 | udev->slot_id); | ||
| 1149 | if (ret < 0) { | ||
| 1150 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 1151 | xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); | ||
| 1152 | return -ENOMEM; | ||
| 1153 | } | ||
| 1154 | xhci_ring_cmd_db(xhci); | ||
| 1155 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 1156 | |||
| 1157 | /* Wait for the configure endpoint command to complete */ | ||
| 1158 | timeleft = wait_for_completion_interruptible_timeout( | ||
| 1159 | cmd_completion, | ||
| 1160 | USB_CTRL_SET_TIMEOUT); | ||
| 1161 | if (timeleft <= 0) { | ||
| 1162 | xhci_warn(xhci, "%s while waiting for %s command\n", | ||
| 1163 | timeleft == 0 ? "Timeout" : "Signal", | ||
| 1164 | ctx_change == 0 ? | ||
| 1165 | "configure endpoint" : | ||
| 1166 | "evaluate context"); | ||
| 1167 | /* FIXME cancel the configure endpoint command */ | ||
| 1168 | return -ETIME; | ||
| 1169 | } | ||
| 1170 | |||
| 1171 | if (!ctx_change) | ||
| 1172 | return xhci_configure_endpoint_result(xhci, udev, cmd_status); | ||
| 1173 | return xhci_evaluate_context_result(xhci, udev, cmd_status); | ||
| 1174 | } | ||
| 1175 | |||
| 933 | /* Called after one or more calls to xhci_add_endpoint() or | 1176 | /* Called after one or more calls to xhci_add_endpoint() or |
| 934 | * xhci_drop_endpoint(). If this call fails, the USB core is expected | 1177 | * xhci_drop_endpoint(). If this call fails, the USB core is expected |
| 935 | * to call xhci_reset_bandwidth(). | 1178 | * to call xhci_reset_bandwidth(). |
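xhci_configure_endpoint() wraps the shared "queue a command, ring the doorbell, sleep on a completion, decode the completion code" sequence for both Configure Endpoint (ctx_change = false) and Evaluate Context (ctx_change = true). Callers either pass a private struct xhci_command, allocated with xhci_alloc_command() from the xhci-mem.c part of this patch, or NULL to fall back on the per-device input context and completion. The fragment below is only a hedged caller sketch, loosely modelled on the hub-update path added further down; error handling is trimmed and the population of the input context is assumed to have happened where the comment indicates.

    /* Sketch only: assumes xhci, udev, mem_flags from the caller, and an
     * input context that still needs its add/drop flags set.
     */
    static int sketch_issue_config_or_evaluate(struct xhci_hcd *xhci,
                    struct usb_device *udev, bool ctx_change, gfp_t mem_flags)
    {
            struct xhci_command *command;
            int ret;

            command = xhci_alloc_command(xhci, true /* with completion */, mem_flags);
            if (!command)
                    return -ENOMEM;

            /* ... copy slot/endpoint contexts into command->in_ctx and set
             * ctrl_ctx->add_flags / drop_flags here ...
             */

            /* false = Configure Endpoint, true = Evaluate Context; must_succeed
             * is only meaningful to the reset-endpoint quirk handling.
             */
            ret = xhci_configure_endpoint(xhci, udev, command, ctx_change, false);

            xhci_free_command(xhci, command);
            return ret;
    }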
| @@ -944,8 +1187,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
| 944 | { | 1187 | { |
| 945 | int i; | 1188 | int i; |
| 946 | int ret = 0; | 1189 | int ret = 0; |
| 947 | int timeleft; | ||
| 948 | unsigned long flags; | ||
| 949 | struct xhci_hcd *xhci; | 1190 | struct xhci_hcd *xhci; |
| 950 | struct xhci_virt_device *virt_dev; | 1191 | struct xhci_virt_device *virt_dev; |
| 951 | struct xhci_input_control_ctx *ctrl_ctx; | 1192 | struct xhci_input_control_ctx *ctrl_ctx; |
| @@ -975,56 +1216,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
| 975 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, | 1216 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
| 976 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | 1217 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); |
| 977 | 1218 | ||
| 978 | spin_lock_irqsave(&xhci->lock, flags); | 1219 | ret = xhci_configure_endpoint(xhci, udev, NULL, |
| 979 | ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma, | 1220 | false, false); |
| 980 | udev->slot_id); | ||
| 981 | if (ret < 0) { | ||
| 982 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 983 | xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); | ||
| 984 | return -ENOMEM; | ||
| 985 | } | ||
| 986 | xhci_ring_cmd_db(xhci); | ||
| 987 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 988 | |||
| 989 | /* Wait for the configure endpoint command to complete */ | ||
| 990 | timeleft = wait_for_completion_interruptible_timeout( | ||
| 991 | &virt_dev->cmd_completion, | ||
| 992 | USB_CTRL_SET_TIMEOUT); | ||
| 993 | if (timeleft <= 0) { | ||
| 994 | xhci_warn(xhci, "%s while waiting for configure endpoint command\n", | ||
| 995 | timeleft == 0 ? "Timeout" : "Signal"); | ||
| 996 | /* FIXME cancel the configure endpoint command */ | ||
| 997 | return -ETIME; | ||
| 998 | } | ||
| 999 | |||
| 1000 | switch (virt_dev->cmd_status) { | ||
| 1001 | case COMP_ENOMEM: | ||
| 1002 | dev_warn(&udev->dev, "Not enough host controller resources " | ||
| 1003 | "for new device state.\n"); | ||
| 1004 | ret = -ENOMEM; | ||
| 1005 | /* FIXME: can we allocate more resources for the HC? */ | ||
| 1006 | break; | ||
| 1007 | case COMP_BW_ERR: | ||
| 1008 | dev_warn(&udev->dev, "Not enough bandwidth " | ||
| 1009 | "for new device state.\n"); | ||
| 1010 | ret = -ENOSPC; | ||
| 1011 | /* FIXME: can we go back to the old state? */ | ||
| 1012 | break; | ||
| 1013 | case COMP_TRB_ERR: | ||
| 1014 | /* the HCD set up something wrong */ | ||
| 1015 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, " | ||
| 1016 | "and endpoint is not disabled.\n"); | ||
| 1017 | ret = -EINVAL; | ||
| 1018 | break; | ||
| 1019 | case COMP_SUCCESS: | ||
| 1020 | dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); | ||
| 1021 | break; | ||
| 1022 | default: | ||
| 1023 | xhci_err(xhci, "ERROR: unexpected command completion " | ||
| 1024 | "code 0x%x.\n", virt_dev->cmd_status); | ||
| 1025 | ret = -EINVAL; | ||
| 1026 | break; | ||
| 1027 | } | ||
| 1028 | if (ret) { | 1221 | if (ret) { |
| 1029 | /* Callee should call reset_bandwidth() */ | 1222 | /* Callee should call reset_bandwidth() */ |
| 1030 | return ret; | 1223 | return ret; |
| @@ -1037,10 +1230,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
| 1037 | xhci_zero_in_ctx(xhci, virt_dev); | 1230 | xhci_zero_in_ctx(xhci, virt_dev); |
| 1038 | /* Free any old rings */ | 1231 | /* Free any old rings */ |
| 1039 | for (i = 1; i < 31; ++i) { | 1232 | for (i = 1; i < 31; ++i) { |
| 1040 | if (virt_dev->new_ep_rings[i]) { | 1233 | if (virt_dev->eps[i].new_ring) { |
| 1041 | xhci_ring_free(xhci, virt_dev->ep_rings[i]); | 1234 | xhci_ring_free(xhci, virt_dev->eps[i].ring); |
| 1042 | virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i]; | 1235 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
| 1043 | virt_dev->new_ep_rings[i] = NULL; | 1236 | virt_dev->eps[i].new_ring = NULL; |
| 1044 | } | 1237 | } |
| 1045 | } | 1238 | } |
| 1046 | 1239 | ||
| @@ -1067,14 +1260,93 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
| 1067 | virt_dev = xhci->devs[udev->slot_id]; | 1260 | virt_dev = xhci->devs[udev->slot_id]; |
| 1068 | /* Free any rings allocated for added endpoints */ | 1261 | /* Free any rings allocated for added endpoints */ |
| 1069 | for (i = 0; i < 31; ++i) { | 1262 | for (i = 0; i < 31; ++i) { |
| 1070 | if (virt_dev->new_ep_rings[i]) { | 1263 | if (virt_dev->eps[i].new_ring) { |
| 1071 | xhci_ring_free(xhci, virt_dev->new_ep_rings[i]); | 1264 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); |
| 1072 | virt_dev->new_ep_rings[i] = NULL; | 1265 | virt_dev->eps[i].new_ring = NULL; |
| 1073 | } | 1266 | } |
| 1074 | } | 1267 | } |
| 1075 | xhci_zero_in_ctx(xhci, virt_dev); | 1268 | xhci_zero_in_ctx(xhci, virt_dev); |
| 1076 | } | 1269 | } |
| 1077 | 1270 | ||
| 1271 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, | ||
| 1272 | struct xhci_container_ctx *in_ctx, | ||
| 1273 | struct xhci_container_ctx *out_ctx, | ||
| 1274 | u32 add_flags, u32 drop_flags) | ||
| 1275 | { | ||
| 1276 | struct xhci_input_control_ctx *ctrl_ctx; | ||
| 1277 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
| 1278 | ctrl_ctx->add_flags = add_flags; | ||
| 1279 | ctrl_ctx->drop_flags = drop_flags; | ||
| 1280 | xhci_slot_copy(xhci, in_ctx, out_ctx); | ||
| 1281 | ctrl_ctx->add_flags |= SLOT_FLAG; | ||
| 1282 | |||
| 1283 | xhci_dbg(xhci, "Input Context:\n"); | ||
| 1284 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); | ||
| 1285 | } | ||
| 1286 | |||
| 1287 | void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, | ||
| 1288 | unsigned int slot_id, unsigned int ep_index, | ||
| 1289 | struct xhci_dequeue_state *deq_state) | ||
| 1290 | { | ||
| 1291 | struct xhci_container_ctx *in_ctx; | ||
| 1292 | struct xhci_ep_ctx *ep_ctx; | ||
| 1293 | u32 added_ctxs; | ||
| 1294 | dma_addr_t addr; | ||
| 1295 | |||
| 1296 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, | ||
| 1297 | xhci->devs[slot_id]->out_ctx, ep_index); | ||
| 1298 | in_ctx = xhci->devs[slot_id]->in_ctx; | ||
| 1299 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | ||
| 1300 | addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, | ||
| 1301 | deq_state->new_deq_ptr); | ||
| 1302 | if (addr == 0) { | ||
| 1303 | xhci_warn(xhci, "WARN Cannot submit config ep after " | ||
| 1304 | "reset ep command\n"); | ||
| 1305 | xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", | ||
| 1306 | deq_state->new_deq_seg, | ||
| 1307 | deq_state->new_deq_ptr); | ||
| 1308 | return; | ||
| 1309 | } | ||
| 1310 | ep_ctx->deq = addr | deq_state->new_cycle_state; | ||
| 1311 | |||
| 1312 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); | ||
| 1313 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, | ||
| 1314 | xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); | ||
| 1315 | } | ||
| 1316 | |||
| 1317 | void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, | ||
| 1318 | struct usb_device *udev, unsigned int ep_index) | ||
| 1319 | { | ||
| 1320 | struct xhci_dequeue_state deq_state; | ||
| 1321 | struct xhci_virt_ep *ep; | ||
| 1322 | |||
| 1323 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); | ||
| 1324 | ep = &xhci->devs[udev->slot_id]->eps[ep_index]; | ||
| 1325 | /* We need to move the HW's dequeue pointer past this TD, | ||
| 1326 | * or it will attempt to resend it on the next doorbell ring. | ||
| 1327 | */ | ||
| 1328 | xhci_find_new_dequeue_state(xhci, udev->slot_id, | ||
| 1329 | ep_index, ep->stopped_td, | ||
| 1330 | &deq_state); | ||
| 1331 | |||
| 1332 | /* HW with the reset endpoint quirk will use the saved dequeue state to | ||
| 1333 | * issue a configure endpoint command later. | ||
| 1334 | */ | ||
| 1335 | if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { | ||
| 1336 | xhci_dbg(xhci, "Queueing new dequeue state\n"); | ||
| 1337 | xhci_queue_new_dequeue_state(xhci, udev->slot_id, | ||
| 1338 | ep_index, &deq_state); | ||
| 1339 | } else { | ||
| 1340 | /* Better hope no one uses the input context between now and the | ||
| 1341 | * reset endpoint completion! | ||
| 1342 | */ | ||
| 1343 | xhci_dbg(xhci, "Setting up input context for " | ||
| 1344 | "configure endpoint command\n"); | ||
| 1345 | xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, | ||
| 1346 | ep_index, &deq_state); | ||
| 1347 | } | ||
| 1348 | } | ||
| 1349 | |||
| 1078 | /* Deal with stalled endpoints. The core should have sent the control message | 1350 | /* Deal with stalled endpoints. The core should have sent the control message |
| 1079 | * to clear the halt condition. However, we need to make the xHCI hardware | 1351 | * to clear the halt condition. However, we need to make the xHCI hardware |
| 1080 | * reset its sequence number, since a device will expect a sequence number of | 1352 | * reset its sequence number, since a device will expect a sequence number of |
| @@ -1089,8 +1361,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, | |||
| 1089 | unsigned int ep_index; | 1361 | unsigned int ep_index; |
| 1090 | unsigned long flags; | 1362 | unsigned long flags; |
| 1091 | int ret; | 1363 | int ret; |
| 1092 | struct xhci_dequeue_state deq_state; | 1364 | struct xhci_virt_ep *virt_ep; |
| 1093 | struct xhci_ring *ep_ring; | ||
| 1094 | 1365 | ||
| 1095 | xhci = hcd_to_xhci(hcd); | 1366 | xhci = hcd_to_xhci(hcd); |
| 1096 | udev = (struct usb_device *) ep->hcpriv; | 1367 | udev = (struct usb_device *) ep->hcpriv; |
| @@ -1100,12 +1371,16 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, | |||
| 1100 | if (!ep->hcpriv) | 1371 | if (!ep->hcpriv) |
| 1101 | return; | 1372 | return; |
| 1102 | ep_index = xhci_get_endpoint_index(&ep->desc); | 1373 | ep_index = xhci_get_endpoint_index(&ep->desc); |
| 1103 | ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index]; | 1374 | virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
| 1104 | if (!ep_ring->stopped_td) { | 1375 | if (!virt_ep->stopped_td) { |
| 1105 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", | 1376 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", |
| 1106 | ep->desc.bEndpointAddress); | 1377 | ep->desc.bEndpointAddress); |
| 1107 | return; | 1378 | return; |
| 1108 | } | 1379 | } |
| 1380 | if (usb_endpoint_xfer_control(&ep->desc)) { | ||
| 1381 | xhci_dbg(xhci, "Control endpoint stall already handled.\n"); | ||
| 1382 | return; | ||
| 1383 | } | ||
| 1109 | 1384 | ||
| 1110 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); | 1385 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); |
| 1111 | spin_lock_irqsave(&xhci->lock, flags); | 1386 | spin_lock_irqsave(&xhci->lock, flags); |
| @@ -1116,17 +1391,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, | |||
| 1116 | * command. Better hope that last command worked! | 1391 | * command. Better hope that last command worked! |
| 1117 | */ | 1392 | */ |
| 1118 | if (!ret) { | 1393 | if (!ret) { |
| 1119 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); | 1394 | xhci_cleanup_stalled_ring(xhci, udev, ep_index); |
| 1120 | /* We need to move the HW's dequeue pointer past this TD, | 1395 | kfree(virt_ep->stopped_td); |
| 1121 | * or it will attempt to resend it on the next doorbell ring. | ||
| 1122 | */ | ||
| 1123 | xhci_find_new_dequeue_state(xhci, udev->slot_id, | ||
| 1124 | ep_index, ep_ring->stopped_td, &deq_state); | ||
| 1125 | xhci_dbg(xhci, "Queueing new dequeue state\n"); | ||
| 1126 | xhci_queue_new_dequeue_state(xhci, ep_ring, | ||
| 1127 | udev->slot_id, | ||
| 1128 | ep_index, &deq_state); | ||
| 1129 | kfree(ep_ring->stopped_td); | ||
| 1130 | xhci_ring_cmd_db(xhci); | 1396 | xhci_ring_cmd_db(xhci); |
| 1131 | } | 1397 | } |
| 1132 | spin_unlock_irqrestore(&xhci->lock, flags); | 1398 | spin_unlock_irqrestore(&xhci->lock, flags); |
| @@ -1328,6 +1594,88 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
| 1328 | return 0; | 1594 | return 0; |
| 1329 | } | 1595 | } |
| 1330 | 1596 | ||
| 1597 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's | ||
| 1598 | * internal data structures for the device. | ||
| 1599 | */ | ||
| 1600 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, | ||
| 1601 | struct usb_tt *tt, gfp_t mem_flags) | ||
| 1602 | { | ||
| 1603 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
| 1604 | struct xhci_virt_device *vdev; | ||
| 1605 | struct xhci_command *config_cmd; | ||
| 1606 | struct xhci_input_control_ctx *ctrl_ctx; | ||
| 1607 | struct xhci_slot_ctx *slot_ctx; | ||
| 1608 | unsigned long flags; | ||
| 1609 | unsigned think_time; | ||
| 1610 | int ret; | ||
| 1611 | |||
| 1612 | /* Ignore root hubs */ | ||
| 1613 | if (!hdev->parent) | ||
| 1614 | return 0; | ||
| 1615 | |||
| 1616 | vdev = xhci->devs[hdev->slot_id]; | ||
| 1617 | if (!vdev) { | ||
| 1618 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); | ||
| 1619 | return -EINVAL; | ||
| 1620 | } | ||
| 1621 | config_cmd = xhci_alloc_command(xhci, true, mem_flags); | ||
| 1622 | if (!config_cmd) { | ||
| 1623 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); | ||
| 1624 | return -ENOMEM; | ||
| 1625 | } | ||
| 1626 | |||
| 1627 | spin_lock_irqsave(&xhci->lock, flags); | ||
| 1628 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); | ||
| 1629 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); | ||
| 1630 | ctrl_ctx->add_flags |= SLOT_FLAG; | ||
| 1631 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); | ||
| 1632 | slot_ctx->dev_info |= DEV_HUB; | ||
| 1633 | if (tt->multi) | ||
| 1634 | slot_ctx->dev_info |= DEV_MTT; | ||
| 1635 | if (xhci->hci_version > 0x95) { | ||
| 1636 | xhci_dbg(xhci, "xHCI version %x needs hub " | ||
| 1637 | "TT think time and number of ports\n", | ||
| 1638 | (unsigned int) xhci->hci_version); | ||
| 1639 | slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); | ||
| 1640 | /* Set TT think time - convert from ns to FS bit times. | ||
| 1641 | * 0 = 8 FS bit times, 1 = 16 FS bit times, | ||
| 1642 | * 2 = 24 FS bit times, 3 = 32 FS bit times. | ||
| 1643 | */ | ||
| 1644 | think_time = tt->think_time; | ||
| 1645 | if (think_time != 0) | ||
| 1646 | think_time = (think_time / 666) - 1; | ||
| 1647 | slot_ctx->tt_info |= TT_THINK_TIME(think_time); | ||
| 1648 | } else { | ||
| 1649 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " | ||
| 1650 | "TT think time or number of ports\n", | ||
| 1651 | (unsigned int) xhci->hci_version); | ||
| 1652 | } | ||
| 1653 | slot_ctx->dev_state = 0; | ||
| 1654 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 1655 | |||
| 1656 | xhci_dbg(xhci, "Set up %s for hub device.\n", | ||
| 1657 | (xhci->hci_version > 0x95) ? | ||
| 1658 | "configure endpoint" : "evaluate context"); | ||
| 1659 | xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); | ||
| 1660 | xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); | ||
| 1661 | |||
| 1662 | /* Issue and wait for the configure endpoint or | ||
| 1663 | * evaluate context command. | ||
| 1664 | */ | ||
| 1665 | if (xhci->hci_version > 0x95) | ||
| 1666 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | ||
| 1667 | false, false); | ||
| 1668 | else | ||
| 1669 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | ||
| 1670 | true, false); | ||
| 1671 | |||
| 1672 | xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); | ||
| 1673 | xhci_dbg_ctx(xhci, vdev->out_ctx, 0); | ||
| 1674 | |||
| 1675 | xhci_free_command(xhci, config_cmd); | ||
| 1676 | return ret; | ||
| 1677 | } | ||
| 1678 | |||
| 1331 | int xhci_get_frame(struct usb_hcd *hcd) | 1679 | int xhci_get_frame(struct usb_hcd *hcd) |
| 1332 | { | 1680 | { |
| 1333 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 1681 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
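The TT think-time conversion in xhci_update_hub_device() is worth spelling out: the USB core stores tt->think_time in nanoseconds (one full-speed bit time is about 83.3 ns, so 8 bit times is 666 ns, 16 is 1332 ns, and so on), while the xHCI slot context wants the raw 2-bit hub-descriptor encoding, hence the divide-by-666-and-subtract-one. The small standalone check below is illustrative only; the helper name is invented.

    #include <stdio.h>

    /* Maps the USB core's think_time (ns: 666, 1332, 1998 or 2664) back to
     * the xHCI slot-context encoding 0..3, mirroring the
     * "(think_time / 666) - 1" computation in xhci_update_hub_device().
     */
    static unsigned int tt_think_time_encoding(unsigned int think_time_ns)
    {
            if (think_time_ns == 0)
                    return 0;       /* the driver leaves 0 alone, which encodes 0 */
            return (think_time_ns / 666) - 1;
    }

    int main(void)
    {
            unsigned int ns[] = { 666, 1332, 1998, 2664 };
            unsigned int i;

            for (i = 0; i < 4; i++)
                    printf("%u ns -> encoding %u (%u FS bit times)\n",
                           ns[i], tt_think_time_encoding(ns[i]),
                           8 * (tt_think_time_encoding(ns[i]) + 1));
            return 0;
    }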
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index e6b9a1c6002d..1db4fea8c170 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
| @@ -94,6 +94,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |||
| 94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | 94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; |
| 95 | val &= ~TRB_TYPE_BITMASK; | 95 | val &= ~TRB_TYPE_BITMASK; |
| 96 | val |= TRB_TYPE(TRB_LINK); | 96 | val |= TRB_TYPE(TRB_LINK); |
| 97 | /* Always set the chain bit with 0.95 hardware */ | ||
| 98 | if (xhci_link_trb_quirk(xhci)) | ||
| 99 | val |= TRB_CHAIN; | ||
| 97 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; | 100 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; |
| 98 | } | 101 | } |
| 99 | xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", | 102 | xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", |
| @@ -141,7 +144,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, | |||
| 141 | return 0; | 144 | return 0; |
| 142 | 145 | ||
| 143 | INIT_LIST_HEAD(&ring->td_list); | 146 | INIT_LIST_HEAD(&ring->td_list); |
| 144 | INIT_LIST_HEAD(&ring->cancelled_td_list); | ||
| 145 | if (num_segs == 0) | 147 | if (num_segs == 0) |
| 146 | return ring; | 148 | return ring; |
| 147 | 149 | ||
| @@ -262,8 +264,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
| 262 | return; | 264 | return; |
| 263 | 265 | ||
| 264 | for (i = 0; i < 31; ++i) | 266 | for (i = 0; i < 31; ++i) |
| 265 | if (dev->ep_rings[i]) | 267 | if (dev->eps[i].ring) |
| 266 | xhci_ring_free(xhci, dev->ep_rings[i]); | 268 | xhci_ring_free(xhci, dev->eps[i].ring); |
| 267 | 269 | ||
| 268 | if (dev->in_ctx) | 270 | if (dev->in_ctx) |
| 269 | xhci_free_container_ctx(xhci, dev->in_ctx); | 271 | xhci_free_container_ctx(xhci, dev->in_ctx); |
| @@ -278,6 +280,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
| 278 | struct usb_device *udev, gfp_t flags) | 280 | struct usb_device *udev, gfp_t flags) |
| 279 | { | 281 | { |
| 280 | struct xhci_virt_device *dev; | 282 | struct xhci_virt_device *dev; |
| 283 | int i; | ||
| 281 | 284 | ||
| 282 | /* Slot ID 0 is reserved */ | 285 | /* Slot ID 0 is reserved */ |
| 283 | if (slot_id == 0 || xhci->devs[slot_id]) { | 286 | if (slot_id == 0 || xhci->devs[slot_id]) { |
| @@ -306,12 +309,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
| 306 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, | 309 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, |
| 307 | (unsigned long long)dev->in_ctx->dma); | 310 | (unsigned long long)dev->in_ctx->dma); |
| 308 | 311 | ||
| 312 | /* Initialize the cancellation list for each endpoint */ | ||
| 313 | for (i = 0; i < 31; i++) | ||
| 314 | INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); | ||
| 315 | |||
| 309 | /* Allocate endpoint 0 ring */ | 316 | /* Allocate endpoint 0 ring */ |
| 310 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); | 317 | dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags); |
| 311 | if (!dev->ep_rings[0]) | 318 | if (!dev->eps[0].ring) |
| 312 | goto fail; | 319 | goto fail; |
| 313 | 320 | ||
| 314 | init_completion(&dev->cmd_completion); | 321 | init_completion(&dev->cmd_completion); |
| 322 | INIT_LIST_HEAD(&dev->cmd_list); | ||
| 315 | 323 | ||
| 316 | /* Point to output device context in dcbaa. */ | 324 | /* Point to output device context in dcbaa. */ |
| 317 | xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; | 325 | xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; |
| @@ -352,9 +360,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
| 352 | /* 3) Only the control endpoint is valid - one endpoint context */ | 360 | /* 3) Only the control endpoint is valid - one endpoint context */ |
| 353 | slot_ctx->dev_info |= LAST_CTX(1); | 361 | slot_ctx->dev_info |= LAST_CTX(1); |
| 354 | 362 | ||
| 363 | slot_ctx->dev_info |= (u32) udev->route; | ||
| 355 | switch (udev->speed) { | 364 | switch (udev->speed) { |
| 356 | case USB_SPEED_SUPER: | 365 | case USB_SPEED_SUPER: |
| 357 | slot_ctx->dev_info |= (u32) udev->route; | ||
| 358 | slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; | 366 | slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; |
| 359 | break; | 367 | break; |
| 360 | case USB_SPEED_HIGH: | 368 | case USB_SPEED_HIGH: |
| @@ -382,14 +390,12 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
| 382 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); | 390 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); |
| 383 | 391 | ||
| 384 | /* Is this a LS/FS device under a HS hub? */ | 392 | /* Is this a LS/FS device under a HS hub? */ |
| 385 | /* | ||
| 386 | * FIXME: I don't think this is right, where does the TT info for the | ||
| 387 | * roothub or parent hub come from? | ||
| 388 | */ | ||
| 389 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && | 393 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && |
| 390 | udev->tt) { | 394 | udev->tt) { |
| 391 | slot_ctx->tt_info = udev->tt->hub->slot_id; | 395 | slot_ctx->tt_info = udev->tt->hub->slot_id; |
| 392 | slot_ctx->tt_info |= udev->ttport << 8; | 396 | slot_ctx->tt_info |= udev->ttport << 8; |
| 397 | if (udev->tt->multi) | ||
| 398 | slot_ctx->dev_info |= DEV_MTT; | ||
| 393 | } | 399 | } |
| 394 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); | 400 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); |
| 395 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); | 401 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); |
| @@ -398,22 +404,35 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
| 398 | /* Step 5 */ | 404 | /* Step 5 */ |
| 399 | ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); | 405 | ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); |
| 400 | /* | 406 | /* |
| 401 | * See section 4.3 bullet 6: | ||
| 402 | * The default Max Packet size for ep0 is "8 bytes for a USB2 | ||
| 403 | * LS/FS/HS device or 512 bytes for a USB3 SS device" | ||
| 404 | * XXX: Not sure about wireless USB devices. | 407 | * XXX: Not sure about wireless USB devices. |
| 405 | */ | 408 | */ |
| 406 | if (udev->speed == USB_SPEED_SUPER) | 409 | switch (udev->speed) { |
| 410 | case USB_SPEED_SUPER: | ||
| 407 | ep0_ctx->ep_info2 |= MAX_PACKET(512); | 411 | ep0_ctx->ep_info2 |= MAX_PACKET(512); |
| 408 | else | 412 | break; |
| 413 | case USB_SPEED_HIGH: | ||
| 414 | /* USB core guesses at a 64-byte max packet first for FS devices */ | ||
| 415 | case USB_SPEED_FULL: | ||
| 416 | ep0_ctx->ep_info2 |= MAX_PACKET(64); | ||
| 417 | break; | ||
| 418 | case USB_SPEED_LOW: | ||
| 409 | ep0_ctx->ep_info2 |= MAX_PACKET(8); | 419 | ep0_ctx->ep_info2 |= MAX_PACKET(8); |
| 420 | break; | ||
| 421 | case USB_SPEED_VARIABLE: | ||
| 422 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | ||
| 423 | return -EINVAL; | ||
| 424 | break; | ||
| 425 | default: | ||
| 426 | /* New speed? */ | ||
| 427 | BUG(); | ||
| 428 | } | ||
| 410 | /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ | 429 | /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ |
| 411 | ep0_ctx->ep_info2 |= MAX_BURST(0); | 430 | ep0_ctx->ep_info2 |= MAX_BURST(0); |
| 412 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); | 431 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); |
| 413 | 432 | ||
| 414 | ep0_ctx->deq = | 433 | ep0_ctx->deq = |
| 415 | dev->ep_rings[0]->first_seg->dma; | 434 | dev->eps[0].ring->first_seg->dma; |
| 416 | ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; | 435 | ep0_ctx->deq |= dev->eps[0].ring->cycle_state; |
| 417 | 436 | ||
| 418 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ | 437 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ |
| 419 | 438 | ||
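The switch above replaces the old SuperSpeed-or-not test with per-speed defaults for the control endpoint's Max Packet Size: 512 for SuperSpeed, 64 for high speed and for the USB core's initial full-speed guess, 8 for low speed, and an explicit failure for wireless (USB_SPEED_VARIABLE). If a full-speed device later reports a different bMaxPacketSize0, xhci_check_maxpacket() in the xhci.c hunk earlier corrects the context with an Evaluate Context command. The table-style sketch below restates the same mapping outside the driver, with invented names.

    #include <stdio.h>

    enum sketch_speed { SK_LOW, SK_FULL, SK_HIGH, SK_SUPER };

    /* Default ep0 max packet size programmed into the endpoint context,
     * matching the switch in xhci_setup_addressable_virt_dev(); the
     * full-speed value is only an initial guess that may be fixed up
     * later via an Evaluate Context command.
     */
    static int default_ep0_max_packet(enum sketch_speed speed)
    {
            switch (speed) {
            case SK_SUPER:
                    return 512;
            case SK_HIGH:
            case SK_FULL:
                    return 64;
            case SK_LOW:
                    return 8;
            default:
                    return -1;      /* e.g. wireless: not handled here */
            }
    }

    int main(void)
    {
            printf("LS=%d FS=%d HS=%d SS=%d\n",
                   default_ep0_max_packet(SK_LOW),
                   default_ep0_max_packet(SK_FULL),
                   default_ep0_max_packet(SK_HIGH),
                   default_ep0_max_packet(SK_SUPER));
            return 0;
    }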
| @@ -523,10 +542,11 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
| 523 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); | 542 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
| 524 | 543 | ||
| 525 | /* Set up the endpoint ring */ | 544 | /* Set up the endpoint ring */ |
| 526 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); | 545 | virt_dev->eps[ep_index].new_ring = |
| 527 | if (!virt_dev->new_ep_rings[ep_index]) | 546 | xhci_ring_alloc(xhci, 1, true, mem_flags); |
| 547 | if (!virt_dev->eps[ep_index].new_ring) | ||
| 528 | return -ENOMEM; | 548 | return -ENOMEM; |
| 529 | ep_ring = virt_dev->new_ep_rings[ep_index]; | 549 | ep_ring = virt_dev->eps[ep_index].new_ring; |
| 530 | ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; | 550 | ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; |
| 531 | 551 | ||
| 532 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); | 552 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); |
| @@ -598,6 +618,48 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci, | |||
| 598 | */ | 618 | */ |
| 599 | } | 619 | } |
| 600 | 620 | ||
| 621 | /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy. | ||
| 622 | * Useful when you want to change one particular aspect of the endpoint and then | ||
| 623 | * issue a configure endpoint command. | ||
| 624 | */ | ||
| 625 | void xhci_endpoint_copy(struct xhci_hcd *xhci, | ||
| 626 | struct xhci_container_ctx *in_ctx, | ||
| 627 | struct xhci_container_ctx *out_ctx, | ||
| 628 | unsigned int ep_index) | ||
| 629 | { | ||
| 630 | struct xhci_ep_ctx *out_ep_ctx; | ||
| 631 | struct xhci_ep_ctx *in_ep_ctx; | ||
| 632 | |||
| 633 | out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); | ||
| 634 | in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | ||
| 635 | |||
| 636 | in_ep_ctx->ep_info = out_ep_ctx->ep_info; | ||
| 637 | in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; | ||
| 638 | in_ep_ctx->deq = out_ep_ctx->deq; | ||
| 639 | in_ep_ctx->tx_info = out_ep_ctx->tx_info; | ||
| 640 | } | ||
| 641 | |||
| 642 | /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. | ||
| 643 | * Useful when you want to change one particular aspect of the slot context and then | ||
| 644 | * issue a configure endpoint command. Only the context entries field matters, | ||
| 645 | * but we'll copy the whole thing anyway. | ||
| 646 | */ | ||
| 647 | void xhci_slot_copy(struct xhci_hcd *xhci, | ||
| 648 | struct xhci_container_ctx *in_ctx, | ||
| 649 | struct xhci_container_ctx *out_ctx) | ||
| 650 | { | ||
| 651 | struct xhci_slot_ctx *in_slot_ctx; | ||
| 652 | struct xhci_slot_ctx *out_slot_ctx; | ||
| 653 | |||
| 654 | in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); | ||
| 655 | out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx); | ||
| 656 | |||
| 657 | in_slot_ctx->dev_info = out_slot_ctx->dev_info; | ||
| 658 | in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; | ||
| 659 | in_slot_ctx->tt_info = out_slot_ctx->tt_info; | ||
| 660 | in_slot_ctx->dev_state = out_slot_ctx->dev_state; | ||
| 661 | } | ||
| 662 | |||
| 601 | /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ | 663 | /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ |
| 602 | static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | 664 | static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) |
| 603 | { | 665 | { |
| @@ -695,6 +757,44 @@ static void scratchpad_free(struct xhci_hcd *xhci) | |||
| 695 | xhci->scratchpad = NULL; | 757 | xhci->scratchpad = NULL; |
| 696 | } | 758 | } |
| 697 | 759 | ||
| 760 | struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, | ||
| 761 | bool allocate_completion, gfp_t mem_flags) | ||
| 762 | { | ||
| 763 | struct xhci_command *command; | ||
| 764 | |||
| 765 | command = kzalloc(sizeof(*command), mem_flags); | ||
| 766 | if (!command) | ||
| 767 | return NULL; | ||
| 768 | |||
| 769 | command->in_ctx = | ||
| 770 | xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); | ||
| 771 | if (!command->in_ctx) | ||
| 772 | return NULL; | ||
| 773 | |||
| 774 | if (allocate_completion) { | ||
| 775 | command->completion = | ||
| 776 | kzalloc(sizeof(struct completion), mem_flags); | ||
| 777 | if (!command->completion) { | ||
| 778 | xhci_free_container_ctx(xhci, command->in_ctx); | ||
| 779 | return NULL; | ||
| 780 | } | ||
| 781 | init_completion(command->completion); | ||
| 782 | } | ||
| 783 | |||
| 784 | command->status = 0; | ||
| 785 | INIT_LIST_HEAD(&command->cmd_list); | ||
| 786 | return command; | ||
| 787 | } | ||
| 788 | |||
| 789 | void xhci_free_command(struct xhci_hcd *xhci, | ||
| 790 | struct xhci_command *command) | ||
| 791 | { | ||
| 792 | xhci_free_container_ctx(xhci, | ||
| 793 | command->in_ctx); | ||
| 794 | kfree(command->completion); | ||
| 795 | kfree(command); | ||
| 796 | } | ||
| 797 | |||
| 698 | void xhci_mem_cleanup(struct xhci_hcd *xhci) | 798 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
| 699 | { | 799 | { |
| 700 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | 800 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 592fe7e623f7..06595ec27bb7 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -24,6 +24,10 @@ | |||
| 24 | 24 | ||
| 25 | #include "xhci.h" | 25 | #include "xhci.h" |
| 26 | 26 | ||
| 27 | /* Device for a quirk */ | ||
| 28 | #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 | ||
| 29 | #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 | ||
| 30 | |||
| 27 | static const char hcd_name[] = "xhci_hcd"; | 31 | static const char hcd_name[] = "xhci_hcd"; |
| 28 | 32 | ||
| 29 | /* called after powerup, by probe or system-pm "wakeup" */ | 33 | /* called after powerup, by probe or system-pm "wakeup" */ |
| @@ -59,9 +63,20 @@ static int xhci_pci_setup(struct usb_hcd *hcd) | |||
| 59 | xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1); | 63 | xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1); |
| 60 | xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2); | 64 | xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2); |
| 61 | xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3); | 65 | xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3); |
| 66 | xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); | ||
| 67 | xhci->hci_version = HC_VERSION(xhci->hcc_params); | ||
| 62 | xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params); | 68 | xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params); |
| 63 | xhci_print_registers(xhci); | 69 | xhci_print_registers(xhci); |
| 64 | 70 | ||
| 71 | /* Look for vendor-specific quirks */ | ||
| 72 | if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && | ||
| 73 | pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && | ||
| 74 | pdev->revision == 0x0) { | ||
| 75 | xhci->quirks |= XHCI_RESET_EP_QUIRK; | ||
| 76 | xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" | ||
| 77 | " endpoint cmd after reset endpoint\n"); | ||
| 78 | } | ||
| 79 | |||
| 65 | /* Make sure the HC is halted. */ | 80 | /* Make sure the HC is halted. */ |
| 66 | retval = xhci_halt(xhci); | 81 | retval = xhci_halt(xhci); |
| 67 | if (retval) | 82 | if (retval) |
| @@ -121,6 +136,7 @@ static const struct hc_driver xhci_pci_hc_driver = { | |||
| 121 | .check_bandwidth = xhci_check_bandwidth, | 136 | .check_bandwidth = xhci_check_bandwidth, |
| 122 | .reset_bandwidth = xhci_reset_bandwidth, | 137 | .reset_bandwidth = xhci_reset_bandwidth, |
| 123 | .address_device = xhci_address_device, | 138 | .address_device = xhci_address_device, |
| 139 | .update_hub_device = xhci_update_hub_device, | ||
| 124 | 140 | ||
| 125 | /* | 141 | /* |
| 126 | * scheduling support | 142 | * scheduling support |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index aa88a067148b..173c39c76489 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -172,8 +172,9 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
| 172 | * have their chain bit cleared (so that each Link TRB is a separate TD). | 172 | * have their chain bit cleared (so that each Link TRB is a separate TD). |
| 173 | * | 173 | * |
| 174 | * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit | 174 | * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit |
| 175 | * set, but other sections talk about dealing with the chain bit set. | 175 | * set, but other sections talk about dealing with the chain bit set. This was |
| 176 | * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB. | 176 | * fixed in the 0.96 specification errata, but we have to assume that all 0.95 |
| 177 | * xHCI hardware can't handle the chain bit being cleared on a link TRB. | ||
| 177 | */ | 178 | */ |
| 178 | static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) | 179 | static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) |
| 179 | { | 180 | { |
| @@ -191,8 +192,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
| 191 | while (last_trb(xhci, ring, ring->enq_seg, next)) { | 192 | while (last_trb(xhci, ring, ring->enq_seg, next)) { |
| 192 | if (!consumer) { | 193 | if (!consumer) { |
| 193 | if (ring != xhci->event_ring) { | 194 | if (ring != xhci->event_ring) { |
| 194 | next->link.control &= ~TRB_CHAIN; | 195 | /* If we're not dealing with 0.95 hardware, |
| 195 | next->link.control |= chain; | 196 | * carry over the chain bit of the previous TRB |
| 197 | * (which may mean the chain bit is cleared). | ||
| 198 | */ | ||
| 199 | if (!xhci_link_trb_quirk(xhci)) { | ||
| 200 | next->link.control &= ~TRB_CHAIN; | ||
| 201 | next->link.control |= chain; | ||
| 202 | } | ||
| 196 | /* Give this link TRB to the hardware */ | 203 | /* Give this link TRB to the hardware */ |
| 197 | wmb(); | 204 | wmb(); |
| 198 | if (next->link.control & TRB_CYCLE) | 205 | if (next->link.control & TRB_CYCLE) |
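inc_enq() normally carries the chain bit of the TD being queued over onto the link TRB, but 0.95-based hardware requires the chain bit to stay set on every link TRB (section 6.4.4.1 of the 0.95 spec, relaxed by the 0.96 errata), which is what xhci_link_trb_quirk() guards here and what the xhci-mem.c hunk earlier pre-sets at ring-creation time. The fragment below is a condensed restatement of that rule, not the literal inc_enq() code; the quirk helper is assumed to key off the controller's interface version, and its definition is not part of these hunks.

    /* Sketch of the link-TRB chain-bit rule; field and macro names follow
     * the driver, but this is an illustrative helper, not inc_enq().
     */
    static void set_link_trb_chain_bit(struct xhci_hcd *xhci,
                    union xhci_trb *link, u32 chain_from_current_td)
    {
            if (xhci_link_trb_quirk(xhci)) {
                    /* 0.95 hardware: the chain bit must always stay set, so
                     * keep whatever xhci_link_segments() programmed.
                     */
                    return;
            }
            /* Otherwise mirror the chain state of the TD being queued,
             * which may clear the bit between TDs.
             */
            link->link.control &= ~TRB_CHAIN;
            link->link.control |= chain_from_current_td;
    }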
| @@ -289,16 +296,18 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, | |||
| 289 | unsigned int slot_id, | 296 | unsigned int slot_id, |
| 290 | unsigned int ep_index) | 297 | unsigned int ep_index) |
| 291 | { | 298 | { |
| 292 | struct xhci_ring *ep_ring; | 299 | struct xhci_virt_ep *ep; |
| 300 | unsigned int ep_state; | ||
| 293 | u32 field; | 301 | u32 field; |
| 294 | __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; | 302 | __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; |
| 295 | 303 | ||
| 296 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | 304 | ep = &xhci->devs[slot_id]->eps[ep_index]; |
| 305 | ep_state = ep->ep_state; | ||
| 297 | /* Don't ring the doorbell for this endpoint if there are pending | 306 | /* Don't ring the doorbell for this endpoint if there are pending |
| 298 | * cancellations because we don't want to interrupt processing. | 307 | * cancellations because we don't want to interrupt processing. |
| 299 | */ | 308 | */ |
| 300 | if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING) | 309 | if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING) |
| 301 | && !(ep_ring->state & EP_HALTED)) { | 310 | && !(ep_state & EP_HALTED)) { |
| 302 | field = xhci_readl(xhci, db_addr) & DB_MASK; | 311 | field = xhci_readl(xhci, db_addr) & DB_MASK; |
| 303 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); | 312 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); |
| 304 | /* Flush PCI posted writes - FIXME Matthew Wilcox says this | 313 | /* Flush PCI posted writes - FIXME Matthew Wilcox says this |
| @@ -354,7 +363,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
| 354 | struct xhci_td *cur_td, struct xhci_dequeue_state *state) | 363 | struct xhci_td *cur_td, struct xhci_dequeue_state *state) |
| 355 | { | 364 | { |
| 356 | struct xhci_virt_device *dev = xhci->devs[slot_id]; | 365 | struct xhci_virt_device *dev = xhci->devs[slot_id]; |
| 357 | struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; | 366 | struct xhci_ring *ep_ring = dev->eps[ep_index].ring; |
| 358 | struct xhci_generic_trb *trb; | 367 | struct xhci_generic_trb *trb; |
| 359 | struct xhci_ep_ctx *ep_ctx; | 368 | struct xhci_ep_ctx *ep_ctx; |
| 360 | dma_addr_t addr; | 369 | dma_addr_t addr; |
| @@ -362,7 +371,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
| 362 | state->new_cycle_state = 0; | 371 | state->new_cycle_state = 0; |
| 363 | xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); | 372 | xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); |
| 364 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, | 373 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, |
| 365 | ep_ring->stopped_trb, | 374 | dev->eps[ep_index].stopped_trb, |
| 366 | &state->new_cycle_state); | 375 | &state->new_cycle_state); |
| 367 | if (!state->new_deq_seg) | 376 | if (!state->new_deq_seg) |
| 368 | BUG(); | 377 | BUG(); |
| @@ -442,9 +451,11 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
| 442 | union xhci_trb *deq_ptr, u32 cycle_state); | 451 | union xhci_trb *deq_ptr, u32 cycle_state); |
| 443 | 452 | ||
| 444 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | 453 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, |
| 445 | struct xhci_ring *ep_ring, unsigned int slot_id, | 454 | unsigned int slot_id, unsigned int ep_index, |
| 446 | unsigned int ep_index, struct xhci_dequeue_state *deq_state) | 455 | struct xhci_dequeue_state *deq_state) |
| 447 | { | 456 | { |
| 457 | struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; | ||
| 458 | |||
| 448 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " | 459 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " |
| 449 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", | 460 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", |
| 450 | deq_state->new_deq_seg, | 461 | deq_state->new_deq_seg, |
| @@ -461,8 +472,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | |||
| 461 | * if the ring is running, and ringing the doorbell starts the | 472 | * if the ring is running, and ringing the doorbell starts the |
| 462 | * ring running. | 473 | * ring running. |
| 463 | */ | 474 | */ |
| 464 | ep_ring->state |= SET_DEQ_PENDING; | 475 | ep->ep_state |= SET_DEQ_PENDING; |
| 465 | xhci_ring_cmd_db(xhci); | ||
| 466 | } | 476 | } |
| 467 | 477 | ||
| 468 | /* | 478 | /* |
| @@ -481,6 +491,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
| 481 | unsigned int slot_id; | 491 | unsigned int slot_id; |
| 482 | unsigned int ep_index; | 492 | unsigned int ep_index; |
| 483 | struct xhci_ring *ep_ring; | 493 | struct xhci_ring *ep_ring; |
| 494 | struct xhci_virt_ep *ep; | ||
| 484 | struct list_head *entry; | 495 | struct list_head *entry; |
| 485 | struct xhci_td *cur_td = 0; | 496 | struct xhci_td *cur_td = 0; |
| 486 | struct xhci_td *last_unlinked_td; | 497 | struct xhci_td *last_unlinked_td; |
| @@ -493,9 +504,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
| 493 | memset(&deq_state, 0, sizeof(deq_state)); | 504 | memset(&deq_state, 0, sizeof(deq_state)); |
| 494 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 505 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); |
| 495 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 506 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); |
| 496 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | 507 | ep = &xhci->devs[slot_id]->eps[ep_index]; |
| 508 | ep_ring = ep->ring; | ||
| 497 | 509 | ||
| 498 | if (list_empty(&ep_ring->cancelled_td_list)) | 510 | if (list_empty(&ep->cancelled_td_list)) |
| 499 | return; | 511 | return; |
| 500 | 512 | ||
| 501 | /* Fix up the ep ring first, so HW stops executing cancelled TDs. | 513 | /* Fix up the ep ring first, so HW stops executing cancelled TDs. |
| 502 | * We have the xHCI lock, so nothing can modify this list until we drop | 514 | * We have the xHCI lock, so nothing can modify this list until we drop |
| 503 | * it. We're also in the event handler, so we can't get re-interrupted | 515 | * it. We're also in the event handler, so we can't get re-interrupted |
| 503 | * it. We're also in the event handler, so we can't get re-interrupted | 515 | * it. We're also in the event handler, so we can't get re-interrupted |
| 504 | * if another Stop Endpoint command completes | 516 | * if another Stop Endpoint command completes |
| 505 | */ | 517 | */ |
| 506 | list_for_each(entry, &ep_ring->cancelled_td_list) { | 518 | list_for_each(entry, &ep->cancelled_td_list) { |
| 507 | cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); | 519 | cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); |
| 508 | xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", | 520 | xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", |
| 509 | cur_td->first_trb, | 521 | cur_td->first_trb, |
| @@ -512,7 +524,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
| 512 | * If we stopped on the TD we need to cancel, then we have to | 524 | * If we stopped on the TD we need to cancel, then we have to |
| 513 | * move the xHC endpoint ring dequeue pointer past this TD. | 525 | * move the xHC endpoint ring dequeue pointer past this TD. |
| 514 | */ | 526 | */ |
| 515 | if (cur_td == ep_ring->stopped_td) | 527 | if (cur_td == ep->stopped_td) |
| 516 | xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, | 528 | xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, |
| 517 | &deq_state); | 529 | &deq_state); |
| 518 | else | 530 | else |
| @@ -523,14 +535,15 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
| 523 | * the cancelled TD list for URB completion later. | 535 | * the cancelled TD list for URB completion later. |
| 524 | */ | 536 | */ |
| 525 | list_del(&cur_td->td_list); | 537 | list_del(&cur_td->td_list); |
| 526 | ep_ring->cancels_pending--; | 538 | ep->cancels_pending--; |
| 527 | } | 539 | } |
| 528 | last_unlinked_td = cur_td; | 540 | last_unlinked_td = cur_td; |
| 529 | 541 | ||
| 530 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ | 542 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ |
| 531 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { | 543 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { |
| 532 | xhci_queue_new_dequeue_state(xhci, ep_ring, | 544 | xhci_queue_new_dequeue_state(xhci, |
| 533 | slot_id, ep_index, &deq_state); | 545 | slot_id, ep_index, &deq_state); |
| 546 | xhci_ring_cmd_db(xhci); | ||
| 534 | } else { | 547 | } else { |
| 535 | /* Otherwise just ring the doorbell to restart the ring */ | 548 | /* Otherwise just ring the doorbell to restart the ring */ |
| 536 | ring_ep_doorbell(xhci, slot_id, ep_index); | 549 | ring_ep_doorbell(xhci, slot_id, ep_index); |
| @@ -543,7 +556,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
| 543 | * So stop when we've completed the URB for the last TD we unlinked. | 556 | * So stop when we've completed the URB for the last TD we unlinked. |
| 544 | */ | 557 | */ |
| 545 | do { | 558 | do { |
| 546 | cur_td = list_entry(ep_ring->cancelled_td_list.next, | 559 | cur_td = list_entry(ep->cancelled_td_list.next, |
| 547 | struct xhci_td, cancelled_td_list); | 560 | struct xhci_td, cancelled_td_list); |
| 548 | list_del(&cur_td->cancelled_td_list); | 561 | list_del(&cur_td->cancelled_td_list); |
| 549 | 562 | ||
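For readers following the refactor from per-ring to per-endpoint bookkeeping, the cancellation walk above can also be written with list_for_each_entry(); a minimal sketch, assuming only the field names this patch introduces (ep->cancelled_td_list, td->cancelled_td_list), not the patch's own code:

#include "xhci.h"

/* Sketch only: equivalent traversal of the endpoint's cancelled-TD list. */
static void sketch_walk_cancelled_tds(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	struct xhci_td *cur_td;

	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list)
		xhci_dbg(xhci, "Cancelling TD starting at %p\n",
				cur_td->first_trb);
}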
| @@ -590,7 +603,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
| 590 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 603 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); |
| 591 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 604 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); |
| 592 | dev = xhci->devs[slot_id]; | 605 | dev = xhci->devs[slot_id]; |
| 593 | ep_ring = dev->ep_rings[ep_index]; | 606 | ep_ring = dev->eps[ep_index].ring; |
| 594 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | 607 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); |
| 595 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); | 608 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); |
| 596 | 609 | ||
| @@ -634,7 +647,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
| 634 | ep_ctx->deq); | 647 | ep_ctx->deq); |
| 635 | } | 648 | } |
| 636 | 649 | ||
| 637 | ep_ring->state &= ~SET_DEQ_PENDING; | 650 | dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; |
| 638 | ring_ep_doorbell(xhci, slot_id, ep_index); | 651 | ring_ep_doorbell(xhci, slot_id, ep_index); |
| 639 | } | 652 | } |
| 640 | 653 | ||
| @@ -644,18 +657,60 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, | |||
| 644 | { | 657 | { |
| 645 | int slot_id; | 658 | int slot_id; |
| 646 | unsigned int ep_index; | 659 | unsigned int ep_index; |
| 660 | struct xhci_ring *ep_ring; | ||
| 647 | 661 | ||
| 648 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 662 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); |
| 649 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 663 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); |
| 664 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; | ||
| 650 | /* This command will only fail if the endpoint wasn't halted, | 665 | /* This command will only fail if the endpoint wasn't halted, |
| 651 | * but we don't care. | 666 | * but we don't care. |
| 652 | */ | 667 | */ |
| 653 | xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", | 668 | xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", |
| 654 | (unsigned int) GET_COMP_CODE(event->status)); | 669 | (unsigned int) GET_COMP_CODE(event->status)); |
| 655 | 670 | ||
| 656 | /* Clear our internal halted state and restart the ring */ | 671 | /* HW with the reset endpoint quirk needs to have a configure endpoint |
| 657 | xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED; | 672 | * command complete before the endpoint can be used. Queue that here |
| 658 | ring_ep_doorbell(xhci, slot_id, ep_index); | 673 | * because the HW can't handle two commands being queued in a row. |
| 674 | */ | ||
| 675 | if (xhci->quirks & XHCI_RESET_EP_QUIRK) { | ||
| 676 | xhci_dbg(xhci, "Queueing configure endpoint command\n"); | ||
| 677 | xhci_queue_configure_endpoint(xhci, | ||
| 678 | xhci->devs[slot_id]->in_ctx->dma, slot_id, | ||
| 679 | false); | ||
| 680 | xhci_ring_cmd_db(xhci); | ||
| 681 | } else { | ||
| 682 | /* Clear our internal halted state and restart the ring */ | ||
| 683 | xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; | ||
| 684 | ring_ep_doorbell(xhci, slot_id, ep_index); | ||
| 685 | } | ||
| 686 | } | ||
| 687 | |||
| 688 | /* Check to see if a command in the device's command queue matches this one. | ||
| 689 | * Signal the completion or free the command, and return 1. Return 0 if the | ||
| 690 | * completed command isn't at the head of the command list. | ||
| 691 | */ | ||
| 692 | static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, | ||
| 693 | struct xhci_virt_device *virt_dev, | ||
| 694 | struct xhci_event_cmd *event) | ||
| 695 | { | ||
| 696 | struct xhci_command *command; | ||
| 697 | |||
| 698 | if (list_empty(&virt_dev->cmd_list)) | ||
| 699 | return 0; | ||
| 700 | |||
| 701 | command = list_entry(virt_dev->cmd_list.next, | ||
| 702 | struct xhci_command, cmd_list); | ||
| 703 | if (xhci->cmd_ring->dequeue != command->command_trb) | ||
| 704 | return 0; | ||
| 705 | |||
| 706 | command->status = | ||
| 707 | GET_COMP_CODE(event->status); | ||
| 708 | list_del(&command->cmd_list); | ||
| 709 | if (command->completion) | ||
| 710 | complete(command->completion); | ||
| 711 | else | ||
| 712 | xhci_free_command(xhci, command); | ||
| 713 | return 1; | ||
| 659 | } | 714 | } |
| 660 | 715 | ||
| 661 | static void handle_cmd_completion(struct xhci_hcd *xhci, | 716 | static void handle_cmd_completion(struct xhci_hcd *xhci, |
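handle_cmd_in_cmd_wait_list() above is the consumer side of the new struct xhci_command machinery; the producer side is not part of this hunk. Below is a hedged sketch of how a caller might queue a configure-endpoint command and wait on it, using xhci_alloc_command(), xhci_queue_configure_endpoint() and xhci_ring_cmd_db() as declared elsewhere in this patch; recording cmd_ring->enqueue as command_trb, and the elided locking, are assumptions for illustration only:

#include "xhci.h"

static int sketch_configure_ep_and_wait(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev = xhci->devs[slot_id];
	struct xhci_command *cmd;
	int ret;

	cmd = xhci_alloc_command(xhci, true /* allocate_completion */,
			GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Assumption: remember the TRB this command will occupy so the
	 * event handler can match it against cmd_ring->dequeue. */
	cmd->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);

	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
			slot_id, false);
	if (ret) {
		list_del(&cmd->cmd_list);
		xhci_free_command(xhci, cmd);
		return ret;
	}
	xhci_ring_cmd_db(xhci);

	wait_for_completion(cmd->completion);
	ret = cmd->status;
	xhci_free_command(xhci, cmd);
	return ret;
}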
| @@ -664,6 +719,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
| 664 | int slot_id = TRB_TO_SLOT_ID(event->flags); | 719 | int slot_id = TRB_TO_SLOT_ID(event->flags); |
| 665 | u64 cmd_dma; | 720 | u64 cmd_dma; |
| 666 | dma_addr_t cmd_dequeue_dma; | 721 | dma_addr_t cmd_dequeue_dma; |
| 722 | struct xhci_input_control_ctx *ctrl_ctx; | ||
| 723 | struct xhci_virt_device *virt_dev; | ||
| 724 | unsigned int ep_index; | ||
| 725 | struct xhci_ring *ep_ring; | ||
| 726 | unsigned int ep_state; | ||
| 667 | 727 | ||
| 668 | cmd_dma = event->cmd_trb; | 728 | cmd_dma = event->cmd_trb; |
| 669 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | 729 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, |
| @@ -691,6 +751,47 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
| 691 | xhci_free_virt_device(xhci, slot_id); | 751 | xhci_free_virt_device(xhci, slot_id); |
| 692 | break; | 752 | break; |
| 693 | case TRB_TYPE(TRB_CONFIG_EP): | 753 | case TRB_TYPE(TRB_CONFIG_EP): |
| 754 | virt_dev = xhci->devs[slot_id]; | ||
| 755 | if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) | ||
| 756 | break; | ||
| 757 | /* | ||
| 758 | * Configure endpoint commands can come from the USB core | ||
| 759 | * configuration or alt setting changes, or because the HW | ||
| 760 | * needed an extra configure endpoint command after a reset | ||
| 761 | * endpoint command. In the latter case, the xHCI driver is | ||
| 762 | * not waiting on the configure endpoint command. | ||
| 763 | */ | ||
| 764 | ctrl_ctx = xhci_get_input_control_ctx(xhci, | ||
| 765 | virt_dev->in_ctx); | ||
| 766 | /* Input ctx add_flags are the endpoint index plus one */ | ||
| 767 | ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; | ||
| 768 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; | ||
| 769 | if (!ep_ring) { | ||
| 770 | /* This must have been an initial configure endpoint */ | ||
| 771 | xhci->devs[slot_id]->cmd_status = | ||
| 772 | GET_COMP_CODE(event->status); | ||
| 773 | complete(&xhci->devs[slot_id]->cmd_completion); | ||
| 774 | break; | ||
| 775 | } | ||
| 776 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | ||
| 777 | xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, " | ||
| 778 | "state = %d\n", ep_index, ep_state); | ||
| 779 | if (xhci->quirks & XHCI_RESET_EP_QUIRK && | ||
| 780 | ep_state & EP_HALTED) { | ||
| 781 | /* Clear our internal halted state and restart ring */ | ||
| 782 | xhci->devs[slot_id]->eps[ep_index].ep_state &= | ||
| 783 | ~EP_HALTED; | ||
| 784 | ring_ep_doorbell(xhci, slot_id, ep_index); | ||
| 785 | } else { | ||
| 786 | xhci->devs[slot_id]->cmd_status = | ||
| 787 | GET_COMP_CODE(event->status); | ||
| 788 | complete(&xhci->devs[slot_id]->cmd_completion); | ||
| 789 | } | ||
| 790 | break; | ||
| 791 | case TRB_TYPE(TRB_EVAL_CONTEXT): | ||
| 792 | virt_dev = xhci->devs[slot_id]; | ||
| 793 | if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) | ||
| 794 | break; | ||
| 694 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); | 795 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); |
| 695 | complete(&xhci->devs[slot_id]->cmd_completion); | 796 | complete(&xhci->devs[slot_id]->cmd_completion); |
| 696 | break; | 797 | break; |
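The ep_index recovery above leans on xhci_last_valid_endpoint(), whose body lies outside this diff. Since each added endpoint sets bit (index + 1) of add_flags (see ADD_EP() in the xhci.h hunk below), a plausible implementation is the find-last-set idiom; this is a sketch, not necessarily the patch's actual code:

#include <linux/bitops.h>
#include "xhci.h"

/* Sketch: fls() gives the 1-based position of the highest set bit; minus
 * one, that is the last added endpoint's context number (ep_index + 1),
 * and the caller subtracts one more to get the 0-based ep_index. */
unsigned int sketch_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}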
| @@ -805,7 +906,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 805 | struct xhci_transfer_event *event) | 906 | struct xhci_transfer_event *event) |
| 806 | { | 907 | { |
| 807 | struct xhci_virt_device *xdev; | 908 | struct xhci_virt_device *xdev; |
| 909 | struct xhci_virt_ep *ep; | ||
| 808 | struct xhci_ring *ep_ring; | 910 | struct xhci_ring *ep_ring; |
| 911 | unsigned int slot_id; | ||
| 809 | int ep_index; | 912 | int ep_index; |
| 810 | struct xhci_td *td = 0; | 913 | struct xhci_td *td = 0; |
| 811 | dma_addr_t event_dma; | 914 | dma_addr_t event_dma; |
| @@ -814,9 +917,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 814 | struct urb *urb = 0; | 917 | struct urb *urb = 0; |
| 815 | int status = -EINPROGRESS; | 918 | int status = -EINPROGRESS; |
| 816 | struct xhci_ep_ctx *ep_ctx; | 919 | struct xhci_ep_ctx *ep_ctx; |
| 920 | u32 trb_comp_code; | ||
| 817 | 921 | ||
| 818 | xhci_dbg(xhci, "In %s\n", __func__); | 922 | xhci_dbg(xhci, "In %s\n", __func__); |
| 819 | xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; | 923 | slot_id = TRB_TO_SLOT_ID(event->flags); |
| 924 | xdev = xhci->devs[slot_id]; | ||
| 820 | if (!xdev) { | 925 | if (!xdev) { |
| 821 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); | 926 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); |
| 822 | return -ENODEV; | 927 | return -ENODEV; |
| @@ -825,7 +930,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 825 | /* Endpoint ID is 1 based, our index is zero based */ | 930 | /* Endpoint ID is 1 based, our index is zero based */ |
| 826 | ep_index = TRB_TO_EP_ID(event->flags) - 1; | 931 | ep_index = TRB_TO_EP_ID(event->flags) - 1; |
| 827 | xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); | 932 | xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); |
| 828 | ep_ring = xdev->ep_rings[ep_index]; | 933 | ep = &xdev->eps[ep_index]; |
| 934 | ep_ring = ep->ring; | ||
| 829 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | 935 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
| 830 | if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | 936 | if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { |
| 831 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); | 937 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); |
| @@ -870,7 +976,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 870 | (unsigned int) event->flags); | 976 | (unsigned int) event->flags); |
| 871 | 977 | ||
| 872 | /* Look for common error cases */ | 978 | /* Look for common error cases */ |
| 873 | switch (GET_COMP_CODE(event->transfer_len)) { | 979 | trb_comp_code = GET_COMP_CODE(event->transfer_len); |
| 980 | switch (trb_comp_code) { | ||
| 874 | /* Skip codes that require special handling depending on | 981 | /* Skip codes that require special handling depending on |
| 875 | * transfer type | 982 | * transfer type |
| 876 | */ | 983 | */ |
| @@ -885,7 +992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 885 | break; | 992 | break; |
| 886 | case COMP_STALL: | 993 | case COMP_STALL: |
| 887 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); | 994 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); |
| 888 | ep_ring->state |= EP_HALTED; | 995 | ep->ep_state |= EP_HALTED; |
| 889 | status = -EPIPE; | 996 | status = -EPIPE; |
| 890 | break; | 997 | break; |
| 891 | case COMP_TRB_ERR: | 998 | case COMP_TRB_ERR: |
| @@ -913,7 +1020,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 913 | /* Was this a control transfer? */ | 1020 | /* Was this a control transfer? */ |
| 914 | if (usb_endpoint_xfer_control(&td->urb->ep->desc)) { | 1021 | if (usb_endpoint_xfer_control(&td->urb->ep->desc)) { |
| 915 | xhci_debug_trb(xhci, xhci->event_ring->dequeue); | 1022 | xhci_debug_trb(xhci, xhci->event_ring->dequeue); |
| 916 | switch (GET_COMP_CODE(event->transfer_len)) { | 1023 | switch (trb_comp_code) { |
| 917 | case COMP_SUCCESS: | 1024 | case COMP_SUCCESS: |
| 918 | if (event_trb == ep_ring->dequeue) { | 1025 | if (event_trb == ep_ring->dequeue) { |
| 919 | xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n"); | 1026 | xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n"); |
| @@ -928,8 +1035,37 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 928 | break; | 1035 | break; |
| 929 | case COMP_SHORT_TX: | 1036 | case COMP_SHORT_TX: |
| 930 | xhci_warn(xhci, "WARN: short transfer on control ep\n"); | 1037 | xhci_warn(xhci, "WARN: short transfer on control ep\n"); |
| 931 | status = -EREMOTEIO; | 1038 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) |
| 1039 | status = -EREMOTEIO; | ||
| 1040 | else | ||
| 1041 | status = 0; | ||
| 932 | break; | 1042 | break; |
| 1043 | case COMP_BABBLE: | ||
| 1044 | /* The 0.95 spec says a babbling control endpoint | ||
| 1045 | * is not halted. The 0.96 spec says it is. Some HW | ||
| 1046 | * claims to be 0.95 compliant, but it halts the control | ||
| 1047 | * endpoint anyway. Check if a babble halted the | ||
| 1048 | * endpoint. | ||
| 1049 | */ | ||
| 1050 | if (ep_ctx->ep_info != EP_STATE_HALTED) | ||
| 1051 | break; | ||
| 1052 | /* else fall through */ | ||
| 1053 | case COMP_STALL: | ||
| 1054 | /* Did we transfer part of the data (middle) phase? */ | ||
| 1055 | if (event_trb != ep_ring->dequeue && | ||
| 1056 | event_trb != td->last_trb) | ||
| 1057 | td->urb->actual_length = | ||
| 1058 | td->urb->transfer_buffer_length | ||
| 1059 | - TRB_LEN(event->transfer_len); | ||
| 1060 | else | ||
| 1061 | td->urb->actual_length = 0; | ||
| 1062 | |||
| 1063 | ep->stopped_td = td; | ||
| 1064 | ep->stopped_trb = event_trb; | ||
| 1065 | xhci_queue_reset_ep(xhci, slot_id, ep_index); | ||
| 1066 | xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); | ||
| 1067 | xhci_ring_cmd_db(xhci); | ||
| 1068 | goto td_cleanup; | ||
| 933 | default: | 1069 | default: |
| 934 | /* Others already handled above */ | 1070 | /* Others already handled above */ |
| 935 | break; | 1071 | break; |
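Because ep_info packs more than the raw endpoint state, the halt test in the COMP_BABBLE path is more robust with a mask, mirroring the EP_STATE_MASK comparison done earlier in handle_tx_event(); a small defensive sketch, not part of the patch:

#include "xhci.h"

/* Sketch: mask ep_info before comparing against EP_STATE_HALTED. */
static bool sketch_ep_halted(struct xhci_ep_ctx *ep_ctx)
{
	return (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED;
}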
| @@ -943,7 +1079,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 943 | if (event_trb == td->last_trb) { | 1079 | if (event_trb == td->last_trb) { |
| 944 | if (td->urb->actual_length != 0) { | 1080 | if (td->urb->actual_length != 0) { |
| 945 | /* Don't overwrite a previously set error code */ | 1081 | /* Don't overwrite a previously set error code */ |
| 946 | if (status == -EINPROGRESS || status == 0) | 1082 | if ((status == -EINPROGRESS || |
| 1083 | status == 0) && | ||
| 1084 | (td->urb->transfer_flags | ||
| 1085 | & URB_SHORT_NOT_OK)) | ||
| 947 | /* Did we already see a short data stage? */ | 1086 | /* Did we already see a short data stage? */ |
| 948 | status = -EREMOTEIO; | 1087 | status = -EREMOTEIO; |
| 949 | } else { | 1088 | } else { |
| @@ -952,7 +1091,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 952 | } | 1091 | } |
| 953 | } else { | 1092 | } else { |
| 954 | /* Maybe the event was for the data stage? */ | 1093 | /* Maybe the event was for the data stage? */ |
| 955 | if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) { | 1094 | if (trb_comp_code != COMP_STOP_INVAL) { |
| 956 | /* We didn't stop on a link TRB in the middle */ | 1095 | /* We didn't stop on a link TRB in the middle */ |
| 957 | td->urb->actual_length = | 1096 | td->urb->actual_length = |
| 958 | td->urb->transfer_buffer_length - | 1097 | td->urb->transfer_buffer_length - |
| @@ -964,7 +1103,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 964 | } | 1103 | } |
| 965 | } | 1104 | } |
| 966 | } else { | 1105 | } else { |
| 967 | switch (GET_COMP_CODE(event->transfer_len)) { | 1106 | switch (trb_comp_code) { |
| 968 | case COMP_SUCCESS: | 1107 | case COMP_SUCCESS: |
| 969 | /* Double check that the HW transferred everything. */ | 1108 | /* Double check that the HW transferred everything. */ |
| 970 | if (event_trb != td->last_trb) { | 1109 | if (event_trb != td->last_trb) { |
| @@ -975,7 +1114,12 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 975 | else | 1114 | else |
| 976 | status = 0; | 1115 | status = 0; |
| 977 | } else { | 1116 | } else { |
| 978 | xhci_dbg(xhci, "Successful bulk transfer!\n"); | 1117 | if (usb_endpoint_xfer_bulk(&td->urb->ep->desc)) |
| 1118 | xhci_dbg(xhci, "Successful bulk " | ||
| 1119 | "transfer!\n"); | ||
| 1120 | else | ||
| 1121 | xhci_dbg(xhci, "Successful interrupt " | ||
| 1122 | "transfer!\n"); | ||
| 979 | status = 0; | 1123 | status = 0; |
| 980 | } | 1124 | } |
| 981 | break; | 1125 | break; |
| @@ -1001,11 +1145,17 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 1001 | td->urb->actual_length = | 1145 | td->urb->actual_length = |
| 1002 | td->urb->transfer_buffer_length - | 1146 | td->urb->transfer_buffer_length - |
| 1003 | TRB_LEN(event->transfer_len); | 1147 | TRB_LEN(event->transfer_len); |
| 1004 | if (td->urb->actual_length < 0) { | 1148 | if (td->urb->transfer_buffer_length < |
| 1149 | td->urb->actual_length) { | ||
| 1005 | xhci_warn(xhci, "HC gave bad length " | 1150 | xhci_warn(xhci, "HC gave bad length " |
| 1006 | "of %d bytes left\n", | 1151 | "of %d bytes left\n", |
| 1007 | TRB_LEN(event->transfer_len)); | 1152 | TRB_LEN(event->transfer_len)); |
| 1008 | td->urb->actual_length = 0; | 1153 | td->urb->actual_length = 0; |
| 1154 | if (td->urb->transfer_flags & | ||
| 1155 | URB_SHORT_NOT_OK) | ||
| 1156 | status = -EREMOTEIO; | ||
| 1157 | else | ||
| 1158 | status = 0; | ||
| 1009 | } | 1159 | } |
| 1010 | /* Don't overwrite a previously set error code */ | 1160 | /* Don't overwrite a previously set error code */ |
| 1011 | if (status == -EINPROGRESS) { | 1161 | if (status == -EINPROGRESS) { |
| @@ -1041,30 +1191,31 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 1041 | /* If the ring didn't stop on a Link or No-op TRB, add | 1191 | /* If the ring didn't stop on a Link or No-op TRB, add |
| 1042 | * in the actual bytes transferred from the Normal TRB | 1192 | * in the actual bytes transferred from the Normal TRB |
| 1043 | */ | 1193 | */ |
| 1044 | if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) | 1194 | if (trb_comp_code != COMP_STOP_INVAL) |
| 1045 | td->urb->actual_length += | 1195 | td->urb->actual_length += |
| 1046 | TRB_LEN(cur_trb->generic.field[2]) - | 1196 | TRB_LEN(cur_trb->generic.field[2]) - |
| 1047 | TRB_LEN(event->transfer_len); | 1197 | TRB_LEN(event->transfer_len); |
| 1048 | } | 1198 | } |
| 1049 | } | 1199 | } |
| 1050 | if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || | 1200 | if (trb_comp_code == COMP_STOP_INVAL || |
| 1051 | GET_COMP_CODE(event->transfer_len) == COMP_STOP) { | 1201 | trb_comp_code == COMP_STOP) { |
| 1052 | /* The Endpoint Stop Command completion will take care of any | 1202 | /* The Endpoint Stop Command completion will take care of any |
| 1053 | * stopped TDs. A stopped TD may be restarted, so don't update | 1203 | * stopped TDs. A stopped TD may be restarted, so don't update |
| 1054 | * the ring dequeue pointer or take this TD off any lists yet. | 1204 | * the ring dequeue pointer or take this TD off any lists yet. |
| 1055 | */ | 1205 | */ |
| 1056 | ep_ring->stopped_td = td; | 1206 | ep->stopped_td = td; |
| 1057 | ep_ring->stopped_trb = event_trb; | 1207 | ep->stopped_trb = event_trb; |
| 1058 | } else { | 1208 | } else { |
| 1059 | if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) { | 1209 | if (trb_comp_code == COMP_STALL || |
| 1210 | trb_comp_code == COMP_BABBLE) { | ||
| 1060 | /* The transfer is completed from the driver's | 1211 | /* The transfer is completed from the driver's |
| 1061 | * perspective, but we need to issue a set dequeue | 1212 | * perspective, but we need to issue a set dequeue |
| 1062 | * command for this stalled endpoint to move the dequeue | 1213 | * command for this stalled endpoint to move the dequeue |
| 1063 | * pointer past the TD. We can't do that here because | 1214 | * pointer past the TD. We can't do that here because |
| 1064 | * the halt condition must be cleared first. | 1215 | * the halt condition must be cleared first. |
| 1065 | */ | 1216 | */ |
| 1066 | ep_ring->stopped_td = td; | 1217 | ep->stopped_td = td; |
| 1067 | ep_ring->stopped_trb = event_trb; | 1218 | ep->stopped_trb = event_trb; |
| 1068 | } else { | 1219 | } else { |
| 1069 | /* Update ring dequeue pointer */ | 1220 | /* Update ring dequeue pointer */ |
| 1070 | while (ep_ring->dequeue != td->last_trb) | 1221 | while (ep_ring->dequeue != td->last_trb) |
| @@ -1072,16 +1223,41 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 1072 | inc_deq(xhci, ep_ring, false); | 1223 | inc_deq(xhci, ep_ring, false); |
| 1073 | } | 1224 | } |
| 1074 | 1225 | ||
| 1226 | td_cleanup: | ||
| 1075 | /* Clean up the endpoint's TD list */ | 1227 | /* Clean up the endpoint's TD list */ |
| 1076 | urb = td->urb; | 1228 | urb = td->urb; |
| 1229 | /* Do one last check of the actual transfer length. | ||
| 1230 | * If the host controller said we transferred more data than | ||
| 1231 | * the buffer length, urb->actual_length will be a very big | ||
| 1232 | * number (since it's unsigned). Play it safe and say we didn't | ||
| 1233 | * transfer anything. | ||
| 1234 | */ | ||
| 1235 | if (urb->actual_length > urb->transfer_buffer_length) { | ||
| 1236 | xhci_warn(xhci, "URB transfer length is wrong, " | ||
| 1237 | "xHC issue? req. len = %u, " | ||
| 1238 | "act. len = %u\n", | ||
| 1239 | urb->transfer_buffer_length, | ||
| 1240 | urb->actual_length); | ||
| 1241 | urb->actual_length = 0; | ||
| 1242 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | ||
| 1243 | status = -EREMOTEIO; | ||
| 1244 | else | ||
| 1245 | status = 0; | ||
| 1246 | } | ||
| 1077 | list_del(&td->td_list); | 1247 | list_del(&td->td_list); |
| 1078 | /* Was this TD slated to be cancelled but completed anyway? */ | 1248 | /* Was this TD slated to be cancelled but completed anyway? */ |
| 1079 | if (!list_empty(&td->cancelled_td_list)) { | 1249 | if (!list_empty(&td->cancelled_td_list)) { |
| 1080 | list_del(&td->cancelled_td_list); | 1250 | list_del(&td->cancelled_td_list); |
| 1081 | ep_ring->cancels_pending--; | 1251 | ep->cancels_pending--; |
| 1082 | } | 1252 | } |
| 1083 | /* Leave the TD around for the reset endpoint function to use */ | 1253 | /* Leave the TD around for the reset endpoint function to use |
| 1084 | if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) { | 1254 | * (but only if it's not a control endpoint, since we already |
| 1255 | * queued the Set TR dequeue pointer command for stalled | ||
| 1256 | * control endpoints). | ||
| 1257 | */ | ||
| 1258 | if (usb_endpoint_xfer_control(&urb->ep->desc) || | ||
| 1259 | (trb_comp_code != COMP_STALL && | ||
| 1260 | trb_comp_code != COMP_BABBLE)) { | ||
| 1085 | kfree(td); | 1261 | kfree(td); |
| 1086 | } | 1262 | } |
| 1087 | urb->hcpriv = NULL; | 1263 | urb->hcpriv = NULL; |
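The "very big number" guarded against above is plain unsigned wrap-around: actual_length and transfer_buffer_length are unsigned, so subtracting a TRB_LEN() larger than the request underflows. A minimal helper-style sketch of the same clamp, for illustration only:

#include "xhci.h"

/* Sketch: treat an impossible computed length as "nothing transferred",
 * exactly as the td_cleanup path above does. */
static u32 sketch_clamp_actual_length(const struct urb *urb, u32 computed)
{
	if (computed > urb->transfer_buffer_length)
		return 0;	/* unsigned underflow produced a bogus value */
	return computed;
}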
| @@ -1094,7 +1270,7 @@ cleanup: | |||
| 1094 | if (urb) { | 1270 | if (urb) { |
| 1095 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); | 1271 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); |
| 1096 | xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n", | 1272 | xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n", |
| 1097 | urb, td->urb->actual_length, status); | 1273 | urb, urb->actual_length, status); |
| 1098 | spin_unlock(&xhci->lock); | 1274 | spin_unlock(&xhci->lock); |
| 1099 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); | 1275 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); |
| 1100 | spin_lock(&xhci->lock); | 1276 | spin_lock(&xhci->lock); |
| @@ -1235,7 +1411,7 @@ static int prepare_transfer(struct xhci_hcd *xhci, | |||
| 1235 | { | 1411 | { |
| 1236 | int ret; | 1412 | int ret; |
| 1237 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | 1413 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
| 1238 | ret = prepare_ring(xhci, xdev->ep_rings[ep_index], | 1414 | ret = prepare_ring(xhci, xdev->eps[ep_index].ring, |
| 1239 | ep_ctx->ep_info & EP_STATE_MASK, | 1415 | ep_ctx->ep_info & EP_STATE_MASK, |
| 1240 | num_trbs, mem_flags); | 1416 | num_trbs, mem_flags); |
| 1241 | if (ret) | 1417 | if (ret) |
| @@ -1255,9 +1431,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, | |||
| 1255 | (*td)->urb = urb; | 1431 | (*td)->urb = urb; |
| 1256 | urb->hcpriv = (void *) (*td); | 1432 | urb->hcpriv = (void *) (*td); |
| 1257 | /* Add this TD to the tail of the endpoint ring's TD list */ | 1433 | /* Add this TD to the tail of the endpoint ring's TD list */ |
| 1258 | list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list); | 1434 | list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list); |
| 1259 | (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg; | 1435 | (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg; |
| 1260 | (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue; | 1436 | (*td)->first_trb = xdev->eps[ep_index].ring->enqueue; |
| 1261 | 1437 | ||
| 1262 | return 0; | 1438 | return 0; |
| 1263 | } | 1439 | } |
| @@ -1335,6 +1511,47 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, | |||
| 1335 | ring_ep_doorbell(xhci, slot_id, ep_index); | 1511 | ring_ep_doorbell(xhci, slot_id, ep_index); |
| 1336 | } | 1512 | } |
| 1337 | 1513 | ||
| 1514 | /* | ||
| 1515 | * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt | ||
| 1516 | * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD | ||
| 1517 | * (comprised of sg list entries) can take several service intervals to | ||
| 1518 | * transmit. | ||
| 1519 | */ | ||
| 1520 | int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | ||
| 1521 | struct urb *urb, int slot_id, unsigned int ep_index) | ||
| 1522 | { | ||
| 1523 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, | ||
| 1524 | xhci->devs[slot_id]->out_ctx, ep_index); | ||
| 1525 | int xhci_interval; | ||
| 1526 | int ep_interval; | ||
| 1527 | |||
| 1528 | xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); | ||
| 1529 | ep_interval = urb->interval; | ||
| 1530 | /* Convert to microframes */ | ||
| 1531 | if (urb->dev->speed == USB_SPEED_LOW || | ||
| 1532 | urb->dev->speed == USB_SPEED_FULL) | ||
| 1533 | ep_interval *= 8; | ||
| 1534 | /* FIXME change this to a warning and a suggestion to use the new API | ||
| 1535 | * to set the polling interval (once the API is added). | ||
| 1536 | */ | ||
| 1537 | if (xhci_interval != ep_interval) { | ||
| 1538 | if (printk_ratelimit()) | ||
| 1539 | dev_dbg(&urb->dev->dev, "Driver uses different interval" | ||
| 1540 | " (%d microframe%s) than xHCI " | ||
| 1541 | "(%d microframe%s)\n", | ||
| 1542 | ep_interval, | ||
| 1543 | ep_interval == 1 ? "" : "s", | ||
| 1544 | xhci_interval, | ||
| 1545 | xhci_interval == 1 ? "" : "s"); | ||
| 1546 | urb->interval = xhci_interval; | ||
| 1547 | /* Convert back to frames for LS/FS devices */ | ||
| 1548 | if (urb->dev->speed == USB_SPEED_LOW || | ||
| 1549 | urb->dev->speed == USB_SPEED_FULL) | ||
| 1550 | urb->interval /= 8; | ||
| 1551 | } | ||
| 1552 | return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); | ||
| 1553 | } | ||
| 1554 | |||
| 1338 | static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | 1555 | static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
| 1339 | struct urb *urb, int slot_id, unsigned int ep_index) | 1556 | struct urb *urb, int slot_id, unsigned int ep_index) |
| 1340 | { | 1557 | { |
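xhci_interval above is decoded from the endpoint context via EP_INTERVAL_TO_UFRAMES() (added in the xhci.h hunk below), while low- and full-speed URBs express urb->interval in 1 ms frames. A compact sketch of just that normalisation:

#include "xhci.h"

/* Sketch: LS/FS polling intervals are given in frames (1 ms); the xHC
 * works in 125 us microframes, so scale by 8 before comparing. */
static int sketch_urb_interval_in_uframes(const struct urb *urb)
{
	int interval = urb->interval;

	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		interval *= 8;
	return interval;
}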
| @@ -1350,7 +1567,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 1350 | struct xhci_generic_trb *start_trb; | 1567 | struct xhci_generic_trb *start_trb; |
| 1351 | int start_cycle; | 1568 | int start_cycle; |
| 1352 | 1569 | ||
| 1353 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | 1570 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; |
| 1354 | num_trbs = count_sg_trbs_needed(xhci, urb); | 1571 | num_trbs = count_sg_trbs_needed(xhci, urb); |
| 1355 | num_sgs = urb->num_sgs; | 1572 | num_sgs = urb->num_sgs; |
| 1356 | 1573 | ||
| @@ -1483,7 +1700,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 1483 | if (urb->sg) | 1700 | if (urb->sg) |
| 1484 | return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); | 1701 | return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); |
| 1485 | 1702 | ||
| 1486 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | 1703 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; |
| 1487 | 1704 | ||
| 1488 | num_trbs = 0; | 1705 | num_trbs = 0; |
| 1489 | /* How much data is (potentially) left before the 64KB boundary? */ | 1706 | /* How much data is (potentially) left before the 64KB boundary? */ |
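The 64KB remark above refers to the rule that a single TRB's buffer must not cross a 64KB boundary (TRB_MAX_BUFF_SHIFT in the xhci.h hunk below). A sketch of the boundary arithmetic, under the assumption that TRB_MAX_BUFF_SIZE is defined as (1 << TRB_MAX_BUFF_SHIFT):

#include "xhci.h"

/* Sketch: bytes one TRB can still cover before its buffer pointer would
 * cross the next 64KB boundary. TRB_MAX_BUFF_SIZE is assumed to be
 * (1 << TRB_MAX_BUFF_SHIFT), i.e. 64KB. */
static u32 sketch_room_before_64kb_boundary(u64 dma_addr)
{
	u32 offset = dma_addr & (TRB_MAX_BUFF_SIZE - 1);

	return TRB_MAX_BUFF_SIZE - offset;
}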
| @@ -1594,7 +1811,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 1594 | u32 field, length_field; | 1811 | u32 field, length_field; |
| 1595 | struct xhci_td *td; | 1812 | struct xhci_td *td; |
| 1596 | 1813 | ||
| 1597 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | 1814 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; |
| 1598 | 1815 | ||
| 1599 | /* | 1816 | /* |
| 1600 | * Need to copy setup packet into setup TRB, so we can't use the setup | 1817 | * Need to copy setup packet into setup TRB, so we can't use the setup |
| @@ -1677,12 +1894,27 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 1677 | 1894 | ||
| 1678 | /**** Command Ring Operations ****/ | 1895 | /**** Command Ring Operations ****/ |
| 1679 | 1896 | ||
| 1680 | /* Generic function for queueing a command TRB on the command ring */ | 1897 | /* Generic function for queueing a command TRB on the command ring. |
| 1681 | static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4) | 1898 | * Check to make sure there's room on the command ring for one command TRB. |
| 1899 | * Also check that there's room reserved for commands that must not fail. | ||
| 1900 | * If this is a command that must not fail, meaning command_must_succeed = TRUE, | ||
| 1901 | * then only check for the number of reserved spots. | ||
| 1902 | * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB | ||
| 1903 | * because the command event handler may want to resubmit a failed command. | ||
| 1904 | */ | ||
| 1905 | static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, | ||
| 1906 | u32 field3, u32 field4, bool command_must_succeed) | ||
| 1682 | { | 1907 | { |
| 1683 | if (!room_on_ring(xhci, xhci->cmd_ring, 1)) { | 1908 | int reserved_trbs = xhci->cmd_ring_reserved_trbs; |
| 1909 | if (!command_must_succeed) | ||
| 1910 | reserved_trbs++; | ||
| 1911 | |||
| 1912 | if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) { | ||
| 1684 | if (!in_interrupt()) | 1913 | if (!in_interrupt()) |
| 1685 | xhci_err(xhci, "ERR: No room for command on command ring\n"); | 1914 | xhci_err(xhci, "ERR: No room for command on command ring\n"); |
| 1915 | if (command_must_succeed) | ||
| 1916 | xhci_err(xhci, "ERR: Reserved TRB counting for " | ||
| 1917 | "unfailable commands failed.\n"); | ||
| 1686 | return -ENOMEM; | 1918 | return -ENOMEM; |
| 1687 | } | 1919 | } |
| 1688 | queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, | 1920 | queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, |
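The reservation scheme described above has a producer side this hunk only implies: code that pre-allocates a must-not-fail command bumps xhci->cmd_ring_reserved_trbs (bounded by MAX_RSVD_CMD_TRBS from the xhci.h hunk below) and gives the slot back once the command has completed. A hedged sketch of that bookkeeping, illustrative rather than the patch's code:

#include "xhci.h"

/* Sketch: reserve one command-ring slot for a command that must not fail,
 * and release it again when the command is done. */
static int sketch_reserve_cmd_trb(struct xhci_hcd *xhci)
{
	if (xhci->cmd_ring_reserved_trbs >= MAX_RSVD_CMD_TRBS)
		return -ENOMEM;
	xhci->cmd_ring_reserved_trbs++;
	return 0;
}

static void sketch_release_cmd_trb(struct xhci_hcd *xhci)
{
	xhci->cmd_ring_reserved_trbs--;
}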
| @@ -1693,7 +1925,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 fiel | |||
| 1693 | /* Queue a no-op command on the command ring */ | 1925 | /* Queue a no-op command on the command ring */ |
| 1694 | static int queue_cmd_noop(struct xhci_hcd *xhci) | 1926 | static int queue_cmd_noop(struct xhci_hcd *xhci) |
| 1695 | { | 1927 | { |
| 1696 | return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP)); | 1928 | return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false); |
| 1697 | } | 1929 | } |
| 1698 | 1930 | ||
| 1699 | /* | 1931 | /* |
| @@ -1712,7 +1944,7 @@ void *xhci_setup_one_noop(struct xhci_hcd *xhci) | |||
| 1712 | int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) | 1944 | int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) |
| 1713 | { | 1945 | { |
| 1714 | return queue_command(xhci, 0, 0, 0, | 1946 | return queue_command(xhci, 0, 0, 0, |
| 1715 | TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id)); | 1947 | TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false); |
| 1716 | } | 1948 | } |
| 1717 | 1949 | ||
| 1718 | /* Queue an address device command TRB */ | 1950 | /* Queue an address device command TRB */ |
| @@ -1721,16 +1953,28 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | |||
| 1721 | { | 1953 | { |
| 1722 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), | 1954 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), |
| 1723 | upper_32_bits(in_ctx_ptr), 0, | 1955 | upper_32_bits(in_ctx_ptr), 0, |
| 1724 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); | 1956 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id), |
| 1957 | false); | ||
| 1725 | } | 1958 | } |
| 1726 | 1959 | ||
| 1727 | /* Queue a configure endpoint command TRB */ | 1960 | /* Queue a configure endpoint command TRB */ |
| 1728 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1961 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
| 1962 | u32 slot_id, bool command_must_succeed) | ||
| 1963 | { | ||
| 1964 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), | ||
| 1965 | upper_32_bits(in_ctx_ptr), 0, | ||
| 1966 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), | ||
| 1967 | command_must_succeed); | ||
| 1968 | } | ||
| 1969 | |||
| 1970 | /* Queue an evaluate context command TRB */ | ||
| 1971 | int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | ||
| 1729 | u32 slot_id) | 1972 | u32 slot_id) |
| 1730 | { | 1973 | { |
| 1731 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), | 1974 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), |
| 1732 | upper_32_bits(in_ctx_ptr), 0, | 1975 | upper_32_bits(in_ctx_ptr), 0, |
| 1733 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); | 1976 | TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), |
| 1977 | false); | ||
| 1734 | } | 1978 | } |
| 1735 | 1979 | ||
| 1736 | int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, | 1980 | int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, |
| @@ -1741,7 +1985,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, | |||
| 1741 | u32 type = TRB_TYPE(TRB_STOP_RING); | 1985 | u32 type = TRB_TYPE(TRB_STOP_RING); |
| 1742 | 1986 | ||
| 1743 | return queue_command(xhci, 0, 0, 0, | 1987 | return queue_command(xhci, 0, 0, 0, |
| 1744 | trb_slot_id | trb_ep_index | type); | 1988 | trb_slot_id | trb_ep_index | type, false); |
| 1745 | } | 1989 | } |
| 1746 | 1990 | ||
| 1747 | /* Set Transfer Ring Dequeue Pointer command. | 1991 | /* Set Transfer Ring Dequeue Pointer command. |
| @@ -1765,7 +2009,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
| 1765 | } | 2009 | } |
| 1766 | return queue_command(xhci, lower_32_bits(addr) | cycle_state, | 2010 | return queue_command(xhci, lower_32_bits(addr) | cycle_state, |
| 1767 | upper_32_bits(addr), 0, | 2011 | upper_32_bits(addr), 0, |
| 1768 | trb_slot_id | trb_ep_index | type); | 2012 | trb_slot_id | trb_ep_index | type, false); |
| 1769 | } | 2013 | } |
| 1770 | 2014 | ||
| 1771 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | 2015 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, |
| @@ -1775,5 +2019,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | |||
| 1775 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); | 2019 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); |
| 1776 | u32 type = TRB_TYPE(TRB_RESET_EP); | 2020 | u32 type = TRB_TYPE(TRB_RESET_EP); |
| 1777 | 2021 | ||
| 1778 | return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type); | 2022 | return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type, |
| 2023 | false); | ||
| 1779 | } | 2024 | } |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index d31d32206ba3..4b254b6fa245 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
| @@ -509,6 +509,8 @@ struct xhci_slot_ctx { | |||
| 509 | #define MAX_EXIT (0xffff) | 509 | #define MAX_EXIT (0xffff) |
| 510 | /* Root hub port number that is needed to access the USB device */ | 510 | /* Root hub port number that is needed to access the USB device */ |
| 511 | #define ROOT_HUB_PORT(p) (((p) & 0xff) << 16) | 511 | #define ROOT_HUB_PORT(p) (((p) & 0xff) << 16) |
| 512 | /* Maximum number of ports under a hub device */ | ||
| 513 | #define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24) | ||
| 512 | 514 | ||
| 513 | /* tt_info bitmasks */ | 515 | /* tt_info bitmasks */ |
| 514 | /* | 516 | /* |
| @@ -522,6 +524,7 @@ struct xhci_slot_ctx { | |||
| 522 | * '0' if the device is not low or full speed. | 524 | * '0' if the device is not low or full speed. |
| 523 | */ | 525 | */ |
| 524 | #define TT_PORT (0xff << 8) | 526 | #define TT_PORT (0xff << 8) |
| 527 | #define TT_THINK_TIME(p) (((p) & 0x3) << 16) | ||
| 525 | 528 | ||
| 526 | /* dev_state bitmasks */ | 529 | /* dev_state bitmasks */ |
| 527 | /* USB device address - assigned by the HC */ | 530 | /* USB device address - assigned by the HC */ |
| @@ -581,6 +584,7 @@ struct xhci_ep_ctx { | |||
| 581 | /* bit 15 is Linear Stream Array */ | 584 | /* bit 15 is Linear Stream Array */ |
| 582 | /* Interval - period between requests to an endpoint - 125u increments. */ | 585 | /* Interval - period between requests to an endpoint - 125u increments. */ |
| 583 | #define EP_INTERVAL(p) ((p & 0xff) << 16) | 586 | #define EP_INTERVAL(p) ((p & 0xff) << 16) |
| 587 | #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) | ||
| 584 | 588 | ||
| 585 | /* ep_info2 bitmasks */ | 589 | /* ep_info2 bitmasks */ |
| 586 | /* | 590 | /* |
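EP_INTERVAL_TO_UFRAMES(), added just above, undoes the power-of-two encoding the endpoint context uses for polling intervals (units of 125 us). A short worked example with an arbitrary exponent:

#include "xhci.h"

/* Example: an interval exponent of 3 stored with EP_INTERVAL() decodes to
 * 1 << 3 = 8 microframes, i.e. a 1 ms polling period. */
static unsigned int example_interval_uframes(void)
{
	u32 ep_info = EP_INTERVAL(3);

	return EP_INTERVAL_TO_UFRAMES(ep_info);	/* == 8 */
}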
| @@ -589,6 +593,7 @@ struct xhci_ep_ctx { | |||
| 589 | */ | 593 | */ |
| 590 | #define FORCE_EVENT (0x1) | 594 | #define FORCE_EVENT (0x1) |
| 591 | #define ERROR_COUNT(p) (((p) & 0x3) << 1) | 595 | #define ERROR_COUNT(p) (((p) & 0x3) << 1) |
| 596 | #define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7) | ||
| 592 | #define EP_TYPE(p) ((p) << 3) | 597 | #define EP_TYPE(p) ((p) << 3) |
| 593 | #define ISOC_OUT_EP 1 | 598 | #define ISOC_OUT_EP 1 |
| 594 | #define BULK_OUT_EP 2 | 599 | #define BULK_OUT_EP 2 |
| @@ -601,6 +606,8 @@ struct xhci_ep_ctx { | |||
| 601 | /* bit 7 is Host Initiate Disable - for disabling stream selection */ | 606 | /* bit 7 is Host Initiate Disable - for disabling stream selection */ |
| 602 | #define MAX_BURST(p) (((p)&0xff) << 8) | 607 | #define MAX_BURST(p) (((p)&0xff) << 8) |
| 603 | #define MAX_PACKET(p) (((p)&0xffff) << 16) | 608 | #define MAX_PACKET(p) (((p)&0xffff) << 16) |
| 609 | #define MAX_PACKET_MASK (0xffff << 16) | ||
| 610 | #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) | ||
| 604 | 611 | ||
| 605 | 612 | ||
| 606 | /** | 613 | /** |
| @@ -616,11 +623,44 @@ struct xhci_input_control_ctx { | |||
| 616 | u32 rsvd2[6]; | 623 | u32 rsvd2[6]; |
| 617 | }; | 624 | }; |
| 618 | 625 | ||
| 626 | /* Represents everything that is needed to issue a command on the command ring. | ||
| 627 | * It's useful to pre-allocate these for commands that cannot fail due to | ||
| 628 | * out-of-memory errors, like freeing streams. | ||
| 629 | */ | ||
| 630 | struct xhci_command { | ||
| 631 | /* Input context for changing device state */ | ||
| 632 | struct xhci_container_ctx *in_ctx; | ||
| 633 | u32 status; | ||
| 634 | /* If completion is null, no one is waiting on this command | ||
| 635 | * and the structure can be freed after the command completes. | ||
| 636 | */ | ||
| 637 | struct completion *completion; | ||
| 638 | union xhci_trb *command_trb; | ||
| 639 | struct list_head cmd_list; | ||
| 640 | }; | ||
| 641 | |||
| 619 | /* drop context bitmasks */ | 642 | /* drop context bitmasks */ |
| 620 | #define DROP_EP(x) (0x1 << x) | 643 | #define DROP_EP(x) (0x1 << x) |
| 621 | /* add context bitmasks */ | 644 | /* add context bitmasks */ |
| 622 | #define ADD_EP(x) (0x1 << x) | 645 | #define ADD_EP(x) (0x1 << x) |
| 623 | 646 | ||
| 647 | struct xhci_virt_ep { | ||
| 648 | struct xhci_ring *ring; | ||
| 649 | /* Temporary storage in case the configure endpoint command fails and we | ||
| 650 | * have to restore the device state to the previous state | ||
| 651 | */ | ||
| 652 | struct xhci_ring *new_ring; | ||
| 653 | unsigned int ep_state; | ||
| 654 | #define SET_DEQ_PENDING (1 << 0) | ||
| 655 | #define EP_HALTED (1 << 1) | ||
| 656 | /* ---- Related to URB cancellation ---- */ | ||
| 657 | struct list_head cancelled_td_list; | ||
| 658 | unsigned int cancels_pending; | ||
| 659 | /* The TRB that was last reported in a stopped endpoint ring */ | ||
| 660 | union xhci_trb *stopped_trb; | ||
| 661 | struct xhci_td *stopped_td; | ||
| 662 | }; | ||
| 663 | |||
| 624 | struct xhci_virt_device { | 664 | struct xhci_virt_device { |
| 625 | /* | 665 | /* |
| 626 | * Commands to the hardware are passed an "input context" that | 666 | * Commands to the hardware are passed an "input context" that |
| @@ -633,16 +673,11 @@ struct xhci_virt_device { | |||
| 633 | struct xhci_container_ctx *out_ctx; | 673 | struct xhci_container_ctx *out_ctx; |
| 634 | /* Used for addressing devices and configuration changes */ | 674 | /* Used for addressing devices and configuration changes */ |
| 635 | struct xhci_container_ctx *in_ctx; | 675 | struct xhci_container_ctx *in_ctx; |
| 636 | 676 | struct xhci_virt_ep eps[31]; | |
| 637 | /* FIXME when stream support is added */ | ||
| 638 | struct xhci_ring *ep_rings[31]; | ||
| 639 | /* Temporary storage in case the configure endpoint command fails and we | ||
| 640 | * have to restore the device state to the previous state | ||
| 641 | */ | ||
| 642 | struct xhci_ring *new_ep_rings[31]; | ||
| 643 | struct completion cmd_completion; | 677 | struct completion cmd_completion; |
| 644 | /* Status of the last command issued for this device */ | 678 | /* Status of the last command issued for this device */ |
| 645 | u32 cmd_status; | 679 | u32 cmd_status; |
| 680 | struct list_head cmd_list; | ||
| 646 | }; | 681 | }; |
| 647 | 682 | ||
| 648 | 683 | ||
| @@ -905,6 +940,8 @@ union xhci_trb { | |||
| 905 | * It must also be greater than 16. | 940 | * It must also be greater than 16. |
| 906 | */ | 941 | */ |
| 907 | #define TRBS_PER_SEGMENT 64 | 942 | #define TRBS_PER_SEGMENT 64 |
| 943 | /* Allow two commands + a link TRB, along with any reserved command TRBs */ | ||
| 944 | #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) | ||
| 908 | #define SEGMENT_SIZE (TRBS_PER_SEGMENT*16) | 945 | #define SEGMENT_SIZE (TRBS_PER_SEGMENT*16) |
| 909 | /* TRB buffer pointers can't cross 64KB boundaries */ | 946 | /* TRB buffer pointers can't cross 64KB boundaries */ |
| 910 | #define TRB_MAX_BUFF_SHIFT 16 | 947 | #define TRB_MAX_BUFF_SHIFT 16 |
| @@ -926,6 +963,12 @@ struct xhci_td { | |||
| 926 | union xhci_trb *last_trb; | 963 | union xhci_trb *last_trb; |
| 927 | }; | 964 | }; |
| 928 | 965 | ||
| 966 | struct xhci_dequeue_state { | ||
| 967 | struct xhci_segment *new_deq_seg; | ||
| 968 | union xhci_trb *new_deq_ptr; | ||
| 969 | int new_cycle_state; | ||
| 970 | }; | ||
| 971 | |||
| 929 | struct xhci_ring { | 972 | struct xhci_ring { |
| 930 | struct xhci_segment *first_seg; | 973 | struct xhci_segment *first_seg; |
| 931 | union xhci_trb *enqueue; | 974 | union xhci_trb *enqueue; |
| @@ -935,15 +978,6 @@ struct xhci_ring { | |||
| 935 | struct xhci_segment *deq_seg; | 978 | struct xhci_segment *deq_seg; |
| 936 | unsigned int deq_updates; | 979 | unsigned int deq_updates; |
| 937 | struct list_head td_list; | 980 | struct list_head td_list; |
| 938 | /* ---- Related to URB cancellation ---- */ | ||
| 939 | struct list_head cancelled_td_list; | ||
| 940 | unsigned int cancels_pending; | ||
| 941 | unsigned int state; | ||
| 942 | #define SET_DEQ_PENDING (1 << 0) | ||
| 943 | #define EP_HALTED (1 << 1) | ||
| 944 | /* The TRB that was last reported in a stopped endpoint ring */ | ||
| 945 | union xhci_trb *stopped_trb; | ||
| 946 | struct xhci_td *stopped_td; | ||
| 947 | /* | 981 | /* |
| 948 | * Write the cycle state into the TRB cycle field to give ownership of | 982 | * Write the cycle state into the TRB cycle field to give ownership of |
| 949 | * the TRB to the host controller (if we are the producer), or to check | 983 | * the TRB to the host controller (if we are the producer), or to check |
| @@ -952,12 +986,6 @@ struct xhci_ring { | |||
| 952 | u32 cycle_state; | 986 | u32 cycle_state; |
| 953 | }; | 987 | }; |
| 954 | 988 | ||
| 955 | struct xhci_dequeue_state { | ||
| 956 | struct xhci_segment *new_deq_seg; | ||
| 957 | union xhci_trb *new_deq_ptr; | ||
| 958 | int new_cycle_state; | ||
| 959 | }; | ||
| 960 | |||
| 961 | struct xhci_erst_entry { | 989 | struct xhci_erst_entry { |
| 962 | /* 64-bit event ring segment address */ | 990 | /* 64-bit event ring segment address */ |
| 963 | u64 seg_addr; | 991 | u64 seg_addr; |
| @@ -1034,6 +1062,7 @@ struct xhci_hcd { | |||
| 1034 | /* data structures */ | 1062 | /* data structures */ |
| 1035 | struct xhci_device_context_array *dcbaa; | 1063 | struct xhci_device_context_array *dcbaa; |
| 1036 | struct xhci_ring *cmd_ring; | 1064 | struct xhci_ring *cmd_ring; |
| 1065 | unsigned int cmd_ring_reserved_trbs; | ||
| 1037 | struct xhci_ring *event_ring; | 1066 | struct xhci_ring *event_ring; |
| 1038 | struct xhci_erst erst; | 1067 | struct xhci_erst erst; |
| 1039 | /* Scratchpad */ | 1068 | /* Scratchpad */ |
| @@ -1058,6 +1087,9 @@ struct xhci_hcd { | |||
| 1058 | int noops_submitted; | 1087 | int noops_submitted; |
| 1059 | int noops_handled; | 1088 | int noops_handled; |
| 1060 | int error_bitmask; | 1089 | int error_bitmask; |
| 1090 | unsigned int quirks; | ||
| 1091 | #define XHCI_LINK_TRB_QUIRK (1 << 0) | ||
| 1092 | #define XHCI_RESET_EP_QUIRK (1 << 1) | ||
| 1061 | }; | 1093 | }; |
| 1062 | 1094 | ||
| 1063 | /* For testing purposes */ | 1095 | /* For testing purposes */ |
| @@ -1136,6 +1168,13 @@ static inline void xhci_write_64(struct xhci_hcd *xhci, | |||
| 1136 | writel(val_hi, ptr + 1); | 1168 | writel(val_hi, ptr + 1); |
| 1137 | } | 1169 | } |
| 1138 | 1170 | ||
| 1171 | static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) | ||
| 1172 | { | ||
| 1173 | u32 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); | ||
| 1174 | return ((HC_VERSION(temp) == 0x95) && | ||
| 1175 | (xhci->quirks & XHCI_LINK_TRB_QUIRK)); | ||
| 1176 | } | ||
| 1177 | |||
| 1139 | /* xHCI debugging */ | 1178 | /* xHCI debugging */ |
| 1140 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); | 1179 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); |
| 1141 | void xhci_print_registers(struct xhci_hcd *xhci); | 1180 | void xhci_print_registers(struct xhci_hcd *xhci); |
| @@ -1150,7 +1189,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); | |||
| 1150 | void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); | 1189 | void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); |
| 1151 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); | 1190 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); |
| 1152 | 1191 | ||
| 1153 | /* xHCI memory managment */ | 1192 | /* xHCI memory management */ |
| 1154 | void xhci_mem_cleanup(struct xhci_hcd *xhci); | 1193 | void xhci_mem_cleanup(struct xhci_hcd *xhci); |
| 1155 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); | 1194 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); |
| 1156 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); | 1195 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); |
| @@ -1158,11 +1197,24 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device | |||
| 1158 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); | 1197 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); |
| 1159 | unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); | 1198 | unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); |
| 1160 | unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc); | 1199 | unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc); |
| 1200 | unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index); | ||
| 1201 | unsigned int xhci_last_valid_endpoint(u32 added_ctxs); | ||
| 1161 | void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep); | 1202 | void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep); |
| 1203 | void xhci_endpoint_copy(struct xhci_hcd *xhci, | ||
| 1204 | struct xhci_container_ctx *in_ctx, | ||
| 1205 | struct xhci_container_ctx *out_ctx, | ||
| 1206 | unsigned int ep_index); | ||
| 1207 | void xhci_slot_copy(struct xhci_hcd *xhci, | ||
| 1208 | struct xhci_container_ctx *in_ctx, | ||
| 1209 | struct xhci_container_ctx *out_ctx); | ||
| 1162 | int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, | 1210 | int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, |
| 1163 | struct usb_device *udev, struct usb_host_endpoint *ep, | 1211 | struct usb_device *udev, struct usb_host_endpoint *ep, |
| 1164 | gfp_t mem_flags); | 1212 | gfp_t mem_flags); |
| 1165 | void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); | 1213 | void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); |
| 1214 | struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, | ||
| 1215 | bool allocate_completion, gfp_t mem_flags); | ||
| 1216 | void xhci_free_command(struct xhci_hcd *xhci, | ||
| 1217 | struct xhci_command *command); | ||
| 1166 | 1218 | ||
| 1167 | #ifdef CONFIG_PCI | 1219 | #ifdef CONFIG_PCI |
| 1168 | /* xHCI PCI glue */ | 1220 | /* xHCI PCI glue */ |
| @@ -1182,6 +1234,8 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd); | |||
| 1182 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev); | 1234 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev); |
| 1183 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); | 1235 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); |
| 1184 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev); | 1236 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev); |
| 1237 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, | ||
| 1238 | struct usb_tt *tt, gfp_t mem_flags); | ||
| 1185 | int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); | 1239 | int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); |
| 1186 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); | 1240 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); |
| 1187 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); | 1241 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); |
| @@ -1205,7 +1259,11 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, | |||
| 1205 | int slot_id, unsigned int ep_index); | 1259 | int slot_id, unsigned int ep_index); |
| 1206 | int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, | 1260 | int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, |
| 1207 | int slot_id, unsigned int ep_index); | 1261 | int slot_id, unsigned int ep_index); |
| 1262 | int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, | ||
| 1263 | int slot_id, unsigned int ep_index); | ||
| 1208 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1264 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
| 1265 | u32 slot_id, bool command_must_succeed); | ||
| 1266 | int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | ||
| 1209 | u32 slot_id); | 1267 | u32 slot_id); |
| 1210 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | 1268 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, |
| 1211 | unsigned int ep_index); | 1269 | unsigned int ep_index); |
| @@ -1213,8 +1271,13 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
| 1213 | unsigned int slot_id, unsigned int ep_index, | 1271 | unsigned int slot_id, unsigned int ep_index, |
| 1214 | struct xhci_td *cur_td, struct xhci_dequeue_state *state); | 1272 | struct xhci_td *cur_td, struct xhci_dequeue_state *state); |
| 1215 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | 1273 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, |
| 1216 | struct xhci_ring *ep_ring, unsigned int slot_id, | 1274 | unsigned int slot_id, unsigned int ep_index, |
| 1217 | unsigned int ep_index, struct xhci_dequeue_state *deq_state); | 1275 | struct xhci_dequeue_state *deq_state); |
| 1276 | void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, | ||
| 1277 | struct usb_device *udev, unsigned int ep_index); | ||
| 1278 | void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, | ||
| 1279 | unsigned int slot_id, unsigned int ep_index, | ||
| 1280 | struct xhci_dequeue_state *deq_state); | ||
| 1218 | 1281 | ||
| 1219 | /* xHCI roothub code */ | 1282 | /* xHCI roothub code */ |
| 1220 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, | 1283 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, |
