author     Linus Torvalds <torvalds@linux-foundation.org>   2011-05-23 15:33:02 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-23 15:33:02 -0400
commit     c44dead70a841d90ddc01968012f323c33217c9e
tree       85489ebe9b9a3413cd8ee197ffb40c8aa8d97e63   /drivers/usb/host
parent     99dff5856220a02b8711f2e8746413ea6e53ccf6
parent     d5f6db9e1aff6ccf1876224f152c0268b0c8a992
Merge branch 'usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* 'usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (205 commits)
USB: EHCI: Remove SPARC_LEON {read,write}_be definitions from ehci.h
USB: UHCI: Support big endian GRUSBHC HC
sparc: add {read,write}*_be routines
USB: UHCI: Add support for big endian descriptors
USB: UHCI: Use ACCESS_ONCE rather than using a full compiler barrier
USB: UHCI: Add support for big endian mmio
usb-storage: Correct adjust_quirks to include latest flags
usb/isp1760: Fix possible unlink problems
usb/isp1760: Move function isp1760_endpoint_disable() within file.
USB: remove remaining usages of hcd->state from usbcore and fix regression
usb: musb: ux500: add configuration and build options for ux500 dma
usb: musb: ux500: add dma glue layer for ux500
usb: musb: ux500: add dma name for ux500
usb: musb: ux500: add ux500 specific code for gadget side
usb: musb: fix compile error
usb-storage: fix up the unusual_realtek device list
USB: gadget: f_audio: Fix invalid dereference of initdata
EHCI: don't rescan interrupt QHs needlessly
OHCI: fix regression caused by nVidia shutdown workaround
USB: OTG: msm: Free VCCCX regulator even if we can't set the voltage
...
Diffstat (limited to 'drivers/usb/host')
57 files changed, 4001 insertions, 1982 deletions
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index e0e0787b724b..ab085f12d570 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -106,13 +106,13 @@ config USB_EHCI_BIG_ENDIAN_MMIO
106 | depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || \ | 106 | depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || \ |
107 | ARCH_IXP4XX || XPS_USB_HCD_XILINX || \ | 107 | ARCH_IXP4XX || XPS_USB_HCD_XILINX || \ |
108 | PPC_MPC512x || CPU_CAVIUM_OCTEON || \ | 108 | PPC_MPC512x || CPU_CAVIUM_OCTEON || \ |
109 | PMC_MSP) | 109 | PMC_MSP || SPARC_LEON) |
110 | default y | 110 | default y |
111 | 111 | ||
112 | config USB_EHCI_BIG_ENDIAN_DESC | 112 | config USB_EHCI_BIG_ENDIAN_DESC |
113 | bool | 113 | bool |
114 | depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX || \ | 114 | depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX || \ |
115 | PPC_MPC512x || PMC_MSP) | 115 | PPC_MPC512x || PMC_MSP || SPARC_LEON) |
116 | default y | 116 | default y |
117 | 117 | ||
118 | config XPS_USB_HCD_XILINX | 118 | config XPS_USB_HCD_XILINX |
@@ -188,6 +188,12 @@ config USB_EHCI_SH
188 | Enables support for the on-chip EHCI controller on the SuperH. | 188 | Enables support for the on-chip EHCI controller on the SuperH. |
189 | If you use the PCI EHCI controller, this option is not necessary. | 189 | If you use the PCI EHCI controller, this option is not necessary. |
190 | 190 | ||
191 | config USB_EHCI_S5P | ||
192 | boolean "S5P EHCI support" | ||
193 | depends on USB_EHCI_HCD && PLAT_S5P | ||
194 | help | ||
195 | Enable support for the S5P SOC's on-chip EHCI controller. | ||
196 | |||
191 | config USB_W90X900_EHCI | 197 | config USB_W90X900_EHCI |
192 | bool "W90X900(W90P910) EHCI support" | 198 | bool "W90X900(W90P910) EHCI support" |
193 | depends on USB_EHCI_HCD && ARCH_W90X900 | 199 | depends on USB_EHCI_HCD && ARCH_W90X900 |
@@ -202,6 +208,15 @@ config USB_CNS3XXX_EHCI
202 | It is needed for high-speed (480Mbit/sec) USB 2.0 device | 208 | It is needed for high-speed (480Mbit/sec) USB 2.0 device |
203 | support. | 209 | support. |
204 | 210 | ||
211 | config USB_EHCI_ATH79 | ||
212 | bool "EHCI support for AR7XXX/AR9XXX SoCs" | ||
213 | depends on USB_EHCI_HCD && (SOC_AR71XX || SOC_AR724X || SOC_AR913X) | ||
214 | select USB_EHCI_ROOT_HUB_TT | ||
215 | default y | ||
216 | ---help--- | ||
217 | Enables support for the built-in EHCI controller present | ||
218 | on the Atheros AR7XXX/AR9XXX SoCs. | ||
219 | |||
205 | config USB_OXU210HP_HCD | 220 | config USB_OXU210HP_HCD |
206 | tristate "OXU210HP HCD support" | 221 | tristate "OXU210HP HCD support" |
207 | depends on USB | 222 | depends on USB |
@@ -287,6 +302,14 @@ config USB_OHCI_HCD_OMAP3
287 | Enables support for the on-chip OHCI controller on | 302 | Enables support for the on-chip OHCI controller on |
288 | OMAP3 and later chips. | 303 | OMAP3 and later chips. |
289 | 304 | ||
305 | config USB_OHCI_ATH79 | ||
306 | bool "USB OHCI support for the Atheros AR71XX/AR7240 SoCs" | ||
307 | depends on USB_OHCI_HCD && (SOC_AR71XX || SOC_AR724X) | ||
308 | default y | ||
309 | help | ||
310 | Enables support for the built-in OHCI controller present on the | ||
311 | Atheros AR71XX/AR7240 SoCs. | ||
312 | |||
290 | config USB_OHCI_HCD_PPC_SOC | 313 | config USB_OHCI_HCD_PPC_SOC |
291 | bool "OHCI support for on-chip PPC USB controller" | 314 | bool "OHCI support for on-chip PPC USB controller" |
292 | depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx) | 315 | depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx) |
@@ -373,7 +396,7 @@ config USB_OHCI_LITTLE_ENDIAN
373 | 396 | ||
374 | config USB_UHCI_HCD | 397 | config USB_UHCI_HCD |
375 | tristate "UHCI HCD (most Intel and VIA) support" | 398 | tristate "UHCI HCD (most Intel and VIA) support" |
376 | depends on USB && PCI | 399 | depends on USB && (PCI || SPARC_LEON) |
377 | ---help--- | 400 | ---help--- |
378 | The Universal Host Controller Interface is a standard by Intel for | 401 | The Universal Host Controller Interface is a standard by Intel for |
379 | accessing the USB hardware in the PC (which is also called the USB | 402 | accessing the USB hardware in the PC (which is also called the USB |
@@ -382,11 +405,27 @@ config USB_UHCI_HCD
382 | with Intel PCI chipsets (like intel 430TX, 440FX, 440LX, 440BX, | 405 | with Intel PCI chipsets (like intel 430TX, 440FX, 440LX, 440BX, |
383 | i810, i820) conform to this standard. Also all VIA PCI chipsets | 406 | i810, i820) conform to this standard. Also all VIA PCI chipsets |
384 | (like VIA VP2, VP3, MVP3, Apollo Pro, Apollo Pro II or Apollo Pro | 407 | (like VIA VP2, VP3, MVP3, Apollo Pro, Apollo Pro II or Apollo Pro |
385 | 133). If unsure, say Y. | 408 | 133) and LEON/GRLIB SoCs with the GRUSBHC controller. |
409 | If unsure, say Y. | ||
386 | 410 | ||
387 | To compile this driver as a module, choose M here: the | 411 | To compile this driver as a module, choose M here: the |
388 | module will be called uhci-hcd. | 412 | module will be called uhci-hcd. |
389 | 413 | ||
414 | config USB_UHCI_SUPPORT_NON_PCI_HC | ||
415 | bool | ||
416 | depends on USB_UHCI_HCD | ||
417 | default y if SPARC_LEON | ||
418 | |||
419 | config USB_UHCI_BIG_ENDIAN_MMIO | ||
420 | bool | ||
421 | depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON | ||
422 | default y | ||
423 | |||
424 | config USB_UHCI_BIG_ENDIAN_DESC | ||
425 | bool | ||
426 | depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON | ||
427 | default y | ||
428 | |||
390 | config USB_FHCI_HCD | 429 | config USB_FHCI_HCD |
391 | tristate "Freescale QE USB Host Controller support" | 430 | tristate "Freescale QE USB Host Controller support" |
392 | depends on USB && OF_GPIO && QE_GPIO && QUICC_ENGINE | 431 | depends on USB && OF_GPIO && QE_GPIO && QUICC_ENGINE |
@@ -444,6 +483,16 @@ config USB_SL811_HCD
444 | To compile this driver as a module, choose M here: the | 483 | To compile this driver as a module, choose M here: the |
445 | module will be called sl811-hcd. | 484 | module will be called sl811-hcd. |
446 | 485 | ||
486 | config USB_SL811_HCD_ISO | ||
487 | bool "partial ISO support" | ||
488 | depends on USB_SL811_HCD | ||
489 | help | ||
490 | The driver doesn't support iso_frame_desc (yet), but for some simple | ||
491 | devices that just queue one ISO frame per URB, then ISO transfers | ||
492 | "should" work using the normal urb status fields. | ||
493 | |||
494 | If unsure, say N. | ||
495 | |||
447 | config USB_SL811_CS | 496 | config USB_SL811_CS |
448 | tristate "CF/PCMCIA support for SL811HS HCD" | 497 | tristate "CF/PCMCIA support for SL811HS HCD" |
449 | depends on USB_SL811_HCD && PCMCIA | 498 | depends on USB_SL811_HCD && PCMCIA |
diff --git a/drivers/usb/host/ehci-ath79.c b/drivers/usb/host/ehci-ath79.c
new file mode 100644
index 000000000000..98cc8a13169c
--- /dev/null
+++ b/drivers/usb/host/ehci-ath79.c
@@ -0,0 +1,202 @@
1 | /* | ||
2 | * Bus Glue for Atheros AR7XXX/AR9XXX built-in EHCI controller. | ||
3 | * | ||
4 | * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> | ||
5 | * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> | ||
6 | * | ||
7 | * Parts of this file are based on Atheros' 2.6.15 BSP | ||
8 | * Copyright (C) 2007 Atheros Communications, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License version 2 as published | ||
12 | * by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <linux/platform_device.h> | ||
16 | |||
17 | enum { | ||
18 | EHCI_ATH79_IP_V1 = 0, | ||
19 | EHCI_ATH79_IP_V2, | ||
20 | }; | ||
21 | |||
22 | static const struct platform_device_id ehci_ath79_id_table[] = { | ||
23 | { | ||
24 | .name = "ar71xx-ehci", | ||
25 | .driver_data = EHCI_ATH79_IP_V1, | ||
26 | }, | ||
27 | { | ||
28 | .name = "ar724x-ehci", | ||
29 | .driver_data = EHCI_ATH79_IP_V2, | ||
30 | }, | ||
31 | { | ||
32 | .name = "ar913x-ehci", | ||
33 | .driver_data = EHCI_ATH79_IP_V2, | ||
34 | }, | ||
35 | { | ||
36 | /* terminating entry */ | ||
37 | }, | ||
38 | }; | ||
39 | |||
40 | MODULE_DEVICE_TABLE(platform, ehci_ath79_id_table); | ||
41 | |||
42 | static int ehci_ath79_init(struct usb_hcd *hcd) | ||
43 | { | ||
44 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
45 | struct platform_device *pdev = to_platform_device(hcd->self.controller); | ||
46 | const struct platform_device_id *id; | ||
47 | int hclength; | ||
48 | int ret; | ||
49 | |||
50 | id = platform_get_device_id(pdev); | ||
51 | if (!id) { | ||
52 | dev_err(hcd->self.controller, "missing device id\n"); | ||
53 | return -EINVAL; | ||
54 | } | ||
55 | |||
56 | hclength = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); | ||
57 | switch (id->driver_data) { | ||
58 | case EHCI_ATH79_IP_V1: | ||
59 | ehci->has_synopsys_hc_bug = 1; | ||
60 | |||
61 | ehci->caps = hcd->regs; | ||
62 | ehci->regs = hcd->regs + hclength; | ||
63 | break; | ||
64 | |||
65 | case EHCI_ATH79_IP_V2: | ||
66 | hcd->has_tt = 1; | ||
67 | |||
68 | ehci->caps = hcd->regs + 0x100; | ||
69 | ehci->regs = hcd->regs + 0x100 + hclength; | ||
70 | break; | ||
71 | |||
72 | default: | ||
73 | BUG(); | ||
74 | } | ||
75 | |||
76 | dbg_hcs_params(ehci, "reset"); | ||
77 | dbg_hcc_params(ehci, "reset"); | ||
78 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | ||
79 | ehci->sbrn = 0x20; | ||
80 | |||
81 | ehci_reset(ehci); | ||
82 | |||
83 | ret = ehci_init(hcd); | ||
84 | if (ret) | ||
85 | return ret; | ||
86 | |||
87 | ehci_port_power(ehci, 0); | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static const struct hc_driver ehci_ath79_hc_driver = { | ||
93 | .description = hcd_name, | ||
94 | .product_desc = "Atheros built-in EHCI controller", | ||
95 | .hcd_priv_size = sizeof(struct ehci_hcd), | ||
96 | .irq = ehci_irq, | ||
97 | .flags = HCD_MEMORY | HCD_USB2, | ||
98 | |||
99 | .reset = ehci_ath79_init, | ||
100 | .start = ehci_run, | ||
101 | .stop = ehci_stop, | ||
102 | .shutdown = ehci_shutdown, | ||
103 | |||
104 | .urb_enqueue = ehci_urb_enqueue, | ||
105 | .urb_dequeue = ehci_urb_dequeue, | ||
106 | .endpoint_disable = ehci_endpoint_disable, | ||
107 | .endpoint_reset = ehci_endpoint_reset, | ||
108 | |||
109 | .get_frame_number = ehci_get_frame, | ||
110 | |||
111 | .hub_status_data = ehci_hub_status_data, | ||
112 | .hub_control = ehci_hub_control, | ||
113 | |||
114 | .relinquish_port = ehci_relinquish_port, | ||
115 | .port_handed_over = ehci_port_handed_over, | ||
116 | |||
117 | .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, | ||
118 | }; | ||
119 | |||
120 | static int ehci_ath79_probe(struct platform_device *pdev) | ||
121 | { | ||
122 | struct usb_hcd *hcd; | ||
123 | struct resource *res; | ||
124 | int irq; | ||
125 | int ret; | ||
126 | |||
127 | if (usb_disabled()) | ||
128 | return -ENODEV; | ||
129 | |||
130 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
131 | if (!res) { | ||
132 | dev_dbg(&pdev->dev, "no IRQ specified\n"); | ||
133 | return -ENODEV; | ||
134 | } | ||
135 | irq = res->start; | ||
136 | |||
137 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
138 | if (!res) { | ||
139 | dev_dbg(&pdev->dev, "no base address specified\n"); | ||
140 | return -ENODEV; | ||
141 | } | ||
142 | |||
143 | hcd = usb_create_hcd(&ehci_ath79_hc_driver, &pdev->dev, | ||
144 | dev_name(&pdev->dev)); | ||
145 | if (!hcd) | ||
146 | return -ENOMEM; | ||
147 | |||
148 | hcd->rsrc_start = res->start; | ||
149 | hcd->rsrc_len = res->end - res->start + 1; | ||
150 | |||
151 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { | ||
152 | dev_dbg(&pdev->dev, "controller already in use\n"); | ||
153 | ret = -EBUSY; | ||
154 | goto err_put_hcd; | ||
155 | } | ||
156 | |||
157 | hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); | ||
158 | if (!hcd->regs) { | ||
159 | dev_dbg(&pdev->dev, "error mapping memory\n"); | ||
160 | ret = -EFAULT; | ||
161 | goto err_release_region; | ||
162 | } | ||
163 | |||
164 | ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); | ||
165 | if (ret) | ||
166 | goto err_iounmap; | ||
167 | |||
168 | return 0; | ||
169 | |||
170 | err_iounmap: | ||
171 | iounmap(hcd->regs); | ||
172 | |||
173 | err_release_region: | ||
174 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
175 | err_put_hcd: | ||
176 | usb_put_hcd(hcd); | ||
177 | return ret; | ||
178 | } | ||
179 | |||
180 | static int ehci_ath79_remove(struct platform_device *pdev) | ||
181 | { | ||
182 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||
183 | |||
184 | usb_remove_hcd(hcd); | ||
185 | iounmap(hcd->regs); | ||
186 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
187 | usb_put_hcd(hcd); | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static struct platform_driver ehci_ath79_driver = { | ||
193 | .probe = ehci_ath79_probe, | ||
194 | .remove = ehci_ath79_remove, | ||
195 | .id_table = ehci_ath79_id_table, | ||
196 | .driver = { | ||
197 | .owner = THIS_MODULE, | ||
198 | .name = "ath79-ehci", | ||
199 | } | ||
200 | }; | ||
201 | |||
202 | MODULE_ALIAS(PLATFORM_MODULE_PREFIX "ath79-ehci"); | ||
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index b2ed55cb811d..a5a3ef1f0096 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -56,7 +56,7 @@ static int ehci_atmel_setup(struct usb_hcd *hcd)
56 | /* registers start at offset 0x0 */ | 56 | /* registers start at offset 0x0 */ |
57 | ehci->caps = hcd->regs; | 57 | ehci->caps = hcd->regs; |
58 | ehci->regs = hcd->regs + | 58 | ehci->regs = hcd->regs + |
59 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 59 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
60 | dbg_hcs_params(ehci, "reset"); | 60 | dbg_hcs_params(ehci, "reset"); |
61 | dbg_hcc_params(ehci, "reset"); | 61 | dbg_hcc_params(ehci, "reset"); |
62 | 62 | ||
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index a869e3c103d3..42ae57409908 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -175,7 +175,8 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
175 | 175 | ||
176 | ehci = hcd_to_ehci(hcd); | 176 | ehci = hcd_to_ehci(hcd); |
177 | ehci->caps = hcd->regs; | 177 | ehci->caps = hcd->regs; |
178 | ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase)); | 178 | ehci->regs = hcd->regs + |
179 | HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase)); | ||
179 | /* cache this readonly data; minimize chip reads */ | 180 | /* cache this readonly data; minimize chip reads */ |
180 | ehci->hcs_params = readl(&ehci->caps->hcs_params); | 181 | ehci->hcs_params = readl(&ehci->caps->hcs_params); |
181 | 182 | ||
@@ -215,10 +216,7 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
215 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 216 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
216 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 217 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
217 | unsigned long flags; | 218 | unsigned long flags; |
218 | int rc; | 219 | int rc = 0; |
219 | |||
220 | return 0; | ||
221 | rc = 0; | ||
222 | 220 | ||
223 | if (time_before(jiffies, ehci->next_statechange)) | 221 | if (time_before(jiffies, ehci->next_statechange)) |
224 | msleep(10); | 222 | msleep(10); |
@@ -233,13 +231,13 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
233 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); | 231 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); |
234 | 232 | ||
235 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 233 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
236 | |||
237 | au1xxx_stop_ehc(); | ||
238 | spin_unlock_irqrestore(&ehci->lock, flags); | 234 | spin_unlock_irqrestore(&ehci->lock, flags); |
239 | 235 | ||
240 | // could save FLADJ in case of Vaux power loss | 236 | // could save FLADJ in case of Vaux power loss |
241 | // ... we'd only use it to handle clock skew | 237 | // ... we'd only use it to handle clock skew |
242 | 238 | ||
239 | au1xxx_stop_ehc(); | ||
240 | |||
243 | return rc; | 241 | return rc; |
244 | } | 242 | } |
245 | 243 | ||
diff --git a/drivers/usb/host/ehci-cns3xxx.c b/drivers/usb/host/ehci-cns3xxx.c
index 708a05b5d258..d41745c6f0c4 100644
--- a/drivers/usb/host/ehci-cns3xxx.c
+++ b/drivers/usb/host/ehci-cns3xxx.c
@@ -34,7 +34,7 @@ static int cns3xxx_ehci_init(struct usb_hcd *hcd)
34 | 34 | ||
35 | ehci->caps = hcd->regs; | 35 | ehci->caps = hcd->regs; |
36 | ehci->regs = hcd->regs | 36 | ehci->regs = hcd->regs |
37 | + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 37 | + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
38 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | 38 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); |
39 | 39 | ||
40 | hcd->has_tt = 0; | 40 | hcd->has_tt = 0; |
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 693c29b30521..40a844c1dbb4 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -726,7 +726,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
726 | } | 726 | } |
727 | 727 | ||
728 | /* Capability Registers */ | 728 | /* Capability Registers */ |
729 | i = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 729 | i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
730 | temp = scnprintf (next, size, | 730 | temp = scnprintf (next, size, |
731 | "bus %s, device %s\n" | 731 | "bus %s, device %s\n" |
732 | "%s\n" | 732 | "%s\n" |
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 5c761df7fa83..f380bf97e5af 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -117,6 +117,9 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
117 | 117 | ||
118 | pdata->regs = hcd->regs; | 118 | pdata->regs = hcd->regs; |
119 | 119 | ||
120 | if (pdata->power_budget) | ||
121 | hcd->power_budget = pdata->power_budget; | ||
122 | |||
120 | /* | 123 | /* |
121 | * do platform specific init: check the clock, grab/config pins, etc. | 124 | * do platform specific init: check the clock, grab/config pins, etc. |
122 | */ | 125 | */ |
@@ -134,6 +137,30 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
134 | retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); | 137 | retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); |
135 | if (retval != 0) | 138 | if (retval != 0) |
136 | goto err4; | 139 | goto err4; |
140 | |||
141 | #ifdef CONFIG_USB_OTG | ||
142 | if (pdata->operating_mode == FSL_USB2_DR_OTG) { | ||
143 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
144 | |||
145 | ehci->transceiver = otg_get_transceiver(); | ||
146 | dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, transceiver=0x%p\n", | ||
147 | hcd, ehci, ehci->transceiver); | ||
148 | |||
149 | if (ehci->transceiver) { | ||
150 | retval = otg_set_host(ehci->transceiver, | ||
151 | &ehci_to_hcd(ehci)->self); | ||
152 | if (retval) { | ||
153 | if (ehci->transceiver) | ||
154 | put_device(ehci->transceiver->dev); | ||
155 | goto err4; | ||
156 | } | ||
157 | } else { | ||
158 | dev_err(&pdev->dev, "can't find transceiver\n"); | ||
159 | retval = -ENODEV; | ||
160 | goto err4; | ||
161 | } | ||
162 | } | ||
163 | #endif | ||
137 | return retval; | 164 | return retval; |
138 | 165 | ||
139 | err4: | 166 | err4: |
@@ -164,6 +191,12 @@ static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
164 | struct platform_device *pdev) | 191 | struct platform_device *pdev) |
165 | { | 192 | { |
166 | struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; | 193 | struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; |
194 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
195 | |||
196 | if (ehci->transceiver) { | ||
197 | otg_set_host(ehci->transceiver, NULL); | ||
198 | put_device(ehci->transceiver->dev); | ||
199 | } | ||
167 | 200 | ||
168 | usb_remove_hcd(hcd); | 201 | usb_remove_hcd(hcd); |
169 | 202 | ||
@@ -291,7 +324,7 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
291 | /* EHCI registers start at offset 0x100 */ | 324 | /* EHCI registers start at offset 0x100 */ |
292 | ehci->caps = hcd->regs + 0x100; | 325 | ehci->caps = hcd->regs + 0x100; |
293 | ehci->regs = hcd->regs + 0x100 + | 326 | ehci->regs = hcd->regs + 0x100 + |
294 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 327 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
295 | dbg_hcs_params(ehci, "reset"); | 328 | dbg_hcs_params(ehci, "reset"); |
296 | dbg_hcc_params(ehci, "reset"); | 329 | dbg_hcc_params(ehci, "reset"); |
297 | 330 | ||
@@ -328,6 +361,149 @@ struct ehci_fsl {
328 | 361 | ||
329 | #ifdef CONFIG_PM | 362 | #ifdef CONFIG_PM |
330 | 363 | ||
364 | #ifdef CONFIG_PPC_MPC512x | ||
365 | static int ehci_fsl_mpc512x_drv_suspend(struct device *dev) | ||
366 | { | ||
367 | struct usb_hcd *hcd = dev_get_drvdata(dev); | ||
368 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
369 | struct fsl_usb2_platform_data *pdata = dev->platform_data; | ||
370 | u32 tmp; | ||
371 | |||
372 | #ifdef DEBUG | ||
373 | u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE); | ||
374 | mode &= USBMODE_CM_MASK; | ||
375 | tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */ | ||
376 | |||
377 | dev_dbg(dev, "suspend=%d already_suspended=%d " | ||
378 | "mode=%d usbcmd %08x\n", pdata->suspended, | ||
379 | pdata->already_suspended, mode, tmp); | ||
380 | #endif | ||
381 | |||
382 | /* | ||
383 | * If the controller is already suspended, then this must be a | ||
384 | * PM suspend. Remember this fact, so that we will leave the | ||
385 | * controller suspended at PM resume time. | ||
386 | */ | ||
387 | if (pdata->suspended) { | ||
388 | dev_dbg(dev, "already suspended, leaving early\n"); | ||
389 | pdata->already_suspended = 1; | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | dev_dbg(dev, "suspending...\n"); | ||
394 | |||
395 | hcd->state = HC_STATE_SUSPENDED; | ||
396 | dev->power.power_state = PMSG_SUSPEND; | ||
397 | |||
398 | /* ignore non-host interrupts */ | ||
399 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | ||
400 | |||
401 | /* stop the controller */ | ||
402 | tmp = ehci_readl(ehci, &ehci->regs->command); | ||
403 | tmp &= ~CMD_RUN; | ||
404 | ehci_writel(ehci, tmp, &ehci->regs->command); | ||
405 | |||
406 | /* save EHCI registers */ | ||
407 | pdata->pm_command = ehci_readl(ehci, &ehci->regs->command); | ||
408 | pdata->pm_command &= ~CMD_RUN; | ||
409 | pdata->pm_status = ehci_readl(ehci, &ehci->regs->status); | ||
410 | pdata->pm_intr_enable = ehci_readl(ehci, &ehci->regs->intr_enable); | ||
411 | pdata->pm_frame_index = ehci_readl(ehci, &ehci->regs->frame_index); | ||
412 | pdata->pm_segment = ehci_readl(ehci, &ehci->regs->segment); | ||
413 | pdata->pm_frame_list = ehci_readl(ehci, &ehci->regs->frame_list); | ||
414 | pdata->pm_async_next = ehci_readl(ehci, &ehci->regs->async_next); | ||
415 | pdata->pm_configured_flag = | ||
416 | ehci_readl(ehci, &ehci->regs->configured_flag); | ||
417 | pdata->pm_portsc = ehci_readl(ehci, &ehci->regs->port_status[0]); | ||
418 | pdata->pm_usbgenctrl = ehci_readl(ehci, | ||
419 | hcd->regs + FSL_SOC_USB_USBGENCTRL); | ||
420 | |||
421 | /* clear the W1C bits */ | ||
422 | pdata->pm_portsc &= cpu_to_hc32(ehci, ~PORT_RWC_BITS); | ||
423 | |||
424 | pdata->suspended = 1; | ||
425 | |||
426 | /* clear PP to cut power to the port */ | ||
427 | tmp = ehci_readl(ehci, &ehci->regs->port_status[0]); | ||
428 | tmp &= ~PORT_POWER; | ||
429 | ehci_writel(ehci, tmp, &ehci->regs->port_status[0]); | ||
430 | |||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | static int ehci_fsl_mpc512x_drv_resume(struct device *dev) | ||
435 | { | ||
436 | struct usb_hcd *hcd = dev_get_drvdata(dev); | ||
437 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
438 | struct fsl_usb2_platform_data *pdata = dev->platform_data; | ||
439 | u32 tmp; | ||
440 | |||
441 | dev_dbg(dev, "suspend=%d already_suspended=%d\n", | ||
442 | pdata->suspended, pdata->already_suspended); | ||
443 | |||
444 | /* | ||
445 | * If the controller was already suspended at suspend time, | ||
446 | * then don't resume it now. | ||
447 | */ | ||
448 | if (pdata->already_suspended) { | ||
449 | dev_dbg(dev, "already suspended, leaving early\n"); | ||
450 | pdata->already_suspended = 0; | ||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | if (!pdata->suspended) { | ||
455 | dev_dbg(dev, "not suspended, leaving early\n"); | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | pdata->suspended = 0; | ||
460 | |||
461 | dev_dbg(dev, "resuming...\n"); | ||
462 | |||
463 | /* set host mode */ | ||
464 | tmp = USBMODE_CM_HOST | (pdata->es ? USBMODE_ES : 0); | ||
465 | ehci_writel(ehci, tmp, hcd->regs + FSL_SOC_USB_USBMODE); | ||
466 | |||
467 | ehci_writel(ehci, pdata->pm_usbgenctrl, | ||
468 | hcd->regs + FSL_SOC_USB_USBGENCTRL); | ||
469 | ehci_writel(ehci, ISIPHYCTRL_PXE | ISIPHYCTRL_PHYE, | ||
470 | hcd->regs + FSL_SOC_USB_ISIPHYCTRL); | ||
471 | |||
472 | /* restore EHCI registers */ | ||
473 | ehci_writel(ehci, pdata->pm_command, &ehci->regs->command); | ||
474 | ehci_writel(ehci, pdata->pm_intr_enable, &ehci->regs->intr_enable); | ||
475 | ehci_writel(ehci, pdata->pm_frame_index, &ehci->regs->frame_index); | ||
476 | ehci_writel(ehci, pdata->pm_segment, &ehci->regs->segment); | ||
477 | ehci_writel(ehci, pdata->pm_frame_list, &ehci->regs->frame_list); | ||
478 | ehci_writel(ehci, pdata->pm_async_next, &ehci->regs->async_next); | ||
479 | ehci_writel(ehci, pdata->pm_configured_flag, | ||
480 | &ehci->regs->configured_flag); | ||
481 | ehci_writel(ehci, pdata->pm_portsc, &ehci->regs->port_status[0]); | ||
482 | |||
483 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | ||
484 | hcd->state = HC_STATE_RUNNING; | ||
485 | dev->power.power_state = PMSG_ON; | ||
486 | |||
487 | tmp = ehci_readl(ehci, &ehci->regs->command); | ||
488 | tmp |= CMD_RUN; | ||
489 | ehci_writel(ehci, tmp, &ehci->regs->command); | ||
490 | |||
491 | usb_hcd_resume_root_hub(hcd); | ||
492 | |||
493 | return 0; | ||
494 | } | ||
495 | #else | ||
496 | static inline int ehci_fsl_mpc512x_drv_suspend(struct device *dev) | ||
497 | { | ||
498 | return 0; | ||
499 | } | ||
500 | |||
501 | static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev) | ||
502 | { | ||
503 | return 0; | ||
504 | } | ||
505 | #endif /* CONFIG_PPC_MPC512x */ | ||
506 | |||
331 | static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd) | 507 | static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd) |
332 | { | 508 | { |
333 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 509 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
@@ -341,6 +517,11 @@ static int ehci_fsl_drv_suspend(struct device *dev)
341 | struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); | 517 | struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); |
342 | void __iomem *non_ehci = hcd->regs; | 518 | void __iomem *non_ehci = hcd->regs; |
343 | 519 | ||
520 | if (of_device_is_compatible(dev->parent->of_node, | ||
521 | "fsl,mpc5121-usb2-dr")) { | ||
522 | return ehci_fsl_mpc512x_drv_suspend(dev); | ||
523 | } | ||
524 | |||
344 | ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd), | 525 | ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd), |
345 | device_may_wakeup(dev)); | 526 | device_may_wakeup(dev)); |
346 | if (!fsl_deep_sleep()) | 527 | if (!fsl_deep_sleep()) |
@@ -357,6 +538,11 @@ static int ehci_fsl_drv_resume(struct device *dev)
357 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 538 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
358 | void __iomem *non_ehci = hcd->regs; | 539 | void __iomem *non_ehci = hcd->regs; |
359 | 540 | ||
541 | if (of_device_is_compatible(dev->parent->of_node, | ||
542 | "fsl,mpc5121-usb2-dr")) { | ||
543 | return ehci_fsl_mpc512x_drv_resume(dev); | ||
544 | } | ||
545 | |||
360 | ehci_prepare_ports_for_controller_resume(ehci); | 546 | ehci_prepare_ports_for_controller_resume(ehci); |
361 | if (!fsl_deep_sleep()) | 547 | if (!fsl_deep_sleep()) |
362 | return 0; | 548 | return 0; |
@@ -391,6 +577,38 @@ static struct dev_pm_ops ehci_fsl_pm_ops = {
391 | #define EHCI_FSL_PM_OPS NULL | 577 | #define EHCI_FSL_PM_OPS NULL |
392 | #endif /* CONFIG_PM */ | 578 | #endif /* CONFIG_PM */ |
393 | 579 | ||
580 | #ifdef CONFIG_USB_OTG | ||
581 | static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port) | ||
582 | { | ||
583 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
584 | u32 status; | ||
585 | |||
586 | if (!port) | ||
587 | return -EINVAL; | ||
588 | |||
589 | port--; | ||
590 | |||
591 | /* start port reset before HNP protocol time out */ | ||
592 | status = readl(&ehci->regs->port_status[port]); | ||
593 | if (!(status & PORT_CONNECT)) | ||
594 | return -ENODEV; | ||
595 | |||
596 | /* khubd will finish the reset later */ | ||
597 | if (ehci_is_TDI(ehci)) { | ||
598 | writel(PORT_RESET | | ||
599 | (status & ~(PORT_CSC | PORT_PEC | PORT_OCC)), | ||
600 | &ehci->regs->port_status[port]); | ||
601 | } else { | ||
602 | writel(PORT_RESET, &ehci->regs->port_status[port]); | ||
603 | } | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | #else | ||
608 | #define ehci_start_port_reset NULL | ||
609 | #endif /* CONFIG_USB_OTG */ | ||
610 | |||
611 | |||
394 | static const struct hc_driver ehci_fsl_hc_driver = { | 612 | static const struct hc_driver ehci_fsl_hc_driver = { |
395 | .description = hcd_name, | 613 | .description = hcd_name, |
396 | .product_desc = "Freescale On-Chip EHCI Host Controller", | 614 | .product_desc = "Freescale On-Chip EHCI Host Controller", |
@@ -430,6 +648,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
430 | .hub_control = ehci_hub_control, | 648 | .hub_control = ehci_hub_control, |
431 | .bus_suspend = ehci_bus_suspend, | 649 | .bus_suspend = ehci_bus_suspend, |
432 | .bus_resume = ehci_bus_resume, | 650 | .bus_resume = ehci_bus_resume, |
651 | .start_port_reset = ehci_start_port_reset, | ||
433 | .relinquish_port = ehci_relinquish_port, | 652 | .relinquish_port = ehci_relinquish_port, |
434 | .port_handed_over = ehci_port_handed_over, | 653 | .port_handed_over = ehci_port_handed_over, |
435 | 654 | ||
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 3fabed33d940..491806221165 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -27,6 +27,10 @@
27 | #define PORT_PTS_SERIAL (3<<30) | 27 | #define PORT_PTS_SERIAL (3<<30) |
28 | #define PORT_PTS_PTW (1<<28) | 28 | #define PORT_PTS_PTW (1<<28) |
29 | #define FSL_SOC_USB_PORTSC2 0x188 | 29 | #define FSL_SOC_USB_PORTSC2 0x188 |
30 | #define FSL_SOC_USB_USBMODE 0x1a8 | ||
31 | #define USBMODE_CM_MASK (3 << 0) /* controller mode mask */ | ||
32 | #define USBMODE_CM_HOST (3 << 0) /* controller mode: host */ | ||
33 | #define USBMODE_ES (1 << 2) /* (Big) Endian Select */ | ||
30 | 34 | ||
31 | #define FSL_SOC_USB_USBGENCTRL 0x200 | 35 | #define FSL_SOC_USB_USBGENCTRL 0x200 |
32 | #define USBGENCTRL_PPP (1 << 3) | 36 | #define USBGENCTRL_PPP (1 << 3) |
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
new file mode 100644
index 000000000000..93b230dc51a2
--- /dev/null
+++ b/drivers/usb/host/ehci-grlib.c
@@ -0,0 +1,242 @@
1 | /* | ||
2 | * Driver for Aeroflex Gaisler GRLIB GRUSBHC EHCI host controller | ||
3 | * | ||
4 | * GRUSBHC is typically found on LEON/GRLIB SoCs | ||
5 | * | ||
6 | * (c) Jan Andersson <jan@gaisler.com> | ||
7 | * | ||
8 | * Based on ehci-ppc-of.c which is: | ||
9 | * (c) Valentine Barshak <vbarshak@ru.mvista.com> | ||
10 | * and in turn based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de> | ||
11 | * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, but | ||
19 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
20 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
21 | * for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software Foundation, | ||
25 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
26 | */ | ||
27 | |||
28 | |||
29 | #include <linux/signal.h> | ||
30 | |||
31 | #include <linux/of_irq.h> | ||
32 | #include <linux/of_address.h> | ||
33 | #include <linux/of_platform.h> | ||
34 | |||
35 | #define GRUSBHC_HCIVERSION 0x0100 /* Known value of cap. reg. HCIVERSION */ | ||
36 | |||
37 | /* called during probe() after chip reset completes */ | ||
38 | static int ehci_grlib_setup(struct usb_hcd *hcd) | ||
39 | { | ||
40 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
41 | int retval; | ||
42 | |||
43 | retval = ehci_halt(ehci); | ||
44 | if (retval) | ||
45 | return retval; | ||
46 | |||
47 | retval = ehci_init(hcd); | ||
48 | if (retval) | ||
49 | return retval; | ||
50 | |||
51 | ehci->sbrn = 0x20; | ||
52 | ehci_port_power(ehci, 1); | ||
53 | |||
54 | return ehci_reset(ehci); | ||
55 | } | ||
56 | |||
57 | |||
58 | static const struct hc_driver ehci_grlib_hc_driver = { | ||
59 | .description = hcd_name, | ||
60 | .product_desc = "GRLIB GRUSBHC EHCI", | ||
61 | .hcd_priv_size = sizeof(struct ehci_hcd), | ||
62 | |||
63 | /* | ||
64 | * generic hardware linkage | ||
65 | */ | ||
66 | .irq = ehci_irq, | ||
67 | .flags = HCD_MEMORY | HCD_USB2, | ||
68 | |||
69 | /* | ||
70 | * basic lifecycle operations | ||
71 | */ | ||
72 | .reset = ehci_grlib_setup, | ||
73 | .start = ehci_run, | ||
74 | .stop = ehci_stop, | ||
75 | .shutdown = ehci_shutdown, | ||
76 | |||
77 | /* | ||
78 | * managing i/o requests and associated device resources | ||
79 | */ | ||
80 | .urb_enqueue = ehci_urb_enqueue, | ||
81 | .urb_dequeue = ehci_urb_dequeue, | ||
82 | .endpoint_disable = ehci_endpoint_disable, | ||
83 | .endpoint_reset = ehci_endpoint_reset, | ||
84 | |||
85 | /* | ||
86 | * scheduling support | ||
87 | */ | ||
88 | .get_frame_number = ehci_get_frame, | ||
89 | |||
90 | /* | ||
91 | * root hub support | ||
92 | */ | ||
93 | .hub_status_data = ehci_hub_status_data, | ||
94 | .hub_control = ehci_hub_control, | ||
95 | #ifdef CONFIG_PM | ||
96 | .bus_suspend = ehci_bus_suspend, | ||
97 | .bus_resume = ehci_bus_resume, | ||
98 | #endif | ||
99 | .relinquish_port = ehci_relinquish_port, | ||
100 | .port_handed_over = ehci_port_handed_over, | ||
101 | |||
102 | .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, | ||
103 | }; | ||
104 | |||
105 | |||
106 | static int __devinit ehci_hcd_grlib_probe(struct platform_device *op) | ||
107 | { | ||
108 | struct device_node *dn = op->dev.of_node; | ||
109 | struct usb_hcd *hcd; | ||
110 | struct ehci_hcd *ehci = NULL; | ||
111 | struct resource res; | ||
112 | u32 hc_capbase; | ||
113 | int irq; | ||
114 | int rv; | ||
115 | |||
116 | if (usb_disabled()) | ||
117 | return -ENODEV; | ||
118 | |||
119 | dev_dbg(&op->dev, "initializing GRUSBHC EHCI USB Controller\n"); | ||
120 | |||
121 | rv = of_address_to_resource(dn, 0, &res); | ||
122 | if (rv) | ||
123 | return rv; | ||
124 | |||
125 | /* usb_create_hcd requires dma_mask != NULL */ | ||
126 | op->dev.dma_mask = &op->dev.coherent_dma_mask; | ||
127 | hcd = usb_create_hcd(&ehci_grlib_hc_driver, &op->dev, | ||
128 | "GRUSBHC EHCI USB"); | ||
129 | if (!hcd) | ||
130 | return -ENOMEM; | ||
131 | |||
132 | hcd->rsrc_start = res.start; | ||
133 | hcd->rsrc_len = res.end - res.start + 1; | ||
134 | |||
135 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { | ||
136 | printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); | ||
137 | rv = -EBUSY; | ||
138 | goto err_rmr; | ||
139 | } | ||
140 | |||
141 | irq = irq_of_parse_and_map(dn, 0); | ||
142 | if (irq == NO_IRQ) { | ||
143 | printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); | ||
144 | rv = -EBUSY; | ||
145 | goto err_irq; | ||
146 | } | ||
147 | |||
148 | hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); | ||
149 | if (!hcd->regs) { | ||
150 | printk(KERN_ERR "%s: ioremap failed\n", __FILE__); | ||
151 | rv = -ENOMEM; | ||
152 | goto err_ioremap; | ||
153 | } | ||
154 | |||
155 | ehci = hcd_to_ehci(hcd); | ||
156 | |||
157 | ehci->caps = hcd->regs; | ||
158 | |||
159 | /* determine endianness of this implementation */ | ||
160 | hc_capbase = ehci_readl(ehci, &ehci->caps->hc_capbase); | ||
161 | if (HC_VERSION(ehci, hc_capbase) != GRUSBHC_HCIVERSION) { | ||
162 | ehci->big_endian_mmio = 1; | ||
163 | ehci->big_endian_desc = 1; | ||
164 | ehci->big_endian_capbase = 1; | ||
165 | } | ||
166 | |||
167 | ehci->regs = hcd->regs + | ||
168 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); | ||
169 | |||
170 | /* cache this readonly data; minimize chip reads */ | ||
171 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | ||
172 | |||
173 | rv = usb_add_hcd(hcd, irq, 0); | ||
174 | if (rv) | ||
175 | goto err_ehci; | ||
176 | |||
177 | return 0; | ||
178 | |||
179 | err_ehci: | ||
180 | iounmap(hcd->regs); | ||
181 | err_ioremap: | ||
182 | irq_dispose_mapping(irq); | ||
183 | err_irq: | ||
184 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
185 | err_rmr: | ||
186 | usb_put_hcd(hcd); | ||
187 | |||
188 | return rv; | ||
189 | } | ||
190 | |||
191 | |||
192 | static int ehci_hcd_grlib_remove(struct platform_device *op) | ||
193 | { | ||
194 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); | ||
195 | |||
196 | dev_set_drvdata(&op->dev, NULL); | ||
197 | |||
198 | dev_dbg(&op->dev, "stopping GRLIB GRUSBHC EHCI USB Controller\n"); | ||
199 | |||
200 | usb_remove_hcd(hcd); | ||
201 | |||
202 | iounmap(hcd->regs); | ||
203 | irq_dispose_mapping(hcd->irq); | ||
204 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
205 | |||
206 | usb_put_hcd(hcd); | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | |||
212 | static void ehci_hcd_grlib_shutdown(struct platform_device *op) | ||
213 | { | ||
214 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); | ||
215 | |||
216 | if (hcd->driver->shutdown) | ||
217 | hcd->driver->shutdown(hcd); | ||
218 | } | ||
219 | |||
220 | |||
221 | static const struct of_device_id ehci_hcd_grlib_of_match[] = { | ||
222 | { | ||
223 | .name = "GAISLER_EHCI", | ||
224 | }, | ||
225 | { | ||
226 | .name = "01_026", | ||
227 | }, | ||
228 | {}, | ||
229 | }; | ||
230 | MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match); | ||
231 | |||
232 | |||
233 | static struct platform_driver ehci_grlib_driver = { | ||
234 | .probe = ehci_hcd_grlib_probe, | ||
235 | .remove = ehci_hcd_grlib_remove, | ||
236 | .shutdown = ehci_hcd_grlib_shutdown, | ||
237 | .driver = { | ||
238 | .name = "grlib-ehci", | ||
239 | .owner = THIS_MODULE, | ||
240 | .of_match_table = ehci_hcd_grlib_of_match, | ||
241 | }, | ||
242 | }; | ||
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 78561d112c04..b435ed67dd5c 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -739,7 +739,7 @@ static int ehci_run (struct usb_hcd *hcd)
739 | up_write(&ehci_cf_port_reset_rwsem); | 739 | up_write(&ehci_cf_port_reset_rwsem); |
740 | ehci->last_periodic_enable = ktime_get_real(); | 740 | ehci->last_periodic_enable = ktime_get_real(); |
741 | 741 | ||
742 | temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 742 | temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
743 | ehci_info (ehci, | 743 | ehci_info (ehci, |
744 | "USB %x.%x started, EHCI %x.%02x%s\n", | 744 | "USB %x.%x started, EHCI %x.%02x%s\n", |
745 | ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), | 745 | ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), |
@@ -777,8 +777,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
777 | goto dead; | 777 | goto dead; |
778 | } | 778 | } |
779 | 779 | ||
780 | /* Shared IRQ? */ | ||
780 | masked_status = status & INTR_MASK; | 781 | masked_status = status & INTR_MASK; |
781 | if (!masked_status) { /* irq sharing? */ | 782 | if (!masked_status || unlikely(hcd->state == HC_STATE_HALT)) { |
782 | spin_unlock(&ehci->lock); | 783 | spin_unlock(&ehci->lock); |
783 | return IRQ_NONE; | 784 | return IRQ_NONE; |
784 | } | 785 | } |
@@ -873,6 +874,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
873 | dead: | 874 | dead: |
874 | ehci_reset(ehci); | 875 | ehci_reset(ehci); |
875 | ehci_writel(ehci, 0, &ehci->regs->configured_flag); | 876 | ehci_writel(ehci, 0, &ehci->regs->configured_flag); |
877 | usb_hc_died(hcd); | ||
876 | /* generic layer kills/unlinks all urbs, then | 878 | /* generic layer kills/unlinks all urbs, then |
877 | * uses ehci_stop to clean up the rest | 879 | * uses ehci_stop to clean up the rest |
878 | */ | 880 | */ |
@@ -1265,6 +1267,21 @@ MODULE_LICENSE ("GPL");
1265 | #define PLATFORM_DRIVER tegra_ehci_driver | 1267 | #define PLATFORM_DRIVER tegra_ehci_driver |
1266 | #endif | 1268 | #endif |
1267 | 1269 | ||
1270 | #ifdef CONFIG_USB_EHCI_S5P | ||
1271 | #include "ehci-s5p.c" | ||
1272 | #define PLATFORM_DRIVER s5p_ehci_driver | ||
1273 | #endif | ||
1274 | |||
1275 | #ifdef CONFIG_USB_EHCI_ATH79 | ||
1276 | #include "ehci-ath79.c" | ||
1277 | #define PLATFORM_DRIVER ehci_ath79_driver | ||
1278 | #endif | ||
1279 | |||
1280 | #ifdef CONFIG_SPARC_LEON | ||
1281 | #include "ehci-grlib.c" | ||
1282 | #define PLATFORM_DRIVER ehci_grlib_driver | ||
1283 | #endif | ||
1284 | |||
1268 | #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ | 1285 | #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ |
1269 | !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \ | 1286 | !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \ |
1270 | !defined(XILINX_OF_PLATFORM_DRIVER) | 1287 | !defined(XILINX_OF_PLATFORM_DRIVER) |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index d05ea03cfb4d..ea6184bf48d0 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -27,6 +27,7 @@
27 | */ | 27 | */ |
28 | 28 | ||
29 | /*-------------------------------------------------------------------------*/ | 29 | /*-------------------------------------------------------------------------*/ |
30 | #include <linux/usb/otg.h> | ||
30 | 31 | ||
31 | #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) | 32 | #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) |
32 | 33 | ||
@@ -127,7 +128,7 @@ static int ehci_port_change(struct ehci_hcd *ehci)
127 | return 0; | 128 | return 0; |
128 | } | 129 | } |
129 | 130 | ||
130 | static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, | 131 | static __maybe_unused void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, |
131 | bool suspending, bool do_wakeup) | 132 | bool suspending, bool do_wakeup) |
132 | { | 133 | { |
133 | int port; | 134 | int port; |
@@ -801,6 +802,13 @@ static int ehci_hub_control (
801 | goto error; | 802 | goto error; |
802 | if (ehci->no_selective_suspend) | 803 | if (ehci->no_selective_suspend) |
803 | break; | 804 | break; |
805 | #ifdef CONFIG_USB_OTG | ||
806 | if ((hcd->self.otg_port == (wIndex + 1)) | ||
807 | && hcd->self.b_hnp_enable) { | ||
808 | otg_start_hnp(ehci->transceiver); | ||
809 | break; | ||
810 | } | ||
811 | #endif | ||
804 | if (!(temp & PORT_SUSPEND)) | 812 | if (!(temp & PORT_SUSPEND)) |
805 | break; | 813 | break; |
806 | if ((temp & PORT_PE) == 0) | 814 | if ((temp & PORT_PE) == 0) |
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index 89b7c70c6ed6..50e600d26e28 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -23,7 +23,7 @@ static int ixp4xx_ehci_init(struct usb_hcd *hcd)
23 | 23 | ||
24 | ehci->caps = hcd->regs + 0x100; | 24 | ehci->caps = hcd->regs + 0x100; |
25 | ehci->regs = hcd->regs + 0x100 | 25 | ehci->regs = hcd->regs + 0x100 |
26 | + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 26 | + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
27 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | 27 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); |
28 | 28 | ||
29 | hcd->has_tt = 1; | 29 | hcd->has_tt = 1; |
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 9ce1b0bc186d..b5a0bf649c95 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -41,7 +41,7 @@ static int ehci_msm_reset(struct usb_hcd *hcd)
41 | 41 | ||
42 | ehci->caps = USB_CAPLENGTH; | 42 | ehci->caps = USB_CAPLENGTH; |
43 | ehci->regs = USB_CAPLENGTH + | 43 | ehci->regs = USB_CAPLENGTH + |
44 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 44 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
45 | dbg_hcs_params(ehci, "reset"); | 45 | dbg_hcs_params(ehci, "reset"); |
46 | dbg_hcc_params(ehci, "reset"); | 46 | dbg_hcc_params(ehci, "reset"); |
47 | 47 | ||
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 25c8c10bb689..0c058be35a38 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -208,7 +208,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
208 | /* EHCI registers start at offset 0x100 */ | 208 | /* EHCI registers start at offset 0x100 */ |
209 | ehci->caps = hcd->regs + 0x100; | 209 | ehci->caps = hcd->regs + 0x100; |
210 | ehci->regs = hcd->regs + 0x100 + | 210 | ehci->regs = hcd->regs + 0x100 + |
211 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 211 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
212 | 212 | ||
213 | /* set up the PORTSCx register */ | 213 | /* set up the PORTSCx register */ |
214 | ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]); | 214 | ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]); |
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index a31a031178a8..ff55757ba7d8 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -151,7 +151,7 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
151 | 151 | ||
152 | ehci->caps = hcd->regs; | 152 | ehci->caps = hcd->regs; |
153 | ehci->regs = hcd->regs + | 153 | ehci->regs = hcd->regs + |
154 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 154 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
155 | /* cache this readonly data; minimize chip reads */ | 155 | /* cache this readonly data; minimize chip reads */ |
156 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | 156 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); |
157 | 157 | ||
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 627f3a678759..55a57c23dd0f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -208,7 +208,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
208 | /* we know this is the memory we want, no need to ioremap again */ | 208 | /* we know this is the memory we want, no need to ioremap again */ |
209 | omap_ehci->caps = hcd->regs; | 209 | omap_ehci->caps = hcd->regs; |
210 | omap_ehci->regs = hcd->regs | 210 | omap_ehci->regs = hcd->regs |
211 | + HC_LENGTH(readl(&omap_ehci->caps->hc_capbase)); | 211 | + HC_LENGTH(ehci, readl(&omap_ehci->caps->hc_capbase)); |
212 | 212 | ||
213 | dbg_hcs_params(omap_ehci, "reset"); | 213 | dbg_hcs_params(omap_ehci, "reset"); |
214 | dbg_hcc_params(omap_ehci, "reset"); | 214 | dbg_hcc_params(omap_ehci, "reset"); |
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 281e094e1c18..395bdb0248d5 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -251,7 +251,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
251 | ehci = hcd_to_ehci(hcd); | 251 | ehci = hcd_to_ehci(hcd); |
252 | ehci->caps = hcd->regs + 0x100; | 252 | ehci->caps = hcd->regs + 0x100; |
253 | ehci->regs = hcd->regs + 0x100 + | 253 | ehci->regs = hcd->regs + 0x100 + |
254 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 254 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
255 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | 255 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); |
256 | hcd->has_tt = 1; | 256 | hcd->has_tt = 1; |
257 | ehci->sbrn = 0x20; | 257 | ehci->sbrn = 0x20; |
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index d5eaea7caf89..660b80a75cac 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -70,7 +70,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
70 | 70 | ||
71 | ehci->caps = hcd->regs; | 71 | ehci->caps = hcd->regs; |
72 | ehci->regs = hcd->regs + | 72 | ehci->regs = hcd->regs + |
73 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 73 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
74 | 74 | ||
75 | dbg_hcs_params(ehci, "reset"); | 75 | dbg_hcs_params(ehci, "reset"); |
76 | dbg_hcc_params(ehci, "reset"); | 76 | dbg_hcc_params(ehci, "reset"); |
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index a2168642175b..cd69099cda19 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -83,7 +83,7 @@ static int ehci_msp_setup(struct usb_hcd *hcd)
83 | 83 | ||
84 | ehci->caps = hcd->regs; | 84 | ehci->caps = hcd->regs; |
85 | ehci->regs = hcd->regs + | 85 | ehci->regs = hcd->regs + |
86 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 86 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
87 | dbg_hcs_params(ehci, "reset"); | 87 | dbg_hcs_params(ehci, "reset"); |
88 | dbg_hcc_params(ehci, "reset"); | 88 | dbg_hcc_params(ehci, "reset"); |
89 | 89 | ||
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 1f09f253697e..8552db6c29c9 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -179,7 +179,7 @@ static int __devinit ehci_hcd_ppc_of_probe(struct platform_device *op)
179 | 179 | ||
180 | ehci->caps = hcd->regs; | 180 | ehci->caps = hcd->regs; |
181 | ehci->regs = hcd->regs + | 181 | ehci->regs = hcd->regs + |
182 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 182 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
183 | 183 | ||
184 | /* cache this readonly data; minimize chip reads */ | 184 | /* cache this readonly data; minimize chip reads */ |
185 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | 185 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); |
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 1dee33b9139e..64626a777d61 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -29,7 +29,7 @@ static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
29 | ehci->big_endian_mmio = 1; | 29 | ehci->big_endian_mmio = 1; |
30 | 30 | ||
31 | ehci->caps = hcd->regs; | 31 | ehci->caps = hcd->regs; |
32 | ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci, | 32 | ehci->regs = hcd->regs + HC_LENGTH(ehci, ehci_readl(ehci, |
33 | &ehci->caps->hc_capbase)); | 33 | &ehci->caps->hc_capbase)); |
34 | 34 | ||
35 | dbg_hcs_params(ehci, "reset"); | 35 | dbg_hcs_params(ehci, "reset"); |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 42abd0f603bf..5d6bc624c961 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -826,6 +826,7 @@ qh_make (
826 | is_input, 0, | 826 | is_input, 0, |
827 | hb_mult(maxp) * max_packet(maxp))); | 827 | hb_mult(maxp) * max_packet(maxp))); |
828 | qh->start = NO_FRAME; | 828 | qh->start = NO_FRAME; |
829 | qh->stamp = ehci->periodic_stamp; | ||
829 | 830 | ||
830 | if (urb->dev->speed == USB_SPEED_HIGH) { | 831 | if (urb->dev->speed == USB_SPEED_HIGH) { |
831 | qh->c_usecs = 0; | 832 | qh->c_usecs = 0; |
@@ -1183,6 +1184,10 @@ static void end_unlink_async (struct ehci_hcd *ehci)
1183 | ehci->reclaim = NULL; | 1184 | ehci->reclaim = NULL; |
1184 | start_unlink_async (ehci, next); | 1185 | start_unlink_async (ehci, next); |
1185 | } | 1186 | } |
1187 | |||
1188 | if (ehci->has_synopsys_hc_bug) | ||
1189 | ehci_writel(ehci, (u32) ehci->async->qh_dma, | ||
1190 | &ehci->regs->async_next); | ||
1186 | } | 1191 | } |
1187 | 1192 | ||
1188 | /* makes sure the async qh will become idle */ | 1193 | /* makes sure the async qh will become idle */ |
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
new file mode 100644
index 000000000000..e3374c8f7b3f
--- /dev/null
+++ b/drivers/usb/host/ehci-s5p.c
@@ -0,0 +1,202 @@
1 | /* | ||
2 | * SAMSUNG S5P USB HOST EHCI Controller | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co.Ltd | ||
5 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
6 | * Author: Joonyoung Shim <jy0922.shim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/clk.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <mach/regs-pmu.h> | ||
18 | #include <plat/cpu.h> | ||
19 | #include <plat/ehci.h> | ||
20 | #include <plat/usb-phy.h> | ||
21 | |||
22 | struct s5p_ehci_hcd { | ||
23 | struct device *dev; | ||
24 | struct usb_hcd *hcd; | ||
25 | struct clk *clk; | ||
26 | }; | ||
27 | |||
28 | static const struct hc_driver s5p_ehci_hc_driver = { | ||
29 | .description = hcd_name, | ||
30 | .product_desc = "S5P EHCI Host Controller", | ||
31 | .hcd_priv_size = sizeof(struct ehci_hcd), | ||
32 | |||
33 | .irq = ehci_irq, | ||
34 | .flags = HCD_MEMORY | HCD_USB2, | ||
35 | |||
36 | .reset = ehci_init, | ||
37 | .start = ehci_run, | ||
38 | .stop = ehci_stop, | ||
39 | .shutdown = ehci_shutdown, | ||
40 | |||
41 | .get_frame_number = ehci_get_frame, | ||
42 | |||
43 | .urb_enqueue = ehci_urb_enqueue, | ||
44 | .urb_dequeue = ehci_urb_dequeue, | ||
45 | .endpoint_disable = ehci_endpoint_disable, | ||
46 | .endpoint_reset = ehci_endpoint_reset, | ||
47 | |||
48 | .hub_status_data = ehci_hub_status_data, | ||
49 | .hub_control = ehci_hub_control, | ||
50 | .bus_suspend = ehci_bus_suspend, | ||
51 | .bus_resume = ehci_bus_resume, | ||
52 | |||
53 | .relinquish_port = ehci_relinquish_port, | ||
54 | .port_handed_over = ehci_port_handed_over, | ||
55 | |||
56 | .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, | ||
57 | }; | ||
58 | |||
59 | static int __devinit s5p_ehci_probe(struct platform_device *pdev) | ||
60 | { | ||
61 | struct s5p_ehci_platdata *pdata; | ||
62 | struct s5p_ehci_hcd *s5p_ehci; | ||
63 | struct usb_hcd *hcd; | ||
64 | struct ehci_hcd *ehci; | ||
65 | struct resource *res; | ||
66 | int irq; | ||
67 | int err; | ||
68 | |||
69 | pdata = pdev->dev.platform_data; | ||
70 | if (!pdata) { | ||
71 | dev_err(&pdev->dev, "No platform data defined\n"); | ||
72 | return -EINVAL; | ||
73 | } | ||
74 | |||
75 | s5p_ehci = kzalloc(sizeof(struct s5p_ehci_hcd), GFP_KERNEL); | ||
76 | if (!s5p_ehci) | ||
77 | return -ENOMEM; | ||
78 | |||
79 | s5p_ehci->dev = &pdev->dev; | ||
80 | |||
81 | hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev, | ||
82 | dev_name(&pdev->dev)); | ||
83 | if (!hcd) { | ||
84 | dev_err(&pdev->dev, "Unable to create HCD\n"); | ||
85 | err = -ENOMEM; | ||
86 | goto fail_hcd; | ||
87 | } | ||
88 | |||
89 | s5p_ehci->clk = clk_get(&pdev->dev, "usbhost"); | ||
90 | |||
91 | if (IS_ERR(s5p_ehci->clk)) { | ||
92 | dev_err(&pdev->dev, "Failed to get usbhost clock\n"); | ||
93 | err = PTR_ERR(s5p_ehci->clk); | ||
94 | goto fail_clk; | ||
95 | } | ||
96 | |||
97 | err = clk_enable(s5p_ehci->clk); | ||
98 | if (err) | ||
99 | goto fail_clken; | ||
100 | |||
101 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
102 | if (!res) { | ||
103 | dev_err(&pdev->dev, "Failed to get I/O memory\n"); | ||
104 | err = -ENXIO; | ||
105 | goto fail_io; | ||
106 | } | ||
107 | |||
108 | hcd->rsrc_start = res->start; | ||
109 | hcd->rsrc_len = resource_size(res); | ||
110 | hcd->regs = ioremap(res->start, resource_size(res)); | ||
111 | if (!hcd->regs) { | ||
112 | dev_err(&pdev->dev, "Failed to remap I/O memory\n"); | ||
113 | err = -ENOMEM; | ||
114 | goto fail_io; | ||
115 | } | ||
116 | |||
117 | irq = platform_get_irq(pdev, 0); | ||
118 | if (!irq) { | ||
119 | dev_err(&pdev->dev, "Failed to get IRQ\n"); | ||
120 | err = -ENODEV; | ||
121 | goto fail; | ||
122 | } | ||
123 | |||
124 | if (pdata->phy_init) | ||
125 | pdata->phy_init(pdev, S5P_USB_PHY_HOST); | ||
126 | |||
127 | ehci = hcd_to_ehci(hcd); | ||
128 | ehci->caps = hcd->regs; | ||
129 | ehci->regs = hcd->regs + | ||
130 | HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase)); | ||
131 | |||
132 | dbg_hcs_params(ehci, "reset"); | ||
133 | dbg_hcc_params(ehci, "reset"); | ||
134 | |||
135 | /* cache this readonly data; minimize chip reads */ | ||
136 | ehci->hcs_params = readl(&ehci->caps->hcs_params); | ||
137 | |||
138 | err = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); | ||
139 | if (err) { | ||
140 | dev_err(&pdev->dev, "Failed to add USB HCD\n"); | ||
141 | goto fail; | ||
142 | } | ||
143 | |||
144 | platform_set_drvdata(pdev, s5p_ehci); | ||
145 | |||
146 | return 0; | ||
147 | |||
148 | fail: | ||
149 | iounmap(hcd->regs); | ||
150 | fail_io: | ||
151 | clk_disable(s5p_ehci->clk); | ||
152 | fail_clken: | ||
153 | clk_put(s5p_ehci->clk); | ||
154 | fail_clk: | ||
155 | usb_put_hcd(hcd); | ||
156 | fail_hcd: | ||
157 | kfree(s5p_ehci); | ||
158 | return err; | ||
159 | } | ||
160 | |||
161 | static int __devexit s5p_ehci_remove(struct platform_device *pdev) | ||
162 | { | ||
163 | struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; | ||
164 | struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev); | ||
165 | struct usb_hcd *hcd = s5p_ehci->hcd; | ||
166 | |||
167 | usb_remove_hcd(hcd); | ||
168 | |||
169 | if (pdata && pdata->phy_exit) | ||
170 | pdata->phy_exit(pdev, S5P_USB_PHY_HOST); | ||
171 | |||
172 | iounmap(hcd->regs); | ||
173 | |||
174 | clk_disable(s5p_ehci->clk); | ||
175 | clk_put(s5p_ehci->clk); | ||
176 | |||
177 | usb_put_hcd(hcd); | ||
178 | kfree(s5p_ehci); | ||
179 | |||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static void s5p_ehci_shutdown(struct platform_device *pdev) | ||
184 | { | ||
185 | struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev); | ||
186 | struct usb_hcd *hcd = s5p_ehci->hcd; | ||
187 | |||
188 | if (hcd->driver->shutdown) | ||
189 | hcd->driver->shutdown(hcd); | ||
190 | } | ||
191 | |||
192 | static struct platform_driver s5p_ehci_driver = { | ||
193 | .probe = s5p_ehci_probe, | ||
194 | .remove = __devexit_p(s5p_ehci_remove), | ||
195 | .shutdown = s5p_ehci_shutdown, | ||
196 | .driver = { | ||
197 | .name = "s5p-ehci", | ||
198 | .owner = THIS_MODULE, | ||
199 | } | ||
200 | }; | ||
201 | |||
202 | MODULE_ALIAS("platform:s5p-ehci"); | ||
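The new glue driver binds by the "s5p-ehci" platform-device name and expects board code to provide one memory resource, one IRQ, an "usbhost" clock, and s5p_ehci_platdata with PHY callbacks, as used in the probe above. A hypothetical board-file sketch; the base address, IRQ constant, and PHY helper names are illustrative assumptions, not taken from this patch:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <plat/ehci.h>
#include <plat/usb-phy.h>

static struct resource my_board_ehci_resource[] = {
	[0] = {
		.start	= 0x12580000,		/* illustrative base address */
		.end	= 0x12580000 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_USB_HOST,		/* illustrative IRQ number */
		.end	= IRQ_USB_HOST,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct s5p_ehci_platdata my_board_ehci_pdata = {
	.phy_init	= s5p_usb_phy_init,	/* assumed SoC PHY helpers */
	.phy_exit	= s5p_usb_phy_exit,
};

static struct platform_device my_board_ehci = {
	.name		= "s5p-ehci",		/* must match s5p_ehci_driver */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(my_board_ehci_resource),
	.resource	= my_board_ehci_resource,
	.dev		= {
		.platform_data	= &my_board_ehci_pdata,
	},
};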
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 1543c838b3d1..6c9fbe352f73 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -471,8 +471,10 @@ static int enable_periodic (struct ehci_hcd *ehci) | |||
471 | */ | 471 | */ |
472 | status = handshake_on_error_set_halt(ehci, &ehci->regs->status, | 472 | status = handshake_on_error_set_halt(ehci, &ehci->regs->status, |
473 | STS_PSS, 0, 9 * 125); | 473 | STS_PSS, 0, 9 * 125); |
474 | if (status) | 474 | if (status) { |
475 | usb_hc_died(ehci_to_hcd(ehci)); | ||
475 | return status; | 476 | return status; |
477 | } | ||
476 | 478 | ||
477 | cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE; | 479 | cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE; |
478 | ehci_writel(ehci, cmd, &ehci->regs->command); | 480 | ehci_writel(ehci, cmd, &ehci->regs->command); |
@@ -510,8 +512,10 @@ static int disable_periodic (struct ehci_hcd *ehci) | |||
510 | */ | 512 | */ |
511 | status = handshake_on_error_set_halt(ehci, &ehci->regs->status, | 513 | status = handshake_on_error_set_halt(ehci, &ehci->regs->status, |
512 | STS_PSS, STS_PSS, 9 * 125); | 514 | STS_PSS, STS_PSS, 9 * 125); |
513 | if (status) | 515 | if (status) { |
516 | usb_hc_died(ehci_to_hcd(ehci)); | ||
514 | return status; | 517 | return status; |
518 | } | ||
515 | 519 | ||
516 | cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE; | 520 | cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE; |
517 | ehci_writel(ehci, cmd, &ehci->regs->command); | 521 | ehci_writel(ehci, cmd, &ehci->regs->command); |
@@ -2287,6 +2291,7 @@ scan_periodic (struct ehci_hcd *ehci) | |||
2287 | } | 2291 | } |
2288 | clock &= mod - 1; | 2292 | clock &= mod - 1; |
2289 | clock_frame = clock >> 3; | 2293 | clock_frame = clock >> 3; |
2294 | ++ehci->periodic_stamp; | ||
2290 | 2295 | ||
2291 | for (;;) { | 2296 | for (;;) { |
2292 | union ehci_shadow q, *q_p; | 2297 | union ehci_shadow q, *q_p; |
@@ -2315,10 +2320,14 @@ restart: | |||
2315 | temp.qh = qh_get (q.qh); | 2320 | temp.qh = qh_get (q.qh); |
2316 | type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next); | 2321 | type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next); |
2317 | q = q.qh->qh_next; | 2322 | q = q.qh->qh_next; |
2318 | modified = qh_completions (ehci, temp.qh); | 2323 | if (temp.qh->stamp != ehci->periodic_stamp) { |
2319 | if (unlikely(list_empty(&temp.qh->qtd_list) || | 2324 | modified = qh_completions(ehci, temp.qh); |
2320 | temp.qh->needs_rescan)) | 2325 | if (!modified) |
2321 | intr_deschedule (ehci, temp.qh); | 2326 | temp.qh->stamp = ehci->periodic_stamp; |
2327 | if (unlikely(list_empty(&temp.qh->qtd_list) || | ||
2328 | temp.qh->needs_rescan)) | ||
2329 | intr_deschedule(ehci, temp.qh); | ||
2330 | } | ||
2322 | qh_put (temp.qh); | 2331 | qh_put (temp.qh); |
2323 | break; | 2332 | break; |
2324 | case Q_TYPE_FSTN: | 2333 | case Q_TYPE_FSTN: |
@@ -2460,6 +2469,7 @@ restart: | |||
2460 | if (ehci->clock_frame != clock_frame) { | 2469 | if (ehci->clock_frame != clock_frame) { |
2461 | free_cached_lists(ehci); | 2470 | free_cached_lists(ehci); |
2462 | ehci->clock_frame = clock_frame; | 2471 | ehci->clock_frame = clock_frame; |
2472 | ++ehci->periodic_stamp; | ||
2463 | } | 2473 | } |
2464 | } else { | 2474 | } else { |
2465 | now_uframe++; | 2475 | now_uframe++; |
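The periodic_stamp field added in ehci.h ties the ehci-q.c and ehci-sched.c hunks together: the stamp advances once per scan pass and whenever the frame counter moves on, and a QH whose own stamp already equals the current value was scanned earlier in the same pass with nothing to retire, so qh_completions() is skipped for it. A compact paraphrase of the logic shown above, not a drop-in hunk:

/* 1) qh_make(): a new interrupt QH starts in the current epoch */
qh->stamp = ehci->periodic_stamp;

/* 2) scan_periodic(): the epoch advances per pass and per frame change */
++ehci->periodic_stamp;

/* 3) while walking a frame: QHs already stamped with the current epoch
 *    had nothing to retire earlier in this pass, so do not rescan them */
if (temp.qh->stamp != ehci->periodic_stamp) {
	modified = qh_completions(ehci, temp.qh);
	if (!modified)
		temp.qh->stamp = ehci->periodic_stamp;
}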
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index 595f70f42b52..86a95bb80a61 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c | |||
@@ -23,7 +23,7 @@ static int ehci_sh_reset(struct usb_hcd *hcd) | |||
23 | int ret; | 23 | int ret; |
24 | 24 | ||
25 | ehci->caps = hcd->regs; | 25 | ehci->caps = hcd->regs; |
26 | ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci, | 26 | ehci->regs = hcd->regs + HC_LENGTH(ehci, ehci_readl(ehci, |
27 | &ehci->caps->hc_capbase)); | 27 | &ehci->caps->hc_capbase)); |
28 | 28 | ||
29 | dbg_hcs_params(ehci, "reset"); | 29 | dbg_hcs_params(ehci, "reset"); |
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c index 75c00873443d..dbf1e4ef3c17 100644 --- a/drivers/usb/host/ehci-spear.c +++ b/drivers/usb/host/ehci-spear.c | |||
@@ -38,7 +38,7 @@ static int ehci_spear_setup(struct usb_hcd *hcd) | |||
38 | 38 | ||
39 | /* registers start at offset 0x0 */ | 39 | /* registers start at offset 0x0 */ |
40 | ehci->caps = hcd->regs; | 40 | ehci->caps = hcd->regs; |
41 | ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci, | 41 | ehci->regs = hcd->regs + HC_LENGTH(ehci, ehci_readl(ehci, |
42 | &ehci->caps->hc_capbase)); | 42 | &ehci->caps->hc_capbase)); |
43 | /* cache this readonly data; minimize chip reads */ | 43 | /* cache this readonly data; minimize chip reads */ |
44 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | 44 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); |
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index a516af28c29b..02b2bfd49a10 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c | |||
@@ -58,6 +58,71 @@ static void tegra_ehci_power_down(struct usb_hcd *hcd) | |||
58 | clk_disable(tegra->emc_clk); | 58 | clk_disable(tegra->emc_clk); |
59 | } | 59 | } |
60 | 60 | ||
61 | static int tegra_ehci_internal_port_reset( | ||
62 | struct ehci_hcd *ehci, | ||
63 | u32 __iomem *portsc_reg | ||
64 | ) | ||
65 | { | ||
66 | u32 temp; | ||
67 | unsigned long flags; | ||
68 | int retval = 0; | ||
69 | int i, tries; | ||
70 | u32 saved_usbintr; | ||
71 | |||
72 | spin_lock_irqsave(&ehci->lock, flags); | ||
73 | saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable); | ||
74 | /* disable USB interrupt */ | ||
75 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); | ||
76 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
77 | |||
78 | /* | ||
79 | * Here we have to do Port Reset at most twice for | ||
80 | * Port Enable bit to be set. | ||
81 | */ | ||
82 | for (i = 0; i < 2; i++) { | ||
83 | temp = ehci_readl(ehci, portsc_reg); | ||
84 | temp |= PORT_RESET; | ||
85 | ehci_writel(ehci, temp, portsc_reg); | ||
86 | mdelay(10); | ||
87 | temp &= ~PORT_RESET; | ||
88 | ehci_writel(ehci, temp, portsc_reg); | ||
89 | mdelay(1); | ||
90 | tries = 100; | ||
91 | do { | ||
92 | mdelay(1); | ||
93 | /* | ||
94 | * Up to this point, Port Enable bit is | ||
95 | * expected to be set after 2 ms waiting. | ||
96 | * USB1 usually takes extra 45 ms, for safety, | ||
97 | * we take 100 ms as timeout. | ||
98 | */ | ||
99 | temp = ehci_readl(ehci, portsc_reg); | ||
100 | } while (!(temp & PORT_PE) && tries--); | ||
101 | if (temp & PORT_PE) | ||
102 | break; | ||
103 | } | ||
104 | if (i == 2) | ||
105 | retval = -ETIMEDOUT; | ||
106 | |||
107 | /* | ||
108 | * Clear Connect Status Change bit if it's set. | ||
109 | * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared. | ||
110 | */ | ||
111 | if (temp & PORT_CSC) | ||
112 | ehci_writel(ehci, PORT_CSC, portsc_reg); | ||
113 | |||
114 | /* | ||
115 | * Write to clear any interrupt status bits that might be set | ||
116 | * during port reset. | ||
117 | */ | ||
118 | temp = ehci_readl(ehci, &ehci->regs->status); | ||
119 | ehci_writel(ehci, temp, &ehci->regs->status); | ||
120 | |||
121 | /* restore original interrupt enable bits */ | ||
122 | ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable); | ||
123 | return retval; | ||
124 | } | ||
125 | |||
61 | static int tegra_ehci_hub_control( | 126 | static int tegra_ehci_hub_control( |
62 | struct usb_hcd *hcd, | 127 | struct usb_hcd *hcd, |
63 | u16 typeReq, | 128 | u16 typeReq, |
@@ -121,6 +186,13 @@ static int tegra_ehci_hub_control( | |||
121 | goto done; | 186 | goto done; |
122 | } | 187 | } |
123 | 188 | ||
189 | /* For USB1 port we need to issue Port Reset twice internally */ | ||
190 | if (tegra->phy->instance == 0 && | ||
191 | (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) { | ||
192 | spin_unlock_irqrestore(&ehci->lock, flags); | ||
193 | return tegra_ehci_internal_port_reset(ehci, status_reg); | ||
194 | } | ||
195 | |||
124 | /* | 196 | /* |
125 | * Tegra host controller will time the resume operation to clear the bit | 197 | * Tegra host controller will time the resume operation to clear the bit |
126 | * when the port control state switches to HS or FS Idle. This behavior | 198 | * when the port control state switches to HS or FS Idle. This behavior |
@@ -328,7 +400,7 @@ static int tegra_ehci_setup(struct usb_hcd *hcd) | |||
328 | /* EHCI registers start at offset 0x100 */ | 400 | /* EHCI registers start at offset 0x100 */ |
329 | ehci->caps = hcd->regs + 0x100; | 401 | ehci->caps = hcd->regs + 0x100; |
330 | ehci->regs = hcd->regs + 0x100 + | 402 | ehci->regs = hcd->regs + 0x100 + |
331 | HC_LENGTH(readl(&ehci->caps->hc_capbase)); | 403 | HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase)); |
332 | 404 | ||
333 | dbg_hcs_params(ehci, "reset"); | 405 | dbg_hcs_params(ehci, "reset"); |
334 | dbg_hcc_params(ehci, "reset"); | 406 | dbg_hcc_params(ehci, "reset"); |
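The PORT_PE wait in tegra_ehci_internal_port_reset() above is an open-coded 100 x 1 ms poll. Because ehci-tegra.c is built by being #included into ehci-hcd.c, the core's static handshake() helper is also in scope, so the same wait could in principle be expressed through it; this is a sketch of that alternative only, not what the patch does:

/* sketch: PORT_PE poll via the EHCI core's handshake() helper */
retval = handshake(ehci, portsc_reg, PORT_PE, PORT_PE, 100 * 1000);
if (retval == 0)
	break;		/* port came up enabled; no second reset cycle needed */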
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c index 20168062035a..47d749631bc7 100644 --- a/drivers/usb/host/ehci-vt8500.c +++ b/drivers/usb/host/ehci-vt8500.c | |||
@@ -121,7 +121,8 @@ static int vt8500_ehci_drv_probe(struct platform_device *pdev) | |||
121 | 121 | ||
122 | ehci = hcd_to_ehci(hcd); | 122 | ehci = hcd_to_ehci(hcd); |
123 | ehci->caps = hcd->regs; | 123 | ehci->caps = hcd->regs; |
124 | ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase)); | 124 | ehci->regs = hcd->regs + |
125 | HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase)); | ||
125 | 126 | ||
126 | dbg_hcs_params(ehci, "reset"); | 127 | dbg_hcs_params(ehci, "reset"); |
127 | dbg_hcc_params(ehci, "reset"); | 128 | dbg_hcc_params(ehci, "reset"); |
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c index 6bc35809a5c6..52a027aaa370 100644 --- a/drivers/usb/host/ehci-w90x900.c +++ b/drivers/usb/host/ehci-w90x900.c | |||
@@ -57,7 +57,7 @@ static int __devinit usb_w90x900_probe(const struct hc_driver *driver, | |||
57 | ehci = hcd_to_ehci(hcd); | 57 | ehci = hcd_to_ehci(hcd); |
58 | ehci->caps = hcd->regs; | 58 | ehci->caps = hcd->regs; |
59 | ehci->regs = hcd->regs + | 59 | ehci->regs = hcd->regs + |
60 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 60 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
61 | 61 | ||
62 | /* enable PHY 0,1,the regs only apply to w90p910 | 62 | /* enable PHY 0,1,the regs only apply to w90p910 |
63 | * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of | 63 | * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of |
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index effc58d7af8b..a64d6d66d760 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c | |||
@@ -220,7 +220,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op) | |||
220 | */ | 220 | */ |
221 | ehci->caps = hcd->regs + 0x100; | 221 | ehci->caps = hcd->regs + 0x100; |
222 | ehci->regs = hcd->regs + 0x100 + | 222 | ehci->regs = hcd->regs + 0x100 + |
223 | HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); | 223 | HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); |
224 | 224 | ||
225 | /* cache this readonly data; minimize chip reads */ | 225 | /* cache this readonly data; minimize chip reads */ |
226 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); | 226 | ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); |
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 333ddc156919..bd6ff489baf9 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
@@ -118,6 +118,7 @@ struct ehci_hcd { /* one per controller */ | |||
118 | struct timer_list watchdog; | 118 | struct timer_list watchdog; |
119 | unsigned long actions; | 119 | unsigned long actions; |
120 | unsigned stamp; | 120 | unsigned stamp; |
121 | unsigned periodic_stamp; | ||
121 | unsigned random_frame; | 122 | unsigned random_frame; |
122 | unsigned long next_statechange; | 123 | unsigned long next_statechange; |
123 | ktime_t last_periodic_enable; | 124 | ktime_t last_periodic_enable; |
@@ -128,12 +129,14 @@ struct ehci_hcd { /* one per controller */ | |||
128 | unsigned has_fsl_port_bug:1; /* FreeScale */ | 129 | unsigned has_fsl_port_bug:1; /* FreeScale */ |
129 | unsigned big_endian_mmio:1; | 130 | unsigned big_endian_mmio:1; |
130 | unsigned big_endian_desc:1; | 131 | unsigned big_endian_desc:1; |
132 | unsigned big_endian_capbase:1; | ||
131 | unsigned has_amcc_usb23:1; | 133 | unsigned has_amcc_usb23:1; |
132 | unsigned need_io_watchdog:1; | 134 | unsigned need_io_watchdog:1; |
133 | unsigned broken_periodic:1; | 135 | unsigned broken_periodic:1; |
134 | unsigned amd_pll_fix:1; | 136 | unsigned amd_pll_fix:1; |
135 | unsigned fs_i_thresh:1; /* Intel iso scheduling */ | 137 | unsigned fs_i_thresh:1; /* Intel iso scheduling */ |
136 | unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ | 138 | unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ |
139 | unsigned has_synopsys_hc_bug:1; /* Synopsys HC */ | ||
137 | 140 | ||
138 | /* required for usb32 quirk */ | 141 | /* required for usb32 quirk */ |
139 | #define OHCI_CTRL_HCFS (3 << 6) | 142 | #define OHCI_CTRL_HCFS (3 << 6) |
@@ -160,6 +163,10 @@ struct ehci_hcd { /* one per controller */ | |||
160 | #ifdef DEBUG | 163 | #ifdef DEBUG |
161 | struct dentry *debug_dir; | 164 | struct dentry *debug_dir; |
162 | #endif | 165 | #endif |
166 | /* | ||
167 | * OTG controllers and transceivers need software interaction | ||
168 | */ | ||
169 | struct otg_transceiver *transceiver; | ||
163 | }; | 170 | }; |
164 | 171 | ||
165 | /* convert between an HCD pointer and the corresponding EHCI_HCD */ | 172 | /* convert between an HCD pointer and the corresponding EHCI_HCD */ |
@@ -600,12 +607,18 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) | |||
600 | * This attempts to support either format at compile time without a | 607 | * This attempts to support either format at compile time without a |
601 | * runtime penalty, or both formats with the additional overhead | 608 | * runtime penalty, or both formats with the additional overhead |
602 | * of checking a flag bit. | 609 | * of checking a flag bit. |
610 | * | ||
611 | * ehci_big_endian_capbase is a special quirk for controllers that | ||
612 | * implement the HC capability registers as separate registers and not | ||
613 | * as fields of a 32-bit register. | ||
603 | */ | 614 | */ |
604 | 615 | ||
605 | #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO | 616 | #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO |
606 | #define ehci_big_endian_mmio(e) ((e)->big_endian_mmio) | 617 | #define ehci_big_endian_mmio(e) ((e)->big_endian_mmio) |
618 | #define ehci_big_endian_capbase(e) ((e)->big_endian_capbase) | ||
607 | #else | 619 | #else |
608 | #define ehci_big_endian_mmio(e) 0 | 620 | #define ehci_big_endian_mmio(e) 0 |
621 | #define ehci_big_endian_capbase(e) 0 | ||
609 | #endif | 622 | #endif |
610 | 623 | ||
611 | /* | 624 | /* |
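These ehci.h additions explain the HC_LENGTH(ehci, p) call sites updated throughout the files above: on controllers flagged with the new big_endian_capbase quirk, the capability registers are laid out as separate registers, so a 32-bit read of hc_capbase delivers CAPLENGTH in the top byte rather than in bits 7:0; without CONFIG_USB_EHCI_BIG_ENDIAN_MMIO the helper collapses to 0 and the old behaviour is kept. The hunk changing the macro itself is not among the lines shown; it presumably takes roughly this form:

/* presumed two-argument form of the macro, selecting the byte that
 * holds CAPLENGTH depending on the big_endian_capbase quirk */
#define HC_LENGTH(ehci, p)	(0x00ff & ((p) >> \
				(ehci_big_endian_capbase(ehci) ? 24 : 0)))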
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index c0e22f26da19..baae4ccd16ac 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c | |||
@@ -612,6 +612,7 @@ static irqreturn_t isp116x_irq(struct usb_hcd *hcd) | |||
612 | /* IRQ's are off, we do no DMA, | 612 | /* IRQ's are off, we do no DMA, |
613 | perfectly ready to die ... */ | 613 | perfectly ready to die ... */ |
614 | hcd->state = HC_STATE_HALT; | 614 | hcd->state = HC_STATE_HALT; |
615 | usb_hc_died(hcd); | ||
615 | ret = IRQ_HANDLED; | 616 | ret = IRQ_HANDLED; |
616 | goto done; | 617 | goto done; |
617 | } | 618 | } |
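With this one-liner, the isp116x interrupt handler no longer just parks itself in HC_STATE_HALT when the controller is unrecoverable: it also tells usbcore explicitly via usb_hc_died(), which retires the root hub instead of leaving it silently stalled. A hypothetical irq-handler fragment for some other HCD showing the intended call pattern (the surrounding handler and the status helper are illustrative):

static irqreturn_t my_hcd_irq(struct usb_hcd *hcd)	/* illustrative HCD */
{
	u32 status = my_hcd_read_status(hcd);		/* assumed helper */

	if (status == ~(u32)0) {	/* controller unreachable/dead */
		usb_hc_died(hcd);	/* let usbcore retire the root hub */
		return IRQ_HANDLED;
	}
	/* ... normal interrupt handling ... */
	return IRQ_HANDLED;
}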
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 7b2e69aa2e98..c9e6e454c625 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * | 8 | * |
9 | * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de> | 9 | * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de> |
10 | * | 10 | * |
11 | * (c) 2011 Arvid Brodin <arvid.brodin@enea.com> | ||
12 | * | ||
11 | */ | 13 | */ |
12 | #include <linux/module.h> | 14 | #include <linux/module.h> |
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
@@ -26,14 +28,18 @@ | |||
26 | 28 | ||
27 | static struct kmem_cache *qtd_cachep; | 29 | static struct kmem_cache *qtd_cachep; |
28 | static struct kmem_cache *qh_cachep; | 30 | static struct kmem_cache *qh_cachep; |
31 | static struct kmem_cache *urb_listitem_cachep; | ||
29 | 32 | ||
30 | struct isp1760_hcd { | 33 | struct isp1760_hcd { |
31 | u32 hcs_params; | 34 | u32 hcs_params; |
32 | spinlock_t lock; | 35 | spinlock_t lock; |
33 | struct inter_packet_info atl_ints[32]; | 36 | struct slotinfo atl_slots[32]; |
34 | struct inter_packet_info int_ints[32]; | 37 | int atl_done_map; |
38 | struct slotinfo int_slots[32]; | ||
39 | int int_done_map; | ||
35 | struct memory_chunk memory_pool[BLOCKS]; | 40 | struct memory_chunk memory_pool[BLOCKS]; |
36 | u32 atl_queued; | 41 | struct list_head controlqhs, bulkqhs, interruptqhs; |
42 | int active_ptds; | ||
37 | 43 | ||
38 | /* periodic schedule support */ | 44 | /* periodic schedule support */ |
39 | #define DEFAULT_I_TDPS 1024 | 45 | #define DEFAULT_I_TDPS 1024 |
@@ -85,18 +91,34 @@ struct isp1760_qtd { | |||
85 | struct list_head qtd_list; | 91 | struct list_head qtd_list; |
86 | struct urb *urb; | 92 | struct urb *urb; |
87 | size_t length; | 93 | size_t length; |
88 | 94 | size_t actual_length; | |
89 | /* isp special*/ | 95 | |
96 | /* QTD_ENQUEUED: waiting for transfer (inactive) */ | ||
97 | /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */ | ||
98 | /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only | ||
99 | interrupt handler may touch this qtd! */ | ||
100 | /* QTD_XFER_COMPLETE: payload has been transferred successfully */ | ||
101 | /* QTD_RETIRE: transfer error/abort qtd */ | ||
102 | #define QTD_ENQUEUED 0 | ||
103 | #define QTD_PAYLOAD_ALLOC 1 | ||
104 | #define QTD_XFER_STARTED 2 | ||
105 | #define QTD_XFER_COMPLETE 3 | ||
106 | #define QTD_RETIRE 4 | ||
90 | u32 status; | 107 | u32 status; |
91 | #define URB_ENQUEUED (1 << 1) | ||
92 | }; | 108 | }; |
93 | 109 | ||
110 | /* Queue head, one for each active endpoint */ | ||
94 | struct isp1760_qh { | 111 | struct isp1760_qh { |
95 | /* first part defined by EHCI spec */ | 112 | struct list_head qh_list; |
96 | struct list_head qtd_list; | 113 | struct list_head qtd_list; |
97 | |||
98 | u32 toggle; | 114 | u32 toggle; |
99 | u32 ping; | 115 | u32 ping; |
116 | int slot; | ||
117 | }; | ||
118 | |||
119 | struct urb_listitem { | ||
120 | struct list_head urb_list; | ||
121 | struct urb *urb; | ||
100 | }; | 122 | }; |
101 | 123 | ||
102 | /* | 124 | /* |
@@ -272,7 +294,7 @@ static void init_memory(struct isp1760_hcd *priv) | |||
272 | payload_addr += priv->memory_pool[curr + i].size; | 294 | payload_addr += priv->memory_pool[curr + i].size; |
273 | } | 295 | } |
274 | 296 | ||
275 | BUG_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE); | 297 | WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE); |
276 | } | 298 | } |
277 | 299 | ||
278 | static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) | 300 | static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) |
@@ -280,7 +302,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) | |||
280 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | 302 | struct isp1760_hcd *priv = hcd_to_priv(hcd); |
281 | int i; | 303 | int i; |
282 | 304 | ||
283 | BUG_ON(qtd->payload_addr); | 305 | WARN_ON(qtd->payload_addr); |
284 | 306 | ||
285 | if (!qtd->length) | 307 | if (!qtd->length) |
286 | return; | 308 | return; |
@@ -293,19 +315,6 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) | |||
293 | return; | 315 | return; |
294 | } | 316 | } |
295 | } | 317 | } |
296 | |||
297 | dev_err(hcd->self.controller, | ||
298 | "%s: Cannot allocate %zu bytes of memory\n" | ||
299 | "Current memory map:\n", | ||
300 | __func__, qtd->length); | ||
301 | for (i = 0; i < BLOCKS; i++) { | ||
302 | dev_err(hcd->self.controller, "Pool %2d size %4d status: %d\n", | ||
303 | i, priv->memory_pool[i].size, | ||
304 | priv->memory_pool[i].free); | ||
305 | } | ||
306 | /* XXX maybe -ENOMEM could be possible */ | ||
307 | BUG(); | ||
308 | return; | ||
309 | } | 318 | } |
310 | 319 | ||
311 | static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) | 320 | static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) |
@@ -318,7 +327,7 @@ static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) | |||
318 | 327 | ||
319 | for (i = 0; i < BLOCKS; i++) { | 328 | for (i = 0; i < BLOCKS; i++) { |
320 | if (priv->memory_pool[i].start == qtd->payload_addr) { | 329 | if (priv->memory_pool[i].start == qtd->payload_addr) { |
321 | BUG_ON(priv->memory_pool[i].free); | 330 | WARN_ON(priv->memory_pool[i].free); |
322 | priv->memory_pool[i].free = 1; | 331 | priv->memory_pool[i].free = 1; |
323 | qtd->payload_addr = 0; | 332 | qtd->payload_addr = 0; |
324 | return; | 333 | return; |
@@ -327,19 +336,8 @@ static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) | |||
327 | 336 | ||
328 | dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n", | 337 | dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n", |
329 | __func__, qtd->payload_addr); | 338 | __func__, qtd->payload_addr); |
330 | BUG(); | 339 | WARN_ON(1); |
331 | } | 340 | qtd->payload_addr = 0; |
332 | |||
333 | static void isp1760_init_regs(struct usb_hcd *hcd) | ||
334 | { | ||
335 | reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0); | ||
336 | reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE); | ||
337 | reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE); | ||
338 | reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE); | ||
339 | |||
340 | reg_write32(hcd->regs, HC_ATL_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE); | ||
341 | reg_write32(hcd->regs, HC_INT_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE); | ||
342 | reg_write32(hcd->regs, HC_ISO_PTD_DONEMAP_REG, ~NO_TRANSFER_ACTIVE); | ||
343 | } | 341 | } |
344 | 342 | ||
345 | static int handshake(struct usb_hcd *hcd, u32 reg, | 343 | static int handshake(struct usb_hcd *hcd, u32 reg, |
@@ -377,31 +375,27 @@ static int ehci_reset(struct usb_hcd *hcd) | |||
377 | return retval; | 375 | return retval; |
378 | } | 376 | } |
379 | 377 | ||
380 | static void qh_destroy(struct isp1760_qh *qh) | 378 | static struct isp1760_qh *qh_alloc(gfp_t flags) |
381 | { | ||
382 | BUG_ON(!list_empty(&qh->qtd_list)); | ||
383 | kmem_cache_free(qh_cachep, qh); | ||
384 | } | ||
385 | |||
386 | static struct isp1760_qh *isp1760_qh_alloc(gfp_t flags) | ||
387 | { | 379 | { |
388 | struct isp1760_qh *qh; | 380 | struct isp1760_qh *qh; |
389 | 381 | ||
390 | qh = kmem_cache_zalloc(qh_cachep, flags); | 382 | qh = kmem_cache_zalloc(qh_cachep, flags); |
391 | if (!qh) | 383 | if (!qh) |
392 | return qh; | 384 | return NULL; |
393 | 385 | ||
386 | INIT_LIST_HEAD(&qh->qh_list); | ||
394 | INIT_LIST_HEAD(&qh->qtd_list); | 387 | INIT_LIST_HEAD(&qh->qtd_list); |
388 | qh->slot = -1; | ||
389 | |||
395 | return qh; | 390 | return qh; |
396 | } | 391 | } |
397 | 392 | ||
398 | /* magic numbers that can affect system performance */ | 393 | static void qh_free(struct isp1760_qh *qh) |
399 | #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ | 394 | { |
400 | #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ | 395 | WARN_ON(!list_empty(&qh->qtd_list)); |
401 | #define EHCI_TUNE_RL_TT 0 | 396 | WARN_ON(qh->slot > -1); |
402 | #define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ | 397 | kmem_cache_free(qh_cachep, qh); |
403 | #define EHCI_TUNE_MULT_TT 1 | 398 | } |
404 | #define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */ | ||
405 | 399 | ||
406 | /* one-time init, only for memory state */ | 400 | /* one-time init, only for memory state */ |
407 | static int priv_init(struct usb_hcd *hcd) | 401 | static int priv_init(struct usb_hcd *hcd) |
@@ -411,6 +405,10 @@ static int priv_init(struct usb_hcd *hcd) | |||
411 | 405 | ||
412 | spin_lock_init(&priv->lock); | 406 | spin_lock_init(&priv->lock); |
413 | 407 | ||
408 | INIT_LIST_HEAD(&priv->interruptqhs); | ||
409 | INIT_LIST_HEAD(&priv->controlqhs); | ||
410 | INIT_LIST_HEAD(&priv->bulkqhs); | ||
411 | |||
414 | /* | 412 | /* |
415 | * hw default: 1K periodic list heads, one per frame. | 413 | * hw default: 1K periodic list heads, one per frame. |
416 | * periodic_size can shrink by USBCMD update if hcc_params allows. | 414 | * periodic_size can shrink by USBCMD update if hcc_params allows. |
@@ -468,7 +466,10 @@ static int isp1760_hc_setup(struct usb_hcd *hcd) | |||
468 | } | 466 | } |
469 | 467 | ||
470 | /* pre reset */ | 468 | /* pre reset */ |
471 | isp1760_init_regs(hcd); | 469 | reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0); |
470 | reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE); | ||
471 | reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE); | ||
472 | reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE); | ||
472 | 473 | ||
473 | /* reset */ | 474 | /* reset */ |
474 | reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL); | 475 | reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL); |
@@ -488,12 +489,15 @@ static int isp1760_hc_setup(struct usb_hcd *hcd) | |||
488 | 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ? | 489 | 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ? |
489 | "analog" : "digital"); | 490 | "analog" : "digital"); |
490 | 491 | ||
492 | /* This is weird: at the first plug-in of a device there seems to be | ||
493 | one packet queued that never gets returned? */ | ||
494 | priv->active_ptds = -1; | ||
495 | |||
491 | /* ATL reset */ | 496 | /* ATL reset */ |
492 | reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET); | 497 | reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET); |
493 | mdelay(10); | 498 | mdelay(10); |
494 | reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode); | 499 | reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode); |
495 | 500 | ||
496 | reg_write32(hcd->regs, HC_INTERRUPT_REG, INTERRUPT_ENABLE_MASK); | ||
497 | reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK); | 501 | reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK); |
498 | 502 | ||
499 | /* | 503 | /* |
@@ -516,14 +520,21 @@ static void isp1760_init_maps(struct usb_hcd *hcd) | |||
516 | reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000); | 520 | reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000); |
517 | reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000); | 521 | reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000); |
518 | reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001); | 522 | reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001); |
523 | |||
524 | reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff); | ||
525 | reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff); | ||
526 | reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff); | ||
527 | |||
528 | reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, | ||
529 | ATL_BUF_FILL | INT_BUF_FILL); | ||
519 | } | 530 | } |
520 | 531 | ||
521 | static void isp1760_enable_interrupts(struct usb_hcd *hcd) | 532 | static void isp1760_enable_interrupts(struct usb_hcd *hcd) |
522 | { | 533 | { |
523 | reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0); | 534 | reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0); |
524 | reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0); | 535 | reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff); |
525 | reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0); | 536 | reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0); |
526 | reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0); | 537 | reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff); |
527 | reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0); | 538 | reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0); |
528 | reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff); | 539 | reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff); |
529 | /* step 23 passed */ | 540 | /* step 23 passed */ |
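After this hunk the skip-map registers start out all-ones, so the controller ignores every PTD slot until a transfer is actually started, and the ATL/INT per-slot IRQ OR masks are opened for all 32 slots up front instead of being toggled per transfer. A hypothetical helper capturing the skip-map convention visible in these hunks (bit set = slot ignored by the controller):

/* hypothetical helper: gate a single PTD slot via its skip map */
static void isp1760_set_slot_skip(void __iomem *regs, u32 skipmap_reg,
				  int slot, int skip)
{
	u32 map = reg_read32(regs, skipmap_reg);

	if (skip)
		map |= 1 << slot;	/* controller skips this PTD */
	else
		map &= ~(1 << slot);	/* controller may execute this PTD */
	reg_write32(regs, skipmap_reg, map);
}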
@@ -548,8 +559,7 @@ static int isp1760_run(struct usb_hcd *hcd) | |||
548 | command |= CMD_RUN; | 559 | command |= CMD_RUN; |
549 | reg_write32(hcd->regs, HC_USBCMD, command); | 560 | reg_write32(hcd->regs, HC_USBCMD, command); |
550 | 561 | ||
551 | retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, | 562 | retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000); |
552 | 250 * 1000); | ||
553 | if (retval) | 563 | if (retval) |
554 | return retval; | 564 | return retval; |
555 | 565 | ||
@@ -598,12 +608,19 @@ static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh) | |||
598 | return (qtd->urb != urb); | 608 | return (qtd->urb != urb); |
599 | } | 609 | } |
600 | 610 | ||
601 | static void transform_into_atl(struct isp1760_qh *qh, | 611 | /* magic numbers that can affect system performance */ |
612 | #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ | ||
613 | #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ | ||
614 | #define EHCI_TUNE_RL_TT 0 | ||
615 | #define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ | ||
616 | #define EHCI_TUNE_MULT_TT 1 | ||
617 | #define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */ | ||
618 | |||
619 | static void create_ptd_atl(struct isp1760_qh *qh, | ||
602 | struct isp1760_qtd *qtd, struct ptd *ptd) | 620 | struct isp1760_qtd *qtd, struct ptd *ptd) |
603 | { | 621 | { |
604 | u32 maxpacket; | 622 | u32 maxpacket; |
605 | u32 multi; | 623 | u32 multi; |
606 | u32 pid_code; | ||
607 | u32 rl = RL_COUNTER; | 624 | u32 rl = RL_COUNTER; |
608 | u32 nak = NAK_COUNTER; | 625 | u32 nak = NAK_COUNTER; |
609 | 626 | ||
@@ -616,67 +633,62 @@ static void transform_into_atl(struct isp1760_qh *qh, | |||
616 | maxpacket &= 0x7ff; | 633 | maxpacket &= 0x7ff; |
617 | 634 | ||
618 | /* DW0 */ | 635 | /* DW0 */ |
619 | ptd->dw0 = PTD_VALID; | 636 | ptd->dw0 = DW0_VALID_BIT; |
620 | ptd->dw0 |= PTD_LENGTH(qtd->length); | 637 | ptd->dw0 |= TO_DW0_LENGTH(qtd->length); |
621 | ptd->dw0 |= PTD_MAXPACKET(maxpacket); | 638 | ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket); |
622 | ptd->dw0 |= PTD_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe)); | 639 | ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe)); |
623 | 640 | ||
624 | /* DW1 */ | 641 | /* DW1 */ |
625 | ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1; | 642 | ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1; |
626 | ptd->dw1 |= PTD_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe)); | 643 | ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe)); |
627 | 644 | ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type); | |
628 | pid_code = qtd->packet_type; | ||
629 | ptd->dw1 |= PTD_PID_TOKEN(pid_code); | ||
630 | 645 | ||
631 | if (usb_pipebulk(qtd->urb->pipe)) | 646 | if (usb_pipebulk(qtd->urb->pipe)) |
632 | ptd->dw1 |= PTD_TRANS_BULK; | 647 | ptd->dw1 |= DW1_TRANS_BULK; |
633 | else if (usb_pipeint(qtd->urb->pipe)) | 648 | else if (usb_pipeint(qtd->urb->pipe)) |
634 | ptd->dw1 |= PTD_TRANS_INT; | 649 | ptd->dw1 |= DW1_TRANS_INT; |
635 | 650 | ||
636 | if (qtd->urb->dev->speed != USB_SPEED_HIGH) { | 651 | if (qtd->urb->dev->speed != USB_SPEED_HIGH) { |
637 | /* split transaction */ | 652 | /* split transaction */ |
638 | 653 | ||
639 | ptd->dw1 |= PTD_TRANS_SPLIT; | 654 | ptd->dw1 |= DW1_TRANS_SPLIT; |
640 | if (qtd->urb->dev->speed == USB_SPEED_LOW) | 655 | if (qtd->urb->dev->speed == USB_SPEED_LOW) |
641 | ptd->dw1 |= PTD_SE_USB_LOSPEED; | 656 | ptd->dw1 |= DW1_SE_USB_LOSPEED; |
642 | 657 | ||
643 | ptd->dw1 |= PTD_PORT_NUM(qtd->urb->dev->ttport); | 658 | ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport); |
644 | ptd->dw1 |= PTD_HUB_NUM(qtd->urb->dev->tt->hub->devnum); | 659 | ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum); |
645 | 660 | ||
646 | /* SE bit for Split INT transfers */ | 661 | /* SE bit for Split INT transfers */ |
647 | if (usb_pipeint(qtd->urb->pipe) && | 662 | if (usb_pipeint(qtd->urb->pipe) && |
648 | (qtd->urb->dev->speed == USB_SPEED_LOW)) | 663 | (qtd->urb->dev->speed == USB_SPEED_LOW)) |
649 | ptd->dw1 |= 2 << 16; | 664 | ptd->dw1 |= 2 << 16; |
650 | 665 | ||
651 | ptd->dw3 = 0; | ||
652 | rl = 0; | 666 | rl = 0; |
653 | nak = 0; | 667 | nak = 0; |
654 | } else { | 668 | } else { |
655 | ptd->dw0 |= PTD_MULTI(multi); | 669 | ptd->dw0 |= TO_DW0_MULTI(multi); |
656 | if (usb_pipecontrol(qtd->urb->pipe) || | 670 | if (usb_pipecontrol(qtd->urb->pipe) || |
657 | usb_pipebulk(qtd->urb->pipe)) | 671 | usb_pipebulk(qtd->urb->pipe)) |
658 | ptd->dw3 = qh->ping; | 672 | ptd->dw3 |= TO_DW3_PING(qh->ping); |
659 | else | ||
660 | ptd->dw3 = 0; | ||
661 | } | 673 | } |
662 | /* DW2 */ | 674 | /* DW2 */ |
663 | ptd->dw2 = 0; | 675 | ptd->dw2 = 0; |
664 | ptd->dw2 |= PTD_DATA_START_ADDR(base_to_chip(qtd->payload_addr)); | 676 | ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr)); |
665 | ptd->dw2 |= PTD_RL_CNT(rl); | 677 | ptd->dw2 |= TO_DW2_RL(rl); |
666 | ptd->dw3 |= PTD_NAC_CNT(nak); | ||
667 | 678 | ||
668 | /* DW3 */ | 679 | /* DW3 */ |
669 | ptd->dw3 |= qh->toggle; | 680 | ptd->dw3 |= TO_DW3_NAKCOUNT(nak); |
681 | ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle); | ||
670 | if (usb_pipecontrol(qtd->urb->pipe)) { | 682 | if (usb_pipecontrol(qtd->urb->pipe)) { |
671 | if (qtd->data_buffer == qtd->urb->setup_packet) | 683 | if (qtd->data_buffer == qtd->urb->setup_packet) |
672 | ptd->dw3 &= ~PTD_DATA_TOGGLE(1); | 684 | ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1); |
673 | else if (last_qtd_of_urb(qtd, qh)) | 685 | else if (last_qtd_of_urb(qtd, qh)) |
674 | ptd->dw3 |= PTD_DATA_TOGGLE(1); | 686 | ptd->dw3 |= TO_DW3_DATA_TOGGLE(1); |
675 | } | 687 | } |
676 | 688 | ||
677 | ptd->dw3 |= PTD_ACTIVE; | 689 | ptd->dw3 |= DW3_ACTIVE_BIT; |
678 | /* Cerr */ | 690 | /* Cerr */ |
679 | ptd->dw3 |= PTD_CERR(ERR_COUNTER); | 691 | ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER); |
680 | } | 692 | } |
681 | 693 | ||
682 | static void transform_add_int(struct isp1760_qh *qh, | 694 | static void transform_add_int(struct isp1760_qh *qh, |
@@ -731,197 +743,13 @@ static void transform_add_int(struct isp1760_qh *qh, | |||
731 | ptd->dw4 = usof; | 743 | ptd->dw4 = usof; |
732 | } | 744 | } |
733 | 745 | ||
734 | static void transform_into_int(struct isp1760_qh *qh, | 746 | static void create_ptd_int(struct isp1760_qh *qh, |
735 | struct isp1760_qtd *qtd, struct ptd *ptd) | 747 | struct isp1760_qtd *qtd, struct ptd *ptd) |
736 | { | 748 | { |
737 | transform_into_atl(qh, qtd, ptd); | 749 | create_ptd_atl(qh, qtd, ptd); |
738 | transform_add_int(qh, qtd, ptd); | 750 | transform_add_int(qh, qtd, ptd); |
739 | } | 751 | } |
740 | 752 | ||
741 | static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len, | ||
742 | u32 token) | ||
743 | { | ||
744 | int count; | ||
745 | |||
746 | qtd->data_buffer = databuffer; | ||
747 | qtd->packet_type = GET_QTD_TOKEN_TYPE(token); | ||
748 | |||
749 | if (len > MAX_PAYLOAD_SIZE) | ||
750 | count = MAX_PAYLOAD_SIZE; | ||
751 | else | ||
752 | count = len; | ||
753 | |||
754 | qtd->length = count; | ||
755 | return count; | ||
756 | } | ||
757 | |||
758 | static int check_error(struct usb_hcd *hcd, struct ptd *ptd) | ||
759 | { | ||
760 | int error = 0; | ||
761 | |||
762 | if (ptd->dw3 & DW3_HALT_BIT) { | ||
763 | error = -EPIPE; | ||
764 | |||
765 | if (ptd->dw3 & DW3_ERROR_BIT) | ||
766 | pr_err("error bit is set in DW3\n"); | ||
767 | } | ||
768 | |||
769 | if (ptd->dw3 & DW3_QTD_ACTIVE) { | ||
770 | dev_err(hcd->self.controller, "Transfer active bit is set DW3\n" | ||
771 | "nak counter: %d, rl: %d\n", | ||
772 | (ptd->dw3 >> 19) & 0xf, (ptd->dw2 >> 25) & 0xf); | ||
773 | } | ||
774 | |||
775 | return error; | ||
776 | } | ||
777 | |||
778 | static void check_int_err_status(struct usb_hcd *hcd, u32 dw4) | ||
779 | { | ||
780 | u32 i; | ||
781 | |||
782 | dw4 >>= 8; | ||
783 | |||
784 | for (i = 0; i < 8; i++) { | ||
785 | switch (dw4 & 0x7) { | ||
786 | case INT_UNDERRUN: | ||
787 | dev_err(hcd->self.controller, "Underrun (%d)\n", i); | ||
788 | break; | ||
789 | |||
790 | case INT_EXACT: | ||
791 | dev_err(hcd->self.controller, | ||
792 | "Transaction error (%d)\n", i); | ||
793 | break; | ||
794 | |||
795 | case INT_BABBLE: | ||
796 | dev_err(hcd->self.controller, "Babble error (%d)\n", i); | ||
797 | break; | ||
798 | } | ||
799 | dw4 >>= 3; | ||
800 | } | ||
801 | } | ||
802 | |||
803 | static void enqueue_one_qtd(struct usb_hcd *hcd, struct isp1760_qtd *qtd) | ||
804 | { | ||
805 | if (qtd->length && (qtd->length <= MAX_PAYLOAD_SIZE)) { | ||
806 | switch (qtd->packet_type) { | ||
807 | case IN_PID: | ||
808 | break; | ||
809 | case OUT_PID: | ||
810 | case SETUP_PID: | ||
811 | mem_writes8(hcd->regs, qtd->payload_addr, | ||
812 | qtd->data_buffer, qtd->length); | ||
813 | } | ||
814 | } | ||
815 | } | ||
816 | |||
817 | static void enqueue_one_atl_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh, | ||
818 | u32 slot, struct isp1760_qtd *qtd) | ||
819 | { | ||
820 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | ||
821 | struct ptd ptd; | ||
822 | |||
823 | alloc_mem(hcd, qtd); | ||
824 | transform_into_atl(qh, qtd, &ptd); | ||
825 | ptd_write(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); | ||
826 | enqueue_one_qtd(hcd, qtd); | ||
827 | |||
828 | priv->atl_ints[slot].qh = qh; | ||
829 | priv->atl_ints[slot].qtd = qtd; | ||
830 | qtd->status |= URB_ENQUEUED; | ||
831 | qtd->status |= slot << 16; | ||
832 | } | ||
833 | |||
834 | static void enqueue_one_int_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh, | ||
835 | u32 slot, struct isp1760_qtd *qtd) | ||
836 | { | ||
837 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | ||
838 | struct ptd ptd; | ||
839 | |||
840 | alloc_mem(hcd, qtd); | ||
841 | transform_into_int(qh, qtd, &ptd); | ||
842 | ptd_write(hcd->regs, INT_PTD_OFFSET, slot, &ptd); | ||
843 | enqueue_one_qtd(hcd, qtd); | ||
844 | |||
845 | priv->int_ints[slot].qh = qh; | ||
846 | priv->int_ints[slot].qtd = qtd; | ||
847 | qtd->status |= URB_ENQUEUED; | ||
848 | qtd->status |= slot << 16; | ||
849 | } | ||
850 | |||
851 | static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, | ||
852 | struct isp1760_qtd *qtd) | ||
853 | { | ||
854 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | ||
855 | u32 skip_map, or_map; | ||
856 | u32 slot; | ||
857 | u32 buffstatus; | ||
858 | |||
859 | /* | ||
860 | * When this function is called from the interrupt handler to enqueue | ||
861 | * a follow-up packet, the SKIP register gets written and read back | ||
862 | * almost immediately. With ISP1761, this register requires a delay of | ||
863 | * 195ns between a write and subsequent read (see section 15.1.1.3). | ||
864 | */ | ||
865 | mmiowb(); | ||
866 | ndelay(195); | ||
867 | skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); | ||
868 | |||
869 | BUG_ON(!skip_map); | ||
870 | slot = __ffs(skip_map); | ||
871 | |||
872 | enqueue_one_atl_qtd(hcd, qh, slot, qtd); | ||
873 | |||
874 | or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG); | ||
875 | or_map |= (1 << slot); | ||
876 | reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map); | ||
877 | |||
878 | skip_map &= ~(1 << slot); | ||
879 | reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map); | ||
880 | |||
881 | priv->atl_queued++; | ||
882 | if (priv->atl_queued == 2) | ||
883 | reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, | ||
884 | INTERRUPT_ENABLE_SOT_MASK); | ||
885 | |||
886 | buffstatus = reg_read32(hcd->regs, HC_BUFFER_STATUS_REG); | ||
887 | buffstatus |= ATL_BUFFER; | ||
888 | reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, buffstatus); | ||
889 | } | ||
890 | |||
891 | static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, | ||
892 | struct isp1760_qtd *qtd) | ||
893 | { | ||
894 | u32 skip_map, or_map; | ||
895 | u32 slot; | ||
896 | u32 buffstatus; | ||
897 | |||
898 | /* | ||
899 | * When this function is called from the interrupt handler to enqueue | ||
900 | * a follow-up packet, the SKIP register gets written and read back | ||
901 | * almost immediately. With ISP1761, this register requires a delay of | ||
902 | * 195ns between a write and subsequent read (see section 15.1.1.3). | ||
903 | */ | ||
904 | mmiowb(); | ||
905 | ndelay(195); | ||
906 | skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG); | ||
907 | |||
908 | BUG_ON(!skip_map); | ||
909 | slot = __ffs(skip_map); | ||
910 | |||
911 | enqueue_one_int_qtd(hcd, qh, slot, qtd); | ||
912 | |||
913 | or_map = reg_read32(hcd->regs, HC_INT_IRQ_MASK_OR_REG); | ||
914 | or_map |= (1 << slot); | ||
915 | reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, or_map); | ||
916 | |||
917 | skip_map &= ~(1 << slot); | ||
918 | reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map); | ||
919 | |||
920 | buffstatus = reg_read32(hcd->regs, HC_BUFFER_STATUS_REG); | ||
921 | buffstatus |= INT_BUFFER; | ||
922 | reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, buffstatus); | ||
923 | } | ||
924 | |||
925 | static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb) | 753 | static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb) |
926 | __releases(priv->lock) | 754 | __releases(priv->lock) |
927 | __acquires(priv->lock) | 755 | __acquires(priv->lock) |
@@ -948,557 +776,654 @@ __acquires(priv->lock) | |||
948 | spin_lock(&priv->lock); | 776 | spin_lock(&priv->lock); |
949 | } | 777 | } |
950 | 778 | ||
951 | static void isp1760_qtd_free(struct isp1760_qtd *qtd) | 779 | static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb, |
780 | u8 packet_type) | ||
952 | { | 781 | { |
953 | BUG_ON(qtd->payload_addr); | 782 | struct isp1760_qtd *qtd; |
954 | kmem_cache_free(qtd_cachep, qtd); | 783 | |
784 | qtd = kmem_cache_zalloc(qtd_cachep, flags); | ||
785 | if (!qtd) | ||
786 | return NULL; | ||
787 | |||
788 | INIT_LIST_HEAD(&qtd->qtd_list); | ||
789 | qtd->urb = urb; | ||
790 | qtd->packet_type = packet_type; | ||
791 | qtd->status = QTD_ENQUEUED; | ||
792 | qtd->actual_length = 0; | ||
793 | |||
794 | return qtd; | ||
955 | } | 795 | } |
956 | 796 | ||
957 | static struct isp1760_qtd *clean_this_qtd(struct isp1760_qtd *qtd, | 797 | static void qtd_free(struct isp1760_qtd *qtd) |
958 | struct isp1760_qh *qh) | ||
959 | { | 798 | { |
960 | struct isp1760_qtd *tmp_qtd; | 799 | WARN_ON(qtd->payload_addr); |
961 | 800 | kmem_cache_free(qtd_cachep, qtd); | |
962 | if (list_is_last(&qtd->qtd_list, &qh->qtd_list)) | ||
963 | tmp_qtd = NULL; | ||
964 | else | ||
965 | tmp_qtd = list_entry(qtd->qtd_list.next, struct isp1760_qtd, | ||
966 | qtd_list); | ||
967 | list_del(&qtd->qtd_list); | ||
968 | isp1760_qtd_free(qtd); | ||
969 | return tmp_qtd; | ||
970 | } | 801 | } |
971 | 802 | ||
972 | /* | 803 | static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot, |
973 | * Remove this QTD from the QH list and free its memory. If this QTD | 804 | struct slotinfo *slots, struct isp1760_qtd *qtd, |
974 | * isn't the last one than remove also his successor(s). | 805 | struct isp1760_qh *qh, struct ptd *ptd) |
975 | * Returns the QTD which is part of an new URB and should be enqueued. | ||
976 | */ | ||
977 | static struct isp1760_qtd *clean_up_qtdlist(struct isp1760_qtd *qtd, | ||
978 | struct isp1760_qh *qh) | ||
979 | { | 806 | { |
980 | struct urb *urb; | 807 | struct isp1760_hcd *priv = hcd_to_priv(hcd); |
808 | int skip_map; | ||
809 | |||
810 | WARN_ON((slot < 0) || (slot > 31)); | ||
811 | WARN_ON(qtd->length && !qtd->payload_addr); | ||
812 | WARN_ON(slots[slot].qtd); | ||
813 | WARN_ON(slots[slot].qh); | ||
814 | WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC); | ||
815 | |||
816 | slots[slot].qtd = qtd; | ||
817 | slots[slot].qh = qh; | ||
818 | qh->slot = slot; | ||
819 | qtd->status = QTD_XFER_STARTED; /* Set this before writing ptd, since | ||
820 | interrupt routine may preempt and expects this value. */ | ||
821 | ptd_write(hcd->regs, ptd_offset, slot, ptd); | ||
822 | priv->active_ptds++; | ||
823 | |||
824 | /* Make sure done map has not triggered from some unlinked transfer */ | ||
825 | if (ptd_offset == ATL_PTD_OFFSET) { | ||
826 | priv->atl_done_map |= reg_read32(hcd->regs, | ||
827 | HC_ATL_PTD_DONEMAP_REG); | ||
828 | priv->atl_done_map &= ~(1 << qh->slot); | ||
981 | 829 | ||
982 | urb = qtd->urb; | 830 | skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); |
983 | do { | 831 | skip_map &= ~(1 << qh->slot); |
984 | qtd = clean_this_qtd(qtd, qh); | 832 | reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map); |
985 | } while (qtd && (qtd->urb == urb)); | 833 | } else { |
834 | priv->int_done_map |= reg_read32(hcd->regs, | ||
835 | HC_INT_PTD_DONEMAP_REG); | ||
836 | priv->int_done_map &= ~(1 << qh->slot); | ||
986 | 837 | ||
987 | return qtd; | 838 | skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG); |
839 | skip_map &= ~(1 << qh->slot); | ||
840 | reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map); | ||
841 | } | ||
988 | } | 842 | } |
989 | 843 | ||
990 | static void do_atl_int(struct usb_hcd *hcd) | 844 | static int is_short_bulk(struct isp1760_qtd *qtd) |
991 | { | 845 | { |
992 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | 846 | return (usb_pipebulk(qtd->urb->pipe) && |
993 | u32 done_map, skip_map; | 847 | (qtd->actual_length < qtd->length)); |
994 | struct ptd ptd; | 848 | } |
995 | struct urb *urb; | ||
996 | u32 slot; | ||
997 | u32 length; | ||
998 | u32 or_map; | ||
999 | u32 status = -EINVAL; | ||
1000 | int error; | ||
1001 | struct isp1760_qtd *qtd; | ||
1002 | struct isp1760_qh *qh; | ||
1003 | u32 rl; | ||
1004 | u32 nakcount; | ||
1005 | 849 | ||
1006 | done_map = reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG); | 850 | static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh, |
1007 | skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); | 851 | struct list_head *urb_list) |
852 | { | ||
853 | int last_qtd; | ||
854 | struct isp1760_qtd *qtd, *qtd_next; | ||
855 | struct urb_listitem *urb_listitem; | ||
1008 | 856 | ||
1009 | or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG); | 857 | list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) { |
1010 | or_map &= ~done_map; | 858 | if (qtd->status < QTD_XFER_COMPLETE) |
1011 | reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map); | 859 | break; |
1012 | 860 | ||
1013 | while (done_map) { | 861 | if (list_is_last(&qtd->qtd_list, &qh->qtd_list)) |
1014 | status = 0; | 862 | last_qtd = 1; |
1015 | priv->atl_queued--; | 863 | else |
864 | last_qtd = qtd->urb != qtd_next->urb; | ||
865 | |||
866 | if ((!last_qtd) && (qtd->status == QTD_RETIRE)) | ||
867 | qtd_next->status = QTD_RETIRE; | ||
868 | |||
869 | if (qtd->status == QTD_XFER_COMPLETE) { | ||
870 | if (qtd->actual_length) { | ||
871 | switch (qtd->packet_type) { | ||
872 | case IN_PID: | ||
873 | mem_reads8(hcd->regs, qtd->payload_addr, | ||
874 | qtd->data_buffer, | ||
875 | qtd->actual_length); | ||
876 | /* Fall through (?) */ | ||
877 | case OUT_PID: | ||
878 | qtd->urb->actual_length += | ||
879 | qtd->actual_length; | ||
880 | /* Fall through ... */ | ||
881 | case SETUP_PID: | ||
882 | break; | ||
883 | } | ||
884 | } | ||
1016 | 885 | ||
1017 | slot = __ffs(done_map); | 886 | if (is_short_bulk(qtd)) { |
1018 | done_map &= ~(1 << slot); | 887 | if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK) |
1019 | skip_map |= (1 << slot); | 888 | qtd->urb->status = -EREMOTEIO; |
889 | if (!last_qtd) | ||
890 | qtd_next->status = QTD_RETIRE; | ||
891 | } | ||
892 | } | ||
1020 | 893 | ||
1021 | qtd = priv->atl_ints[slot].qtd; | 894 | if (qtd->payload_addr) |
1022 | qh = priv->atl_ints[slot].qh; | 895 | free_mem(hcd, qtd); |
1023 | 896 | ||
1024 | if (!qh) { | 897 | if (last_qtd) { |
1025 | dev_err(hcd->self.controller, "qh is 0\n"); | 898 | if ((qtd->status == QTD_RETIRE) && |
1026 | continue; | 899 | (qtd->urb->status == -EINPROGRESS)) |
900 | qtd->urb->status = -EPIPE; | ||
901 | /* Defer calling of urb_done() since it releases lock */ | ||
902 | urb_listitem = kmem_cache_zalloc(urb_listitem_cachep, | ||
903 | GFP_ATOMIC); | ||
904 | if (unlikely(!urb_listitem)) | ||
905 | break; | ||
906 | urb_listitem->urb = qtd->urb; | ||
907 | list_add_tail(&urb_listitem->urb_list, urb_list); | ||
1027 | } | 908 | } |
1028 | ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); | ||
1029 | 909 | ||
1030 | rl = (ptd.dw2 >> 25) & 0x0f; | 910 | list_del(&qtd->qtd_list); |
1031 | nakcount = (ptd.dw3 >> 19) & 0xf; | 911 | qtd_free(qtd); |
1032 | 912 | } | |
1033 | /* Transfer Error, *but* active and no HALT -> reload */ | 913 | } |
1034 | if ((ptd.dw3 & DW3_ERROR_BIT) && (ptd.dw3 & DW3_QTD_ACTIVE) && | ||
1035 | !(ptd.dw3 & DW3_HALT_BIT)) { | ||
1036 | |||
1037 | /* according to ppriv code, we have to | ||
1038 | * reload this one if trasfered bytes != requested bytes | ||
1039 | * else act like everything went smooth.. | ||
1040 | * XXX This just doesn't feel right and hasn't | ||
1041 | * triggered so far. | ||
1042 | */ | ||
1043 | 914 | ||
1044 | length = PTD_XFERRED_LENGTH(ptd.dw3); | 915 | #define ENQUEUE_DEPTH 2 |
1045 | dev_err(hcd->self.controller, | 916 | static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh) |
1046 | "Should reload now... transferred %d " | 917 | { |
1047 | "of %zu\n", length, qtd->length); | 918 | struct isp1760_hcd *priv = hcd_to_priv(hcd); |
1048 | BUG(); | 919 | int ptd_offset; |
1049 | } | 920 | struct slotinfo *slots; |
921 | int curr_slot, free_slot; | ||
922 | int n; | ||
923 | struct ptd ptd; | ||
924 | struct isp1760_qtd *qtd; | ||
1050 | 925 | ||
1051 | if (!nakcount && (ptd.dw3 & DW3_QTD_ACTIVE)) { | 926 | if (unlikely(list_empty(&qh->qtd_list))) { |
1052 | u32 buffstatus; | 927 | WARN_ON(1); |
928 | return; | ||
929 | } | ||
1053 | 930 | ||
1054 | /* | 931 | if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd, |
1055 | * NAKs are handled in HW by the chip. Usually if the | 932 | qtd_list)->urb->pipe)) { |
1056 | * device is not able to send data fast enough. | 933 | ptd_offset = INT_PTD_OFFSET; |
1057 | * This happens mostly on slower hardware. | 934 | slots = priv->int_slots; |
1058 | */ | 935 | } else { |
936 | ptd_offset = ATL_PTD_OFFSET; | ||
937 | slots = priv->atl_slots; | ||
938 | } | ||
1059 | 939 | ||
1060 | /* RL counter = ERR counter */ | 940 | free_slot = -1; |
1061 | ptd.dw3 &= ~(0xf << 19); | 941 | for (curr_slot = 0; curr_slot < 32; curr_slot++) { |
1062 | ptd.dw3 |= rl << 19; | 942 | if ((free_slot == -1) && (slots[curr_slot].qtd == NULL)) |
1063 | ptd.dw3 &= ~(3 << (55 - 32)); | 943 | free_slot = curr_slot; |
1064 | ptd.dw3 |= ERR_COUNTER << (55 - 32); | 944 | if (slots[curr_slot].qh == qh) |
1065 | 945 | break; | |
1066 | /* | 946 | } |
1067 | * It is not needed to write skip map back because it | ||
1068 | * is unchanged. Just make sure that this entry is | ||
1069 | * unskipped once it gets written to the HW. | ||
1070 | */ | ||
1071 | skip_map &= ~(1 << slot); | ||
1072 | or_map = reg_read32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG); | ||
1073 | or_map |= 1 << slot; | ||
1074 | reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, or_map); | ||
1075 | 947 | ||
1076 | ptd.dw0 |= PTD_VALID; | 948 | n = 0; |
1077 | ptd_write(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); | 949 | list_for_each_entry(qtd, &qh->qtd_list, qtd_list) { |
950 | if (qtd->status == QTD_ENQUEUED) { | ||
951 | WARN_ON(qtd->payload_addr); | ||
952 | alloc_mem(hcd, qtd); | ||
953 | if ((qtd->length) && (!qtd->payload_addr)) | ||
954 | break; | ||
1078 | 955 | ||
1079 | priv->atl_queued++; | 956 | if ((qtd->length) && |
1080 | if (priv->atl_queued == 2) | 957 | ((qtd->packet_type == SETUP_PID) || |
1081 | reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, | 958 | (qtd->packet_type == OUT_PID))) { |
1082 | INTERRUPT_ENABLE_SOT_MASK); | 959 | mem_writes8(hcd->regs, qtd->payload_addr, |
960 | qtd->data_buffer, qtd->length); | ||
961 | } | ||
1083 | 962 | ||
1084 | buffstatus = reg_read32(hcd->regs, | 963 | qtd->status = QTD_PAYLOAD_ALLOC; |
1085 | HC_BUFFER_STATUS_REG); | ||
1086 | buffstatus |= ATL_BUFFER; | ||
1087 | reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, | ||
1088 | buffstatus); | ||
1089 | continue; | ||
1090 | } | 964 | } |
1091 | 965 | ||
1092 | error = check_error(hcd, &ptd); | 966 | if (qtd->status == QTD_PAYLOAD_ALLOC) { |
1093 | if (error) { | 967 | /* |
1094 | status = error; | 968 | if ((curr_slot > 31) && (free_slot == -1)) |
1095 | priv->atl_ints[slot].qh->toggle = 0; | 969 | dev_dbg(hcd->self.controller, "%s: No slot " |
1096 | priv->atl_ints[slot].qh->ping = 0; | 970 | "available for transfer\n", __func__); |
1097 | qtd->urb->status = -EPIPE; | 971 | */ |
1098 | 972 | /* Start xfer for this endpoint if not already done */ | |
1099 | #if 0 | 973 | if ((curr_slot > 31) && (free_slot > -1)) { |
1100 | printk(KERN_ERR "Error in %s().\n", __func__); | 974 | if (usb_pipeint(qtd->urb->pipe)) |
1101 | printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x " | 975 | create_ptd_int(qh, qtd, &ptd); |
1102 | "dw3: %08x dw4: %08x dw5: %08x dw6: " | 976 | else |
1103 | "%08x dw7: %08x\n", | 977 | create_ptd_atl(qh, qtd, &ptd); |
1104 | ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3, | 978 | |
1105 | ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7); | 979 | start_bus_transfer(hcd, ptd_offset, free_slot, |
1106 | #endif | 980 | slots, qtd, qh, &ptd); |
1107 | } else { | 981 | curr_slot = free_slot; |
1108 | priv->atl_ints[slot].qh->toggle = ptd.dw3 & (1 << 25); | 982 | } |
1109 | priv->atl_ints[slot].qh->ping = ptd.dw3 & (1 << 26); | ||
1110 | } | ||
1111 | 983 | ||
1112 | length = PTD_XFERRED_LENGTH(ptd.dw3); | 984 | n++; |
1113 | if (length) { | 985 | if (n >= ENQUEUE_DEPTH) |
1114 | switch (DW1_GET_PID(ptd.dw1)) { | 986 | break; |
1115 | case IN_PID: | 987 | } |
1116 | mem_reads8(hcd->regs, qtd->payload_addr, | 988 | } |
1117 | qtd->data_buffer, length); | 989 | } |
1118 | 990 | ||
1119 | case OUT_PID: | 991 | void schedule_ptds(struct usb_hcd *hcd) |
992 | { | ||
993 | struct isp1760_hcd *priv; | ||
994 | struct isp1760_qh *qh, *qh_next; | ||
995 | struct list_head *ep_queue; | ||
996 | struct usb_host_endpoint *ep; | ||
997 | LIST_HEAD(urb_list); | ||
998 | struct urb_listitem *urb_listitem, *urb_listitem_next; | ||
999 | |||
1000 | if (!hcd) { | ||
1001 | WARN_ON(1); | ||
1002 | return; | ||
1003 | } | ||
1120 | 1004 | ||
1121 | qtd->urb->actual_length += length; | 1005 | priv = hcd_to_priv(hcd); |
1122 | 1006 | ||
1123 | case SETUP_PID: | 1007 | /* |
1124 | break; | 1008 | * check finished/retired xfers, transfer payloads, call urb_done() |
1009 | */ | ||
1010 | ep_queue = &priv->interruptqhs; | ||
1011 | while (ep_queue) { | ||
1012 | list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) { | ||
1013 | ep = list_entry(qh->qtd_list.next, struct isp1760_qtd, | ||
1014 | qtd_list)->urb->ep; | ||
1015 | collect_qtds(hcd, qh, &urb_list); | ||
1016 | if (list_empty(&qh->qtd_list)) { | ||
1017 | list_del(&qh->qh_list); | ||
1018 | if (ep->hcpriv == NULL) { | ||
1019 | /* Endpoint has been disabled, so we | ||
1020 | can free the associated queue head. */ | ||
1021 | qh_free(qh); | ||
1022 | } | ||
1125 | } | 1023 | } |
1126 | } | 1024 | } |
1127 | 1025 | ||
1128 | priv->atl_ints[slot].qtd = NULL; | 1026 | if (ep_queue == &priv->interruptqhs) |
1129 | priv->atl_ints[slot].qh = NULL; | 1027 | ep_queue = &priv->controlqhs; |
1130 | 1028 | else if (ep_queue == &priv->controlqhs) | |
1131 | free_mem(hcd, qtd); | 1029 | ep_queue = &priv->bulkqhs; |
1030 | else | ||
1031 | ep_queue = NULL; | ||
1032 | } | ||
1132 | 1033 | ||
1133 | reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map); | 1034 | list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list, |
1035 | urb_list) { | ||
1036 | isp1760_urb_done(hcd, urb_listitem->urb); | ||
1037 | kmem_cache_free(urb_listitem_cachep, urb_listitem); | ||
1038 | } | ||
1134 | 1039 | ||
1135 | if (qtd->urb->status == -EPIPE) { | 1040 | /* |
1136 | /* HALT was received */ | 1041 | * Schedule packets for transfer. |
1042 | * | ||
1043 | * According to USB2.0 specification: | ||
1044 | * | ||
1045 | * 1st prio: interrupt xfers, up to 80 % of bandwidth | ||
1046 | * 2nd prio: control xfers | ||
1047 | * 3rd prio: bulk xfers | ||
1048 | * | ||
1049 | * ... but let's use a simpler scheme here (mostly because ISP1761 doc | ||
1050 | * is very unclear on how to prioritize traffic): | ||
1051 | * | ||
1052 | * 1) Enqueue any queued control transfers, as long as payload chip mem | ||
1053 | * and PTD ATL slots are available. | ||
1054 | * 2) Enqueue any queued INT transfers, as long as payload chip mem | ||
1055 | * and PTD INT slots are available. | ||
1056 | * 3) Enqueue any queued bulk transfers, as long as payload chip mem | ||
1057 | * and PTD ATL slots are available. | ||
1058 | * | ||
1059 | * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between | ||
1060 | * conservation of chip mem and performance. | ||
1061 | * | ||
1062 | * I'm sure this scheme could be improved upon! | ||
1063 | */ | ||
1064 | ep_queue = &priv->controlqhs; | ||
1065 | while (ep_queue) { | ||
1066 | list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) | ||
1067 | enqueue_qtds(hcd, qh); | ||
1068 | |||
1069 | if (ep_queue == &priv->controlqhs) | ||
1070 | ep_queue = &priv->interruptqhs; | ||
1071 | else if (ep_queue == &priv->interruptqhs) | ||
1072 | ep_queue = &priv->bulkqhs; | ||
1073 | else | ||
1074 | ep_queue = NULL; | ||
1075 | } | ||
1076 | } | ||
1137 | 1077 | ||
1138 | urb = qtd->urb; | 1078 | #define PTD_STATE_QTD_DONE 1 |
1139 | qtd = clean_up_qtdlist(qtd, qh); | 1079 | #define PTD_STATE_QTD_RELOAD 2 |
1140 | isp1760_urb_done(hcd, urb); | 1080 | #define PTD_STATE_URB_RETIRE 3 |
1141 | 1081 | ||
1142 | } else if (usb_pipebulk(qtd->urb->pipe) && | 1082 | static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd, |
1143 | (length < qtd->length)) { | 1083 | struct urb *urb) |
1144 | /* short BULK received */ | 1084 | { |
1085 | __dw dw4; | ||
1086 | int i; | ||
1145 | 1087 | ||
1146 | if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK) { | 1088 | dw4 = ptd->dw4; |
1147 | qtd->urb->status = -EREMOTEIO; | 1089 | dw4 >>= 8; |
1148 | dev_dbg(hcd->self.controller, | ||
1149 | "short bulk, %d instead %zu " | ||
1150 | "with URB_SHORT_NOT_OK flag.\n", | ||
1151 | length, qtd->length); | ||
1152 | } | ||
1153 | 1090 | ||
1154 | if (qtd->urb->status == -EINPROGRESS) | 1091 | /* FIXME: ISP1761 datasheet does not say what to do with these. Do we |
1155 | qtd->urb->status = 0; | 1092 | need to handle these errors? Is it done in hardware? */ |
1156 | 1093 | ||
1157 | urb = qtd->urb; | 1094 | if (ptd->dw3 & DW3_HALT_BIT) { |
1158 | qtd = clean_up_qtdlist(qtd, qh); | ||
1159 | isp1760_urb_done(hcd, urb); | ||
1160 | 1095 | ||
1161 | } else if (last_qtd_of_urb(qtd, qh)) { | 1096 | urb->status = -EPROTO; /* Default unknown error */ |
1162 | /* that was the last qtd of that URB */ | ||
1163 | 1097 | ||
1164 | if (qtd->urb->status == -EINPROGRESS) | 1098 | for (i = 0; i < 8; i++) { |
1165 | qtd->urb->status = 0; | 1099 | switch (dw4 & 0x7) { |
1100 | case INT_UNDERRUN: | ||
1101 | dev_dbg(hcd->self.controller, "%s: underrun " | ||
1102 | "during uFrame %d\n", | ||
1103 | __func__, i); | ||
1104 | urb->status = -ECOMM; /* Could not write data */ | ||
1105 | break; | ||
1106 | case INT_EXACT: | ||
1107 | dev_dbg(hcd->self.controller, "%s: transaction " | ||
1108 | "error during uFrame %d\n", | ||
1109 | __func__, i); | ||
1110 | urb->status = -EPROTO; /* timeout, bad CRC, PID | ||
1111 | error etc. */ | ||
1112 | break; | ||
1113 | case INT_BABBLE: | ||
1114 | dev_dbg(hcd->self.controller, "%s: babble " | ||
1115 | "error during uFrame %d\n", | ||
1116 | __func__, i); | ||
1117 | urb->status = -EOVERFLOW; | ||
1118 | break; | ||
1119 | } | ||
1120 | dw4 >>= 3; | ||
1121 | } | ||
1166 | 1122 | ||
1167 | urb = qtd->urb; | 1123 | return PTD_STATE_URB_RETIRE; |
1168 | qtd = clean_up_qtdlist(qtd, qh); | 1124 | } |
1169 | isp1760_urb_done(hcd, urb); | ||
1170 | 1125 | ||
1171 | } else { | 1126 | return PTD_STATE_QTD_DONE; |
1172 | /* next QTD of this URB */ | 1127 | } |
1173 | 1128 | ||
1174 | qtd = clean_this_qtd(qtd, qh); | 1129 | static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd, |
1175 | BUG_ON(!qtd); | 1130 | struct urb *urb) |
1176 | } | 1131 | { |
1132 | WARN_ON(!ptd); | ||
1133 | if (ptd->dw3 & DW3_HALT_BIT) { | ||
1134 | if (ptd->dw3 & DW3_BABBLE_BIT) | ||
1135 | urb->status = -EOVERFLOW; | ||
1136 | else if (FROM_DW3_CERR(ptd->dw3)) | ||
1137 | urb->status = -EPIPE; /* Stall */ | ||
1138 | else if (ptd->dw3 & DW3_ERROR_BIT) | ||
1139 | urb->status = -EPROTO; /* XactErr */ | ||
1140 | else | ||
1141 | urb->status = -EPROTO; /* Unknown */ | ||
1142 | /* | ||
1143 | dev_dbg(hcd->self.controller, "%s: ptd error:\n" | ||
1144 | " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n" | ||
1145 | " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n", | ||
1146 | __func__, | ||
1147 | ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3, | ||
1148 | ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7); | ||
1149 | */ | ||
1150 | return PTD_STATE_URB_RETIRE; | ||
1151 | } | ||
1177 | 1152 | ||
1178 | if (qtd) | 1153 | if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) { |
1179 | enqueue_an_ATL_packet(hcd, qh, qtd); | 1154 | /* Transfer Error, *but* active and no HALT -> reload */ |
1155 | dev_dbg(hcd->self.controller, "PID error; reloading ptd\n"); | ||
1156 | return PTD_STATE_QTD_RELOAD; | ||
1157 | } | ||
1180 | 1158 | ||
1181 | skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); | 1159 | if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) { |
1160 | /* | ||
1161 | * NAKs are handled in HW by the chip. Usually if the | ||
1162 | * device is not able to send data fast enough. | ||
1163 | * This happens mostly on slower hardware. | ||
1164 | */ | ||
1165 | return PTD_STATE_QTD_RELOAD; | ||
1182 | } | 1166 | } |
1183 | if (priv->atl_queued <= 1) | 1167 | |
1184 | reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, | 1168 | return PTD_STATE_QTD_DONE; |
1185 | INTERRUPT_ENABLE_MASK); | ||
1186 | } | 1169 | } |
1187 | 1170 | ||
1188 | static void do_intl_int(struct usb_hcd *hcd) | 1171 | static irqreturn_t isp1760_irq(struct usb_hcd *hcd) |
1189 | { | 1172 | { |
1190 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | 1173 | struct isp1760_hcd *priv = hcd_to_priv(hcd); |
1191 | u32 done_map, skip_map; | 1174 | u32 imask; |
1175 | irqreturn_t irqret = IRQ_NONE; | ||
1192 | struct ptd ptd; | 1176 | struct ptd ptd; |
1193 | struct urb *urb; | ||
1194 | u32 length; | ||
1195 | u32 or_map; | ||
1196 | int error; | ||
1197 | u32 slot; | ||
1198 | struct isp1760_qtd *qtd; | ||
1199 | struct isp1760_qh *qh; | 1177 | struct isp1760_qh *qh; |
1178 | int slot; | ||
1179 | int state; | ||
1180 | struct slotinfo *slots; | ||
1181 | u32 ptd_offset; | ||
1182 | struct isp1760_qtd *qtd; | ||
1183 | int modified; | ||
1184 | static int last_active_ptds; | ||
1185 | int int_skip_map, atl_skip_map; | ||
1200 | 1186 | ||
1201 | done_map = reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG); | 1187 | spin_lock(&priv->lock); |
1202 | skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG); | ||
1203 | |||
1204 | or_map = reg_read32(hcd->regs, HC_INT_IRQ_MASK_OR_REG); | ||
1205 | or_map &= ~done_map; | ||
1206 | reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, or_map); | ||
1207 | |||
1208 | while (done_map) { | ||
1209 | slot = __ffs(done_map); | ||
1210 | done_map &= ~(1 << slot); | ||
1211 | skip_map |= (1 << slot); | ||
1212 | |||
1213 | qtd = priv->int_ints[slot].qtd; | ||
1214 | qh = priv->int_ints[slot].qh; | ||
1215 | |||
1216 | if (!qh) { | ||
1217 | dev_err(hcd->self.controller, "(INT) qh is 0\n"); | ||
1218 | continue; | ||
1219 | } | ||
1220 | 1188 | ||
1221 | ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd); | 1189 | if (!(hcd->state & HC_STATE_RUNNING)) |
1222 | check_int_err_status(hcd, ptd.dw4); | 1190 | goto leave; |
1223 | |||
1224 | error = check_error(hcd, &ptd); | ||
1225 | if (error) { | ||
1226 | #if 0 | ||
1227 | printk(KERN_ERR "Error in %s().\n", __func__); | ||
1228 | printk(KERN_ERR "IN dw0: %08x dw1: %08x dw2: %08x " | ||
1229 | "dw3: %08x dw4: %08x dw5: %08x dw6: " | ||
1230 | "%08x dw7: %08x\n", | ||
1231 | ptd.dw0, ptd.dw1, ptd.dw2, ptd.dw3, | ||
1232 | ptd.dw4, ptd.dw5, ptd.dw6, ptd.dw7); | ||
1233 | #endif | ||
1234 | qtd->urb->status = -EPIPE; | ||
1235 | priv->int_ints[slot].qh->toggle = 0; | ||
1236 | priv->int_ints[slot].qh->ping = 0; | ||
1237 | 1191 | ||
1192 | imask = reg_read32(hcd->regs, HC_INTERRUPT_REG); | ||
1193 | if (unlikely(!imask)) | ||
1194 | goto leave; | ||
1195 | reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */ | ||
1196 | |||
1197 | int_skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG); | ||
1198 | atl_skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); | ||
1199 | priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG); | ||
1200 | priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG); | ||
1201 | priv->int_done_map &= ~int_skip_map; | ||
1202 | priv->atl_done_map &= ~atl_skip_map; | ||
1203 | |||
1204 | modified = priv->int_done_map | priv->atl_done_map; | ||
1205 | |||
1206 | while (priv->int_done_map || priv->atl_done_map) { | ||
1207 | if (priv->int_done_map) { | ||
1208 | /* INT ptd */ | ||
1209 | slot = __ffs(priv->int_done_map); | ||
1210 | priv->int_done_map &= ~(1 << slot); | ||
1211 | slots = priv->int_slots; | ||
1212 | /* This should not trigger, and could be removed if | ||
1213 | no one has any problems with it triggering: */ | ||
1214 | if (!slots[slot].qh) { | ||
1215 | WARN_ON(1); | ||
1216 | continue; | ||
1217 | } | ||
1218 | ptd_offset = INT_PTD_OFFSET; | ||
1219 | ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd); | ||
1220 | state = check_int_transfer(hcd, &ptd, | ||
1221 | slots[slot].qtd->urb); | ||
1238 | } else { | 1222 | } else { |
1239 | priv->int_ints[slot].qh->toggle = ptd.dw3 & (1 << 25); | 1223 | /* ATL ptd */ |
1240 | priv->int_ints[slot].qh->ping = ptd.dw3 & (1 << 26); | 1224 | slot = __ffs(priv->atl_done_map); |
1241 | } | 1225 | priv->atl_done_map &= ~(1 << slot); |
1242 | 1226 | slots = priv->atl_slots; | |
1243 | if (qtd->urb->dev->speed != USB_SPEED_HIGH) | 1227 | /* This should not trigger, and could be removed if |
1244 | length = PTD_XFERRED_LENGTH_LO(ptd.dw3); | 1228 | no one has any problems with it triggering: */ |
1245 | else | 1229 | if (!slots[slot].qh) { |
1246 | length = PTD_XFERRED_LENGTH(ptd.dw3); | 1230 | WARN_ON(1); |
1247 | 1231 | continue; | |
1248 | if (length) { | ||
1249 | switch (DW1_GET_PID(ptd.dw1)) { | ||
1250 | case IN_PID: | ||
1251 | mem_reads8(hcd->regs, qtd->payload_addr, | ||
1252 | qtd->data_buffer, length); | ||
1253 | case OUT_PID: | ||
1254 | |||
1255 | qtd->urb->actual_length += length; | ||
1256 | |||
1257 | case SETUP_PID: | ||
1258 | break; | ||
1259 | } | 1232 | } |
1233 | ptd_offset = ATL_PTD_OFFSET; | ||
1234 | ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); | ||
1235 | state = check_atl_transfer(hcd, &ptd, | ||
1236 | slots[slot].qtd->urb); | ||
1260 | } | 1237 | } |
1261 | 1238 | ||
1262 | priv->int_ints[slot].qtd = NULL; | 1239 | qtd = slots[slot].qtd; |
1263 | priv->int_ints[slot].qh = NULL; | 1240 | slots[slot].qtd = NULL; |
1264 | 1241 | qh = slots[slot].qh; | |
1265 | reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map); | 1242 | slots[slot].qh = NULL; |
1266 | free_mem(hcd, qtd); | 1243 | priv->active_ptds--; |
1267 | 1244 | qh->slot = -1; | |
1268 | if (qtd->urb->status == -EPIPE) { | 1245 | |
1269 | /* HALT received */ | 1246 | WARN_ON(qtd->status != QTD_XFER_STARTED); |
1270 | 1247 | ||
1271 | urb = qtd->urb; | 1248 | switch (state) { |
1272 | qtd = clean_up_qtdlist(qtd, qh); | 1249 | case PTD_STATE_QTD_DONE: |
1273 | isp1760_urb_done(hcd, urb); | 1250 | if ((usb_pipeint(qtd->urb->pipe)) && |
1274 | 1251 | (qtd->urb->dev->speed != USB_SPEED_HIGH)) | |
1275 | } else if (last_qtd_of_urb(qtd, qh)) { | 1252 | qtd->actual_length = |
1276 | 1253 | FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3); | |
1277 | if (qtd->urb->status == -EINPROGRESS) | 1254 | else |
1278 | qtd->urb->status = 0; | 1255 | qtd->actual_length = |
1256 | FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3); | ||
1257 | |||
1258 | qtd->status = QTD_XFER_COMPLETE; | ||
1259 | if (list_is_last(&qtd->qtd_list, &qh->qtd_list) || | ||
1260 | is_short_bulk(qtd)) | ||
1261 | qtd = NULL; | ||
1262 | else | ||
1263 | qtd = list_entry(qtd->qtd_list.next, | ||
1264 | typeof(*qtd), qtd_list); | ||
1265 | |||
1266 | qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3); | ||
1267 | qh->ping = FROM_DW3_PING(ptd.dw3); | ||
1268 | break; | ||
1279 | 1269 | ||
1280 | urb = qtd->urb; | 1270 | case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */ |
1281 | qtd = clean_up_qtdlist(qtd, qh); | 1271 | qtd->status = QTD_PAYLOAD_ALLOC; |
1282 | isp1760_urb_done(hcd, urb); | 1272 | ptd.dw0 |= DW0_VALID_BIT; |
1273 | /* RL counter = ERR counter */ | ||
1274 | ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf); | ||
1275 | ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2)); | ||
1276 | ptd.dw3 &= ~TO_DW3_CERR(3); | ||
1277 | ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER); | ||
1278 | qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3); | ||
1279 | qh->ping = FROM_DW3_PING(ptd.dw3); | ||
1280 | break; | ||
1283 | 1281 | ||
1284 | } else { | 1282 | case PTD_STATE_URB_RETIRE: |
1285 | /* next QTD of this URB */ | 1283 | qtd->status = QTD_RETIRE; |
1284 | qtd = NULL; | ||
1285 | qh->toggle = 0; | ||
1286 | qh->ping = 0; | ||
1287 | break; | ||
1286 | 1288 | ||
1287 | qtd = clean_this_qtd(qtd, qh); | 1289 | default: |
1288 | BUG_ON(!qtd); | 1290 | WARN_ON(1); |
1291 | continue; | ||
1289 | } | 1292 | } |
1290 | 1293 | ||
1291 | if (qtd) | 1294 | if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) { |
1292 | enqueue_an_INT_packet(hcd, qh, qtd); | 1295 | if (slots == priv->int_slots) { |
1296 | if (state == PTD_STATE_QTD_RELOAD) | ||
1297 | dev_err(hcd->self.controller, | ||
1298 | "%s: PTD_STATE_QTD_RELOAD on " | ||
1299 | "interrupt packet\n", __func__); | ||
1300 | if (state != PTD_STATE_QTD_RELOAD) | ||
1301 | create_ptd_int(qh, qtd, &ptd); | ||
1302 | } else { | ||
1303 | if (state != PTD_STATE_QTD_RELOAD) | ||
1304 | create_ptd_atl(qh, qtd, &ptd); | ||
1305 | } | ||
1293 | 1306 | ||
1294 | skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG); | 1307 | start_bus_transfer(hcd, ptd_offset, slot, slots, qtd, |
1308 | qh, &ptd); | ||
1309 | } | ||
1295 | } | 1310 | } |
1296 | } | ||
1297 | 1311 | ||
1298 | static struct isp1760_qh *qh_make(struct usb_hcd *hcd, struct urb *urb, | 1312 | if (modified) |
1299 | gfp_t flags) | 1313 | schedule_ptds(hcd); |
1300 | { | ||
1301 | struct isp1760_qh *qh; | ||
1302 | int is_input, type; | ||
1303 | 1314 | ||
1304 | qh = isp1760_qh_alloc(flags); | 1315 | /* ISP1760 Errata 2 explains that interrupts may be missed (or not |
1305 | if (!qh) | 1316 | happen?) if two USB devices are running simultaneously. Perhaps |
1306 | return qh; | 1317 | this happens when a PTD is finished during interrupt handling; |
1307 | 1318 | enable SOF interrupts if PTDs are still scheduled when exiting this | |
1308 | /* | 1319 | interrupt handler, just to be safe. */ |
1309 | * init endpoint/device data for this QH | ||
1310 | */ | ||
1311 | is_input = usb_pipein(urb->pipe); | ||
1312 | type = usb_pipetype(urb->pipe); | ||
1313 | 1320 | ||
1314 | if (!usb_pipecontrol(urb->pipe)) | 1321 | if (priv->active_ptds != last_active_ptds) { |
1315 | usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, | 1322 | if (priv->active_ptds > 0) |
1316 | 1); | 1323 | reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, |
1317 | return qh; | 1324 | INTERRUPT_ENABLE_SOT_MASK); |
1318 | } | 1325 | else |
1319 | 1326 | reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, | |
1320 | /* | 1327 | INTERRUPT_ENABLE_MASK); |
1321 | * For control/bulk/interrupt, return QH with these TDs appended. | 1328 | last_active_ptds = priv->active_ptds; |
1322 | * Allocates and initializes the QH if necessary. | ||
1323 | * Returns null if it can't allocate a QH it needs to. | ||
1324 | * If the QH has TDs (urbs) already, that's great. | ||
1325 | */ | ||
1326 | static struct isp1760_qh *qh_append_tds(struct usb_hcd *hcd, | ||
1327 | struct urb *urb, struct list_head *qtd_list, int epnum, | ||
1328 | void **ptr) | ||
1329 | { | ||
1330 | struct isp1760_qh *qh; | ||
1331 | |||
1332 | qh = (struct isp1760_qh *)*ptr; | ||
1333 | if (!qh) { | ||
1334 | /* can't sleep here, we have priv->lock... */ | ||
1335 | qh = qh_make(hcd, urb, GFP_ATOMIC); | ||
1336 | if (!qh) | ||
1337 | return qh; | ||
1338 | *ptr = qh; | ||
1339 | } | 1329 | } |
1340 | 1330 | ||
1341 | list_splice(qtd_list, qh->qtd_list.prev); | 1331 | irqret = IRQ_HANDLED; |
1332 | leave: | ||
1333 | spin_unlock(&priv->lock); | ||
1342 | 1334 | ||
1343 | return qh; | 1335 | return irqret; |
1344 | } | 1336 | } |
1345 | 1337 | ||
1346 | static void qtd_list_free(struct urb *urb, struct list_head *qtd_list) | 1338 | static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len) |
1347 | { | 1339 | { |
1348 | struct list_head *entry, *temp; | 1340 | qtd->data_buffer = databuffer; |
1349 | 1341 | ||
1350 | list_for_each_safe(entry, temp, qtd_list) { | 1342 | if (len > MAX_PAYLOAD_SIZE) |
1351 | struct isp1760_qtd *qtd; | 1343 | len = MAX_PAYLOAD_SIZE; |
1344 | qtd->length = len; | ||
1352 | 1345 | ||
1353 | qtd = list_entry(entry, struct isp1760_qtd, qtd_list); | 1346 | return qtd->length; |
1354 | list_del(&qtd->qtd_list); | ||
1355 | isp1760_qtd_free(qtd); | ||
1356 | } | ||
1357 | } | 1347 | } |
1358 | 1348 | ||
1359 | static int isp1760_prepare_enqueue(struct usb_hcd *hcd, struct urb *urb, | 1349 | static void qtd_list_free(struct list_head *qtd_list) |
1360 | struct list_head *qtd_list, gfp_t mem_flags, packet_enqueue *p) | ||
1361 | { | 1350 | { |
1362 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | 1351 | struct isp1760_qtd *qtd, *qtd_next; |
1363 | struct isp1760_qtd *qtd; | ||
1364 | int epnum; | ||
1365 | unsigned long flags; | ||
1366 | struct isp1760_qh *qh = NULL; | ||
1367 | int rc; | ||
1368 | int qh_busy; | ||
1369 | |||
1370 | qtd = list_entry(qtd_list->next, struct isp1760_qtd, qtd_list); | ||
1371 | epnum = urb->ep->desc.bEndpointAddress; | ||
1372 | |||
1373 | spin_lock_irqsave(&priv->lock, flags); | ||
1374 | if (!HCD_HW_ACCESSIBLE(hcd)) { | ||
1375 | rc = -ESHUTDOWN; | ||
1376 | goto done; | ||
1377 | } | ||
1378 | rc = usb_hcd_link_urb_to_ep(hcd, urb); | ||
1379 | if (rc) | ||
1380 | goto done; | ||
1381 | |||
1382 | qh = urb->ep->hcpriv; | ||
1383 | if (qh) | ||
1384 | qh_busy = !list_empty(&qh->qtd_list); | ||
1385 | else | ||
1386 | qh_busy = 0; | ||
1387 | 1352 | ||
1388 | qh = qh_append_tds(hcd, urb, qtd_list, epnum, &urb->ep->hcpriv); | 1353 | list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) { |
1389 | if (!qh) { | 1354 | list_del(&qtd->qtd_list); |
1390 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 1355 | qtd_free(qtd); |
1391 | rc = -ENOMEM; | ||
1392 | goto done; | ||
1393 | } | 1356 | } |
1394 | |||
1395 | if (!qh_busy) | ||
1396 | p(hcd, qh, qtd); | ||
1397 | |||
1398 | done: | ||
1399 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1400 | if (!qh) | ||
1401 | qtd_list_free(urb, qtd_list); | ||
1402 | return rc; | ||
1403 | } | ||
1404 | |||
1405 | static struct isp1760_qtd *isp1760_qtd_alloc(gfp_t flags) | ||
1406 | { | ||
1407 | struct isp1760_qtd *qtd; | ||
1408 | |||
1409 | qtd = kmem_cache_zalloc(qtd_cachep, flags); | ||
1410 | if (qtd) | ||
1411 | INIT_LIST_HEAD(&qtd->qtd_list); | ||
1412 | |||
1413 | return qtd; | ||
1414 | } | 1357 | } |
1415 | 1358 | ||
1416 | /* | 1359 | /* |
1417 | * create a list of filled qtds for this URB; won't link into qh. | 1360 | * Packetize urb->transfer_buffer into a list of packets of size wMaxPacketSize. |
1361 | * Also calculate the PID type (SETUP/IN/OUT) for each packet. | ||
1418 | */ | 1362 | */ |
1419 | #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) | 1363 | #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) |
1420 | static struct list_head *qh_urb_transaction(struct usb_hcd *hcd, | 1364 | static void packetize_urb(struct usb_hcd *hcd, |
1421 | struct urb *urb, struct list_head *head, gfp_t flags) | 1365 | struct urb *urb, struct list_head *head, gfp_t flags) |
1422 | { | 1366 | { |
1423 | struct isp1760_qtd *qtd; | 1367 | struct isp1760_qtd *qtd; |
1424 | void *buf; | 1368 | void *buf; |
1425 | int len, maxpacket; | 1369 | int len, maxpacketsize; |
1426 | int is_input; | 1370 | u8 packet_type; |
1427 | u32 token; | ||
1428 | 1371 | ||
1429 | /* | 1372 | /* |
1430 | * URBs map to sequences of QTDs: one logical transaction | 1373 | * URBs map to sequences of QTDs: one logical transaction |
1431 | */ | 1374 | */ |
1432 | qtd = isp1760_qtd_alloc(flags); | ||
1433 | if (!qtd) | ||
1434 | return NULL; | ||
1435 | 1375 | ||
1436 | list_add_tail(&qtd->qtd_list, head); | 1376 | if (!urb->transfer_buffer && urb->transfer_buffer_length) { |
1437 | qtd->urb = urb; | 1377 | /* XXX This looks like usb storage / SCSI bug */ |
1438 | urb->status = -EINPROGRESS; | 1378 | dev_err(hcd->self.controller, |
1379 | "buf is null, dma is %08lx len is %d\n", | ||
1380 | (long unsigned)urb->transfer_dma, | ||
1381 | urb->transfer_buffer_length); | ||
1382 | WARN_ON(1); | ||
1383 | } | ||
1439 | 1384 | ||
1440 | token = 0; | 1385 | if (usb_pipein(urb->pipe)) |
1441 | /* for split transactions, SplitXState initialized to zero */ | 1386 | packet_type = IN_PID; |
1387 | else | ||
1388 | packet_type = OUT_PID; | ||
1442 | 1389 | ||
1443 | len = urb->transfer_buffer_length; | ||
1444 | is_input = usb_pipein(urb->pipe); | ||
1445 | if (usb_pipecontrol(urb->pipe)) { | 1390 | if (usb_pipecontrol(urb->pipe)) { |
1446 | /* SETUP pid */ | 1391 | qtd = qtd_alloc(flags, urb, SETUP_PID); |
1447 | qtd_fill(qtd, urb->setup_packet, | ||
1448 | sizeof(struct usb_ctrlrequest), | ||
1449 | token | SETUP_PID); | ||
1450 | |||
1451 | /* ... and always at least one more pid */ | ||
1452 | qtd = isp1760_qtd_alloc(flags); | ||
1453 | if (!qtd) | 1392 | if (!qtd) |
1454 | goto cleanup; | 1393 | goto cleanup; |
1455 | qtd->urb = urb; | 1394 | qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest)); |
1456 | list_add_tail(&qtd->qtd_list, head); | 1395 | list_add_tail(&qtd->qtd_list, head); |
1457 | 1396 | ||
1458 | /* for zero length DATA stages, STATUS is always IN */ | 1397 | /* for zero length DATA stages, STATUS is always IN */ |
1459 | if (len == 0) | 1398 | if (urb->transfer_buffer_length == 0) |
1460 | token |= IN_PID; | 1399 | packet_type = IN_PID; |
1461 | } | 1400 | } |
1462 | 1401 | ||
1463 | /* | 1402 | maxpacketsize = max_packet(usb_maxpacket(urb->dev, urb->pipe, |
1464 | * data transfer stage: buffer setup | 1403 | usb_pipeout(urb->pipe))); |
1465 | */ | ||
1466 | buf = urb->transfer_buffer; | ||
1467 | |||
1468 | if (is_input) | ||
1469 | token |= IN_PID; | ||
1470 | else | ||
1471 | token |= OUT_PID; | ||
1472 | |||
1473 | maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); | ||
1474 | 1404 | ||
1475 | /* | 1405 | /* |
1476 | * buffer gets wrapped in one or more qtds; | 1406 | * buffer gets wrapped in one or more qtds; |
1477 | * last one may be "short" (including zero len) | 1407 | * last one may be "short" (including zero len) |
1478 | * and may serve as a control status ack | 1408 | * and may serve as a control status ack |
1479 | */ | 1409 | */ |
1410 | buf = urb->transfer_buffer; | ||
1411 | len = urb->transfer_buffer_length; | ||
1412 | |||
1480 | for (;;) { | 1413 | for (;;) { |
1481 | int this_qtd_len; | 1414 | int this_qtd_len; |
1482 | 1415 | ||
1483 | if (!buf && len) { | 1416 | qtd = qtd_alloc(flags, urb, packet_type); |
1484 | /* XXX This looks like usb storage / SCSI bug */ | 1417 | if (!qtd) |
1485 | dev_err(hcd->self.controller, "buf is null, dma is %08lx len is %d\n", | 1418 | goto cleanup; |
1486 | (long unsigned)urb->transfer_dma, len); | 1419 | this_qtd_len = qtd_fill(qtd, buf, len); |
1487 | WARN_ON(1); | 1420 | list_add_tail(&qtd->qtd_list, head); |
1488 | } | ||
1489 | 1421 | ||
1490 | this_qtd_len = qtd_fill(qtd, buf, len, token); | ||
1491 | len -= this_qtd_len; | 1422 | len -= this_qtd_len; |
1492 | buf += this_qtd_len; | 1423 | buf += this_qtd_len; |
1493 | 1424 | ||
1494 | if (len <= 0) | 1425 | if (len <= 0) |
1495 | break; | 1426 | break; |
1496 | |||
1497 | qtd = isp1760_qtd_alloc(flags); | ||
1498 | if (!qtd) | ||
1499 | goto cleanup; | ||
1500 | qtd->urb = urb; | ||
1501 | list_add_tail(&qtd->qtd_list, head); | ||
1502 | } | 1427 | } |
1503 | 1428 | ||
1504 | /* | 1429 | /* |
@@ -1510,184 +1435,204 @@ static struct list_head *qh_urb_transaction(struct usb_hcd *hcd, | |||
1510 | 1435 | ||
1511 | if (usb_pipecontrol(urb->pipe)) { | 1436 | if (usb_pipecontrol(urb->pipe)) { |
1512 | one_more = 1; | 1437 | one_more = 1; |
1513 | /* "in" <--> "out" */ | 1438 | if (packet_type == IN_PID) |
1514 | token ^= IN_PID; | 1439 | packet_type = OUT_PID; |
1440 | else | ||
1441 | packet_type = IN_PID; | ||
1515 | } else if (usb_pipebulk(urb->pipe) | 1442 | } else if (usb_pipebulk(urb->pipe) |
1516 | && (urb->transfer_flags & URB_ZERO_PACKET) | 1443 | && (urb->transfer_flags & URB_ZERO_PACKET) |
1517 | && !(urb->transfer_buffer_length % maxpacket)) { | 1444 | && !(urb->transfer_buffer_length % |
1445 | maxpacketsize)) { | ||
1518 | one_more = 1; | 1446 | one_more = 1; |
1519 | } | 1447 | } |
1520 | if (one_more) { | 1448 | if (one_more) { |
1521 | qtd = isp1760_qtd_alloc(flags); | 1449 | qtd = qtd_alloc(flags, urb, packet_type); |
1522 | if (!qtd) | 1450 | if (!qtd) |
1523 | goto cleanup; | 1451 | goto cleanup; |
1524 | qtd->urb = urb; | ||
1525 | list_add_tail(&qtd->qtd_list, head); | ||
1526 | 1452 | ||
1527 | /* never any data in such packets */ | 1453 | /* never any data in such packets */ |
1528 | qtd_fill(qtd, NULL, 0, token); | 1454 | qtd_fill(qtd, NULL, 0); |
1455 | list_add_tail(&qtd->qtd_list, head); | ||
1529 | } | 1456 | } |
1530 | } | 1457 | } |
1531 | 1458 | ||
1532 | qtd->status = 0; | 1459 | return; |
1533 | return head; | ||
1534 | 1460 | ||
1535 | cleanup: | 1461 | cleanup: |
1536 | qtd_list_free(urb, head); | 1462 | qtd_list_free(head); |
1537 | return NULL; | ||
1538 | } | 1463 | } |
1539 | 1464 | ||
1540 | static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | 1465 | static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, |
1541 | gfp_t mem_flags) | 1466 | gfp_t mem_flags) |
1542 | { | 1467 | { |
1543 | struct list_head qtd_list; | 1468 | struct isp1760_hcd *priv = hcd_to_priv(hcd); |
1544 | packet_enqueue *pe; | 1469 | struct list_head *ep_queue; |
1545 | 1470 | struct isp1760_qh *qh, *qhit; | |
1546 | INIT_LIST_HEAD(&qtd_list); | 1471 | unsigned long spinflags; |
1472 | LIST_HEAD(new_qtds); | ||
1473 | int retval; | ||
1474 | int qh_in_queue; | ||
1547 | 1475 | ||
1548 | switch (usb_pipetype(urb->pipe)) { | 1476 | switch (usb_pipetype(urb->pipe)) { |
1549 | case PIPE_CONTROL: | 1477 | case PIPE_CONTROL: |
1478 | ep_queue = &priv->controlqhs; | ||
1479 | break; | ||
1550 | case PIPE_BULK: | 1480 | case PIPE_BULK: |
1551 | if (!qh_urb_transaction(hcd, urb, &qtd_list, mem_flags)) | 1481 | ep_queue = &priv->bulkqhs; |
1552 | return -ENOMEM; | ||
1553 | pe = enqueue_an_ATL_packet; | ||
1554 | break; | 1482 | break; |
1555 | |||
1556 | case PIPE_INTERRUPT: | 1483 | case PIPE_INTERRUPT: |
1557 | if (!qh_urb_transaction(hcd, urb, &qtd_list, mem_flags)) | 1484 | if (urb->interval < 0) |
1558 | return -ENOMEM; | 1485 | return -EINVAL; |
1559 | pe = enqueue_an_INT_packet; | 1486 | /* FIXME: Check bandwidth */ |
1487 | ep_queue = &priv->interruptqhs; | ||
1560 | break; | 1488 | break; |
1561 | |||
1562 | case PIPE_ISOCHRONOUS: | 1489 | case PIPE_ISOCHRONOUS: |
1563 | dev_err(hcd->self.controller, "PIPE_ISOCHRONOUS ain't supported\n"); | 1490 | dev_err(hcd->self.controller, "%s: isochronous USB packets " |
1491 | "not yet supported\n", | ||
1492 | __func__); | ||
1493 | return -EPIPE; | ||
1564 | default: | 1494 | default: |
1495 | dev_err(hcd->self.controller, "%s: unknown pipe type\n", | ||
1496 | __func__); | ||
1565 | return -EPIPE; | 1497 | return -EPIPE; |
1566 | } | 1498 | } |
1567 | 1499 | ||
1568 | return isp1760_prepare_enqueue(hcd, urb, &qtd_list, mem_flags, pe); | 1500 | if (usb_pipein(urb->pipe)) |
1569 | } | 1501 | urb->actual_length = 0; |
1570 | |||
1571 | static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | ||
1572 | { | ||
1573 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | ||
1574 | struct inter_packet_info *ints; | ||
1575 | u32 i; | ||
1576 | u32 reg_base, or_reg, skip_reg; | ||
1577 | unsigned long flags; | ||
1578 | struct ptd ptd; | ||
1579 | packet_enqueue *pe; | ||
1580 | 1502 | ||
1581 | switch (usb_pipetype(urb->pipe)) { | 1503 | packetize_urb(hcd, urb, &new_qtds, mem_flags); |
1582 | case PIPE_ISOCHRONOUS: | 1504 | if (list_empty(&new_qtds)) |
1583 | return -EPIPE; | 1505 | return -ENOMEM; |
1584 | break; | 1506 | urb->hcpriv = NULL; /* Used to signal unlink to interrupt handler */ |
1585 | 1507 | ||
1586 | case PIPE_INTERRUPT: | 1508 | retval = 0; |
1587 | ints = priv->int_ints; | 1509 | spin_lock_irqsave(&priv->lock, spinflags); |
1588 | reg_base = INT_PTD_OFFSET; | ||
1589 | or_reg = HC_INT_IRQ_MASK_OR_REG; | ||
1590 | skip_reg = HC_INT_PTD_SKIPMAP_REG; | ||
1591 | pe = enqueue_an_INT_packet; | ||
1592 | break; | ||
1593 | 1510 | ||
1594 | default: | 1511 | if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) { |
1595 | ints = priv->atl_ints; | 1512 | retval = -ESHUTDOWN; |
1596 | reg_base = ATL_PTD_OFFSET; | 1513 | goto out; |
1597 | or_reg = HC_ATL_IRQ_MASK_OR_REG; | ||
1598 | skip_reg = HC_ATL_PTD_SKIPMAP_REG; | ||
1599 | pe = enqueue_an_ATL_packet; | ||
1600 | break; | ||
1601 | } | 1514 | } |
1515 | retval = usb_hcd_link_urb_to_ep(hcd, urb); | ||
1516 | if (retval) | ||
1517 | goto out; | ||
1602 | 1518 | ||
1603 | memset(&ptd, 0, sizeof(ptd)); | 1519 | qh = urb->ep->hcpriv; |
1604 | spin_lock_irqsave(&priv->lock, flags); | 1520 | if (qh) { |
1605 | 1521 | qh_in_queue = 0; | |
1606 | for (i = 0; i < 32; i++) { | 1522 | list_for_each_entry(qhit, ep_queue, qh_list) { |
1607 | if (!ints[i].qh) | 1523 | if (qhit == qh) { |
1608 | continue; | 1524 | qh_in_queue = 1; |
1609 | BUG_ON(!ints[i].qtd); | 1525 | break; |
1526 | } | ||
1527 | } | ||
1528 | if (!qh_in_queue) | ||
1529 | list_add_tail(&qh->qh_list, ep_queue); | ||
1530 | } else { | ||
1531 | qh = qh_alloc(GFP_ATOMIC); | ||
1532 | if (!qh) { | ||
1533 | retval = -ENOMEM; | ||
1534 | goto out; | ||
1535 | } | ||
1536 | list_add_tail(&qh->qh_list, ep_queue); | ||
1537 | urb->ep->hcpriv = qh; | ||
1538 | } | ||
1610 | 1539 | ||
1611 | if (ints[i].qtd->urb == urb) { | 1540 | list_splice_tail(&new_qtds, &qh->qtd_list); |
1612 | u32 skip_map; | 1541 | schedule_ptds(hcd); |
1613 | u32 or_map; | ||
1614 | struct isp1760_qtd *qtd; | ||
1615 | struct isp1760_qh *qh; | ||
1616 | 1542 | ||
1617 | skip_map = reg_read32(hcd->regs, skip_reg); | 1543 | out: |
1618 | skip_map |= 1 << i; | 1544 | spin_unlock_irqrestore(&priv->lock, spinflags); |
1619 | reg_write32(hcd->regs, skip_reg, skip_map); | 1545 | return retval; |
1546 | } | ||
1620 | 1547 | ||
1621 | or_map = reg_read32(hcd->regs, or_reg); | 1548 | static void kill_transfer(struct usb_hcd *hcd, struct urb *urb, |
1622 | or_map &= ~(1 << i); | 1549 | struct isp1760_qh *qh) |
1623 | reg_write32(hcd->regs, or_reg, or_map); | 1550 | { |
1551 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | ||
1552 | int skip_map; | ||
1624 | 1553 | ||
1625 | ptd_write(hcd->regs, reg_base, i, &ptd); | 1554 | WARN_ON(qh->slot == -1); |
1626 | 1555 | ||
1627 | qtd = ints[i].qtd; | 1556 | /* We need to forcefully reclaim the slot since some transfers never |
1628 | qh = ints[i].qh; | 1557 | return, e.g. interrupt transfers and NAKed bulk transfers. */ |
1558 | if (usb_pipebulk(urb->pipe)) { | ||
1559 | skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); | ||
1560 | skip_map |= (1 << qh->slot); | ||
1561 | reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map); | ||
1562 | priv->atl_slots[qh->slot].qh = NULL; | ||
1563 | priv->atl_slots[qh->slot].qtd = NULL; | ||
1564 | } else { | ||
1565 | skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG); | ||
1566 | skip_map |= (1 << qh->slot); | ||
1567 | reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map); | ||
1568 | priv->int_slots[qh->slot].qh = NULL; | ||
1569 | priv->int_slots[qh->slot].qtd = NULL; | ||
1570 | } | ||
1629 | 1571 | ||
1630 | free_mem(hcd, qtd); | 1572 | qh->slot = -1; |
1631 | qtd = clean_up_qtdlist(qtd, qh); | 1573 | priv->active_ptds--; |
1574 | } | ||
1632 | 1575 | ||
1633 | ints[i].qh = NULL; | 1576 | static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, |
1634 | ints[i].qtd = NULL; | 1577 | int status) |
1578 | { | ||
1579 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | ||
1580 | unsigned long spinflags; | ||
1581 | struct isp1760_qh *qh; | ||
1582 | struct isp1760_qtd *qtd; | ||
1583 | int retval = 0; | ||
1635 | 1584 | ||
1636 | urb->status = status; | 1585 | spin_lock_irqsave(&priv->lock, spinflags); |
1637 | isp1760_urb_done(hcd, urb); | ||
1638 | if (qtd) | ||
1639 | pe(hcd, qh, qtd); | ||
1640 | break; | ||
1641 | 1586 | ||
1642 | } else { | 1587 | qh = urb->ep->hcpriv; |
1643 | struct isp1760_qtd *qtd; | 1588 | if (!qh) { |
1644 | 1589 | retval = -EINVAL; | |
1645 | list_for_each_entry(qtd, &ints[i].qtd->qtd_list, | 1590 | goto out; |
1646 | qtd_list) { | 1591 | } |
1647 | if (qtd->urb == urb) { | ||
1648 | clean_up_qtdlist(qtd, ints[i].qh); | ||
1649 | isp1760_urb_done(hcd, urb); | ||
1650 | qtd = NULL; | ||
1651 | break; | ||
1652 | } | ||
1653 | } | ||
1654 | 1592 | ||
1655 | /* We found the urb before the last slot */ | 1593 | list_for_each_entry(qtd, &qh->qtd_list, qtd_list) |
1656 | if (!qtd) | 1594 | if (qtd->urb == urb) { |
1657 | break; | 1595 | if (qtd->status == QTD_XFER_STARTED) |
1596 | kill_transfer(hcd, urb, qh); | ||
1597 | qtd->status = QTD_RETIRE; | ||
1658 | } | 1598 | } |
1659 | } | ||
1660 | 1599 | ||
1661 | spin_unlock_irqrestore(&priv->lock, flags); | 1600 | urb->status = status; |
1662 | return 0; | 1601 | schedule_ptds(hcd); |
1602 | |||
1603 | out: | ||
1604 | spin_unlock_irqrestore(&priv->lock, spinflags); | ||
1605 | return retval; | ||
1663 | } | 1606 | } |
1664 | 1607 | ||
1665 | static irqreturn_t isp1760_irq(struct usb_hcd *hcd) | 1608 | static void isp1760_endpoint_disable(struct usb_hcd *hcd, |
1609 | struct usb_host_endpoint *ep) | ||
1666 | { | 1610 | { |
1667 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | 1611 | struct isp1760_hcd *priv = hcd_to_priv(hcd); |
1668 | u32 imask; | 1612 | unsigned long spinflags; |
1669 | irqreturn_t irqret = IRQ_NONE; | 1613 | struct isp1760_qh *qh; |
1614 | struct isp1760_qtd *qtd; | ||
1670 | 1615 | ||
1671 | spin_lock(&priv->lock); | 1616 | spin_lock_irqsave(&priv->lock, spinflags); |
1672 | 1617 | ||
1673 | if (!(hcd->state & HC_STATE_RUNNING)) | 1618 | qh = ep->hcpriv; |
1674 | goto leave; | 1619 | if (!qh) |
1620 | goto out; | ||
1675 | 1621 | ||
1676 | imask = reg_read32(hcd->regs, HC_INTERRUPT_REG); | 1622 | list_for_each_entry(qtd, &qh->qtd_list, qtd_list) { |
1677 | if (unlikely(!imask)) | 1623 | if (qtd->status == QTD_XFER_STARTED) |
1678 | goto leave; | 1624 | kill_transfer(hcd, qtd->urb, qh); |
1625 | qtd->status = QTD_RETIRE; | ||
1626 | qtd->urb->status = -ECONNRESET; | ||
1627 | } | ||
1679 | 1628 | ||
1680 | reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); | 1629 | ep->hcpriv = NULL; |
1681 | if (imask & (HC_ATL_INT | HC_SOT_INT)) | 1630 | /* Cannot free qh here since it will be parsed by schedule_ptds() */ |
1682 | do_atl_int(hcd); | ||
1683 | 1631 | ||
1684 | if (imask & HC_INTL_INT) | 1632 | schedule_ptds(hcd); |
1685 | do_intl_int(hcd); | ||
1686 | 1633 | ||
1687 | irqret = IRQ_HANDLED; | 1634 | out: |
1688 | leave: | 1635 | spin_unlock_irqrestore(&priv->lock, spinflags); |
1689 | spin_unlock(&priv->lock); | ||
1690 | return irqret; | ||
1691 | } | 1636 | } |
1692 | 1637 | ||
1693 | static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf) | 1638 | static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf) |
@@ -1778,7 +1723,7 @@ static int check_reset_complete(struct usb_hcd *hcd, int index, | |||
1778 | /* if reset finished and it's still not enabled -- handoff */ | 1723 | /* if reset finished and it's still not enabled -- handoff */ |
1779 | if (!(port_status & PORT_PE)) { | 1724 | if (!(port_status & PORT_PE)) { |
1780 | 1725 | ||
1781 | dev_err(hcd->self.controller, | 1726 | dev_info(hcd->self.controller, |
1782 | "port %d full speed --> companion\n", | 1727 | "port %d full speed --> companion\n", |
1783 | index + 1); | 1728 | index + 1); |
1784 | 1729 | ||
@@ -1787,7 +1732,7 @@ static int check_reset_complete(struct usb_hcd *hcd, int index, | |||
1787 | reg_write32(hcd->regs, HC_PORTSC1, port_status); | 1732 | reg_write32(hcd->regs, HC_PORTSC1, port_status); |
1788 | 1733 | ||
1789 | } else | 1734 | } else |
1790 | dev_err(hcd->self.controller, "port %d high speed\n", | 1735 | dev_info(hcd->self.controller, "port %d high speed\n", |
1791 | index + 1); | 1736 | index + 1); |
1792 | 1737 | ||
1793 | return port_status; | 1738 | return port_status; |
@@ -2059,51 +2004,6 @@ error: | |||
2059 | return retval; | 2004 | return retval; |
2060 | } | 2005 | } |
2061 | 2006 | ||
2062 | static void isp1760_endpoint_disable(struct usb_hcd *hcd, | ||
2063 | struct usb_host_endpoint *ep) | ||
2064 | { | ||
2065 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | ||
2066 | struct isp1760_qh *qh; | ||
2067 | struct isp1760_qtd *qtd; | ||
2068 | unsigned long flags; | ||
2069 | |||
2070 | spin_lock_irqsave(&priv->lock, flags); | ||
2071 | qh = ep->hcpriv; | ||
2072 | if (!qh) | ||
2073 | goto out; | ||
2074 | |||
2075 | ep->hcpriv = NULL; | ||
2076 | do { | ||
2077 | /* more than one entry might get removed */ | ||
2078 | if (list_empty(&qh->qtd_list)) | ||
2079 | break; | ||
2080 | |||
2081 | qtd = list_first_entry(&qh->qtd_list, struct isp1760_qtd, | ||
2082 | qtd_list); | ||
2083 | |||
2084 | if (qtd->status & URB_ENQUEUED) { | ||
2085 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2086 | isp1760_urb_dequeue(hcd, qtd->urb, -ECONNRESET); | ||
2087 | spin_lock_irqsave(&priv->lock, flags); | ||
2088 | } else { | ||
2089 | struct urb *urb; | ||
2090 | |||
2091 | urb = qtd->urb; | ||
2092 | clean_up_qtdlist(qtd, qh); | ||
2093 | urb->status = -ECONNRESET; | ||
2094 | isp1760_urb_done(hcd, urb); | ||
2095 | } | ||
2096 | } while (1); | ||
2097 | |||
2098 | qh_destroy(qh); | ||
2099 | /* remove requests and leak them. | ||
2100 | * ATL are pretty fast done, INT could take a while... | ||
2101 | * The latter should be removed | ||
2102 | */ | ||
2103 | out: | ||
2104 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2105 | } | ||
2106 | |||
2107 | static int isp1760_get_frame(struct usb_hcd *hcd) | 2007 | static int isp1760_get_frame(struct usb_hcd *hcd) |
2108 | { | 2008 | { |
2109 | struct isp1760_hcd *priv = hcd_to_priv(hcd); | 2009 | struct isp1760_hcd *priv = hcd_to_priv(hcd); |
@@ -2165,6 +2065,13 @@ static const struct hc_driver isp1760_hc_driver = { | |||
2165 | 2065 | ||
2166 | int __init init_kmem_once(void) | 2066 | int __init init_kmem_once(void) |
2167 | { | 2067 | { |
2068 | urb_listitem_cachep = kmem_cache_create("isp1760 urb_listitem", | ||
2069 | sizeof(struct urb_listitem), 0, SLAB_TEMPORARY | | ||
2070 | SLAB_MEM_SPREAD, NULL); | ||
2071 | |||
2072 | if (!urb_listitem_cachep) | ||
2073 | return -ENOMEM; | ||
2074 | |||
2168 | qtd_cachep = kmem_cache_create("isp1760_qtd", | 2075 | qtd_cachep = kmem_cache_create("isp1760_qtd", |
2169 | sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY | | 2076 | sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY | |
2170 | SLAB_MEM_SPREAD, NULL); | 2077 | SLAB_MEM_SPREAD, NULL); |
@@ -2187,6 +2094,7 @@ void deinit_kmem_cache(void) | |||
2187 | { | 2094 | { |
2188 | kmem_cache_destroy(qtd_cachep); | 2095 | kmem_cache_destroy(qtd_cachep); |
2189 | kmem_cache_destroy(qh_cachep); | 2096 | kmem_cache_destroy(qh_cachep); |
2097 | kmem_cache_destroy(urb_listitem_cachep); | ||
2190 | } | 2098 | } |
2191 | 2099 | ||
2192 | struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len, | 2100 | struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len, |
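The isp1760-hcd.c rework above folds the old do_atl_int()/do_intl_int() handlers into a single isp1760_irq() path: each completed PTD is classified by check_int_transfer()/check_atl_transfer() as done, reload or retire, and schedule_ptds() then walks the control, interrupt and bulk queue-head lists round-robin, keeping at most ENQUEUE_DEPTH qtds in flight per queue head. A minimal stand-alone sketch of the ATL classification step follows; it mirrors the decision tree of check_atl_transfer() using only the DW3 bit definitions from the header, while the classify_atl_dw3() name, the sample values and the main() harness are illustrative and not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* DW3 bit definitions as in isp1760-hcd.h (DW3_ACTIVE_BIT uses 1u here to
 * keep the shift well-defined in a stand-alone build) */
#define DW3_ERROR_BIT		(1 << 28)
#define DW3_HALT_BIT		(1 << 30)
#define DW3_ACTIVE_BIT		(1u << 31)
#define FROM_DW3_NAKCOUNT(x)	(((x) >> 19) & 0xf)

enum { PTD_STATE_QTD_DONE = 1, PTD_STATE_QTD_RELOAD, PTD_STATE_URB_RETIRE };

/* Same decision tree as check_atl_transfer(), minus the urb bookkeeping */
static int classify_atl_dw3(uint32_t dw3)
{
	if (dw3 & DW3_HALT_BIT)
		return PTD_STATE_URB_RETIRE;	/* stall, babble or fatal error */
	if ((dw3 & DW3_ERROR_BIT) && (dw3 & DW3_ACTIVE_BIT))
		return PTD_STATE_QTD_RELOAD;	/* transfer error but still active */
	if (!FROM_DW3_NAKCOUNT(dw3) && (dw3 & DW3_ACTIVE_BIT))
		return PTD_STATE_QTD_RELOAD;	/* NAK budget used up, retry */
	return PTD_STATE_QTD_DONE;
}

int main(void)
{
	/* illustrative DW3 values only */
	uint32_t samples[] = {
		0,				/* inactive, no error: qtd done */
		DW3_ACTIVE_BIT | DW3_ERROR_BIT,	/* XactErr while active: reload */
		DW3_HALT_BIT,			/* halted: retire the URB */
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("dw3=%08x -> state %d\n", (unsigned int)samples[i],
		       classify_atl_dw3(samples[i]));
	return 0;
}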
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h index 870507690607..014a7dfadf91 100644 --- a/drivers/usb/host/isp1760-hcd.h +++ b/drivers/usb/host/isp1760-hcd.h | |||
@@ -49,10 +49,9 @@ void deinit_kmem_cache(void); | |||
49 | #define SW_RESET_RESET_ALL (1 << 0) | 49 | #define SW_RESET_RESET_ALL (1 << 0) |
50 | 50 | ||
51 | #define HC_BUFFER_STATUS_REG 0x334 | 51 | #define HC_BUFFER_STATUS_REG 0x334 |
52 | #define ATL_BUFFER 0x1 | 52 | #define ISO_BUF_FILL (1 << 2) |
53 | #define INT_BUFFER 0x2 | 53 | #define INT_BUF_FILL (1 << 1) |
54 | #define ISO_BUFFER 0x4 | 54 | #define ATL_BUF_FILL (1 << 0) |
55 | #define BUFFER_MAP 0x7 | ||
56 | 55 | ||
57 | #define HC_MEMORY_REG 0x33c | 56 | #define HC_MEMORY_REG 0x33c |
58 | #define ISP_BANK(x) ((x) << 16) | 57 | #define ISP_BANK(x) ((x) << 16) |
@@ -68,14 +67,13 @@ void deinit_kmem_cache(void); | |||
68 | #define HC_INTERRUPT_REG 0x310 | 67 | #define HC_INTERRUPT_REG 0x310 |
69 | 68 | ||
70 | #define HC_INTERRUPT_ENABLE 0x314 | 69 | #define HC_INTERRUPT_ENABLE 0x314 |
71 | #define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT) | ||
72 | #define INTERRUPT_ENABLE_SOT_MASK (HC_INTL_INT | HC_SOT_INT | HC_EOT_INT) | ||
73 | |||
74 | #define HC_ISO_INT (1 << 9) | 70 | #define HC_ISO_INT (1 << 9) |
75 | #define HC_ATL_INT (1 << 8) | 71 | #define HC_ATL_INT (1 << 8) |
76 | #define HC_INTL_INT (1 << 7) | 72 | #define HC_INTL_INT (1 << 7) |
77 | #define HC_EOT_INT (1 << 3) | 73 | #define HC_EOT_INT (1 << 3) |
78 | #define HC_SOT_INT (1 << 1) | 74 | #define HC_SOT_INT (1 << 1) |
75 | #define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT) | ||
76 | #define INTERRUPT_ENABLE_SOT_MASK (HC_SOT_INT) | ||
79 | 77 | ||
80 | #define HC_ISO_IRQ_MASK_OR_REG 0x318 | 78 | #define HC_ISO_IRQ_MASK_OR_REG 0x318 |
81 | #define HC_INT_IRQ_MASK_OR_REG 0x31C | 79 | #define HC_INT_IRQ_MASK_OR_REG 0x31C |
@@ -106,7 +104,7 @@ struct ptd { | |||
106 | #define ATL_PTD_OFFSET 0x0c00 | 104 | #define ATL_PTD_OFFSET 0x0c00 |
107 | #define PAYLOAD_OFFSET 0x1000 | 105 | #define PAYLOAD_OFFSET 0x1000 |
108 | 106 | ||
109 | struct inter_packet_info { | 107 | struct slotinfo { |
110 | struct isp1760_qh *qh; | 108 | struct isp1760_qh *qh; |
111 | struct isp1760_qtd *qtd; | 109 | struct isp1760_qtd *qtd; |
112 | }; | 110 | }; |
@@ -156,54 +154,52 @@ struct memory_chunk { | |||
156 | 154 | ||
157 | /* ATL */ | 155 | /* ATL */ |
158 | /* DW0 */ | 156 | /* DW0 */ |
159 | #define PTD_VALID 1 | 157 | #define DW0_VALID_BIT 1 |
160 | #define PTD_LENGTH(x) (((u32) x) << 3) | 158 | #define FROM_DW0_VALID(x) ((x) & 0x01) |
161 | #define PTD_MAXPACKET(x) (((u32) x) << 18) | 159 | #define TO_DW0_LENGTH(x) (((u32) x) << 3) |
162 | #define PTD_MULTI(x) (((u32) x) << 29) | 160 | #define TO_DW0_MAXPACKET(x) (((u32) x) << 18) |
163 | #define PTD_ENDPOINT(x) (((u32) x) << 31) | 161 | #define TO_DW0_MULTI(x) (((u32) x) << 29) |
162 | #define TO_DW0_ENDPOINT(x) (((u32) x) << 31) | ||
164 | /* DW1 */ | 163 | /* DW1 */ |
165 | #define PTD_DEVICE_ADDR(x) (((u32) x) << 3) | 164 | #define TO_DW1_DEVICE_ADDR(x) (((u32) x) << 3) |
166 | #define PTD_PID_TOKEN(x) (((u32) x) << 10) | 165 | #define TO_DW1_PID_TOKEN(x) (((u32) x) << 10) |
167 | #define PTD_TRANS_BULK ((u32) 2 << 12) | 166 | #define DW1_TRANS_BULK ((u32) 2 << 12) |
168 | #define PTD_TRANS_INT ((u32) 3 << 12) | 167 | #define DW1_TRANS_INT ((u32) 3 << 12) |
169 | #define PTD_TRANS_SPLIT ((u32) 1 << 14) | 168 | #define DW1_TRANS_SPLIT ((u32) 1 << 14) |
170 | #define PTD_SE_USB_LOSPEED ((u32) 2 << 16) | 169 | #define DW1_SE_USB_LOSPEED ((u32) 2 << 16) |
171 | #define PTD_PORT_NUM(x) (((u32) x) << 18) | 170 | #define TO_DW1_PORT_NUM(x) (((u32) x) << 18) |
172 | #define PTD_HUB_NUM(x) (((u32) x) << 25) | 171 | #define TO_DW1_HUB_NUM(x) (((u32) x) << 25) |
173 | #define PTD_PING(x) (((u32) x) << 26) | ||
174 | /* DW2 */ | 172 | /* DW2 */ |
175 | #define PTD_RL_CNT(x) (((u32) x) << 25) | 173 | #define TO_DW2_DATA_START_ADDR(x) (((u32) x) << 8) |
176 | #define PTD_DATA_START_ADDR(x) (((u32) x) << 8) | 174 | #define TO_DW2_RL(x) ((x) << 25) |
177 | #define BASE_ADDR 0x1000 | 175 | #define FROM_DW2_RL(x) (((x) >> 25) & 0xf) |
178 | /* DW3 */ | 176 | /* DW3 */ |
179 | #define PTD_CERR(x) (((u32) x) << 23) | 177 | #define FROM_DW3_NRBYTESTRANSFERRED(x) ((x) & 0x7fff) |
180 | #define PTD_NAC_CNT(x) (((u32) x) << 19) | 178 | #define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) ((x) & 0x07ff) |
181 | #define PTD_ACTIVE ((u32) 1 << 31) | 179 | #define TO_DW3_NAKCOUNT(x) ((x) << 19) |
182 | #define PTD_DATA_TOGGLE(x) (((u32) x) << 25) | 180 | #define FROM_DW3_NAKCOUNT(x) (((x) >> 19) & 0xf) |
183 | 181 | #define TO_DW3_CERR(x) ((x) << 23) | |
184 | #define DW3_HALT_BIT (1 << 30) | 182 | #define FROM_DW3_CERR(x) (((x) >> 23) & 0x3) |
183 | #define TO_DW3_DATA_TOGGLE(x) ((x) << 25) | ||
184 | #define FROM_DW3_DATA_TOGGLE(x) (((x) >> 25) & 0x1) | ||
185 | #define TO_DW3_PING(x) ((x) << 26) | ||
186 | #define FROM_DW3_PING(x) (((x) >> 26) & 0x1) | ||
185 | #define DW3_ERROR_BIT (1 << 28) | 187 | #define DW3_ERROR_BIT (1 << 28) |
186 | #define DW3_QTD_ACTIVE (1 << 31) | 188 | #define DW3_BABBLE_BIT (1 << 29) |
189 | #define DW3_HALT_BIT (1 << 30) | ||
190 | #define DW3_ACTIVE_BIT (1 << 31) | ||
187 | 191 | ||
188 | #define INT_UNDERRUN (1 << 2) | 192 | #define INT_UNDERRUN (1 << 2) |
189 | #define INT_BABBLE (1 << 1) | 193 | #define INT_BABBLE (1 << 1) |
190 | #define INT_EXACT (1 << 0) | 194 | #define INT_EXACT (1 << 0) |
191 | 195 | ||
192 | #define DW1_GET_PID(x) (((x) >> 10) & 0x3) | ||
193 | #define PTD_XFERRED_LENGTH(x) ((x) & 0x7fff) | ||
194 | #define PTD_XFERRED_LENGTH_LO(x) ((x) & 0x7ff) | ||
195 | |||
196 | #define SETUP_PID (2) | 196 | #define SETUP_PID (2) |
197 | #define IN_PID (1) | 197 | #define IN_PID (1) |
198 | #define OUT_PID (0) | 198 | #define OUT_PID (0) |
199 | #define GET_QTD_TOKEN_TYPE(x) ((x) & 0x3) | ||
200 | |||
201 | #define DATA_TOGGLE (1 << 31) | ||
202 | #define GET_DATA_TOGGLE(x) ((x) >> 31) | ||
203 | 199 | ||
204 | /* Errata 1 */ | 200 | /* Errata 1 */ |
205 | #define RL_COUNTER (0) | 201 | #define RL_COUNTER (0) |
206 | #define NAK_COUNTER (0) | 202 | #define NAK_COUNTER (0) |
207 | #define ERR_COUNTER (2) | 203 | #define ERR_COUNTER (2) |
208 | 204 | ||
209 | #endif | 205 | #endif /* _ISP1760_HCD_H_ */ |
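The header changes above replace the one-way PTD_* shift macros with paired TO_DWn_*/FROM_DWn_* accessors, which is what lets the PTD_STATE_QTD_RELOAD path in isp1760-hcd.c rewrite the NAK and error counters of a PTD in place. A small stand-alone round-trip check of those accessors follows; the main() harness and the example reload count of 4 are illustrative only.

#include <assert.h>
#include <stdint.h>

/* DW2/DW3 accessors copied from isp1760-hcd.h above */
#define TO_DW2_RL(x)		((x) << 25)
#define FROM_DW2_RL(x)		(((x) >> 25) & 0xf)
#define TO_DW3_NAKCOUNT(x)	((x) << 19)
#define FROM_DW3_NAKCOUNT(x)	(((x) >> 19) & 0xf)
#define TO_DW3_CERR(x)		((x) << 23)
#define FROM_DW3_CERR(x)	(((x) >> 23) & 0x3)
#define ERR_COUNTER		(2)

int main(void)
{
	uint32_t dw2 = TO_DW2_RL(4);	/* pretend a reload count of 4 was programmed */
	uint32_t dw3 = 0;

	/* the same repacking the reload path performs before re-validating the PTD */
	dw3 &= ~TO_DW3_NAKCOUNT(0xf);
	dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(dw2));
	dw3 &= ~TO_DW3_CERR(3);
	dw3 |= TO_DW3_CERR(ERR_COUNTER);

	assert(FROM_DW3_NAKCOUNT(dw3) == 4);	/* RL counter copied into NAK counter */
	assert(FROM_DW3_CERR(dw3) == ERR_COUNTER);
	return 0;
}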
diff --git a/drivers/usb/host/octeon2-common.c b/drivers/usb/host/octeon2-common.c index 72d672cfcf39..d9df423f3d12 100644 --- a/drivers/usb/host/octeon2-common.c +++ b/drivers/usb/host/octeon2-common.c | |||
@@ -3,18 +3,19 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2010 Cavium Networks | 6 | * Copyright (C) 2010, 2011 Cavium Networks |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/mutex.h> | ||
10 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
11 | 12 | ||
12 | #include <asm/atomic.h> | ||
13 | |||
14 | #include <asm/octeon/octeon.h> | 13 | #include <asm/octeon/octeon.h> |
15 | #include <asm/octeon/cvmx-uctlx-defs.h> | 14 | #include <asm/octeon/cvmx-uctlx-defs.h> |
16 | 15 | ||
17 | static atomic_t octeon2_usb_clock_start_cnt = ATOMIC_INIT(0); | 16 | static DEFINE_MUTEX(octeon2_usb_clocks_mutex); |
17 | |||
18 | static int octeon2_usb_clock_start_cnt; | ||
18 | 19 | ||
19 | void octeon2_usb_clocks_start(void) | 20 | void octeon2_usb_clocks_start(void) |
20 | { | 21 | { |
@@ -26,8 +27,12 @@ void octeon2_usb_clocks_start(void) | |||
26 | int i; | 27 | int i; |
27 | unsigned long io_clk_64_to_ns; | 28 | unsigned long io_clk_64_to_ns; |
28 | 29 | ||
29 | if (atomic_inc_return(&octeon2_usb_clock_start_cnt) != 1) | 30 | |
30 | return; | 31 | mutex_lock(&octeon2_usb_clocks_mutex); |
32 | |||
33 | octeon2_usb_clock_start_cnt++; | ||
34 | if (octeon2_usb_clock_start_cnt != 1) | ||
35 | goto exit; | ||
31 | 36 | ||
32 | io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate(); | 37 | io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate(); |
33 | 38 | ||
@@ -43,6 +48,13 @@ void octeon2_usb_clocks_start(void) | |||
43 | 48 | ||
44 | /* Step 3: Configure the reference clock, PHY, and HCLK */ | 49 | /* Step 3: Configure the reference clock, PHY, and HCLK */ |
45 | clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); | 50 | clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); |
51 | |||
52 | /* | ||
53 | * If the UCTL looks like it has already been started, skip | ||
54 | * the initialization, otherwise bus errors are obtained. | ||
55 | */ | ||
56 | if (clk_rst_ctl.s.hrst) | ||
57 | goto end_clock; | ||
46 | /* 3a */ | 58 | /* 3a */ |
47 | clk_rst_ctl.s.p_por = 1; | 59 | clk_rst_ctl.s.p_por = 1; |
48 | clk_rst_ctl.s.hrst = 0; | 60 | clk_rst_ctl.s.hrst = 0; |
@@ -158,28 +170,31 @@ void octeon2_usb_clocks_start(void) | |||
158 | clk_rst_ctl.s.hrst = 1; | 170 | clk_rst_ctl.s.hrst = 1; |
159 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); | 171 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); |
160 | 172 | ||
173 | end_clock: | ||
161 | /* Now we can set some other registers. */ | 174 | /* Now we can set some other registers. */ |
162 | 175 | ||
163 | for (i = 0; i <= 1; i++) { | 176 | for (i = 0; i <= 1; i++) { |
164 | port_ctl_status.u64 = | 177 | port_ctl_status.u64 = |
165 | cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0)); | 178 | cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0)); |
166 | /* Set txvreftune to 15 to obtain complient 'eye' diagram. */ | 179 | /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */ |
167 | port_ctl_status.s.txvreftune = 15; | 180 | port_ctl_status.s.txvreftune = 15; |
181 | port_ctl_status.s.txrisetune = 1; | ||
182 | port_ctl_status.s.txpreemphasistune = 1; | ||
168 | cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0), | 183 | cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0), |
169 | port_ctl_status.u64); | 184 | port_ctl_status.u64); |
170 | } | 185 | } |
186 | |||
187 | /* Set uSOF cycle period to 60,000 bits. */ | ||
188 | cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull); | ||
189 | exit: | ||
190 | mutex_unlock(&octeon2_usb_clocks_mutex); | ||
171 | } | 191 | } |
172 | EXPORT_SYMBOL(octeon2_usb_clocks_start); | 192 | EXPORT_SYMBOL(octeon2_usb_clocks_start); |
173 | 193 | ||
174 | void octeon2_usb_clocks_stop(void) | 194 | void octeon2_usb_clocks_stop(void) |
175 | { | 195 | { |
176 | union cvmx_uctlx_if_ena if_ena; | 196 | mutex_lock(&octeon2_usb_clocks_mutex); |
177 | 197 | octeon2_usb_clock_start_cnt--; | |
178 | if (atomic_dec_return(&octeon2_usb_clock_start_cnt) != 0) | 198 | mutex_unlock(&octeon2_usb_clocks_mutex); |
179 | return; | ||
180 | |||
181 | if_ena.u64 = 0; | ||
182 | if_ena.s.en = 0; | ||
183 | cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64); | ||
184 | } | 199 | } |
185 | EXPORT_SYMBOL(octeon2_usb_clocks_stop); | 200 | EXPORT_SYMBOL(octeon2_usb_clocks_stop); |
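The octeon2-common.c change above trades the atomic reference count for a plain counter under a mutex, so the lengthy UCTL/PHY bring-up can sleep and runs exactly once, and the stop path now only drops the reference instead of disabling the interface. A user-space analogue of that start-once refcounting pattern is sketched below; the hw_bring_up_clocks() helper is a stand-in for the real register sequence, not Octeon code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t clocks_mutex = PTHREAD_MUTEX_INITIALIZER;
static int clock_start_cnt;

static void hw_bring_up_clocks(void)
{
	puts("bring up clocks");	/* placeholder for the slow hardware init */
}

static void clocks_start(void)
{
	pthread_mutex_lock(&clocks_mutex);
	if (++clock_start_cnt == 1)
		hw_bring_up_clocks();	/* first user does the one-time init */
	pthread_mutex_unlock(&clocks_mutex);
}

static void clocks_stop(void)
{
	pthread_mutex_lock(&clocks_mutex);
	clock_start_cnt--;		/* clocks stay running, as in the patch */
	pthread_mutex_unlock(&clocks_mutex);
}

int main(void)
{
	clocks_start();	/* init runs here */
	clocks_start();	/* refcount only */
	clocks_stop();
	clocks_stop();
	return 0;
}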
diff --git a/drivers/usb/host/ohci-ath79.c b/drivers/usb/host/ohci-ath79.c new file mode 100644 index 000000000000..ffea3e7cb0a8 --- /dev/null +++ b/drivers/usb/host/ohci-ath79.c | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * OHCI HCD (Host Controller Driver) for USB. | ||
3 | * | ||
4 | * Bus Glue for Atheros AR71XX/AR724X built-in OHCI controller. | ||
5 | * | ||
6 | * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> | ||
7 | * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> | ||
8 | * | ||
9 | * Parts of this file are based on Atheros' 2.6.15 BSP | ||
10 | * Copyright (C) 2007 Atheros Communications, Inc. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License version 2 as published | ||
14 | * by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/platform_device.h> | ||
18 | |||
19 | static int __devinit ohci_ath79_start(struct usb_hcd *hcd) | ||
20 | { | ||
21 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | ||
22 | int ret; | ||
23 | |||
24 | ret = ohci_init(ohci); | ||
25 | if (ret < 0) | ||
26 | return ret; | ||
27 | |||
28 | ret = ohci_run(ohci); | ||
29 | if (ret < 0) | ||
30 | goto err; | ||
31 | |||
32 | return 0; | ||
33 | |||
34 | err: | ||
35 | ohci_stop(hcd); | ||
36 | return ret; | ||
37 | } | ||
38 | |||
39 | static const struct hc_driver ohci_ath79_hc_driver = { | ||
40 | .description = hcd_name, | ||
41 | .product_desc = "Atheros built-in OHCI controller", | ||
42 | .hcd_priv_size = sizeof(struct ohci_hcd), | ||
43 | |||
44 | .irq = ohci_irq, | ||
45 | .flags = HCD_USB11 | HCD_MEMORY, | ||
46 | |||
47 | .start = ohci_ath79_start, | ||
48 | .stop = ohci_stop, | ||
49 | .shutdown = ohci_shutdown, | ||
50 | |||
51 | .urb_enqueue = ohci_urb_enqueue, | ||
52 | .urb_dequeue = ohci_urb_dequeue, | ||
53 | .endpoint_disable = ohci_endpoint_disable, | ||
54 | |||
55 | /* | ||
56 | * scheduling support | ||
57 | */ | ||
58 | .get_frame_number = ohci_get_frame, | ||
59 | |||
60 | /* | ||
61 | * root hub support | ||
62 | */ | ||
63 | .hub_status_data = ohci_hub_status_data, | ||
64 | .hub_control = ohci_hub_control, | ||
65 | .start_port_reset = ohci_start_port_reset, | ||
66 | }; | ||
67 | |||
68 | static int ohci_ath79_probe(struct platform_device *pdev) | ||
69 | { | ||
70 | struct usb_hcd *hcd; | ||
71 | struct resource *res; | ||
72 | int irq; | ||
73 | int ret; | ||
74 | |||
75 | if (usb_disabled()) | ||
76 | return -ENODEV; | ||
77 | |||
78 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
79 | if (!res) { | ||
80 | dev_dbg(&pdev->dev, "no IRQ specified\n"); | ||
81 | return -ENODEV; | ||
82 | } | ||
83 | irq = res->start; | ||
84 | |||
85 | hcd = usb_create_hcd(&ohci_ath79_hc_driver, &pdev->dev, | ||
86 | dev_name(&pdev->dev)); | ||
87 | if (!hcd) | ||
88 | return -ENOMEM; | ||
89 | |||
90 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
91 | if (!res) { | ||
92 | dev_dbg(&pdev->dev, "no base address specified\n"); | ||
93 | ret = -ENODEV; | ||
94 | goto err_put_hcd; | ||
95 | } | ||
96 | hcd->rsrc_start = res->start; | ||
97 | hcd->rsrc_len = res->end - res->start + 1; | ||
98 | |||
99 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { | ||
100 | dev_dbg(&pdev->dev, "controller already in use\n"); | ||
101 | ret = -EBUSY; | ||
102 | goto err_put_hcd; | ||
103 | } | ||
104 | |||
105 | hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); | ||
106 | if (!hcd->regs) { | ||
107 | dev_dbg(&pdev->dev, "error mapping memory\n"); | ||
108 | ret = -EFAULT; | ||
109 | goto err_release_region; | ||
110 | } | ||
111 | |||
112 | ohci_hcd_init(hcd_to_ohci(hcd)); | ||
113 | |||
114 | ret = usb_add_hcd(hcd, irq, IRQF_DISABLED); | ||
115 | if (ret) | ||
116 | goto err_stop_hcd; | ||
117 | |||
118 | return 0; | ||
119 | |||
120 | err_stop_hcd: | ||
121 | iounmap(hcd->regs); | ||
122 | err_release_region: | ||
123 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
124 | err_put_hcd: | ||
125 | usb_put_hcd(hcd); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | static int ohci_ath79_remove(struct platform_device *pdev) | ||
130 | { | ||
131 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | ||
132 | |||
133 | usb_remove_hcd(hcd); | ||
134 | iounmap(hcd->regs); | ||
135 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
136 | usb_put_hcd(hcd); | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static struct platform_driver ohci_hcd_ath79_driver = { | ||
142 | .probe = ohci_ath79_probe, | ||
143 | .remove = ohci_ath79_remove, | ||
144 | .shutdown = usb_hcd_platform_shutdown, | ||
145 | .driver = { | ||
146 | .name = "ath79-ohci", | ||
147 | .owner = THIS_MODULE, | ||
148 | }, | ||
149 | }; | ||
150 | |||
151 | MODULE_ALIAS(PLATFORM_MODULE_PREFIX "ath79-ohci"); | ||
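The new ohci-ath79 glue binds to a platform device named "ath79-ohci" and, as the probe above shows, expects exactly one memory resource and one IRQ resource. A hedged sketch of how a board file could register such a device follows; the base address, size and IRQ number are placeholders for illustration, not values from any real AR71xx/AR724x board support.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#define EXAMPLE_OHCI_BASE	0x1c000000	/* placeholder address */
#define EXAMPLE_OHCI_SIZE	0x1000		/* placeholder size */
#define EXAMPLE_OHCI_IRQ	14		/* placeholder IRQ */

static struct resource example_ohci_resources[] = {
	{
		.start	= EXAMPLE_OHCI_BASE,
		.end	= EXAMPLE_OHCI_BASE + EXAMPLE_OHCI_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= EXAMPLE_OHCI_IRQ,
		.end	= EXAMPLE_OHCI_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_ohci_device = {
	.name		= "ath79-ohci",	/* must match ohci_hcd_ath79_driver.driver.name */
	.id		= -1,
	.resource	= example_ohci_resources,
	.num_resources	= ARRAY_SIZE(example_ohci_resources),
};

/* A board init hook would then call platform_device_register(&example_ohci_device). */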
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index d55723514860..9aa10bdf3918 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -764,6 +764,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) | |||
764 | if (ints == ~(u32)0) { | 764 | if (ints == ~(u32)0) { |
765 | disable (ohci); | 765 | disable (ohci); |
766 | ohci_dbg (ohci, "device removed!\n"); | 766 | ohci_dbg (ohci, "device removed!\n"); |
767 | usb_hc_died(hcd); | ||
767 | return IRQ_HANDLED; | 768 | return IRQ_HANDLED; |
768 | } | 769 | } |
769 | 770 | ||
@@ -771,7 +772,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) | |||
771 | ints &= ohci_readl(ohci, ®s->intrenable); | 772 | ints &= ohci_readl(ohci, ®s->intrenable); |
772 | 773 | ||
773 | /* interrupt for some other device? */ | 774 | /* interrupt for some other device? */ |
774 | if (ints == 0) | 775 | if (ints == 0 || unlikely(hcd->state == HC_STATE_HALT)) |
775 | return IRQ_NOTMINE; | 776 | return IRQ_NOTMINE; |
776 | 777 | ||
777 | if (ints & OHCI_INTR_UE) { | 778 | if (ints & OHCI_INTR_UE) { |
@@ -788,6 +789,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) | |||
788 | } else { | 789 | } else { |
789 | disable (ohci); | 790 | disable (ohci); |
790 | ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n"); | 791 | ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n"); |
792 | usb_hc_died(hcd); | ||
791 | } | 793 | } |
792 | 794 | ||
793 | ohci_dump (ohci, 1); | 795 | ohci_dump (ohci, 1); |
@@ -1105,6 +1107,11 @@ MODULE_LICENSE ("GPL"); | |||
1105 | #define PLATFORM_DRIVER ohci_hcd_cns3xxx_driver | 1107 | #define PLATFORM_DRIVER ohci_hcd_cns3xxx_driver |
1106 | #endif | 1108 | #endif |
1107 | 1109 | ||
1110 | #ifdef CONFIG_USB_OHCI_ATH79 | ||
1111 | #include "ohci-ath79.c" | ||
1112 | #define PLATFORM_DRIVER ohci_hcd_ath79_driver | ||
1113 | #endif | ||
1114 | |||
1108 | #if !defined(PCI_DRIVER) && \ | 1115 | #if !defined(PCI_DRIVER) && \ |
1109 | !defined(PLATFORM_DRIVER) && \ | 1116 | !defined(PLATFORM_DRIVER) && \ |
1110 | !defined(OMAP1_PLATFORM_DRIVER) && \ | 1117 | !defined(OMAP1_PLATFORM_DRIVER) && \ |
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index d84d6f0314f9..ad8166c681e2 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
@@ -181,10 +181,18 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) | |||
181 | */ | 181 | */ |
182 | static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd) | 182 | static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd) |
183 | { | 183 | { |
184 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | ||
184 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | 185 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
185 | 186 | ||
186 | ohci->flags |= OHCI_QUIRK_SHUTDOWN; | 187 | /* Evidently nVidia fixed their later hardware; this is a guess at |
187 | ohci_dbg(ohci, "enabled nVidia shutdown quirk\n"); | 188 | * the changeover point. |
189 | */ | ||
190 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d | ||
191 | |||
192 | if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) { | ||
193 | ohci->flags |= OHCI_QUIRK_SHUTDOWN; | ||
194 | ohci_dbg(ohci, "enabled nVidia shutdown quirk\n"); | ||
195 | } | ||
188 | 196 | ||
189 | return 0; | 197 | return 0; |
190 | } | 198 | } |
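The regression fix above keeps the shutdown quirk only for nVidia parts with a PCI device ID below the MCP51 USB controller (0x026d), on the stated guess that later hardware no longer needs it. The cutoff lives inside the quirk function, presumably because the quirk table matches on the vendor ID alone; a purely illustrative table entry is sketched below and may differ from the real ohci-pci.c table.

/* Illustrative only: a vendor-wide match that defers the device-ID
 * cutoff to ohci_quirk_nvidia_shutdown() itself.
 */
static const struct pci_device_id example_ohci_pci_quirks[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
		.driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
	},
	{ /* end: all zeroes */ },
};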
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c index a68af2dd55ca..7c9a4d55526b 100644 --- a/drivers/usb/host/ohci-s3c2410.c +++ b/drivers/usb/host/ohci-s3c2410.c | |||
@@ -56,9 +56,8 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd) | |||
56 | info->hcd = hcd; | 56 | info->hcd = hcd; |
57 | info->report_oc = s3c2410_hcd_oc; | 57 | info->report_oc = s3c2410_hcd_oc; |
58 | 58 | ||
59 | if (info->enable_oc != NULL) { | 59 | if (info->enable_oc != NULL) |
60 | (info->enable_oc)(info, 1); | 60 | (info->enable_oc)(info, 1); |
61 | } | ||
62 | } | 61 | } |
63 | } | 62 | } |
64 | 63 | ||
@@ -72,9 +71,8 @@ static void s3c2410_stop_hc(struct platform_device *dev) | |||
72 | info->report_oc = NULL; | 71 | info->report_oc = NULL; |
73 | info->hcd = NULL; | 72 | info->hcd = NULL; |
74 | 73 | ||
75 | if (info->enable_oc != NULL) { | 74 | if (info->enable_oc != NULL) |
76 | (info->enable_oc)(info, 0); | 75 | (info->enable_oc)(info, 0); |
77 | } | ||
78 | } | 76 | } |
79 | 77 | ||
80 | clk_disable(clk); | 78 | clk_disable(clk); |
@@ -88,14 +86,14 @@ static void s3c2410_stop_hc(struct platform_device *dev) | |||
88 | */ | 86 | */ |
89 | 87 | ||
90 | static int | 88 | static int |
91 | ohci_s3c2410_hub_status_data (struct usb_hcd *hcd, char *buf) | 89 | ohci_s3c2410_hub_status_data(struct usb_hcd *hcd, char *buf) |
92 | { | 90 | { |
93 | struct s3c2410_hcd_info *info = to_s3c2410_info(hcd); | 91 | struct s3c2410_hcd_info *info = to_s3c2410_info(hcd); |
94 | struct s3c2410_hcd_port *port; | 92 | struct s3c2410_hcd_port *port; |
95 | int orig; | 93 | int orig; |
96 | int portno; | 94 | int portno; |
97 | 95 | ||
98 | orig = ohci_hub_status_data (hcd, buf); | 96 | orig = ohci_hub_status_data(hcd, buf); |
99 | 97 | ||
100 | if (info == NULL) | 98 | if (info == NULL) |
101 | return orig; | 99 | return orig; |
@@ -145,7 +143,7 @@ static void s3c2410_usb_set_power(struct s3c2410_hcd_info *info, | |||
145 | * request. | 143 | * request. |
146 | */ | 144 | */ |
147 | 145 | ||
148 | static int ohci_s3c2410_hub_control ( | 146 | static int ohci_s3c2410_hub_control( |
149 | struct usb_hcd *hcd, | 147 | struct usb_hcd *hcd, |
150 | u16 typeReq, | 148 | u16 typeReq, |
151 | u16 wValue, | 149 | u16 wValue, |
@@ -199,9 +197,8 @@ static int ohci_s3c2410_hub_control ( | |||
199 | dev_dbg(hcd->self.controller, | 197 | dev_dbg(hcd->self.controller, |
200 | "ClearPortFeature: OVER_CURRENT\n"); | 198 | "ClearPortFeature: OVER_CURRENT\n"); |
201 | 199 | ||
202 | if (valid_port(wIndex)) { | 200 | if (valid_port(wIndex)) |
203 | info->port[wIndex-1].oc_status = 0; | 201 | info->port[wIndex-1].oc_status = 0; |
204 | } | ||
205 | 202 | ||
206 | goto out; | 203 | goto out; |
207 | 204 | ||
@@ -242,8 +239,11 @@ static int ohci_s3c2410_hub_control ( | |||
242 | desc->wHubCharacteristics |= cpu_to_le16(0x0001); | 239 | desc->wHubCharacteristics |= cpu_to_le16(0x0001); |
243 | 240 | ||
244 | if (info->enable_oc) { | 241 | if (info->enable_oc) { |
245 | desc->wHubCharacteristics &= ~cpu_to_le16(HUB_CHAR_OCPM); | 242 | desc->wHubCharacteristics &= ~cpu_to_le16( |
246 | desc->wHubCharacteristics |= cpu_to_le16(0x0008|0x0001); | 243 | HUB_CHAR_OCPM); |
244 | desc->wHubCharacteristics |= cpu_to_le16( | ||
245 | 0x0008 | | ||
246 | 0x0001); | ||
247 | } | 247 | } |
248 | 248 | ||
249 | dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n", | 249 | dev_dbg(hcd->self.controller, "wHubCharacteristics after 0x%04x\n", |
@@ -257,13 +257,11 @@ static int ohci_s3c2410_hub_control ( | |||
257 | dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex); | 257 | dev_dbg(hcd->self.controller, "GetPortStatus(%d)\n", wIndex); |
258 | 258 | ||
259 | if (valid_port(wIndex)) { | 259 | if (valid_port(wIndex)) { |
260 | if (info->port[wIndex-1].oc_changed) { | 260 | if (info->port[wIndex-1].oc_changed) |
261 | *data |= cpu_to_le32(RH_PS_OCIC); | 261 | *data |= cpu_to_le32(RH_PS_OCIC); |
262 | } | ||
263 | 262 | ||
264 | if (info->port[wIndex-1].oc_status) { | 263 | if (info->port[wIndex-1].oc_status) |
265 | *data |= cpu_to_le32(RH_PS_POCI); | 264 | *data |= cpu_to_le32(RH_PS_POCI); |
266 | } | ||
267 | } | 265 | } |
268 | } | 266 | } |
269 | 267 | ||
@@ -321,7 +319,7 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc) | |||
321 | */ | 319 | */ |
322 | 320 | ||
323 | static void | 321 | static void |
324 | usb_hcd_s3c2410_remove (struct usb_hcd *hcd, struct platform_device *dev) | 322 | usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev) |
325 | { | 323 | { |
326 | usb_remove_hcd(hcd); | 324 | usb_remove_hcd(hcd); |
327 | s3c2410_stop_hc(dev); | 325 | s3c2410_stop_hc(dev); |
@@ -339,7 +337,7 @@ usb_hcd_s3c2410_remove (struct usb_hcd *hcd, struct platform_device *dev) | |||
339 | * through the hotplug entry's driver_data. | 337 | * through the hotplug entry's driver_data. |
340 | * | 338 | * |
341 | */ | 339 | */ |
342 | static int usb_hcd_s3c2410_probe (const struct hc_driver *driver, | 340 | static int usb_hcd_s3c2410_probe(const struct hc_driver *driver, |
343 | struct platform_device *dev) | 341 | struct platform_device *dev) |
344 | { | 342 | { |
345 | struct usb_hcd *hcd = NULL; | 343 | struct usb_hcd *hcd = NULL; |
@@ -353,7 +351,7 @@ static int usb_hcd_s3c2410_probe (const struct hc_driver *driver, | |||
353 | return -ENOMEM; | 351 | return -ENOMEM; |
354 | 352 | ||
355 | hcd->rsrc_start = dev->resource[0].start; | 353 | hcd->rsrc_start = dev->resource[0].start; |
356 | hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1; | 354 | hcd->rsrc_len = resource_size(&dev->resource[0]); |
357 | 355 | ||
358 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { | 356 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { |
359 | dev_err(&dev->dev, "request_mem_region failed\n"); | 357 | dev_err(&dev->dev, "request_mem_region failed\n"); |
@@ -364,14 +362,14 @@ static int usb_hcd_s3c2410_probe (const struct hc_driver *driver, | |||
364 | clk = clk_get(&dev->dev, "usb-host"); | 362 | clk = clk_get(&dev->dev, "usb-host"); |
365 | if (IS_ERR(clk)) { | 363 | if (IS_ERR(clk)) { |
366 | dev_err(&dev->dev, "cannot get usb-host clock\n"); | 364 | dev_err(&dev->dev, "cannot get usb-host clock\n"); |
367 | retval = -ENOENT; | 365 | retval = PTR_ERR(clk); |
368 | goto err_mem; | 366 | goto err_mem; |
369 | } | 367 | } |
370 | 368 | ||
371 | usb_clk = clk_get(&dev->dev, "usb-bus-host"); | 369 | usb_clk = clk_get(&dev->dev, "usb-bus-host"); |
372 | if (IS_ERR(usb_clk)) { | 370 | if (IS_ERR(usb_clk)) { |
373 | dev_err(&dev->dev, "cannot get usb-bus-host clock\n"); | 371 | dev_err(&dev->dev, "cannot get usb-bus-host clock\n"); |
374 | retval = -ENOENT; | 372 | retval = PTR_ERR(usb_clk); |
375 | goto err_clk; | 373 | goto err_clk; |
376 | } | 374 | } |
377 | 375 | ||
@@ -411,17 +409,19 @@ static int usb_hcd_s3c2410_probe (const struct hc_driver *driver, | |||
411 | /*-------------------------------------------------------------------------*/ | 409 | /*-------------------------------------------------------------------------*/ |
412 | 410 | ||
413 | static int | 411 | static int |
414 | ohci_s3c2410_start (struct usb_hcd *hcd) | 412 | ohci_s3c2410_start(struct usb_hcd *hcd) |
415 | { | 413 | { |
416 | struct ohci_hcd *ohci = hcd_to_ohci (hcd); | 414 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
417 | int ret; | 415 | int ret; |
418 | 416 | ||
419 | if ((ret = ohci_init(ohci)) < 0) | 417 | ret = ohci_init(ohci); |
418 | if (ret < 0) | ||
420 | return ret; | 419 | return ret; |
421 | 420 | ||
422 | if ((ret = ohci_run (ohci)) < 0) { | 421 | ret = ohci_run(ohci); |
423 | err ("can't start %s", hcd->self.bus_name); | 422 | if (ret < 0) { |
424 | ohci_stop (hcd); | 423 | err("can't start %s", hcd->self.bus_name); |
424 | ohci_stop(hcd); | ||
425 | return ret; | 425 | return ret; |
426 | } | 426 | } |
427 | 427 | ||
@@ -473,12 +473,12 @@ static const struct hc_driver ohci_s3c2410_hc_driver = { | |||
473 | 473 | ||
474 | /* device driver */ | 474 | /* device driver */ |
475 | 475 | ||
476 | static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev) | 476 | static int __devinit ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev) |
477 | { | 477 | { |
478 | return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev); | 478 | return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev); |
479 | } | 479 | } |
480 | 480 | ||
481 | static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev) | 481 | static int __devexit ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev) |
482 | { | 482 | { |
483 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 483 | struct usb_hcd *hcd = platform_get_drvdata(pdev); |
484 | 484 | ||
@@ -488,7 +488,7 @@ static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev) | |||
488 | 488 | ||
489 | static struct platform_driver ohci_hcd_s3c2410_driver = { | 489 | static struct platform_driver ohci_hcd_s3c2410_driver = { |
490 | .probe = ohci_hcd_s3c2410_drv_probe, | 490 | .probe = ohci_hcd_s3c2410_drv_probe, |
491 | .remove = ohci_hcd_s3c2410_drv_remove, | 491 | .remove = __devexit_p(ohci_hcd_s3c2410_drv_remove), |
492 | .shutdown = usb_hcd_platform_shutdown, | 492 | .shutdown = usb_hcd_platform_shutdown, |
493 | /*.suspend = ohci_hcd_s3c2410_drv_suspend, */ | 493 | /*.suspend = ohci_hcd_s3c2410_drv_suspend, */ |
494 | /*.resume = ohci_hcd_s3c2410_drv_resume, */ | 494 | /*.resume = ohci_hcd_s3c2410_drv_resume, */ |
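Beyond the checkpatch-style cleanups, the probe path above now propagates the error encoded in the ERR_PTR returned by clk_get() instead of hard-coding -ENOENT, and the __devinit/__devexit annotations let the probe/remove code be discarded when hotplug support is compiled out. The error-propagation idiom, restated for reference:

clk = clk_get(&dev->dev, "usb-host");
if (IS_ERR(clk)) {
	dev_err(&dev->dev, "cannot get usb-host clock\n");
	retval = PTR_ERR(clk);	/* report the real cause, not a generic -ENOENT */
	goto err_mem;
}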
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index 4a771f6cc822..5fbe997dc6df 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c | |||
@@ -1884,6 +1884,7 @@ static int enable_periodic(struct oxu_hcd *oxu) | |||
1884 | status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125); | 1884 | status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125); |
1885 | if (status != 0) { | 1885 | if (status != 0) { |
1886 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | 1886 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; |
1887 | usb_hc_died(oxu_to_hcd(oxu)); | ||
1887 | return status; | 1888 | return status; |
1888 | } | 1889 | } |
1889 | 1890 | ||
@@ -1909,6 +1910,7 @@ static int disable_periodic(struct oxu_hcd *oxu) | |||
1909 | status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125); | 1910 | status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125); |
1910 | if (status != 0) { | 1911 | if (status != 0) { |
1911 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | 1912 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; |
1913 | usb_hc_died(oxu_to_hcd(oxu)); | ||
1912 | return status; | 1914 | return status; |
1913 | } | 1915 | } |
1914 | 1916 | ||
@@ -2449,8 +2451,9 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) | |||
2449 | goto dead; | 2451 | goto dead; |
2450 | } | 2452 | } |
2451 | 2453 | ||
2454 | /* Shared IRQ? */ | ||
2452 | status &= INTR_MASK; | 2455 | status &= INTR_MASK; |
2453 | if (!status) { /* irq sharing? */ | 2456 | if (!status || unlikely(hcd->state == HC_STATE_HALT)) { |
2454 | spin_unlock(&oxu->lock); | 2457 | spin_unlock(&oxu->lock); |
2455 | return IRQ_NONE; | 2458 | return IRQ_NONE; |
2456 | } | 2459 | } |
@@ -2516,6 +2519,7 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) | |||
2516 | dead: | 2519 | dead: |
2517 | ehci_reset(oxu); | 2520 | ehci_reset(oxu); |
2518 | writel(0, &oxu->regs->configured_flag); | 2521 | writel(0, &oxu->regs->configured_flag); |
2522 | usb_hc_died(hcd); | ||
2519 | /* generic layer kills/unlinks all urbs, then | 2523 | /* generic layer kills/unlinks all urbs, then |
2520 | * uses oxu_stop to clean up the rest | 2524 | * uses oxu_stop to clean up the rest |
2521 | */ | 2525 | */ |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 9b166d70ae91..f16c59d5f487 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/acpi.h> | 16 | #include <linux/acpi.h> |
17 | #include <linux/dmi.h> | ||
17 | #include "pci-quirks.h" | 18 | #include "pci-quirks.h" |
18 | #include "xhci-ext-caps.h" | 19 | #include "xhci-ext-caps.h" |
19 | 20 | ||
@@ -503,14 +504,84 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) | |||
503 | iounmap(base); | 504 | iounmap(base); |
504 | } | 505 | } |
505 | 506 | ||
507 | static void __devinit ehci_bios_handoff(struct pci_dev *pdev, | ||
508 | void __iomem *op_reg_base, | ||
509 | u32 cap, u8 offset) | ||
510 | { | ||
511 | int try_handoff = 1, tried_handoff = 0; | ||
512 | |||
513 | /* The Pegatron Lucid (ExoPC) tablet sporadically waits for 90 | ||
514 | * seconds trying the handoff on its unused controller. Skip | ||
515 | * it. */ | ||
516 | if (pdev->vendor == 0x8086 && pdev->device == 0x283a) { | ||
517 | const char *dmi_bn = dmi_get_system_info(DMI_BOARD_NAME); | ||
518 | const char *dmi_bv = dmi_get_system_info(DMI_BIOS_VERSION); | ||
519 | if (dmi_bn && !strcmp(dmi_bn, "EXOPG06411") && | ||
520 | dmi_bv && !strcmp(dmi_bv, "Lucid-CE-133")) | ||
521 | try_handoff = 0; | ||
522 | } | ||
523 | |||
524 | if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) { | ||
525 | dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n"); | ||
526 | |||
527 | #if 0 | ||
528 | /* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on, | ||
529 | * but that seems dubious in general (the BIOS left it off intentionally) | ||
530 | * and is known to prevent some systems from booting. so we won't do this | ||
531 | * unless maybe we can determine when we're on a system that needs SMI forced. | ||
532 | */ | ||
533 | /* BIOS workaround (?): be sure the pre-Linux code | ||
534 | * receives the SMI | ||
535 | */ | ||
536 | pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val); | ||
537 | pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, | ||
538 | val | EHCI_USBLEGCTLSTS_SOOE); | ||
539 | #endif | ||
540 | |||
541 | /* some systems get upset if this semaphore is | ||
542 | * set for any other reason than forcing a BIOS | ||
543 | * handoff.. | ||
544 | */ | ||
545 | pci_write_config_byte(pdev, offset + 3, 1); | ||
546 | } | ||
547 | |||
548 | /* if boot firmware now owns EHCI, spin till it hands it over. */ | ||
549 | if (try_handoff) { | ||
550 | int msec = 1000; | ||
551 | while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) { | ||
552 | tried_handoff = 1; | ||
553 | msleep(10); | ||
554 | msec -= 10; | ||
555 | pci_read_config_dword(pdev, offset, &cap); | ||
556 | } | ||
557 | } | ||
558 | |||
559 | if (cap & EHCI_USBLEGSUP_BIOS) { | ||
560 | /* well, possibly buggy BIOS... try to shut it down, | ||
561 | * and hope nothing goes too wrong | ||
562 | */ | ||
563 | if (try_handoff) | ||
564 | dev_warn(&pdev->dev, "EHCI: BIOS handoff failed" | ||
565 | " (BIOS bug?) %08x\n", cap); | ||
566 | pci_write_config_byte(pdev, offset + 2, 0); | ||
567 | } | ||
568 | |||
569 | /* just in case, always disable EHCI SMIs */ | ||
570 | pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0); | ||
571 | |||
572 | /* If the BIOS ever owned the controller then we can't expect | ||
573 | * any power sessions to remain intact. | ||
574 | */ | ||
575 | if (tried_handoff) | ||
576 | writel(0, op_reg_base + EHCI_CONFIGFLAG); | ||
577 | } | ||
578 | |||
506 | static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) | 579 | static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) |
507 | { | 580 | { |
508 | int wait_time, delta; | ||
509 | void __iomem *base, *op_reg_base; | 581 | void __iomem *base, *op_reg_base; |
510 | u32 hcc_params, val; | 582 | u32 hcc_params, cap, val; |
511 | u8 offset, cap_length; | 583 | u8 offset, cap_length; |
512 | int count = 256/4; | 584 | int wait_time, delta, count = 256/4; |
513 | int tried_handoff = 0; | ||
514 | 585 | ||
515 | if (!mmio_resource_enabled(pdev, 0)) | 586 | if (!mmio_resource_enabled(pdev, 0)) |
516 | return; | 587 | return; |
@@ -529,77 +600,17 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) | |||
529 | hcc_params = readl(base + EHCI_HCC_PARAMS); | 600 | hcc_params = readl(base + EHCI_HCC_PARAMS); |
530 | offset = (hcc_params >> 8) & 0xff; | 601 | offset = (hcc_params >> 8) & 0xff; |
531 | while (offset && --count) { | 602 | while (offset && --count) { |
532 | u32 cap; | ||
533 | int msec; | ||
534 | |||
535 | pci_read_config_dword(pdev, offset, &cap); | 603 | pci_read_config_dword(pdev, offset, &cap); |
536 | switch (cap & 0xff) { | ||
537 | case 1: /* BIOS/SMM/... handoff support */ | ||
538 | if ((cap & EHCI_USBLEGSUP_BIOS)) { | ||
539 | dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n"); | ||
540 | 604 | ||
541 | #if 0 | 605 | switch (cap & 0xff) { |
542 | /* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on, | 606 | case 1: |
543 | * but that seems dubious in general (the BIOS left it off intentionally) | 607 | ehci_bios_handoff(pdev, op_reg_base, cap, offset); |
544 | * and is known to prevent some systems from booting. so we won't do this | ||
545 | * unless maybe we can determine when we're on a system that needs SMI forced. | ||
546 | */ | ||
547 | /* BIOS workaround (?): be sure the | ||
548 | * pre-Linux code receives the SMI | ||
549 | */ | ||
550 | pci_read_config_dword(pdev, | ||
551 | offset + EHCI_USBLEGCTLSTS, | ||
552 | &val); | ||
553 | pci_write_config_dword(pdev, | ||
554 | offset + EHCI_USBLEGCTLSTS, | ||
555 | val | EHCI_USBLEGCTLSTS_SOOE); | ||
556 | #endif | ||
557 | |||
558 | /* some systems get upset if this semaphore is | ||
559 | * set for any other reason than forcing a BIOS | ||
560 | * handoff.. | ||
561 | */ | ||
562 | pci_write_config_byte(pdev, offset + 3, 1); | ||
563 | } | ||
564 | |||
565 | /* if boot firmware now owns EHCI, spin till | ||
566 | * it hands it over. | ||
567 | */ | ||
568 | msec = 1000; | ||
569 | while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) { | ||
570 | tried_handoff = 1; | ||
571 | msleep(10); | ||
572 | msec -= 10; | ||
573 | pci_read_config_dword(pdev, offset, &cap); | ||
574 | } | ||
575 | |||
576 | if (cap & EHCI_USBLEGSUP_BIOS) { | ||
577 | /* well, possibly buggy BIOS... try to shut | ||
578 | * it down, and hope nothing goes too wrong | ||
579 | */ | ||
580 | dev_warn(&pdev->dev, "EHCI: BIOS handoff failed" | ||
581 | " (BIOS bug?) %08x\n", cap); | ||
582 | pci_write_config_byte(pdev, offset + 2, 0); | ||
583 | } | ||
584 | |||
585 | /* just in case, always disable EHCI SMIs */ | ||
586 | pci_write_config_dword(pdev, | ||
587 | offset + EHCI_USBLEGCTLSTS, | ||
588 | 0); | ||
589 | |||
590 | /* If the BIOS ever owned the controller then we | ||
591 | * can't expect any power sessions to remain intact. | ||
592 | */ | ||
593 | if (tried_handoff) | ||
594 | writel(0, op_reg_base + EHCI_CONFIGFLAG); | ||
595 | break; | 608 | break; |
596 | case 0: /* illegal reserved capability */ | 609 | case 0: /* Illegal reserved cap, set cap=0 so we exit */ |
597 | cap = 0; | 610 | cap = 0; /* then fallthrough... */ |
598 | /* FALLTHROUGH */ | ||
599 | default: | 611 | default: |
600 | dev_warn(&pdev->dev, "EHCI: unrecognized capability " | 612 | dev_warn(&pdev->dev, "EHCI: unrecognized capability " |
601 | "%02x\n", cap & 0xff); | 613 | "%02x\n", cap & 0xff); |
602 | break; | ||
603 | } | 614 | } |
604 | offset = (cap >> 8) & 0xff; | 615 | offset = (cap >> 8) & 0xff; |
605 | } | 616 | } |
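The refactored handoff code above drives the EHCI USBLEGSUP extended capability: in the dword at the capability's config offset, bit 16 (the byte at offset + 2) is the BIOS-owned semaphore and bit 24 (the byte at offset + 3) is the OS-owned semaphore, while EHCI_USBLEGCTLSTS controls SMI generation. A hedged, standalone restatement of the claim-and-poll sequence follows; example_ehci_claim_ownership() is an illustrative helper, not a function in pci-quirks.c.

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch of the BIOS -> OS handoff, assuming the EHCI_USBLEGSUP_BIOS
 * definition from pci-quirks.h (bit 16 of the USBLEGSUP dword).
 */
static int example_ehci_claim_ownership(struct pci_dev *pdev, u8 offset)
{
	u32 cap;
	int msec = 1000;

	/* Request ownership: set the OS-owned semaphore byte. */
	pci_write_config_byte(pdev, offset + 3, 1);

	/* Poll until firmware drops the BIOS-owned semaphore, or give up. */
	do {
		pci_read_config_dword(pdev, offset, &cap);
		if (!(cap & EHCI_USBLEGSUP_BIOS))
			return 0;		/* handoff completed */
		msleep(10);
		msec -= 10;
	} while (msec > 0);

	return -ETIMEDOUT;			/* BIOS never let go; caller may force it */
}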
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index fafccc2fd331..1a996245ab98 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c | |||
@@ -72,12 +72,6 @@ MODULE_ALIAS("platform:sl811-hcd"); | |||
72 | /* for now, use only one transfer register bank */ | 72 | /* for now, use only one transfer register bank */ |
73 | #undef USE_B | 73 | #undef USE_B |
74 | 74 | ||
75 | /* this doesn't understand urb->iso_frame_desc[], but if you had a driver | ||
76 | * that just queued one ISO frame per URB then iso transfers "should" work | ||
77 | * using the normal urb status fields. | ||
78 | */ | ||
79 | #define DISABLE_ISO | ||
80 | |||
81 | // #define QUIRK2 | 75 | // #define QUIRK2 |
82 | #define QUIRK3 | 76 | #define QUIRK3 |
83 | 77 | ||
@@ -808,7 +802,7 @@ static int sl811h_urb_enqueue( | |||
808 | int retval; | 802 | int retval; |
809 | struct usb_host_endpoint *hep = urb->ep; | 803 | struct usb_host_endpoint *hep = urb->ep; |
810 | 804 | ||
811 | #ifdef DISABLE_ISO | 805 | #ifndef CONFIG_USB_SL811_HCD_ISO |
812 | if (type == PIPE_ISOCHRONOUS) | 806 | if (type == PIPE_ISOCHRONOUS) |
813 | return -ENOSPC; | 807 | return -ENOSPC; |
814 | #endif | 808 | #endif |
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index b4785934e091..533d12cca371 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c | |||
@@ -3230,8 +3230,7 @@ static int __init u132_hcd_init(void) | |||
3230 | mutex_init(&u132_module_lock); | 3230 | mutex_init(&u132_module_lock); |
3231 | if (usb_disabled()) | 3231 | if (usb_disabled()) |
3232 | return -ENODEV; | 3232 | return -ENODEV; |
3233 | printk(KERN_INFO "driver %s built at %s on %s\n", hcd_name, __TIME__, | 3233 | printk(KERN_INFO "driver %s\n", hcd_name); |
3234 | __DATE__); | ||
3235 | workqueue = create_singlethread_workqueue("u132"); | 3234 | workqueue = create_singlethread_workqueue("u132"); |
3236 | retval = platform_driver_register(&u132_platform_driver); | 3235 | retval = platform_driver_register(&u132_platform_driver); |
3237 | return retval; | 3236 | return retval; |
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c index ee60cd3ea642..fc0b0daac93d 100644 --- a/drivers/usb/host/uhci-debug.c +++ b/drivers/usb/host/uhci-debug.c | |||
@@ -37,7 +37,8 @@ static void lprintk(char *buf) | |||
37 | } | 37 | } |
38 | } | 38 | } |
39 | 39 | ||
40 | static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space) | 40 | static int uhci_show_td(struct uhci_hcd *uhci, struct uhci_td *td, char *buf, |
41 | int len, int space) | ||
41 | { | 42 | { |
42 | char *out = buf; | 43 | char *out = buf; |
43 | char *spid; | 44 | char *spid; |
@@ -47,8 +48,9 @@ static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space) | |||
47 | if (len < 160) | 48 | if (len < 160) |
48 | return 0; | 49 | return 0; |
49 | 50 | ||
50 | status = td_status(td); | 51 | status = td_status(uhci, td); |
51 | out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td, le32_to_cpu(td->link)); | 52 | out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td, |
53 | hc32_to_cpu(uhci, td->link)); | ||
52 | out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ", | 54 | out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ", |
53 | ((status >> 27) & 3), | 55 | ((status >> 27) & 3), |
54 | (status & TD_CTRL_SPD) ? "SPD " : "", | 56 | (status & TD_CTRL_SPD) ? "SPD " : "", |
@@ -63,7 +65,7 @@ static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space) | |||
63 | (status & TD_CTRL_BITSTUFF) ? "BitStuff " : "", | 65 | (status & TD_CTRL_BITSTUFF) ? "BitStuff " : "", |
64 | status & 0x7ff); | 66 | status & 0x7ff); |
65 | 67 | ||
66 | token = td_token(td); | 68 | token = td_token(uhci, td); |
67 | switch (uhci_packetid(token)) { | 69 | switch (uhci_packetid(token)) { |
68 | case USB_PID_SETUP: | 70 | case USB_PID_SETUP: |
69 | spid = "SETUP"; | 71 | spid = "SETUP"; |
@@ -86,12 +88,13 @@ static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space) | |||
86 | (token >> 8) & 127, | 88 | (token >> 8) & 127, |
87 | (token & 0xff), | 89 | (token & 0xff), |
88 | spid); | 90 | spid); |
89 | out += sprintf(out, "(buf=%08x)\n", le32_to_cpu(td->buffer)); | 91 | out += sprintf(out, "(buf=%08x)\n", hc32_to_cpu(uhci, td->buffer)); |
90 | 92 | ||
91 | return out - buf; | 93 | return out - buf; |
92 | } | 94 | } |
93 | 95 | ||
94 | static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space) | 96 | static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp, |
97 | char *buf, int len, int space) | ||
95 | { | 98 | { |
96 | char *out = buf; | 99 | char *out = buf; |
97 | struct uhci_td *td; | 100 | struct uhci_td *td; |
@@ -130,9 +133,10 @@ static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space) | |||
130 | if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC && | 133 | if (urbp->qh->type != USB_ENDPOINT_XFER_ISOC && |
131 | (++i <= 10 || debug > 2)) { | 134 | (++i <= 10 || debug > 2)) { |
132 | out += sprintf(out, "%*s%d: ", space + 2, "", i); | 135 | out += sprintf(out, "%*s%d: ", space + 2, "", i); |
133 | out += uhci_show_td(td, out, len - (out - buf), 0); | 136 | out += uhci_show_td(uhci, td, out, |
137 | len - (out - buf), 0); | ||
134 | } else { | 138 | } else { |
135 | if (td_status(td) & TD_CTRL_ACTIVE) | 139 | if (td_status(uhci, td) & TD_CTRL_ACTIVE) |
136 | ++nactive; | 140 | ++nactive; |
137 | else | 141 | else |
138 | ++ninactive; | 142 | ++ninactive; |
@@ -151,7 +155,7 @@ static int uhci_show_qh(struct uhci_hcd *uhci, | |||
151 | { | 155 | { |
152 | char *out = buf; | 156 | char *out = buf; |
153 | int i, nurbs; | 157 | int i, nurbs; |
154 | __le32 element = qh_element(qh); | 158 | __hc32 element = qh_element(qh); |
155 | char *qtype; | 159 | char *qtype; |
156 | 160 | ||
157 | /* Try to make sure there's enough memory */ | 161 | /* Try to make sure there's enough memory */ |
@@ -168,7 +172,8 @@ static int uhci_show_qh(struct uhci_hcd *uhci, | |||
168 | 172 | ||
169 | out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n", | 173 | out += sprintf(out, "%*s[%p] %s QH link (%08x) element (%08x)\n", |
170 | space, "", qh, qtype, | 174 | space, "", qh, qtype, |
171 | le32_to_cpu(qh->link), le32_to_cpu(element)); | 175 | hc32_to_cpu(uhci, qh->link), |
176 | hc32_to_cpu(uhci, element)); | ||
172 | if (qh->type == USB_ENDPOINT_XFER_ISOC) | 177 | if (qh->type == USB_ENDPOINT_XFER_ISOC) |
173 | out += sprintf(out, "%*s period %d phase %d load %d us, " | 178 | out += sprintf(out, "%*s period %d phase %d load %d us, " |
174 | "frame %x desc [%p]\n", | 179 | "frame %x desc [%p]\n", |
@@ -178,22 +183,22 @@ static int uhci_show_qh(struct uhci_hcd *uhci, | |||
178 | out += sprintf(out, "%*s period %d phase %d load %d us\n", | 183 | out += sprintf(out, "%*s period %d phase %d load %d us\n", |
179 | space, "", qh->period, qh->phase, qh->load); | 184 | space, "", qh->period, qh->phase, qh->load); |
180 | 185 | ||
181 | if (element & UHCI_PTR_QH) | 186 | if (element & UHCI_PTR_QH(uhci)) |
182 | out += sprintf(out, "%*s Element points to QH (bug?)\n", space, ""); | 187 | out += sprintf(out, "%*s Element points to QH (bug?)\n", space, ""); |
183 | 188 | ||
184 | if (element & UHCI_PTR_DEPTH) | 189 | if (element & UHCI_PTR_DEPTH(uhci)) |
185 | out += sprintf(out, "%*s Depth traverse\n", space, ""); | 190 | out += sprintf(out, "%*s Depth traverse\n", space, ""); |
186 | 191 | ||
187 | if (element & cpu_to_le32(8)) | 192 | if (element & cpu_to_hc32(uhci, 8)) |
188 | out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, ""); | 193 | out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, ""); |
189 | 194 | ||
190 | if (!(element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH))) | 195 | if (!(element & ~(UHCI_PTR_QH(uhci) | UHCI_PTR_DEPTH(uhci)))) |
191 | out += sprintf(out, "%*s Element is NULL (bug?)\n", space, ""); | 196 | out += sprintf(out, "%*s Element is NULL (bug?)\n", space, ""); |
192 | 197 | ||
193 | if (list_empty(&qh->queue)) { | 198 | if (list_empty(&qh->queue)) { |
194 | out += sprintf(out, "%*s queue is empty\n", space, ""); | 199 | out += sprintf(out, "%*s queue is empty\n", space, ""); |
195 | if (qh == uhci->skel_async_qh) | 200 | if (qh == uhci->skel_async_qh) |
196 | out += uhci_show_td(uhci->term_td, out, | 201 | out += uhci_show_td(uhci, uhci->term_td, out, |
197 | len - (out - buf), 0); | 202 | len - (out - buf), 0); |
198 | } else { | 203 | } else { |
199 | struct urb_priv *urbp = list_entry(qh->queue.next, | 204 | struct urb_priv *urbp = list_entry(qh->queue.next, |
@@ -201,13 +206,13 @@ static int uhci_show_qh(struct uhci_hcd *uhci, | |||
201 | struct uhci_td *td = list_entry(urbp->td_list.next, | 206 | struct uhci_td *td = list_entry(urbp->td_list.next, |
202 | struct uhci_td, list); | 207 | struct uhci_td, list); |
203 | 208 | ||
204 | if (element != LINK_TO_TD(td)) | 209 | if (element != LINK_TO_TD(uhci, td)) |
205 | out += sprintf(out, "%*s Element != First TD\n", | 210 | out += sprintf(out, "%*s Element != First TD\n", |
206 | space, ""); | 211 | space, ""); |
207 | i = nurbs = 0; | 212 | i = nurbs = 0; |
208 | list_for_each_entry(urbp, &qh->queue, node) { | 213 | list_for_each_entry(urbp, &qh->queue, node) { |
209 | if (++i <= 10) | 214 | if (++i <= 10) |
210 | out += uhci_show_urbp(urbp, out, | 215 | out += uhci_show_urbp(uhci, urbp, out, |
211 | len - (out - buf), space + 2); | 216 | len - (out - buf), space + 2); |
212 | else | 217 | else |
213 | ++nurbs; | 218 | ++nurbs; |
@@ -219,7 +224,8 @@ static int uhci_show_qh(struct uhci_hcd *uhci, | |||
219 | 224 | ||
220 | if (qh->dummy_td) { | 225 | if (qh->dummy_td) { |
221 | out += sprintf(out, "%*s Dummy TD\n", space, ""); | 226 | out += sprintf(out, "%*s Dummy TD\n", space, ""); |
222 | out += uhci_show_td(qh->dummy_td, out, len - (out - buf), 0); | 227 | out += uhci_show_td(uhci, qh->dummy_td, out, |
228 | len - (out - buf), 0); | ||
223 | } | 229 | } |
224 | 230 | ||
225 | return out - buf; | 231 | return out - buf; |
@@ -285,7 +291,6 @@ static int uhci_show_root_hub_state(struct uhci_hcd *uhci, char *buf, int len) | |||
285 | static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len) | 291 | static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len) |
286 | { | 292 | { |
287 | char *out = buf; | 293 | char *out = buf; |
288 | unsigned long io_addr = uhci->io_addr; | ||
289 | unsigned short usbcmd, usbstat, usbint, usbfrnum; | 294 | unsigned short usbcmd, usbstat, usbint, usbfrnum; |
290 | unsigned int flbaseadd; | 295 | unsigned int flbaseadd; |
291 | unsigned char sof; | 296 | unsigned char sof; |
@@ -295,14 +300,14 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len) | |||
295 | if (len < 80 * 9) | 300 | if (len < 80 * 9) |
296 | return 0; | 301 | return 0; |
297 | 302 | ||
298 | usbcmd = inw(io_addr + 0); | 303 | usbcmd = uhci_readw(uhci, 0); |
299 | usbstat = inw(io_addr + 2); | 304 | usbstat = uhci_readw(uhci, 2); |
300 | usbint = inw(io_addr + 4); | 305 | usbint = uhci_readw(uhci, 4); |
301 | usbfrnum = inw(io_addr + 6); | 306 | usbfrnum = uhci_readw(uhci, 6); |
302 | flbaseadd = inl(io_addr + 8); | 307 | flbaseadd = uhci_readl(uhci, 8); |
303 | sof = inb(io_addr + 12); | 308 | sof = uhci_readb(uhci, 12); |
304 | portsc1 = inw(io_addr + 16); | 309 | portsc1 = uhci_readw(uhci, 16); |
305 | portsc2 = inw(io_addr + 18); | 310 | portsc2 = uhci_readw(uhci, 18); |
306 | 311 | ||
307 | out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n", | 312 | out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n", |
308 | usbcmd, | 313 | usbcmd, |
@@ -347,8 +352,8 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) | |||
347 | struct uhci_td *td; | 352 | struct uhci_td *td; |
348 | struct list_head *tmp, *head; | 353 | struct list_head *tmp, *head; |
349 | int nframes, nerrs; | 354 | int nframes, nerrs; |
350 | __le32 link; | 355 | __hc32 link; |
351 | __le32 fsbr_link; | 356 | __hc32 fsbr_link; |
352 | 357 | ||
353 | static const char * const qh_names[] = { | 358 | static const char * const qh_names[] = { |
354 | "unlink", "iso", "int128", "int64", "int32", "int16", | 359 | "unlink", "iso", "int128", "int64", "int32", "int16", |
@@ -376,7 +381,7 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) | |||
376 | nframes = 10; | 381 | nframes = 10; |
377 | nerrs = 0; | 382 | nerrs = 0; |
378 | for (i = 0; i < UHCI_NUMFRAMES; ++i) { | 383 | for (i = 0; i < UHCI_NUMFRAMES; ++i) { |
379 | __le32 qh_dma; | 384 | __hc32 qh_dma; |
380 | 385 | ||
381 | j = 0; | 386 | j = 0; |
382 | td = uhci->frame_cpu[i]; | 387 | td = uhci->frame_cpu[i]; |
@@ -386,7 +391,7 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) | |||
386 | 391 | ||
387 | if (nframes > 0) { | 392 | if (nframes > 0) { |
388 | out += sprintf(out, "- Frame %d -> (%08x)\n", | 393 | out += sprintf(out, "- Frame %d -> (%08x)\n", |
389 | i, le32_to_cpu(link)); | 394 | i, hc32_to_cpu(uhci, link)); |
390 | j = 1; | 395 | j = 1; |
391 | } | 396 | } |
392 | 397 | ||
@@ -395,7 +400,7 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) | |||
395 | do { | 400 | do { |
396 | td = list_entry(tmp, struct uhci_td, fl_list); | 401 | td = list_entry(tmp, struct uhci_td, fl_list); |
397 | tmp = tmp->next; | 402 | tmp = tmp->next; |
398 | if (link != LINK_TO_TD(td)) { | 403 | if (link != LINK_TO_TD(uhci, td)) { |
399 | if (nframes > 0) | 404 | if (nframes > 0) |
400 | out += sprintf(out, " link does " | 405 | out += sprintf(out, " link does " |
401 | "not match list entry!\n"); | 406 | "not match list entry!\n"); |
@@ -403,7 +408,7 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) | |||
403 | ++nerrs; | 408 | ++nerrs; |
404 | } | 409 | } |
405 | if (nframes > 0) | 410 | if (nframes > 0) |
406 | out += uhci_show_td(td, out, | 411 | out += uhci_show_td(uhci, td, out, |
407 | len - (out - buf), 4); | 412 | len - (out - buf), 4); |
408 | link = td->link; | 413 | link = td->link; |
409 | } while (tmp != head); | 414 | } while (tmp != head); |
@@ -415,11 +420,12 @@ check_link: | |||
415 | if (!j) { | 420 | if (!j) { |
416 | out += sprintf(out, | 421 | out += sprintf(out, |
417 | "- Frame %d -> (%08x)\n", | 422 | "- Frame %d -> (%08x)\n", |
418 | i, le32_to_cpu(link)); | 423 | i, hc32_to_cpu(uhci, link)); |
419 | j = 1; | 424 | j = 1; |
420 | } | 425 | } |
421 | out += sprintf(out, " link does not match " | 426 | out += sprintf(out, " link does not match " |
422 | "QH (%08x)!\n", le32_to_cpu(qh_dma)); | 427 | "QH (%08x)!\n", |
428 | hc32_to_cpu(uhci, qh_dma)); | ||
423 | } else | 429 | } else |
424 | ++nerrs; | 430 | ++nerrs; |
425 | } | 431 | } |
@@ -440,11 +446,11 @@ check_link: | |||
440 | 446 | ||
441 | /* Last QH is the Terminating QH, it's different */ | 447 | /* Last QH is the Terminating QH, it's different */ |
442 | if (i == SKEL_TERM) { | 448 | if (i == SKEL_TERM) { |
443 | if (qh_element(qh) != LINK_TO_TD(uhci->term_td)) | 449 | if (qh_element(qh) != LINK_TO_TD(uhci, uhci->term_td)) |
444 | out += sprintf(out, " skel_term_qh element is not set to term_td!\n"); | 450 | out += sprintf(out, " skel_term_qh element is not set to term_td!\n"); |
445 | link = fsbr_link; | 451 | link = fsbr_link; |
446 | if (!link) | 452 | if (!link) |
447 | link = LINK_TO_QH(uhci->skel_term_qh); | 453 | link = LINK_TO_QH(uhci, uhci->skel_term_qh); |
448 | goto check_qh_link; | 454 | goto check_qh_link; |
449 | } | 455 | } |
450 | 456 | ||
@@ -458,20 +464,20 @@ check_link: | |||
458 | out += uhci_show_qh(uhci, qh, out, | 464 | out += uhci_show_qh(uhci, qh, out, |
459 | len - (out - buf), 4); | 465 | len - (out - buf), 4); |
460 | if (!fsbr_link && qh->skel >= SKEL_FSBR) | 466 | if (!fsbr_link && qh->skel >= SKEL_FSBR) |
461 | fsbr_link = LINK_TO_QH(qh); | 467 | fsbr_link = LINK_TO_QH(uhci, qh); |
462 | } | 468 | } |
463 | if ((cnt -= 10) > 0) | 469 | if ((cnt -= 10) > 0) |
464 | out += sprintf(out, " Skipped %d QHs\n", cnt); | 470 | out += sprintf(out, " Skipped %d QHs\n", cnt); |
465 | 471 | ||
466 | link = UHCI_PTR_TERM; | 472 | link = UHCI_PTR_TERM(uhci); |
467 | if (i <= SKEL_ISO) | 473 | if (i <= SKEL_ISO) |
468 | ; | 474 | ; |
469 | else if (i < SKEL_ASYNC) | 475 | else if (i < SKEL_ASYNC) |
470 | link = LINK_TO_QH(uhci->skel_async_qh); | 476 | link = LINK_TO_QH(uhci, uhci->skel_async_qh); |
471 | else if (!uhci->fsbr_is_on) | 477 | else if (!uhci->fsbr_is_on) |
472 | ; | 478 | ; |
473 | else | 479 | else |
474 | link = LINK_TO_QH(uhci->skel_term_qh); | 480 | link = LINK_TO_QH(uhci, uhci->skel_term_qh); |
475 | check_qh_link: | 481 | check_qh_link: |
476 | if (qh->link != link) | 482 | if (qh->link != link) |
477 | out += sprintf(out, " last QH not linked to next skeleton!\n"); | 483 | out += sprintf(out, " last QH not linked to next skeleton!\n"); |
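Every descriptor field in the debug dump now goes through hc32_to_cpu()/cpu_to_hc32() style wrappers that take the uhci_hcd pointer, so the same code can serve little-endian PCI controllers and a big-endian-descriptor GRUSBHC. A hedged sketch of what such wrappers can look like follows; the real helpers live in uhci-hcd.h, and the example_ names below are illustrative.

/* Sketch only: per-controller descriptor byte order, assuming a
 * uhci->big_endian_desc flag like the one set by uhci-grlib.c below.
 */
static inline u32 example_hc32_to_cpu(const struct uhci_hcd *uhci, __hc32 x)
{
	return uhci->big_endian_desc ? be32_to_cpu((__force __be32) x)
				     : le32_to_cpu((__force __le32) x);
}

static inline __hc32 example_cpu_to_hc32(const struct uhci_hcd *uhci, u32 x)
{
	return uhci->big_endian_desc ? (__force __hc32) cpu_to_be32(x)
				     : (__force __hc32) cpu_to_le32(x);
}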
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c new file mode 100644 index 000000000000..d01c1e227681 --- /dev/null +++ b/drivers/usb/host/uhci-grlib.c | |||
@@ -0,0 +1,208 @@ | |||
1 | /* | ||
2 | * UHCI HCD (Host Controller Driver) for GRLIB GRUSBHC | ||
3 | * | ||
4 | * Copyright (c) 2011 Jan Andersson <jan@gaisler.com> | ||
5 | * | ||
6 | * This file is based on UHCI PCI HCD: | ||
7 | * (C) Copyright 1999 Linus Torvalds | ||
8 | * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com | ||
9 | * (C) Copyright 1999 Randy Dunlap | ||
10 | * (C) Copyright 1999 Georg Acher, acher@in.tum.de | ||
11 | * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de | ||
12 | * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch | ||
13 | * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at | ||
14 | * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface | ||
15 | * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). | ||
16 | * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) | ||
17 | * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu | ||
18 | */ | ||
19 | |||
20 | #include <linux/of_irq.h> | ||
21 | #include <linux/of_address.h> | ||
22 | #include <linux/of_platform.h> | ||
23 | |||
24 | static int uhci_grlib_init(struct usb_hcd *hcd) | ||
25 | { | ||
26 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | ||
27 | |||
28 | /* | ||
29 | * Probe to determine the endianness of the controller. | ||
30 | * We know that bit 7 of the PORTSC1 register is always set | ||
31 | * and bit 15 is always clear. If uhci_readw() yields a value | ||
32 | * with bit 7 (0x80) turned on then the current little-endian | ||
33 | * setting is correct. Otherwise we assume the value was | ||
34 | * byte-swapped; hence the register interface and presumably | ||
35 | * also the descriptors are big-endian. | ||
36 | */ | ||
37 | if (!(uhci_readw(uhci, USBPORTSC1) & 0x80)) { | ||
38 | uhci->big_endian_mmio = 1; | ||
39 | uhci->big_endian_desc = 1; | ||
40 | } | ||
41 | |||
42 | uhci->rh_numports = uhci_count_ports(hcd); | ||
43 | |||
44 | /* Set up pointers to generic functions */ | ||
45 | uhci->reset_hc = uhci_generic_reset_hc; | ||
46 | uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc; | ||
47 | /* No special actions need to be taken for the functions below */ | ||
48 | uhci->configure_hc = NULL; | ||
49 | uhci->resume_detect_interrupts_are_broken = NULL; | ||
50 | uhci->global_suspend_mode_is_broken = NULL; | ||
51 | |||
52 | /* Reset if the controller isn't already safely quiescent. */ | ||
53 | check_and_reset_hc(uhci); | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static const struct hc_driver uhci_grlib_hc_driver = { | ||
58 | .description = hcd_name, | ||
59 | .product_desc = "GRLIB GRUSBHC UHCI Host Controller", | ||
60 | .hcd_priv_size = sizeof(struct uhci_hcd), | ||
61 | |||
62 | /* Generic hardware linkage */ | ||
63 | .irq = uhci_irq, | ||
64 | .flags = HCD_MEMORY | HCD_USB11, | ||
65 | |||
66 | /* Basic lifecycle operations */ | ||
67 | .reset = uhci_grlib_init, | ||
68 | .start = uhci_start, | ||
69 | #ifdef CONFIG_PM | ||
70 | .pci_suspend = NULL, | ||
71 | .pci_resume = NULL, | ||
72 | .bus_suspend = uhci_rh_suspend, | ||
73 | .bus_resume = uhci_rh_resume, | ||
74 | #endif | ||
75 | .stop = uhci_stop, | ||
76 | |||
77 | .urb_enqueue = uhci_urb_enqueue, | ||
78 | .urb_dequeue = uhci_urb_dequeue, | ||
79 | |||
80 | .endpoint_disable = uhci_hcd_endpoint_disable, | ||
81 | .get_frame_number = uhci_hcd_get_frame_number, | ||
82 | |||
83 | .hub_status_data = uhci_hub_status_data, | ||
84 | .hub_control = uhci_hub_control, | ||
85 | }; | ||
86 | |||
87 | |||
88 | static int __devinit uhci_hcd_grlib_probe(struct platform_device *op) | ||
89 | { | ||
90 | struct device_node *dn = op->dev.of_node; | ||
91 | struct usb_hcd *hcd; | ||
92 | struct uhci_hcd *uhci = NULL; | ||
93 | struct resource res; | ||
94 | int irq; | ||
95 | int rv; | ||
96 | |||
97 | if (usb_disabled()) | ||
98 | return -ENODEV; | ||
99 | |||
100 | dev_dbg(&op->dev, "initializing GRUSBHC UHCI USB Controller\n"); | ||
101 | |||
102 | rv = of_address_to_resource(dn, 0, &res); | ||
103 | if (rv) | ||
104 | return rv; | ||
105 | |||
106 | /* usb_create_hcd requires dma_mask != NULL */ | ||
107 | op->dev.dma_mask = &op->dev.coherent_dma_mask; | ||
108 | hcd = usb_create_hcd(&uhci_grlib_hc_driver, &op->dev, | ||
109 | "GRUSBHC UHCI USB"); | ||
110 | if (!hcd) | ||
111 | return -ENOMEM; | ||
112 | |||
113 | hcd->rsrc_start = res.start; | ||
114 | hcd->rsrc_len = res.end - res.start + 1; | ||
115 | |||
116 | if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { | ||
117 | printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); | ||
118 | rv = -EBUSY; | ||
119 | goto err_rmr; | ||
120 | } | ||
121 | |||
122 | irq = irq_of_parse_and_map(dn, 0); | ||
123 | if (irq == NO_IRQ) { | ||
124 | printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); | ||
125 | rv = -EBUSY; | ||
126 | goto err_irq; | ||
127 | } | ||
128 | |||
129 | hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); | ||
130 | if (!hcd->regs) { | ||
131 | printk(KERN_ERR "%s: ioremap failed\n", __FILE__); | ||
132 | rv = -ENOMEM; | ||
133 | goto err_ioremap; | ||
134 | } | ||
135 | |||
136 | uhci = hcd_to_uhci(hcd); | ||
137 | |||
138 | uhci->regs = hcd->regs; | ||
139 | |||
140 | rv = usb_add_hcd(hcd, irq, 0); | ||
141 | if (rv) | ||
142 | goto err_uhci; | ||
143 | |||
144 | return 0; | ||
145 | |||
146 | err_uhci: | ||
147 | iounmap(hcd->regs); | ||
148 | err_ioremap: | ||
149 | irq_dispose_mapping(irq); | ||
150 | err_irq: | ||
151 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
152 | err_rmr: | ||
153 | usb_put_hcd(hcd); | ||
154 | |||
155 | return rv; | ||
156 | } | ||
157 | |||
158 | static int uhci_hcd_grlib_remove(struct platform_device *op) | ||
159 | { | ||
160 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); | ||
161 | |||
162 | dev_set_drvdata(&op->dev, NULL); | ||
163 | |||
164 | dev_dbg(&op->dev, "stopping GRLIB GRUSBHC UHCI USB Controller\n"); | ||
165 | |||
166 | usb_remove_hcd(hcd); | ||
167 | |||
168 | iounmap(hcd->regs); | ||
169 | irq_dispose_mapping(hcd->irq); | ||
170 | release_mem_region(hcd->rsrc_start, hcd->rsrc_len); | ||
171 | |||
172 | usb_put_hcd(hcd); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* Make sure the controller is quiescent and that we're not using it | ||
178 | * any more. This is mainly for the benefit of programs which, like kexec, | ||
179 | * expect the hardware to be idle: not doing DMA or generating IRQs. | ||
180 | * | ||
181 | * This routine may be called in a damaged or failing kernel. Hence we | ||
182 | * do not acquire the spinlock before shutting down the controller. | ||
183 | */ | ||
184 | static void uhci_hcd_grlib_shutdown(struct platform_device *op) | ||
185 | { | ||
186 | struct usb_hcd *hcd = dev_get_drvdata(&op->dev); | ||
187 | |||
188 | uhci_hc_died(hcd_to_uhci(hcd)); | ||
189 | } | ||
190 | |||
191 | static const struct of_device_id uhci_hcd_grlib_of_match[] = { | ||
192 | { .name = "GAISLER_UHCI", }, | ||
193 | { .name = "01_027", }, | ||
194 | {}, | ||
195 | }; | ||
196 | MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match); | ||
197 | |||
198 | |||
199 | static struct platform_driver uhci_grlib_driver = { | ||
200 | .probe = uhci_hcd_grlib_probe, | ||
201 | .remove = uhci_hcd_grlib_remove, | ||
202 | .shutdown = uhci_hcd_grlib_shutdown, | ||
203 | .driver = { | ||
204 | .name = "grlib-uhci", | ||
205 | .owner = THIS_MODULE, | ||
206 | .of_match_table = uhci_hcd_grlib_of_match, | ||
207 | }, | ||
208 | }; | ||
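Because GRUSBHC registers are memory-mapped and may be big-endian, the inw()/outw() port I/O in the UHCI core is replaced by uhci_readw()/uhci_writew() style accessors, as the uhci-debug.c hunk above and the uhci-hcd.c hunk below show. A hedged sketch of how such an accessor can dispatch follows; uhci_has_pci_registers() is an assumed helper here, and the real definitions in uhci-hcd.h may differ.

/* Sketch only: choose between PCI port I/O and (possibly byte-swapped)
 * memory-mapped I/O, based on flags like those set in uhci_grlib_init().
 */
static inline u16 example_uhci_readw(const struct uhci_hcd *uhci, int reg)
{
	if (uhci_has_pci_registers(uhci))	/* assumed helper for PCI I/O ports */
		return inw(uhci->io_addr + reg);
	else if (uhci->big_endian_mmio)
		return readw_be(uhci->regs + reg);	/* big-endian MMIO accessor, assumed available */
	else
		return readw(uhci->regs + reg);
}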
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 448b9d1f0e70..fba99b120588 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c | |||
@@ -48,7 +48,6 @@ | |||
48 | #include <asm/system.h> | 48 | #include <asm/system.h> |
49 | 49 | ||
50 | #include "uhci-hcd.h" | 50 | #include "uhci-hcd.h" |
51 | #include "pci-quirks.h" | ||
52 | 51 | ||
53 | /* | 52 | /* |
54 | * Version Information | 53 | * Version Information |
@@ -94,7 +93,7 @@ static void uhci_get_current_frame_number(struct uhci_hcd *uhci); | |||
94 | /* | 93 | /* |
95 | * Calculate the link pointer DMA value for the first Skeleton QH in a frame. | 94 | * Calculate the link pointer DMA value for the first Skeleton QH in a frame. |
96 | */ | 95 | */ |
97 | static __le32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame) | 96 | static __hc32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame) |
98 | { | 97 | { |
99 | int skelnum; | 98 | int skelnum; |
100 | 99 | ||
@@ -116,7 +115,7 @@ static __le32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame) | |||
116 | skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES); | 115 | skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES); |
117 | if (skelnum <= 1) | 116 | if (skelnum <= 1) |
118 | skelnum = 9; | 117 | skelnum = 9; |
119 | return LINK_TO_QH(uhci->skelqh[skelnum]); | 118 | return LINK_TO_QH(uhci, uhci->skelqh[skelnum]); |
120 | } | 119 | } |
121 | 120 | ||
122 | #include "uhci-debug.c" | 121 | #include "uhci-debug.c" |
@@ -135,15 +134,12 @@ static void finish_reset(struct uhci_hcd *uhci) | |||
135 | * We have to clear them by hand. | 134 | * We have to clear them by hand. |
136 | */ | 135 | */ |
137 | for (port = 0; port < uhci->rh_numports; ++port) | 136 | for (port = 0; port < uhci->rh_numports; ++port) |
138 | outw(0, uhci->io_addr + USBPORTSC1 + (port * 2)); | 137 | uhci_writew(uhci, 0, USBPORTSC1 + (port * 2)); |
139 | 138 | ||
140 | uhci->port_c_suspend = uhci->resuming_ports = 0; | 139 | uhci->port_c_suspend = uhci->resuming_ports = 0; |
141 | uhci->rh_state = UHCI_RH_RESET; | 140 | uhci->rh_state = UHCI_RH_RESET; |
142 | uhci->is_stopped = UHCI_IS_STOPPED; | 141 | uhci->is_stopped = UHCI_IS_STOPPED; |
143 | uhci_to_hcd(uhci)->state = HC_STATE_HALT; | ||
144 | clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags); | 142 | clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags); |
145 | |||
146 | uhci->dead = 0; /* Full reset resurrects the controller */ | ||
147 | } | 143 | } |
148 | 144 | ||
149 | /* | 145 | /* |
@@ -153,7 +149,7 @@ static void finish_reset(struct uhci_hcd *uhci) | |||
153 | static void uhci_hc_died(struct uhci_hcd *uhci) | 149 | static void uhci_hc_died(struct uhci_hcd *uhci) |
154 | { | 150 | { |
155 | uhci_get_current_frame_number(uhci); | 151 | uhci_get_current_frame_number(uhci); |
156 | uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr); | 152 | uhci->reset_hc(uhci); |
157 | finish_reset(uhci); | 153 | finish_reset(uhci); |
158 | uhci->dead = 1; | 154 | uhci->dead = 1; |
159 | 155 | ||
@@ -168,97 +164,118 @@ static void uhci_hc_died(struct uhci_hcd *uhci) | |||
168 | */ | 164 | */ |
169 | static void check_and_reset_hc(struct uhci_hcd *uhci) | 165 | static void check_and_reset_hc(struct uhci_hcd *uhci) |
170 | { | 166 | { |
171 | if (uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr)) | 167 | if (uhci->check_and_reset_hc(uhci)) |
172 | finish_reset(uhci); | 168 | finish_reset(uhci); |
173 | } | 169 | } |
174 | 170 | ||
171 | #if defined(CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC) | ||
172 | /* | ||
173 | * The two functions below are generic reset functions that are used on systems | ||
174 | * that do not have keyboard and mouse legacy support. We assume that we are | ||
175 | * running on such a system if CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC is defined. | ||
176 | */ | ||
177 | |||
178 | /* | ||
179 | * Make sure the controller is completely inactive, unable to | ||
180 | * generate interrupts or do DMA. | ||
181 | */ | ||
182 | static void uhci_generic_reset_hc(struct uhci_hcd *uhci) | ||
183 | { | ||
184 | /* Reset the HC - this will force us to get a | ||
185 | * new notification of any already connected | ||
186 | * ports due to the virtual disconnect that it | ||
187 | * implies. | ||
188 | */ | ||
189 | uhci_writew(uhci, USBCMD_HCRESET, USBCMD); | ||
190 | mb(); | ||
191 | udelay(5); | ||
192 | if (uhci_readw(uhci, USBCMD) & USBCMD_HCRESET) | ||
193 | dev_warn(uhci_dev(uhci), "HCRESET not completed yet!\n"); | ||
194 | |||
195 | /* Just to be safe, disable interrupt requests and | ||
196 | * make sure the controller is stopped. | ||
197 | */ | ||
198 | uhci_writew(uhci, 0, USBINTR); | ||
199 | uhci_writew(uhci, 0, USBCMD); | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * Initialize a controller that was newly discovered or has just been | ||
204 | * resumed. In either case we can't be sure of its previous state. | ||
205 | * | ||
206 | * Returns: 1 if the controller was reset, 0 otherwise. | ||
207 | */ | ||
208 | static int uhci_generic_check_and_reset_hc(struct uhci_hcd *uhci) | ||
209 | { | ||
210 | unsigned int cmd, intr; | ||
211 | |||
212 | /* | ||
213 | * When restarting a suspended controller, we expect all the | ||
214 | * settings to be the same as we left them: | ||
215 | * | ||
216 | * Controller is stopped and configured with EGSM set; | ||
217 | * No interrupts enabled except possibly Resume Detect. | ||
218 | * | ||
219 | * If any of these conditions are violated we do a complete reset. | ||
220 | */ | ||
221 | |||
222 | cmd = uhci_readw(uhci, USBCMD); | ||
223 | if ((cmd & USBCMD_RS) || !(cmd & USBCMD_CF) || !(cmd & USBCMD_EGSM)) { | ||
224 | dev_dbg(uhci_dev(uhci), "%s: cmd = 0x%04x\n", | ||
225 | __func__, cmd); | ||
226 | goto reset_needed; | ||
227 | } | ||
228 | |||
229 | intr = uhci_readw(uhci, USBINTR); | ||
230 | if (intr & (~USBINTR_RESUME)) { | ||
231 | dev_dbg(uhci_dev(uhci), "%s: intr = 0x%04x\n", | ||
232 | __func__, intr); | ||
233 | goto reset_needed; | ||
234 | } | ||
235 | return 0; | ||
236 | |||
237 | reset_needed: | ||
238 | dev_dbg(uhci_dev(uhci), "Performing full reset\n"); | ||
239 | uhci_generic_reset_hc(uhci); | ||
240 | return 1; | ||
241 | } | ||
242 | #endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */ | ||
243 | |||
175 | /* | 244 | /* |
176 | * Store the basic register settings needed by the controller. | 245 | * Store the basic register settings needed by the controller. |
177 | */ | 246 | */ |
178 | static void configure_hc(struct uhci_hcd *uhci) | 247 | static void configure_hc(struct uhci_hcd *uhci) |
179 | { | 248 | { |
180 | struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci)); | ||
181 | |||
182 | /* Set the frame length to the default: 1 ms exactly */ | 249 | /* Set the frame length to the default: 1 ms exactly */ |
183 | outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF); | 250 | uhci_writeb(uhci, USBSOF_DEFAULT, USBSOF); |
184 | 251 | ||
185 | /* Store the frame list base address */ | 252 | /* Store the frame list base address */ |
186 | outl(uhci->frame_dma_handle, uhci->io_addr + USBFLBASEADD); | 253 | uhci_writel(uhci, uhci->frame_dma_handle, USBFLBASEADD); |
187 | 254 | ||
188 | /* Set the current frame number */ | 255 | /* Set the current frame number */ |
189 | outw(uhci->frame_number & UHCI_MAX_SOF_NUMBER, | 256 | uhci_writew(uhci, uhci->frame_number & UHCI_MAX_SOF_NUMBER, |
190 | uhci->io_addr + USBFRNUM); | 257 | USBFRNUM); |
191 | |||
192 | /* Mark controller as not halted before we enable interrupts */ | ||
193 | uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED; | ||
194 | mb(); | ||
195 | |||
196 | /* Enable PIRQ */ | ||
197 | pci_write_config_word(pdev, USBLEGSUP, USBLEGSUP_DEFAULT); | ||
198 | 258 | ||
199 | /* Disable platform-specific non-PME# wakeup */ | 259 | /* perform any arch/bus specific configuration */ |
200 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | 260 | if (uhci->configure_hc) |
201 | pci_write_config_byte(pdev, USBRES_INTEL, 0); | 261 | uhci->configure_hc(uhci); |
202 | } | 262 | } |
203 | 263 | ||
204 | |||
205 | static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci) | 264 | static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci) |
206 | { | 265 | { |
207 | int port; | ||
208 | |||
209 | /* If we have to ignore overcurrent events then almost by definition | 266 | /* If we have to ignore overcurrent events then almost by definition |
210 | * we can't depend on resume-detect interrupts. */ | 267 | * we can't depend on resume-detect interrupts. */ |
211 | if (ignore_oc) | 268 | if (ignore_oc) |
212 | return 1; | 269 | return 1; |
213 | 270 | ||
214 | switch (to_pci_dev(uhci_dev(uhci))->vendor) { | 271 | return uhci->resume_detect_interrupts_are_broken ? |
215 | default: | 272 | uhci->resume_detect_interrupts_are_broken(uhci) : 0; |
216 | break; | ||
217 | |||
218 | case PCI_VENDOR_ID_GENESYS: | ||
219 | /* Genesys Logic's GL880S controllers don't generate | ||
220 | * resume-detect interrupts. | ||
221 | */ | ||
222 | return 1; | ||
223 | |||
224 | case PCI_VENDOR_ID_INTEL: | ||
225 | /* Some of Intel's USB controllers have a bug that causes | ||
226 | * resume-detect interrupts if any port has an over-current | ||
227 | * condition. To make matters worse, some motherboards | ||
228 | * hardwire unused USB ports' over-current inputs active! | ||
229 | * To prevent problems, we will not enable resume-detect | ||
230 | * interrupts if any ports are OC. | ||
231 | */ | ||
232 | for (port = 0; port < uhci->rh_numports; ++port) { | ||
233 | if (inw(uhci->io_addr + USBPORTSC1 + port * 2) & | ||
234 | USBPORTSC_OC) | ||
235 | return 1; | ||
236 | } | ||
237 | break; | ||
238 | } | ||
239 | return 0; | ||
240 | } | 273 | } |
241 | 274 | ||
242 | static int global_suspend_mode_is_broken(struct uhci_hcd *uhci) | 275 | static int global_suspend_mode_is_broken(struct uhci_hcd *uhci) |
243 | { | 276 | { |
244 | int port; | 277 | return uhci->global_suspend_mode_is_broken ? |
245 | const char *sys_info; | 278 | uhci->global_suspend_mode_is_broken(uhci) : 0; |
246 | static char bad_Asus_board[] = "A7V8X"; | ||
247 | |||
248 | /* One of Asus's motherboards has a bug which causes it to | ||
249 | * wake up immediately from suspend-to-RAM if any of the ports | ||
250 | * are connected. In such cases we will not set EGSM. | ||
251 | */ | ||
252 | sys_info = dmi_get_system_info(DMI_BOARD_NAME); | ||
253 | if (sys_info && !strcmp(sys_info, bad_Asus_board)) { | ||
254 | for (port = 0; port < uhci->rh_numports; ++port) { | ||
255 | if (inw(uhci->io_addr + USBPORTSC1 + port * 2) & | ||
256 | USBPORTSC_CCS) | ||
257 | return 1; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | return 0; | ||
262 | } | 279 | } |
263 | 280 | ||
264 | static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state) | 281 | static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state) |
@@ -321,8 +338,8 @@ __acquires(uhci->lock) | |||
321 | !int_enable) | 338 | !int_enable) |
322 | uhci->RD_enable = int_enable = 0; | 339 | uhci->RD_enable = int_enable = 0; |
323 | 340 | ||
324 | outw(int_enable, uhci->io_addr + USBINTR); | 341 | uhci_writew(uhci, int_enable, USBINTR); |
325 | outw(egsm_enable | USBCMD_CF, uhci->io_addr + USBCMD); | 342 | uhci_writew(uhci, egsm_enable | USBCMD_CF, USBCMD); |
326 | mb(); | 343 | mb(); |
327 | udelay(5); | 344 | udelay(5); |
328 | 345 | ||
@@ -331,7 +348,7 @@ __acquires(uhci->lock) | |||
331 | * controller should stop after a few microseconds. Otherwise | 348 | * controller should stop after a few microseconds. Otherwise |
332 | * we will give the controller one frame to stop. | 349 | * we will give the controller one frame to stop. |
333 | */ | 350 | */ |
334 | if (!auto_stop && !(inw(uhci->io_addr + USBSTS) & USBSTS_HCH)) { | 351 | if (!auto_stop && !(uhci_readw(uhci, USBSTS) & USBSTS_HCH)) { |
335 | uhci->rh_state = UHCI_RH_SUSPENDING; | 352 | uhci->rh_state = UHCI_RH_SUSPENDING; |
336 | spin_unlock_irq(&uhci->lock); | 353 | spin_unlock_irq(&uhci->lock); |
337 | msleep(1); | 354 | msleep(1); |
@@ -339,7 +356,7 @@ __acquires(uhci->lock) | |||
339 | if (uhci->dead) | 356 | if (uhci->dead) |
340 | return; | 357 | return; |
341 | } | 358 | } |
342 | if (!(inw(uhci->io_addr + USBSTS) & USBSTS_HCH)) | 359 | if (!(uhci_readw(uhci, USBSTS) & USBSTS_HCH)) |
343 | dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n"); | 360 | dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n"); |
344 | 361 | ||
345 | uhci_get_current_frame_number(uhci); | 362 | uhci_get_current_frame_number(uhci); |
@@ -361,15 +378,14 @@ __acquires(uhci->lock) | |||
361 | 378 | ||
362 | static void start_rh(struct uhci_hcd *uhci) | 379 | static void start_rh(struct uhci_hcd *uhci) |
363 | { | 380 | { |
364 | uhci_to_hcd(uhci)->state = HC_STATE_RUNNING; | ||
365 | uhci->is_stopped = 0; | 381 | uhci->is_stopped = 0; |
366 | 382 | ||
367 | /* Mark it configured and running with a 64-byte max packet. | 383 | /* Mark it configured and running with a 64-byte max packet. |
368 | * All interrupts are enabled, even though RESUME won't do anything. | 384 | * All interrupts are enabled, even though RESUME won't do anything. |
369 | */ | 385 | */ |
370 | outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, uhci->io_addr + USBCMD); | 386 | uhci_writew(uhci, USBCMD_RS | USBCMD_CF | USBCMD_MAXP, USBCMD); |
371 | outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP, | 387 | uhci_writew(uhci, USBINTR_TIMEOUT | USBINTR_RESUME | |
372 | uhci->io_addr + USBINTR); | 388 | USBINTR_IOC | USBINTR_SP, USBINTR); |
373 | mb(); | 389 | mb(); |
374 | uhci->rh_state = UHCI_RH_RUNNING; | 390 | uhci->rh_state = UHCI_RH_RUNNING; |
375 | set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags); | 391 | set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags); |
@@ -392,9 +408,9 @@ __acquires(uhci->lock) | |||
392 | unsigned egsm; | 408 | unsigned egsm; |
393 | 409 | ||
394 | /* Keep EGSM on if it was set before */ | 410 | /* Keep EGSM on if it was set before */ |
395 | egsm = inw(uhci->io_addr + USBCMD) & USBCMD_EGSM; | 411 | egsm = uhci_readw(uhci, USBCMD) & USBCMD_EGSM; |
396 | uhci->rh_state = UHCI_RH_RESUMING; | 412 | uhci->rh_state = UHCI_RH_RESUMING; |
397 | outw(USBCMD_FGR | USBCMD_CF | egsm, uhci->io_addr + USBCMD); | 413 | uhci_writew(uhci, USBCMD_FGR | USBCMD_CF | egsm, USBCMD); |
398 | spin_unlock_irq(&uhci->lock); | 414 | spin_unlock_irq(&uhci->lock); |
399 | msleep(20); | 415 | msleep(20); |
400 | spin_lock_irq(&uhci->lock); | 416 | spin_lock_irq(&uhci->lock); |
@@ -402,10 +418,10 @@ __acquires(uhci->lock) | |||
402 | return; | 418 | return; |
403 | 419 | ||
404 | /* End Global Resume and wait for EOP to be sent */ | 420 | /* End Global Resume and wait for EOP to be sent */ |
405 | outw(USBCMD_CF, uhci->io_addr + USBCMD); | 421 | uhci_writew(uhci, USBCMD_CF, USBCMD); |
406 | mb(); | 422 | mb(); |
407 | udelay(4); | 423 | udelay(4); |
408 | if (inw(uhci->io_addr + USBCMD) & USBCMD_FGR) | 424 | if (uhci_readw(uhci, USBCMD) & USBCMD_FGR) |
409 | dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n"); | 425 | dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n"); |
410 | } | 426 | } |
411 | 427 | ||
@@ -425,10 +441,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) | |||
425 | * interrupt cause. Contrary to the UHCI specification, the | 441 | * interrupt cause. Contrary to the UHCI specification, the |
426 | * "HC Halted" status bit is persistent: it is RO, not R/WC. | 442 | * "HC Halted" status bit is persistent: it is RO, not R/WC. |
427 | */ | 443 | */ |
428 | status = inw(uhci->io_addr + USBSTS); | 444 | status = uhci_readw(uhci, USBSTS); |
429 | if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */ | 445 | if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */ |
430 | return IRQ_NONE; | 446 | return IRQ_NONE; |
431 | outw(status, uhci->io_addr + USBSTS); /* Clear it */ | 447 | uhci_writew(uhci, status, USBSTS); /* Clear it */ |
432 | 448 | ||
433 | if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { | 449 | if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { |
434 | if (status & USBSTS_HSE) | 450 | if (status & USBSTS_HSE) |
@@ -450,6 +466,7 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) | |||
450 | lprintk(errbuf); | 466 | lprintk(errbuf); |
451 | } | 467 | } |
452 | uhci_hc_died(uhci); | 468 | uhci_hc_died(uhci); |
469 | usb_hc_died(hcd); | ||
453 | 470 | ||
454 | /* Force a callback in case there are | 471 | /* Force a callback in case there are |
455 | * pending unlinks */ | 472 | * pending unlinks */ |
@@ -483,7 +500,7 @@ static void uhci_get_current_frame_number(struct uhci_hcd *uhci) | |||
483 | if (!uhci->is_stopped) { | 500 | if (!uhci->is_stopped) { |
484 | unsigned delta; | 501 | unsigned delta; |
485 | 502 | ||
486 | delta = (inw(uhci->io_addr + USBFRNUM) - uhci->frame_number) & | 503 | delta = (uhci_readw(uhci, USBFRNUM) - uhci->frame_number) & |
487 | (UHCI_NUMFRAMES - 1); | 504 | (UHCI_NUMFRAMES - 1); |
488 | uhci->frame_number += delta; | 505 | uhci->frame_number += delta; |
489 | } | 506 | } |
@@ -520,61 +537,6 @@ static void release_uhci(struct uhci_hcd *uhci) | |||
520 | uhci->frame, uhci->frame_dma_handle); | 537 | uhci->frame, uhci->frame_dma_handle); |
521 | } | 538 | } |
522 | 539 | ||
523 | static int uhci_init(struct usb_hcd *hcd) | ||
524 | { | ||
525 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | ||
526 | unsigned io_size = (unsigned) hcd->rsrc_len; | ||
527 | int port; | ||
528 | |||
529 | uhci->io_addr = (unsigned long) hcd->rsrc_start; | ||
530 | |||
531 | /* The UHCI spec says devices must have 2 ports, and goes on to say | ||
532 | * they may have more but gives no way to determine how many there | ||
533 | * are. However according to the UHCI spec, Bit 7 of the port | ||
534 | * status and control register is always set to 1. So we try to | ||
535 | * use this to our advantage. Another common failure mode when | ||
536 | * a nonexistent register is addressed is to return all ones, so | ||
537 | * we test for that also. | ||
538 | */ | ||
539 | for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) { | ||
540 | unsigned int portstatus; | ||
541 | |||
542 | portstatus = inw(uhci->io_addr + USBPORTSC1 + (port * 2)); | ||
543 | if (!(portstatus & 0x0080) || portstatus == 0xffff) | ||
544 | break; | ||
545 | } | ||
546 | if (debug) | ||
547 | dev_info(uhci_dev(uhci), "detected %d ports\n", port); | ||
548 | |||
549 | /* Anything greater than 7 is weird so we'll ignore it. */ | ||
550 | if (port > UHCI_RH_MAXCHILD) { | ||
551 | dev_info(uhci_dev(uhci), "port count misdetected? " | ||
552 | "forcing to 2 ports\n"); | ||
553 | port = 2; | ||
554 | } | ||
555 | uhci->rh_numports = port; | ||
556 | |||
557 | /* Kick BIOS off this hardware and reset if the controller | ||
558 | * isn't already safely quiescent. | ||
559 | */ | ||
560 | check_and_reset_hc(uhci); | ||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | /* Make sure the controller is quiescent and that we're not using it | ||
565 | * any more. This is mainly for the benefit of programs which, like kexec, | ||
566 | * expect the hardware to be idle: not doing DMA or generating IRQs. | ||
567 | * | ||
568 | * This routine may be called in a damaged or failing kernel. Hence we | ||
569 | * do not acquire the spinlock before shutting down the controller. | ||
570 | */ | ||
571 | static void uhci_shutdown(struct pci_dev *pdev) | ||
572 | { | ||
573 | struct usb_hcd *hcd = pci_get_drvdata(pdev); | ||
574 | |||
575 | uhci_hc_died(hcd_to_uhci(hcd)); | ||
576 | } | ||
577 | |||
578 | /* | 540 | /* |
579 | * Allocate a frame list, and then setup the skeleton | 541 | * Allocate a frame list, and then setup the skeleton |
580 | * | 542 | * |
@@ -669,16 +631,16 @@ static int uhci_start(struct usb_hcd *hcd) | |||
669 | * 8 Interrupt queues; link all higher int queues to int1 = async | 631 | * 8 Interrupt queues; link all higher int queues to int1 = async |
670 | */ | 632 | */ |
671 | for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i) | 633 | for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i) |
672 | uhci->skelqh[i]->link = LINK_TO_QH(uhci->skel_async_qh); | 634 | uhci->skelqh[i]->link = LINK_TO_QH(uhci, uhci->skel_async_qh); |
673 | uhci->skel_async_qh->link = UHCI_PTR_TERM; | 635 | uhci->skel_async_qh->link = UHCI_PTR_TERM(uhci); |
674 | uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_term_qh); | 636 | uhci->skel_term_qh->link = LINK_TO_QH(uhci, uhci->skel_term_qh); |
675 | 637 | ||
676 | /* This dummy TD is to work around a bug in Intel PIIX controllers */ | 638 | /* This dummy TD is to work around a bug in Intel PIIX controllers */ |
677 | uhci_fill_td(uhci->term_td, 0, uhci_explen(0) | | 639 | uhci_fill_td(uhci, uhci->term_td, 0, uhci_explen(0) | |
678 | (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0); | 640 | (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0); |
679 | uhci->term_td->link = UHCI_PTR_TERM; | 641 | uhci->term_td->link = UHCI_PTR_TERM(uhci); |
680 | uhci->skel_async_qh->element = uhci->skel_term_qh->element = | 642 | uhci->skel_async_qh->element = uhci->skel_term_qh->element = |
681 | LINK_TO_TD(uhci->term_td); | 643 | LINK_TO_TD(uhci, uhci->term_td); |
682 | 644 | ||
683 | /* | 645 | /* |
684 | * Fill the frame list: make all entries point to the proper | 646 | * Fill the frame list: make all entries point to the proper |
@@ -791,86 +753,6 @@ static int uhci_rh_resume(struct usb_hcd *hcd) | |||
791 | return rc; | 753 | return rc; |
792 | } | 754 | } |
793 | 755 | ||
794 | static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) | ||
795 | { | ||
796 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | ||
797 | struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci)); | ||
798 | int rc = 0; | ||
799 | |||
800 | dev_dbg(uhci_dev(uhci), "%s\n", __func__); | ||
801 | |||
802 | spin_lock_irq(&uhci->lock); | ||
803 | if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead) | ||
804 | goto done_okay; /* Already suspended or dead */ | ||
805 | |||
806 | if (uhci->rh_state > UHCI_RH_SUSPENDED) { | ||
807 | dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n"); | ||
808 | rc = -EBUSY; | ||
809 | goto done; | ||
810 | }; | ||
811 | |||
812 | /* All PCI host controllers are required to disable IRQ generation | ||
813 | * at the source, so we must turn off PIRQ. | ||
814 | */ | ||
815 | pci_write_config_word(pdev, USBLEGSUP, 0); | ||
816 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); | ||
817 | |||
818 | /* Enable platform-specific non-PME# wakeup */ | ||
819 | if (do_wakeup) { | ||
820 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | ||
821 | pci_write_config_byte(pdev, USBRES_INTEL, | ||
822 | USBPORT1EN | USBPORT2EN); | ||
823 | } | ||
824 | |||
825 | done_okay: | ||
826 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | ||
827 | done: | ||
828 | spin_unlock_irq(&uhci->lock); | ||
829 | return rc; | ||
830 | } | ||
831 | |||
832 | static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated) | ||
833 | { | ||
834 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | ||
835 | |||
836 | dev_dbg(uhci_dev(uhci), "%s\n", __func__); | ||
837 | |||
838 | /* Since we aren't in D3 any more, it's safe to set this flag | ||
839 | * even if the controller was dead. | ||
840 | */ | ||
841 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | ||
842 | |||
843 | spin_lock_irq(&uhci->lock); | ||
844 | |||
845 | /* Make sure resume from hibernation re-enumerates everything */ | ||
846 | if (hibernated) | ||
847 | uhci_hc_died(uhci); | ||
848 | |||
849 | /* The firmware or a boot kernel may have changed the controller | ||
850 | * settings during a system wakeup. Check it and reconfigure | ||
851 | * to avoid problems. | ||
852 | */ | ||
853 | check_and_reset_hc(uhci); | ||
854 | |||
855 | /* If the controller was dead before, it's back alive now */ | ||
856 | configure_hc(uhci); | ||
857 | |||
858 | /* Tell the core if the controller had to be reset */ | ||
859 | if (uhci->rh_state == UHCI_RH_RESET) | ||
860 | usb_root_hub_lost_power(hcd->self.root_hub); | ||
861 | |||
862 | spin_unlock_irq(&uhci->lock); | ||
863 | |||
864 | /* If interrupts don't work and remote wakeup is enabled then | ||
865 | * the suspended root hub needs to be polled. | ||
866 | */ | ||
867 | if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup) | ||
868 | set_bit(HCD_FLAG_POLL_RH, &hcd->flags); | ||
869 | |||
870 | /* Does the root hub have a port wakeup pending? */ | ||
871 | usb_hcd_poll_rh_status(hcd); | ||
872 | return 0; | ||
873 | } | ||
874 | #endif | 756 | #endif |
875 | 757 | ||
876 | /* Wait until a particular device/endpoint's QH is idle, and free it */ | 758 | /* Wait until a particular device/endpoint's QH is idle, and free it */ |
@@ -908,67 +790,62 @@ static int uhci_hcd_get_frame_number(struct usb_hcd *hcd) | |||
908 | /* Minimize latency by avoiding the spinlock */ | 790 | /* Minimize latency by avoiding the spinlock */ |
909 | frame_number = uhci->frame_number; | 791 | frame_number = uhci->frame_number; |
910 | barrier(); | 792 | barrier(); |
911 | delta = (inw(uhci->io_addr + USBFRNUM) - frame_number) & | 793 | delta = (uhci_readw(uhci, USBFRNUM) - frame_number) & |
912 | (UHCI_NUMFRAMES - 1); | 794 | (UHCI_NUMFRAMES - 1); |
913 | return frame_number + delta; | 795 | return frame_number + delta; |
914 | } | 796 | } |
915 | 797 | ||
916 | static const char hcd_name[] = "uhci_hcd"; | 798 | /* Determines number of ports on controller */ |
917 | 799 | static int uhci_count_ports(struct usb_hcd *hcd) | |
918 | static const struct hc_driver uhci_driver = { | 800 | { |
919 | .description = hcd_name, | 801 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); |
920 | .product_desc = "UHCI Host Controller", | 802 | unsigned io_size = (unsigned) hcd->rsrc_len; |
921 | .hcd_priv_size = sizeof(struct uhci_hcd), | 803 | int port; |
922 | |||
923 | /* Generic hardware linkage */ | ||
924 | .irq = uhci_irq, | ||
925 | .flags = HCD_USB11, | ||
926 | |||
927 | /* Basic lifecycle operations */ | ||
928 | .reset = uhci_init, | ||
929 | .start = uhci_start, | ||
930 | #ifdef CONFIG_PM | ||
931 | .pci_suspend = uhci_pci_suspend, | ||
932 | .pci_resume = uhci_pci_resume, | ||
933 | .bus_suspend = uhci_rh_suspend, | ||
934 | .bus_resume = uhci_rh_resume, | ||
935 | #endif | ||
936 | .stop = uhci_stop, | ||
937 | 804 | ||
938 | .urb_enqueue = uhci_urb_enqueue, | 805 | /* The UHCI spec says devices must have 2 ports, and goes on to say |
939 | .urb_dequeue = uhci_urb_dequeue, | 806 | * they may have more but gives no way to determine how many there |
807 | * are. However according to the UHCI spec, Bit 7 of the port | ||
808 | * status and control register is always set to 1. So we try to | ||
809 | * use this to our advantage. Another common failure mode when | ||
810 | * a nonexistent register is addressed is to return all ones, so | ||
811 | * we test for that also. | ||
812 | */ | ||
813 | for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) { | ||
814 | unsigned int portstatus; | ||
940 | 815 | ||
941 | .endpoint_disable = uhci_hcd_endpoint_disable, | 816 | portstatus = uhci_readw(uhci, USBPORTSC1 + (port * 2)); |
942 | .get_frame_number = uhci_hcd_get_frame_number, | 817 | if (!(portstatus & 0x0080) || portstatus == 0xffff) |
818 | break; | ||
819 | } | ||
820 | if (debug) | ||
821 | dev_info(uhci_dev(uhci), "detected %d ports\n", port); | ||
943 | 822 | ||
944 | .hub_status_data = uhci_hub_status_data, | 823 | /* Anything greater than 7 is weird so we'll ignore it. */ |
945 | .hub_control = uhci_hub_control, | 824 | if (port > UHCI_RH_MAXCHILD) { |
946 | }; | 825 | dev_info(uhci_dev(uhci), "port count misdetected? " |
826 | "forcing to 2 ports\n"); | ||
827 | port = 2; | ||
828 | } | ||
947 | 829 | ||
948 | static const struct pci_device_id uhci_pci_ids[] = { { | 830 | return port; |
949 | /* handle any USB UHCI controller */ | 831 | } |
950 | PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0), | ||
951 | .driver_data = (unsigned long) &uhci_driver, | ||
952 | }, { /* end: all zeroes */ } | ||
953 | }; | ||
954 | 832 | ||
955 | MODULE_DEVICE_TABLE(pci, uhci_pci_ids); | 833 | static const char hcd_name[] = "uhci_hcd"; |
956 | 834 | ||
957 | static struct pci_driver uhci_pci_driver = { | 835 | #ifdef CONFIG_PCI |
958 | .name = (char *)hcd_name, | 836 | #include "uhci-pci.c" |
959 | .id_table = uhci_pci_ids, | 837 | #define PCI_DRIVER uhci_pci_driver |
838 | #endif | ||
960 | 839 | ||
961 | .probe = usb_hcd_pci_probe, | 840 | #ifdef CONFIG_SPARC_LEON |
962 | .remove = usb_hcd_pci_remove, | 841 | #include "uhci-grlib.c" |
963 | .shutdown = uhci_shutdown, | 842 | #define PLATFORM_DRIVER uhci_grlib_driver |
843 | #endif | ||
964 | 844 | ||
965 | #ifdef CONFIG_PM_SLEEP | 845 | #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) |
966 | .driver = { | 846 | #error "missing bus glue for uhci-hcd" |
967 | .pm = &usb_hcd_pci_pm_ops | ||
968 | }, | ||
969 | #endif | 847 | #endif |
970 | }; | 848 | |
971 | |||
972 | static int __init uhci_hcd_init(void) | 849 | static int __init uhci_hcd_init(void) |
973 | { | 850 | { |
974 | int retval = -ENOMEM; | 851 | int retval = -ENOMEM; |
@@ -994,13 +871,27 @@ static int __init uhci_hcd_init(void) | |||
994 | if (!uhci_up_cachep) | 871 | if (!uhci_up_cachep) |
995 | goto up_failed; | 872 | goto up_failed; |
996 | 873 | ||
997 | retval = pci_register_driver(&uhci_pci_driver); | 874 | #ifdef PLATFORM_DRIVER |
998 | if (retval) | 875 | retval = platform_driver_register(&PLATFORM_DRIVER); |
999 | goto init_failed; | 876 | if (retval < 0) |
877 | goto clean0; | ||
878 | #endif | ||
879 | |||
880 | #ifdef PCI_DRIVER | ||
881 | retval = pci_register_driver(&PCI_DRIVER); | ||
882 | if (retval < 0) | ||
883 | goto clean1; | ||
884 | #endif | ||
1000 | 885 | ||
1001 | return 0; | 886 | return 0; |
1002 | 887 | ||
1003 | init_failed: | 888 | #ifdef PCI_DRIVER |
889 | clean1: | ||
890 | #endif | ||
891 | #ifdef PLATFORM_DRIVER | ||
892 | platform_driver_unregister(&PLATFORM_DRIVER); | ||
893 | clean0: | ||
894 | #endif | ||
1004 | kmem_cache_destroy(uhci_up_cachep); | 895 | kmem_cache_destroy(uhci_up_cachep); |
1005 | 896 | ||
1006 | up_failed: | 897 | up_failed: |
@@ -1017,7 +908,12 @@ errbuf_failed: | |||
1017 | 908 | ||
1018 | static void __exit uhci_hcd_cleanup(void) | 909 | static void __exit uhci_hcd_cleanup(void) |
1019 | { | 910 | { |
1020 | pci_unregister_driver(&uhci_pci_driver); | 911 | #ifdef PLATFORM_DRIVER |
912 | platform_driver_unregister(&PLATFORM_DRIVER); | ||
913 | #endif | ||
914 | #ifdef PCI_DRIVER | ||
915 | pci_unregister_driver(&PCI_DRIVER); | ||
916 | #endif | ||
1021 | kmem_cache_destroy(uhci_up_cachep); | 917 | kmem_cache_destroy(uhci_up_cachep); |
1022 | debugfs_remove(uhci_debugfs_root); | 918 | debugfs_remove(uhci_debugfs_root); |
1023 | kfree(errbuf); | 919 | kfree(errbuf); |
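With this change uhci-hcd.c no longer registers a PCI driver itself; it builds a shared core and pulls in whichever bus-glue files the configuration selects, each glue defining PCI_DRIVER or PLATFORM_DRIVER for the init/cleanup code above. As a rough sketch only (the Kconfig symbol, file name and driver name below are invented for illustration and are not part of this patch), a further platform glue would hook in the same way:

	#ifdef CONFIG_USB_UHCI_MYSOC		/* hypothetical Kconfig symbol */
	#include "uhci-mysoc.c"			/* would define uhci_mysoc_driver */
	#define PLATFORM_DRIVER		uhci_mysoc_driver
	#endif

Note that uhci_hcd_init() registers at most one PLATFORM_DRIVER and one PCI_DRIVER, so a configuration wanting two platform glues at once would still need additional registration logic.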
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h index 49bf2790f9c2..7af2b7052047 100644 --- a/drivers/usb/host/uhci-hcd.h +++ b/drivers/usb/host/uhci-hcd.h | |||
@@ -78,11 +78,11 @@ | |||
78 | #define USBPORT1EN 0x01 | 78 | #define USBPORT1EN 0x01 |
79 | #define USBPORT2EN 0x02 | 79 | #define USBPORT2EN 0x02 |
80 | 80 | ||
81 | #define UHCI_PTR_BITS cpu_to_le32(0x000F) | 81 | #define UHCI_PTR_BITS(uhci) cpu_to_hc32((uhci), 0x000F) |
82 | #define UHCI_PTR_TERM cpu_to_le32(0x0001) | 82 | #define UHCI_PTR_TERM(uhci) cpu_to_hc32((uhci), 0x0001) |
83 | #define UHCI_PTR_QH cpu_to_le32(0x0002) | 83 | #define UHCI_PTR_QH(uhci) cpu_to_hc32((uhci), 0x0002) |
84 | #define UHCI_PTR_DEPTH cpu_to_le32(0x0004) | 84 | #define UHCI_PTR_DEPTH(uhci) cpu_to_hc32((uhci), 0x0004) |
85 | #define UHCI_PTR_BREADTH cpu_to_le32(0x0000) | 85 | #define UHCI_PTR_BREADTH(uhci) cpu_to_hc32((uhci), 0x0000) |
86 | 86 | ||
87 | #define UHCI_NUMFRAMES 1024 /* in the frame list [array] */ | 87 | #define UHCI_NUMFRAMES 1024 /* in the frame list [array] */ |
88 | #define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */ | 88 | #define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */ |
@@ -99,6 +99,22 @@ | |||
99 | 99 | ||
100 | 100 | ||
101 | /* | 101 | /* |
102 | * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to | ||
103 | * __leXX (normally) or __beXX (given UHCI_BIG_ENDIAN_DESC), depending on | ||
104 | * the host controller implementation. | ||
105 | * | ||
106 | * To facilitate the strongest possible byte-order checking from "sparse" | ||
107 | * and so on, we use __leXX unless that's not practical. | ||
108 | */ | ||
109 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_DESC | ||
110 | typedef __u32 __bitwise __hc32; | ||
111 | typedef __u16 __bitwise __hc16; | ||
112 | #else | ||
113 | #define __hc32 __le32 | ||
114 | #define __hc16 __le16 | ||
115 | #endif | ||
116 | |||
117 | /* | ||
102 | * Queue Headers | 118 | * Queue Headers |
103 | */ | 119 | */ |
104 | 120 | ||
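The __hc32 type only matters at the handful of places where the driver builds or parses hardware pointers and TD words; everything else keeps using plain u32. A minimal sketch of the intended usage, relying only on helpers defined later in this header (the function name is made up for illustration):

	static void example_link_qhs(struct uhci_hcd *uhci, struct uhci_qh *prev,
			struct uhci_qh *next)
	{
		/* Build the link pointer in the controller's byte order */
		prev->link = UHCI_PTR_QH(uhci) | cpu_to_hc32(uhci, next->dma_handle);

		/* Convert back to CPU order before testing bits */
		if (hc32_to_cpu(uhci, qh_element(prev)) & 0x0001)	/* UHCI_PTR_TERM */
			return;		/* no TDs queued on this QH */
	}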
@@ -130,8 +146,8 @@ | |||
130 | 146 | ||
131 | struct uhci_qh { | 147 | struct uhci_qh { |
132 | /* Hardware fields */ | 148 | /* Hardware fields */ |
133 | __le32 link; /* Next QH in the schedule */ | 149 | __hc32 link; /* Next QH in the schedule */ |
134 | __le32 element; /* Queue element (TD) pointer */ | 150 | __hc32 element; /* Queue element (TD) pointer */ |
135 | 151 | ||
136 | /* Software fields */ | 152 | /* Software fields */ |
137 | dma_addr_t dma_handle; | 153 | dma_addr_t dma_handle; |
@@ -168,14 +184,10 @@ struct uhci_qh { | |||
168 | * We need a special accessor for the element pointer because it is | 184 | * We need a special accessor for the element pointer because it is |
169 | * subject to asynchronous updates by the controller. | 185 | * subject to asynchronous updates by the controller. |
170 | */ | 186 | */ |
171 | static inline __le32 qh_element(struct uhci_qh *qh) { | 187 | #define qh_element(qh) ACCESS_ONCE((qh)->element) |
172 | __le32 element = qh->element; | ||
173 | 188 | ||
174 | barrier(); | 189 | #define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \ |
175 | return element; | 190 | cpu_to_hc32((uhci), (qh)->dma_handle)) |
176 | } | ||
177 | |||
178 | #define LINK_TO_QH(qh) (UHCI_PTR_QH | cpu_to_le32((qh)->dma_handle)) | ||
179 | 191 | ||
180 | 192 | ||
181 | /* | 193 | /* |
@@ -212,7 +224,7 @@ static inline __le32 qh_element(struct uhci_qh *qh) { | |||
212 | /* | 224 | /* |
213 | * for TD <info>: (a.k.a. Token) | 225 | * for TD <info>: (a.k.a. Token) |
214 | */ | 226 | */ |
215 | #define td_token(td) le32_to_cpu((td)->token) | 227 | #define td_token(uhci, td) hc32_to_cpu((uhci), (td)->token) |
216 | #define TD_TOKEN_DEVADDR_SHIFT 8 | 228 | #define TD_TOKEN_DEVADDR_SHIFT 8 |
217 | #define TD_TOKEN_TOGGLE_SHIFT 19 | 229 | #define TD_TOKEN_TOGGLE_SHIFT 19 |
218 | #define TD_TOKEN_TOGGLE (1 << 19) | 230 | #define TD_TOKEN_TOGGLE (1 << 19) |
@@ -245,10 +257,10 @@ static inline __le32 qh_element(struct uhci_qh *qh) { | |||
245 | */ | 257 | */ |
246 | struct uhci_td { | 258 | struct uhci_td { |
247 | /* Hardware fields */ | 259 | /* Hardware fields */ |
248 | __le32 link; | 260 | __hc32 link; |
249 | __le32 status; | 261 | __hc32 status; |
250 | __le32 token; | 262 | __hc32 token; |
251 | __le32 buffer; | 263 | __hc32 buffer; |
252 | 264 | ||
253 | /* Software fields */ | 265 | /* Software fields */ |
254 | dma_addr_t dma_handle; | 266 | dma_addr_t dma_handle; |
@@ -263,14 +275,10 @@ struct uhci_td { | |||
263 | * We need a special accessor for the control/status word because it is | 275 | * We need a special accessor for the control/status word because it is |
264 | * subject to asynchronous updates by the controller. | 276 | * subject to asynchronous updates by the controller. |
265 | */ | 277 | */ |
266 | static inline u32 td_status(struct uhci_td *td) { | 278 | #define td_status(uhci, td) hc32_to_cpu((uhci), \ |
267 | __le32 status = td->status; | 279 | ACCESS_ONCE((td)->status)) |
268 | 280 | ||
269 | barrier(); | 281 | #define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle)) |
270 | return le32_to_cpu(status); | ||
271 | } | ||
272 | |||
273 | #define LINK_TO_TD(td) (cpu_to_le32((td)->dma_handle)) | ||
274 | 282 | ||
275 | 283 | ||
276 | /* | 284 | /* |
@@ -380,6 +388,9 @@ struct uhci_hcd { | |||
380 | /* Grabbed from PCI */ | 388 | /* Grabbed from PCI */ |
381 | unsigned long io_addr; | 389 | unsigned long io_addr; |
382 | 390 | ||
391 | /* Used when registers are memory mapped */ | ||
392 | void __iomem *regs; | ||
393 | |||
383 | struct dma_pool *qh_pool; | 394 | struct dma_pool *qh_pool; |
384 | struct dma_pool *td_pool; | 395 | struct dma_pool *td_pool; |
385 | 396 | ||
@@ -390,7 +401,7 @@ struct uhci_hcd { | |||
390 | spinlock_t lock; | 401 | spinlock_t lock; |
391 | 402 | ||
392 | dma_addr_t frame_dma_handle; /* Hardware frame list */ | 403 | dma_addr_t frame_dma_handle; /* Hardware frame list */ |
393 | __le32 *frame; | 404 | __hc32 *frame; |
394 | void **frame_cpu; /* CPU's frame list */ | 405 | void **frame_cpu; /* CPU's frame list */ |
395 | 406 | ||
396 | enum uhci_rh_state rh_state; | 407 | enum uhci_rh_state rh_state; |
@@ -415,6 +426,12 @@ struct uhci_hcd { | |||
415 | 426 | ||
416 | struct timer_list fsbr_timer; /* For turning off FSBR */ | 427 | struct timer_list fsbr_timer; /* For turning off FSBR */ |
417 | 428 | ||
429 | /* Silicon quirks */ | ||
430 | unsigned int oc_low:1; /* OverCurrent bit active low */ | ||
431 | unsigned int wait_for_hp:1; /* Wait for HP port reset */ | ||
432 | unsigned int big_endian_mmio:1; /* Big endian registers */ | ||
433 | unsigned int big_endian_desc:1; /* Big endian descriptors */ | ||
434 | |||
418 | /* Support for port suspend/resume/reset */ | 435 | /* Support for port suspend/resume/reset */ |
419 | unsigned long port_c_suspend; /* Bit-arrays of ports */ | 436 | unsigned long port_c_suspend; /* Bit-arrays of ports */ |
420 | unsigned long resuming_ports; | 437 | unsigned long resuming_ports; |
@@ -429,6 +446,16 @@ struct uhci_hcd { | |||
429 | 446 | ||
430 | int total_load; /* Sum of array values */ | 447 | int total_load; /* Sum of array values */ |
431 | short load[MAX_PHASE]; /* Periodic allocations */ | 448 | short load[MAX_PHASE]; /* Periodic allocations */ |
449 | |||
450 | /* Reset host controller */ | ||
451 | void (*reset_hc) (struct uhci_hcd *uhci); | ||
452 | int (*check_and_reset_hc) (struct uhci_hcd *uhci); | ||
453 | /* configure_hc should perform arch specific settings, if needed */ | ||
454 | void (*configure_hc) (struct uhci_hcd *uhci); | ||
455 | /* Check for broken resume detect interrupts */ | ||
456 | int (*resume_detect_interrupts_are_broken) (struct uhci_hcd *uhci); | ||
457 | /* Check for broken global suspend */ | ||
458 | int (*global_suspend_mode_is_broken) (struct uhci_hcd *uhci); | ||
432 | }; | 459 | }; |
433 | 460 | ||
434 | /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */ | 461 | /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */ |
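These function pointers are the seam between the shared core and each bus glue: the core calls them, treating a NULL pointer as "no quirk", and the glue's ->reset routine fills them in before check_and_reset_hc() runs. A hypothetical minimal non-PCI glue init, shown only as a sketch (uhci_mysoc_init is an invented name; the real GRLIB glue lives in uhci-grlib.c, which this diff does not show), could look like:

	static int uhci_mysoc_init(struct usb_hcd *hcd)
	{
		struct uhci_hcd *uhci = hcd_to_uhci(hcd);

		uhci->regs = hcd->regs;			/* memory-mapped registers */
		uhci->rh_numports = uhci_count_ports(hcd);

		/* Use the generic helpers built into uhci-hcd.c for controllers
		 * without PC-style keyboard/mouse legacy support. */
		uhci->reset_hc = uhci_generic_reset_hc;
		uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc;
		/* configure_hc and the two quirk hooks stay NULL: no quirks */

		check_and_reset_hc(uhci);
		return 0;
	}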
@@ -467,4 +494,171 @@ struct urb_priv { | |||
467 | #define PCI_VENDOR_ID_GENESYS 0x17a0 | 494 | #define PCI_VENDOR_ID_GENESYS 0x17a0 |
468 | #define PCI_DEVICE_ID_GL880S_UHCI 0x8083 | 495 | #define PCI_DEVICE_ID_GL880S_UHCI 0x8083 |
469 | 496 | ||
497 | /* | ||
498 | * Functions used to access controller registers. The UHCI spec says that host | ||
499 | * controller I/O registers are mapped into PCI I/O space. For non-PCI hosts | ||
500 | * we use memory mapped registers. | ||
501 | */ | ||
502 | |||
503 | #ifndef CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC | ||
504 | /* Support PCI only */ | ||
505 | static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg) | ||
506 | { | ||
507 | return inl(uhci->io_addr + reg); | ||
508 | } | ||
509 | |||
510 | static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg) | ||
511 | { | ||
512 | outl(val, uhci->io_addr + reg); | ||
513 | } | ||
514 | |||
515 | static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg) | ||
516 | { | ||
517 | return inw(uhci->io_addr + reg); | ||
518 | } | ||
519 | |||
520 | static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg) | ||
521 | { | ||
522 | outw(val, uhci->io_addr + reg); | ||
523 | } | ||
524 | |||
525 | static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg) | ||
526 | { | ||
527 | return inb(uhci->io_addr + reg); | ||
528 | } | ||
529 | |||
530 | static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg) | ||
531 | { | ||
532 | outb(val, uhci->io_addr + reg); | ||
533 | } | ||
534 | |||
535 | #else | ||
536 | /* Support non-PCI host controllers */ | ||
537 | #ifdef CONFIG_PCI | ||
538 | /* Support PCI and non-PCI host controllers */ | ||
539 | #define uhci_has_pci_registers(u) ((u)->io_addr != 0) | ||
540 | #else | ||
541 | /* Support non-PCI host controllers only */ | ||
542 | #define uhci_has_pci_registers(u) 0 | ||
543 | #endif | ||
544 | |||
545 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO | ||
546 | /* Support (non-PCI) big endian host controllers */ | ||
547 | #define uhci_big_endian_mmio(u) ((u)->big_endian_mmio) | ||
548 | #else | ||
549 | #define uhci_big_endian_mmio(u) 0 | ||
550 | #endif | ||
551 | |||
552 | static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg) | ||
553 | { | ||
554 | if (uhci_has_pci_registers(uhci)) | ||
555 | return inl(uhci->io_addr + reg); | ||
556 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO | ||
557 | else if (uhci_big_endian_mmio(uhci)) | ||
558 | return readl_be(uhci->regs + reg); | ||
559 | #endif | ||
560 | else | ||
561 | return readl(uhci->regs + reg); | ||
562 | } | ||
563 | |||
564 | static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg) | ||
565 | { | ||
566 | if (uhci_has_pci_registers(uhci)) | ||
567 | outl(val, uhci->io_addr + reg); | ||
568 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO | ||
569 | else if (uhci_big_endian_mmio(uhci)) | ||
570 | writel_be(val, uhci->regs + reg); | ||
571 | #endif | ||
572 | else | ||
573 | writel(val, uhci->regs + reg); | ||
574 | } | ||
575 | |||
576 | static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg) | ||
577 | { | ||
578 | if (uhci_has_pci_registers(uhci)) | ||
579 | return inw(uhci->io_addr + reg); | ||
580 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO | ||
581 | else if (uhci_big_endian_mmio(uhci)) | ||
582 | return readw_be(uhci->regs + reg); | ||
583 | #endif | ||
584 | else | ||
585 | return readw(uhci->regs + reg); | ||
586 | } | ||
587 | |||
588 | static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg) | ||
589 | { | ||
590 | if (uhci_has_pci_registers(uhci)) | ||
591 | outw(val, uhci->io_addr + reg); | ||
592 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO | ||
593 | else if (uhci_big_endian_mmio(uhci)) | ||
594 | writew_be(val, uhci->regs + reg); | ||
595 | #endif | ||
596 | else | ||
597 | writew(val, uhci->regs + reg); | ||
598 | } | ||
599 | |||
600 | static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg) | ||
601 | { | ||
602 | if (uhci_has_pci_registers(uhci)) | ||
603 | return inb(uhci->io_addr + reg); | ||
604 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO | ||
605 | else if (uhci_big_endian_mmio(uhci)) | ||
606 | return readb_be(uhci->regs + reg); | ||
607 | #endif | ||
608 | else | ||
609 | return readb(uhci->regs + reg); | ||
610 | } | ||
611 | |||
612 | static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg) | ||
613 | { | ||
614 | if (uhci_has_pci_registers(uhci)) | ||
615 | outb(val, uhci->io_addr + reg); | ||
616 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO | ||
617 | else if (uhci_big_endian_mmio(uhci)) | ||
618 | writeb_be(val, uhci->regs + reg); | ||
619 | #endif | ||
620 | else | ||
621 | writeb(val, uhci->regs + reg); | ||
622 | } | ||
623 | #endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */ | ||
624 | |||
625 | /* | ||
626 | * The GRLIB GRUSBHC controller can use big endian format for its descriptors. | ||
627 | * | ||
628 | * UHCI controllers accessed through PCI work normally (little-endian | ||
629 | * everywhere), so we don't bother supporting a BE-only mode. | ||
630 | */ | ||
631 | #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_DESC | ||
632 | #define uhci_big_endian_desc(u) ((u)->big_endian_desc) | ||
633 | |||
634 | /* cpu to uhci */ | ||
635 | static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, const u32 x) | ||
636 | { | ||
637 | return uhci_big_endian_desc(uhci) | ||
638 | ? (__force __hc32)cpu_to_be32(x) | ||
639 | : (__force __hc32)cpu_to_le32(x); | ||
640 | } | ||
641 | |||
642 | /* uhci to cpu */ | ||
643 | static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, const __hc32 x) | ||
644 | { | ||
645 | return uhci_big_endian_desc(uhci) | ||
646 | ? be32_to_cpu((__force __be32)x) | ||
647 | : le32_to_cpu((__force __le32)x); | ||
648 | } | ||
649 | |||
650 | #else | ||
651 | /* cpu to uhci */ | ||
652 | static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, const u32 x) | ||
653 | { | ||
654 | return cpu_to_le32(x); | ||
655 | } | ||
656 | |||
657 | /* uhci to cpu */ | ||
658 | static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, const __hc32 x) | ||
659 | { | ||
660 | return le32_to_cpu(x); | ||
661 | } | ||
662 | #endif | ||
663 | |||
470 | #endif | 664 | #endif |
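Which of these register and descriptor code paths runs is decided entirely by the quirk bits a glue sets at init time; the core never hard-codes an endianness. Purely as an illustration (the helper name and how the glue detects endianness are assumptions, not something this patch defines), a glue driving a big-endian GRUSBHC instance would flip the bits before the first register access:

	static void uhci_mysoc_set_endianness(struct uhci_hcd *uhci,
			bool be_regs, bool be_descs)
	{
		/* uhci_readw()/uhci_writew() will then use readw_be()/writew_be() */
		uhci->big_endian_mmio = be_regs;
		/* cpu_to_hc32()/hc32_to_cpu() will then use be32 conversions */
		uhci->big_endian_desc = be_descs;
	}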
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 6d59c0f77f25..045cde4cbc3d 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c | |||
@@ -44,7 +44,7 @@ static int any_ports_active(struct uhci_hcd *uhci) | |||
44 | int port; | 44 | int port; |
45 | 45 | ||
46 | for (port = 0; port < uhci->rh_numports; ++port) { | 46 | for (port = 0; port < uhci->rh_numports; ++port) { |
47 | if ((inw(uhci->io_addr + USBPORTSC1 + port * 2) & | 47 | if ((uhci_readw(uhci, USBPORTSC1 + port * 2) & |
48 | (USBPORTSC_CCS | RWC_BITS)) || | 48 | (USBPORTSC_CCS | RWC_BITS)) || |
49 | test_bit(port, &uhci->port_c_suspend)) | 49 | test_bit(port, &uhci->port_c_suspend)) |
50 | return 1; | 50 | return 1; |
@@ -68,7 +68,7 @@ static inline int get_hub_status_data(struct uhci_hcd *uhci, char *buf) | |||
68 | 68 | ||
69 | *buf = 0; | 69 | *buf = 0; |
70 | for (port = 0; port < uhci->rh_numports; ++port) { | 70 | for (port = 0; port < uhci->rh_numports; ++port) { |
71 | if ((inw(uhci->io_addr + USBPORTSC1 + port * 2) & mask) || | 71 | if ((uhci_readw(uhci, USBPORTSC1 + port * 2) & mask) || |
72 | test_bit(port, &uhci->port_c_suspend)) | 72 | test_bit(port, &uhci->port_c_suspend)) |
73 | *buf |= (1 << (port + 1)); | 73 | *buf |= (1 << (port + 1)); |
74 | } | 74 | } |
@@ -78,17 +78,17 @@ static inline int get_hub_status_data(struct uhci_hcd *uhci, char *buf) | |||
78 | #define OK(x) len = (x); break | 78 | #define OK(x) len = (x); break |
79 | 79 | ||
80 | #define CLR_RH_PORTSTAT(x) \ | 80 | #define CLR_RH_PORTSTAT(x) \ |
81 | status = inw(port_addr); \ | 81 | status = uhci_readw(uhci, port_addr); \ |
82 | status &= ~(RWC_BITS|WZ_BITS); \ | 82 | status &= ~(RWC_BITS|WZ_BITS); \ |
83 | status &= ~(x); \ | 83 | status &= ~(x); \ |
84 | status |= RWC_BITS & (x); \ | 84 | status |= RWC_BITS & (x); \ |
85 | outw(status, port_addr) | 85 | uhci_writew(uhci, status, port_addr) |
86 | 86 | ||
87 | #define SET_RH_PORTSTAT(x) \ | 87 | #define SET_RH_PORTSTAT(x) \ |
88 | status = inw(port_addr); \ | 88 | status = uhci_readw(uhci, port_addr); \ |
89 | status |= (x); \ | 89 | status |= (x); \ |
90 | status &= ~(RWC_BITS|WZ_BITS); \ | 90 | status &= ~(RWC_BITS|WZ_BITS); \ |
91 | outw(status, port_addr) | 91 | uhci_writew(uhci, status, port_addr) |
92 | 92 | ||
93 | /* UHCI controllers don't automatically stop resume signalling after 20 msec, | 93 | /* UHCI controllers don't automatically stop resume signalling after 20 msec, |
94 | * so we have to poll and check timeouts in order to take care of it. | 94 | * so we have to poll and check timeouts in order to take care of it. |
@@ -99,7 +99,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port, | |||
99 | int status; | 99 | int status; |
100 | int i; | 100 | int i; |
101 | 101 | ||
102 | if (inw(port_addr) & SUSPEND_BITS) { | 102 | if (uhci_readw(uhci, port_addr) & SUSPEND_BITS) { |
103 | CLR_RH_PORTSTAT(SUSPEND_BITS); | 103 | CLR_RH_PORTSTAT(SUSPEND_BITS); |
104 | if (test_bit(port, &uhci->resuming_ports)) | 104 | if (test_bit(port, &uhci->resuming_ports)) |
105 | set_bit(port, &uhci->port_c_suspend); | 105 | set_bit(port, &uhci->port_c_suspend); |
@@ -110,7 +110,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port, | |||
110 | * Experiments show that some controllers take longer, so | 110 | * Experiments show that some controllers take longer, so |
111 | * we'll poll for completion. */ | 111 | * we'll poll for completion. */ |
112 | for (i = 0; i < 10; ++i) { | 112 | for (i = 0; i < 10; ++i) { |
113 | if (!(inw(port_addr) & SUSPEND_BITS)) | 113 | if (!(uhci_readw(uhci, port_addr) & SUSPEND_BITS)) |
114 | break; | 114 | break; |
115 | udelay(1); | 115 | udelay(1); |
116 | } | 116 | } |
@@ -121,12 +121,12 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port, | |||
121 | /* Wait for the UHCI controller in HP's iLO2 server management chip. | 121 | /* Wait for the UHCI controller in HP's iLO2 server management chip. |
122 | * It can take up to 250 us to finish a reset and set the CSC bit. | 122 | * It can take up to 250 us to finish a reset and set the CSC bit. |
123 | */ | 123 | */ |
124 | static void wait_for_HP(unsigned long port_addr) | 124 | static void wait_for_HP(struct uhci_hcd *uhci, unsigned long port_addr) |
125 | { | 125 | { |
126 | int i; | 126 | int i; |
127 | 127 | ||
128 | for (i = 10; i < 250; i += 10) { | 128 | for (i = 10; i < 250; i += 10) { |
129 | if (inw(port_addr) & USBPORTSC_CSC) | 129 | if (uhci_readw(uhci, port_addr) & USBPORTSC_CSC) |
130 | return; | 130 | return; |
131 | udelay(10); | 131 | udelay(10); |
132 | } | 132 | } |
@@ -140,8 +140,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci) | |||
140 | int status; | 140 | int status; |
141 | 141 | ||
142 | for (port = 0; port < uhci->rh_numports; ++port) { | 142 | for (port = 0; port < uhci->rh_numports; ++port) { |
143 | port_addr = uhci->io_addr + USBPORTSC1 + 2 * port; | 143 | port_addr = USBPORTSC1 + 2 * port; |
144 | status = inw(port_addr); | 144 | status = uhci_readw(uhci, port_addr); |
145 | if (unlikely(status & USBPORTSC_PR)) { | 145 | if (unlikely(status & USBPORTSC_PR)) { |
146 | if (time_after_eq(jiffies, uhci->ports_timeout)) { | 146 | if (time_after_eq(jiffies, uhci->ports_timeout)) { |
147 | CLR_RH_PORTSTAT(USBPORTSC_PR); | 147 | CLR_RH_PORTSTAT(USBPORTSC_PR); |
@@ -149,9 +149,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci) | |||
149 | 149 | ||
150 | /* HP's server management chip requires | 150 | /* HP's server management chip requires |
151 | * a longer delay. */ | 151 | * a longer delay. */ |
152 | if (to_pci_dev(uhci_dev(uhci))->vendor == | 152 | if (uhci->wait_for_hp) |
153 | PCI_VENDOR_ID_HP) | 153 | wait_for_HP(uhci, port_addr); |
154 | wait_for_HP(port_addr); | ||
155 | 154 | ||
156 | /* If the port was enabled before, turning | 155 | /* If the port was enabled before, turning |
157 | * reset on caused a port enable change. | 156 | * reset on caused a port enable change. |
@@ -242,7 +241,7 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
242 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | 241 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); |
243 | int status, lstatus, retval = 0, len = 0; | 242 | int status, lstatus, retval = 0, len = 0; |
244 | unsigned int port = wIndex - 1; | 243 | unsigned int port = wIndex - 1; |
245 | unsigned long port_addr = uhci->io_addr + USBPORTSC1 + 2 * port; | 244 | unsigned long port_addr = USBPORTSC1 + 2 * port; |
246 | u16 wPortChange, wPortStatus; | 245 | u16 wPortChange, wPortStatus; |
247 | unsigned long flags; | 246 | unsigned long flags; |
248 | 247 | ||
@@ -260,14 +259,13 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
260 | goto err; | 259 | goto err; |
261 | 260 | ||
262 | uhci_check_ports(uhci); | 261 | uhci_check_ports(uhci); |
263 | status = inw(port_addr); | 262 | status = uhci_readw(uhci, port_addr); |
264 | 263 | ||
265 | /* Intel controllers report the OverCurrent bit active on. | 264 | /* Intel controllers report the OverCurrent bit active on. |
266 | * VIA controllers report it active off, so we'll adjust the | 265 | * VIA controllers report it active off, so we'll adjust the |
267 | * bit value. (It's not standardized in the UHCI spec.) | 266 | * bit value. (It's not standardized in the UHCI spec.) |
268 | */ | 267 | */ |
269 | if (to_pci_dev(hcd->self.controller)->vendor == | 268 | if (uhci->oc_low) |
270 | PCI_VENDOR_ID_VIA) | ||
271 | status ^= USBPORTSC_OC; | 269 | status ^= USBPORTSC_OC; |
272 | 270 | ||
273 | /* UHCI doesn't support C_RESET (always false) */ | 271 | /* UHCI doesn't support C_RESET (always false) */ |
@@ -358,7 +356,7 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
358 | CLR_RH_PORTSTAT(USBPORTSC_PEC); | 356 | CLR_RH_PORTSTAT(USBPORTSC_PEC); |
359 | OK(0); | 357 | OK(0); |
360 | case USB_PORT_FEAT_SUSPEND: | 358 | case USB_PORT_FEAT_SUSPEND: |
361 | if (!(inw(port_addr) & USBPORTSC_SUSP)) { | 359 | if (!(uhci_readw(uhci, port_addr) & USBPORTSC_SUSP)) { |
362 | 360 | ||
363 | /* Make certain the port isn't suspended */ | 361 | /* Make certain the port isn't suspended */ |
364 | uhci_finish_suspend(uhci, port, port_addr); | 362 | uhci_finish_suspend(uhci, port, port_addr); |
@@ -370,7 +368,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
370 | * if the port is disabled. When this happens | 368 | * if the port is disabled. When this happens |
371 | * just skip the Resume signalling. | 369 | * just skip the Resume signalling. |
372 | */ | 370 | */ |
373 | if (!(inw(port_addr) & USBPORTSC_RD)) | 371 | if (!(uhci_readw(uhci, port_addr) & |
372 | USBPORTSC_RD)) | ||
374 | uhci_finish_suspend(uhci, port, | 373 | uhci_finish_suspend(uhci, port, |
375 | port_addr); | 374 | port_addr); |
376 | else | 375 | else |
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c new file mode 100644 index 000000000000..c300bd2f7d1c --- /dev/null +++ b/drivers/usb/host/uhci-pci.c | |||
@@ -0,0 +1,301 @@ | |||
1 | /* | ||
2 | * UHCI HCD (Host Controller Driver) PCI Bus Glue. | ||
3 | * | ||
4 | * Extracted from uhci-hcd.c: | ||
5 | * Maintainer: Alan Stern <stern@rowland.harvard.edu> | ||
6 | * | ||
7 | * (C) Copyright 1999 Linus Torvalds | ||
8 | * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com | ||
9 | * (C) Copyright 1999 Randy Dunlap | ||
10 | * (C) Copyright 1999 Georg Acher, acher@in.tum.de | ||
11 | * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de | ||
12 | * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch | ||
13 | * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at | ||
14 | * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface | ||
15 | * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). | ||
16 | * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) | ||
17 | * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu | ||
18 | */ | ||
19 | |||
20 | #include "pci-quirks.h" | ||
21 | |||
22 | /* | ||
23 | * Make sure the controller is completely inactive, unable to | ||
24 | * generate interrupts or do DMA. | ||
25 | */ | ||
26 | static void uhci_pci_reset_hc(struct uhci_hcd *uhci) | ||
27 | { | ||
28 | uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr); | ||
29 | } | ||
30 | |||
31 | /* | ||
32 | * Initialize a controller that was newly discovered or has just been | ||
33 | * resumed. In either case we can't be sure of its previous state. | ||
34 | * | ||
35 | * Returns: 1 if the controller was reset, 0 otherwise. | ||
36 | */ | ||
37 | static int uhci_pci_check_and_reset_hc(struct uhci_hcd *uhci) | ||
38 | { | ||
39 | return uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), | ||
40 | uhci->io_addr); | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * Store the basic register settings needed by the controller. | ||
45 | * This function is called at the end of configure_hc in uhci-hcd.c. | ||
46 | */ | ||
47 | static void uhci_pci_configure_hc(struct uhci_hcd *uhci) | ||
48 | { | ||
49 | struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci)); | ||
50 | |||
51 | /* Enable PIRQ */ | ||
52 | pci_write_config_word(pdev, USBLEGSUP, USBLEGSUP_DEFAULT); | ||
53 | |||
54 | /* Disable platform-specific non-PME# wakeup */ | ||
55 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | ||
56 | pci_write_config_byte(pdev, USBRES_INTEL, 0); | ||
57 | } | ||
58 | |||
59 | static int uhci_pci_resume_detect_interrupts_are_broken(struct uhci_hcd *uhci) | ||
60 | { | ||
61 | int port; | ||
62 | |||
63 | switch (to_pci_dev(uhci_dev(uhci))->vendor) { | ||
64 | default: | ||
65 | break; | ||
66 | |||
67 | case PCI_VENDOR_ID_GENESYS: | ||
68 | /* Genesys Logic's GL880S controllers don't generate | ||
69 | * resume-detect interrupts. | ||
70 | */ | ||
71 | return 1; | ||
72 | |||
73 | case PCI_VENDOR_ID_INTEL: | ||
74 | /* Some of Intel's USB controllers have a bug that causes | ||
75 | * resume-detect interrupts if any port has an over-current | ||
76 | * condition. To make matters worse, some motherboards | ||
77 | * hardwire unused USB ports' over-current inputs active! | ||
78 | * To prevent problems, we will not enable resume-detect | ||
79 | * interrupts if any ports are OC. | ||
80 | */ | ||
81 | for (port = 0; port < uhci->rh_numports; ++port) { | ||
82 | if (inw(uhci->io_addr + USBPORTSC1 + port * 2) & | ||
83 | USBPORTSC_OC) | ||
84 | return 1; | ||
85 | } | ||
86 | break; | ||
87 | } | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static int uhci_pci_global_suspend_mode_is_broken(struct uhci_hcd *uhci) | ||
92 | { | ||
93 | int port; | ||
94 | const char *sys_info; | ||
95 | static const char bad_Asus_board[] = "A7V8X"; | ||
96 | |||
97 | /* One of Asus's motherboards has a bug which causes it to | ||
98 | * wake up immediately from suspend-to-RAM if any of the ports | ||
99 | * are connected. In such cases we will not set EGSM. | ||
100 | */ | ||
101 | sys_info = dmi_get_system_info(DMI_BOARD_NAME); | ||
102 | if (sys_info && !strcmp(sys_info, bad_Asus_board)) { | ||
103 | for (port = 0; port < uhci->rh_numports; ++port) { | ||
104 | if (inw(uhci->io_addr + USBPORTSC1 + port * 2) & | ||
105 | USBPORTSC_CCS) | ||
106 | return 1; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static int uhci_pci_init(struct usb_hcd *hcd) | ||
114 | { | ||
115 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | ||
116 | |||
117 | uhci->io_addr = (unsigned long) hcd->rsrc_start; | ||
118 | |||
119 | uhci->rh_numports = uhci_count_ports(hcd); | ||
120 | |||
121 | /* Intel controllers report the OverCurrent bit active on. | ||
122 | * VIA controllers report it active off, so we'll adjust the | ||
123 | * bit value. (It's not standardized in the UHCI spec.) | ||
124 | */ | ||
125 | if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA) | ||
126 | uhci->oc_low = 1; | ||
127 | |||
128 | /* HP's server management chip requires a longer port reset delay. */ | ||
129 | if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP) | ||
130 | uhci->wait_for_hp = 1; | ||
131 | |||
132 | /* Set up pointers to PCI-specific functions */ | ||
133 | uhci->reset_hc = uhci_pci_reset_hc; | ||
134 | uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc; | ||
135 | uhci->configure_hc = uhci_pci_configure_hc; | ||
136 | uhci->resume_detect_interrupts_are_broken = | ||
137 | uhci_pci_resume_detect_interrupts_are_broken; | ||
138 | uhci->global_suspend_mode_is_broken = | ||
139 | uhci_pci_global_suspend_mode_is_broken; | ||
140 | |||
141 | |||
142 | /* Kick BIOS off this hardware and reset if the controller | ||
143 | * isn't already safely quiescent. | ||
144 | */ | ||
145 | check_and_reset_hc(uhci); | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | /* Make sure the controller is quiescent and that we're not using it | ||
150 | * any more. This is mainly for the benefit of programs which, like kexec, | ||
151 | * expect the hardware to be idle: not doing DMA or generating IRQs. | ||
152 | * | ||
153 | * This routine may be called in a damaged or failing kernel. Hence we | ||
154 | * do not acquire the spinlock before shutting down the controller. | ||
155 | */ | ||
156 | static void uhci_shutdown(struct pci_dev *pdev) | ||
157 | { | ||
158 | struct usb_hcd *hcd = pci_get_drvdata(pdev); | ||
159 | |||
160 | uhci_hc_died(hcd_to_uhci(hcd)); | ||
161 | } | ||
162 | |||
163 | #ifdef CONFIG_PM | ||
164 | |||
165 | static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) | ||
166 | { | ||
167 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | ||
168 | struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci)); | ||
169 | int rc = 0; | ||
170 | |||
171 | dev_dbg(uhci_dev(uhci), "%s\n", __func__); | ||
172 | |||
173 | spin_lock_irq(&uhci->lock); | ||
174 | if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead) | ||
175 | goto done_okay; /* Already suspended or dead */ | ||
176 | |||
177 | if (uhci->rh_state > UHCI_RH_SUSPENDED) { | ||
178 | dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n"); | ||
179 | rc = -EBUSY; | ||
180 | goto done; | ||
181 | } | ||
182 | |||
183 | /* All PCI host controllers are required to disable IRQ generation | ||
184 | * at the source, so we must turn off PIRQ. | ||
185 | */ | ||
186 | pci_write_config_word(pdev, USBLEGSUP, 0); | ||
187 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); | ||
188 | |||
189 | /* Enable platform-specific non-PME# wakeup */ | ||
190 | if (do_wakeup) { | ||
191 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | ||
192 | pci_write_config_byte(pdev, USBRES_INTEL, | ||
193 | USBPORT1EN | USBPORT2EN); | ||
194 | } | ||
195 | |||
196 | done_okay: | ||
197 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | ||
198 | done: | ||
199 | spin_unlock_irq(&uhci->lock); | ||
200 | return rc; | ||
201 | } | ||
202 | |||
203 | static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated) | ||
204 | { | ||
205 | struct uhci_hcd *uhci = hcd_to_uhci(hcd); | ||
206 | |||
207 | dev_dbg(uhci_dev(uhci), "%s\n", __func__); | ||
208 | |||
209 | /* Since we aren't in D3 any more, it's safe to set this flag | ||
210 | * even if the controller was dead. | ||
211 | */ | ||
212 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | ||
213 | |||
214 | spin_lock_irq(&uhci->lock); | ||
215 | |||
216 | /* Make sure resume from hibernation re-enumerates everything */ | ||
217 | if (hibernated) { | ||
218 | uhci->reset_hc(uhci); | ||
219 | finish_reset(uhci); | ||
220 | } | ||
221 | |||
222 | /* The firmware may have changed the controller settings during | ||
223 | * a system wakeup. Check it and reconfigure to avoid problems. | ||
224 | */ | ||
225 | else { | ||
226 | check_and_reset_hc(uhci); | ||
227 | } | ||
228 | configure_hc(uhci); | ||
229 | |||
230 | /* Tell the core if the controller had to be reset */ | ||
231 | if (uhci->rh_state == UHCI_RH_RESET) | ||
232 | usb_root_hub_lost_power(hcd->self.root_hub); | ||
233 | |||
234 | spin_unlock_irq(&uhci->lock); | ||
235 | |||
236 | /* If interrupts don't work and remote wakeup is enabled then | ||
237 | * the suspended root hub needs to be polled. | ||
238 | */ | ||
239 | if (!uhci->RD_enable && hcd->self.root_hub->do_remote_wakeup) | ||
240 | set_bit(HCD_FLAG_POLL_RH, &hcd->flags); | ||
241 | |||
242 | /* Does the root hub have a port wakeup pending? */ | ||
243 | usb_hcd_poll_rh_status(hcd); | ||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | #endif | ||
248 | |||
249 | static const struct hc_driver uhci_driver = { | ||
250 | .description = hcd_name, | ||
251 | .product_desc = "UHCI Host Controller", | ||
252 | .hcd_priv_size = sizeof(struct uhci_hcd), | ||
253 | |||
254 | /* Generic hardware linkage */ | ||
255 | .irq = uhci_irq, | ||
256 | .flags = HCD_USB11, | ||
257 | |||
258 | /* Basic lifecycle operations */ | ||
259 | .reset = uhci_pci_init, | ||
260 | .start = uhci_start, | ||
261 | #ifdef CONFIG_PM | ||
262 | .pci_suspend = uhci_pci_suspend, | ||
263 | .pci_resume = uhci_pci_resume, | ||
264 | .bus_suspend = uhci_rh_suspend, | ||
265 | .bus_resume = uhci_rh_resume, | ||
266 | #endif | ||
267 | .stop = uhci_stop, | ||
268 | |||
269 | .urb_enqueue = uhci_urb_enqueue, | ||
270 | .urb_dequeue = uhci_urb_dequeue, | ||
271 | |||
272 | .endpoint_disable = uhci_hcd_endpoint_disable, | ||
273 | .get_frame_number = uhci_hcd_get_frame_number, | ||
274 | |||
275 | .hub_status_data = uhci_hub_status_data, | ||
276 | .hub_control = uhci_hub_control, | ||
277 | }; | ||
278 | |||
279 | static DEFINE_PCI_DEVICE_TABLE(uhci_pci_ids) = { { | ||
280 | /* handle any USB UHCI controller */ | ||
281 | PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0), | ||
282 | .driver_data = (unsigned long) &uhci_driver, | ||
283 | }, { /* end: all zeroes */ } | ||
284 | }; | ||
285 | |||
286 | MODULE_DEVICE_TABLE(pci, uhci_pci_ids); | ||
287 | |||
288 | static struct pci_driver uhci_pci_driver = { | ||
289 | .name = (char *)hcd_name, | ||
290 | .id_table = uhci_pci_ids, | ||
291 | |||
292 | .probe = usb_hcd_pci_probe, | ||
293 | .remove = usb_hcd_pci_remove, | ||
294 | .shutdown = uhci_shutdown, | ||
295 | |||
296 | #ifdef CONFIG_PM_SLEEP | ||
297 | .driver = { | ||
298 | .pm = &usb_hcd_pci_pm_ops | ||
299 | }, | ||
300 | #endif | ||
301 | }; | ||
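
The hc_driver and pci_driver tables above are all the glue the PCI core needs: usb_hcd_pci_probe() allocates the usb_hcd, maps the controller's resources and calls back into uhci_pci_init() through the .reset hook, while the CONFIG_PM_SLEEP pm ops route system suspend/resume into uhci_pci_suspend()/uhci_pci_resume(). A minimal registration sketch, assuming the usual module init/exit boilerplate (the real init in uhci-hcd.c also sets up its debugfs root and error buffer first):

	static int __init uhci_hcd_init(void)
	{
		/* hand the id table and callbacks to the PCI core */
		return pci_register_driver(&uhci_pci_driver);
	}
	module_init(uhci_hcd_init);

	static void __exit uhci_hcd_cleanup(void)
	{
		pci_unregister_driver(&uhci_pci_driver);
	}
	module_exit(uhci_hcd_cleanup);
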
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index af77abb5c68b..84ed28b34f93 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c | |||
@@ -29,12 +29,12 @@ static void uhci_set_next_interrupt(struct uhci_hcd *uhci) | |||
29 | { | 29 | { |
30 | if (uhci->is_stopped) | 30 | if (uhci->is_stopped) |
31 | mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies); | 31 | mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies); |
32 | uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); | 32 | uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci) | 35 | static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci) |
36 | { | 36 | { |
37 | uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC); | 37 | uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC); |
38 | } | 38 | } |
39 | 39 | ||
40 | 40 | ||
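
The repeated cpu_to_le32() to cpu_to_hc32(uhci, ...) substitutions in this file are the heart of the big-endian descriptor support: TD and QH fields become __hc32 and are byte-swapped per controller instead of being hard-wired little-endian. A rough sketch of the helpers this relies on, assuming a big_endian_desc flag in struct uhci_hcd set for GRUSBHC-style controllers (the name and exact config guards here are illustrative; the real helpers live in uhci-hcd.h):

	static inline __hc32 cpu_to_hc32(const struct uhci_hcd *uhci, u32 x)
	{
		/* descriptors are big-endian only on controllers that ask for it */
		return uhci->big_endian_desc ? (__force __hc32)cpu_to_be32(x)
					     : (__force __hc32)cpu_to_le32(x);
	}

	static inline u32 hc32_to_cpu(const struct uhci_hcd *uhci, __hc32 x)
	{
		return uhci->big_endian_desc ? be32_to_cpu((__force __be32)x)
					     : le32_to_cpu((__force __le32)x);
	}

When CONFIG_USB_UHCI_BIG_ENDIAN_DESC is not set, the helpers can collapse to plain cpu_to_le32()/le32_to_cpu(), so the common little-endian case costs nothing.
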
@@ -53,7 +53,7 @@ static void uhci_fsbr_on(struct uhci_hcd *uhci) | |||
53 | uhci->fsbr_is_on = 1; | 53 | uhci->fsbr_is_on = 1; |
54 | lqh = list_entry(uhci->skel_async_qh->node.prev, | 54 | lqh = list_entry(uhci->skel_async_qh->node.prev, |
55 | struct uhci_qh, node); | 55 | struct uhci_qh, node); |
56 | lqh->link = LINK_TO_QH(uhci->skel_term_qh); | 56 | lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh); |
57 | } | 57 | } |
58 | 58 | ||
59 | static void uhci_fsbr_off(struct uhci_hcd *uhci) | 59 | static void uhci_fsbr_off(struct uhci_hcd *uhci) |
@@ -65,7 +65,7 @@ static void uhci_fsbr_off(struct uhci_hcd *uhci) | |||
65 | uhci->fsbr_is_on = 0; | 65 | uhci->fsbr_is_on = 0; |
66 | lqh = list_entry(uhci->skel_async_qh->node.prev, | 66 | lqh = list_entry(uhci->skel_async_qh->node.prev, |
67 | struct uhci_qh, node); | 67 | struct uhci_qh, node); |
68 | lqh->link = UHCI_PTR_TERM; | 68 | lqh->link = UHCI_PTR_TERM(uhci); |
69 | } | 69 | } |
70 | 70 | ||
71 | static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb) | 71 | static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb) |
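
The same change shows up in the link-pointer helpers: LINK_TO_QH(), LINK_TO_TD() and UHCI_PTR_TERM now take the uhci_hcd, so the hardware link pointers they build are already in the controller's descriptor byte order. Roughly, the parameterized forms look like this (a sketch; the exact macros live in uhci-hcd.h and uhci-q.c):

	#define UHCI_PTR_TERM(uhci)	cpu_to_hc32((uhci), 0x0001)
	#define UHCI_PTR_QH(uhci)	cpu_to_hc32((uhci), 0x0002)

	#define LINK_TO_TD(uhci, td) \
		(cpu_to_hc32((uhci), (td)->dma_handle))
	#define LINK_TO_QH(uhci, qh) \
		(UHCI_PTR_QH(uhci) | cpu_to_hc32((uhci), (qh)->dma_handle))
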
@@ -131,12 +131,12 @@ static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td) | |||
131 | dma_pool_free(uhci->td_pool, td, td->dma_handle); | 131 | dma_pool_free(uhci->td_pool, td, td->dma_handle); |
132 | } | 132 | } |
133 | 133 | ||
134 | static inline void uhci_fill_td(struct uhci_td *td, u32 status, | 134 | static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td, |
135 | u32 token, u32 buffer) | 135 | u32 status, u32 token, u32 buffer) |
136 | { | 136 | { |
137 | td->status = cpu_to_le32(status); | 137 | td->status = cpu_to_hc32(uhci, status); |
138 | td->token = cpu_to_le32(token); | 138 | td->token = cpu_to_hc32(uhci, token); |
139 | td->buffer = cpu_to_le32(buffer); | 139 | td->buffer = cpu_to_hc32(uhci, buffer); |
140 | } | 140 | } |
141 | 141 | ||
142 | static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp) | 142 | static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp) |
@@ -170,11 +170,11 @@ static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci, | |||
170 | 170 | ||
171 | td->link = ltd->link; | 171 | td->link = ltd->link; |
172 | wmb(); | 172 | wmb(); |
173 | ltd->link = LINK_TO_TD(td); | 173 | ltd->link = LINK_TO_TD(uhci, td); |
174 | } else { | 174 | } else { |
175 | td->link = uhci->frame[framenum]; | 175 | td->link = uhci->frame[framenum]; |
176 | wmb(); | 176 | wmb(); |
177 | uhci->frame[framenum] = LINK_TO_TD(td); | 177 | uhci->frame[framenum] = LINK_TO_TD(uhci, td); |
178 | uhci->frame_cpu[framenum] = td; | 178 | uhci->frame_cpu[framenum] = td; |
179 | } | 179 | } |
180 | } | 180 | } |
@@ -198,7 +198,7 @@ static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci, | |||
198 | ntd = list_entry(td->fl_list.next, | 198 | ntd = list_entry(td->fl_list.next, |
199 | struct uhci_td, | 199 | struct uhci_td, |
200 | fl_list); | 200 | fl_list); |
201 | uhci->frame[td->frame] = LINK_TO_TD(ntd); | 201 | uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd); |
202 | uhci->frame_cpu[td->frame] = ntd; | 202 | uhci->frame_cpu[td->frame] = ntd; |
203 | } | 203 | } |
204 | } else { | 204 | } else { |
@@ -255,8 +255,8 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, | |||
255 | memset(qh, 0, sizeof(*qh)); | 255 | memset(qh, 0, sizeof(*qh)); |
256 | qh->dma_handle = dma_handle; | 256 | qh->dma_handle = dma_handle; |
257 | 257 | ||
258 | qh->element = UHCI_PTR_TERM; | 258 | qh->element = UHCI_PTR_TERM(uhci); |
259 | qh->link = UHCI_PTR_TERM; | 259 | qh->link = UHCI_PTR_TERM(uhci); |
260 | 260 | ||
261 | INIT_LIST_HEAD(&qh->queue); | 261 | INIT_LIST_HEAD(&qh->queue); |
262 | INIT_LIST_HEAD(&qh->node); | 262 | INIT_LIST_HEAD(&qh->node); |
@@ -348,9 +348,9 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh, | |||
348 | 348 | ||
349 | /* If the QH element pointer is UHCI_PTR_TERM then the currently | 349 | /* If the QH element pointer is UHCI_PTR_TERM then the currently |
350 | * executing URB has already been unlinked, so this one isn't it. */ | 350 | * executing URB has already been unlinked, so this one isn't it. */ |
351 | if (qh_element(qh) == UHCI_PTR_TERM) | 351 | if (qh_element(qh) == UHCI_PTR_TERM(uhci)) |
352 | goto done; | 352 | goto done; |
353 | qh->element = UHCI_PTR_TERM; | 353 | qh->element = UHCI_PTR_TERM(uhci); |
354 | 354 | ||
355 | /* Control pipes don't have to worry about toggles */ | 355 | /* Control pipes don't have to worry about toggles */ |
356 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) | 356 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) |
@@ -360,7 +360,7 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh, | |||
360 | WARN_ON(list_empty(&urbp->td_list)); | 360 | WARN_ON(list_empty(&urbp->td_list)); |
361 | td = list_entry(urbp->td_list.next, struct uhci_td, list); | 361 | td = list_entry(urbp->td_list.next, struct uhci_td, list); |
362 | qh->needs_fixup = 1; | 362 | qh->needs_fixup = 1; |
363 | qh->initial_toggle = uhci_toggle(td_token(td)); | 363 | qh->initial_toggle = uhci_toggle(td_token(uhci, td)); |
364 | 364 | ||
365 | done: | 365 | done: |
366 | return ret; | 366 | return ret; |
@@ -370,7 +370,8 @@ done: | |||
370 | * Fix up the data toggles for URBs in a queue, when one of them | 370 | * Fix up the data toggles for URBs in a queue, when one of them |
371 | * terminates early (short transfer, error, or dequeued). | 371 | * terminates early (short transfer, error, or dequeued). |
372 | */ | 372 | */ |
373 | static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) | 373 | static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh, |
374 | int skip_first) | ||
374 | { | 375 | { |
375 | struct urb_priv *urbp = NULL; | 376 | struct urb_priv *urbp = NULL; |
376 | struct uhci_td *td; | 377 | struct uhci_td *td; |
@@ -384,7 +385,7 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) | |||
384 | 385 | ||
385 | /* When starting with the first URB, if the QH element pointer is | 386 | /* When starting with the first URB, if the QH element pointer is |
386 | * still valid then we know the URB's toggles are okay. */ | 387 | * still valid then we know the URB's toggles are okay. */ |
387 | else if (qh_element(qh) != UHCI_PTR_TERM) | 388 | else if (qh_element(qh) != UHCI_PTR_TERM(uhci)) |
388 | toggle = 2; | 389 | toggle = 2; |
389 | 390 | ||
390 | /* Fix up the toggle for the URBs in the queue. Normally this | 391 | /* Fix up the toggle for the URBs in the queue. Normally this |
@@ -396,15 +397,15 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) | |||
396 | /* If the first TD has the right toggle value, we don't | 397 | /* If the first TD has the right toggle value, we don't |
397 | * need to change any toggles in this URB */ | 398 | * need to change any toggles in this URB */ |
398 | td = list_entry(urbp->td_list.next, struct uhci_td, list); | 399 | td = list_entry(urbp->td_list.next, struct uhci_td, list); |
399 | if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) { | 400 | if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) { |
400 | td = list_entry(urbp->td_list.prev, struct uhci_td, | 401 | td = list_entry(urbp->td_list.prev, struct uhci_td, |
401 | list); | 402 | list); |
402 | toggle = uhci_toggle(td_token(td)) ^ 1; | 403 | toggle = uhci_toggle(td_token(uhci, td)) ^ 1; |
403 | 404 | ||
404 | /* Otherwise all the toggles in the URB have to be switched */ | 405 | /* Otherwise all the toggles in the URB have to be switched */ |
405 | } else { | 406 | } else { |
406 | list_for_each_entry(td, &urbp->td_list, list) { | 407 | list_for_each_entry(td, &urbp->td_list, list) { |
407 | td->token ^= cpu_to_le32( | 408 | td->token ^= cpu_to_hc32(uhci, |
408 | TD_TOKEN_TOGGLE); | 409 | TD_TOKEN_TOGGLE); |
409 | toggle ^= 1; | 410 | toggle ^= 1; |
410 | } | 411 | } |
@@ -441,7 +442,7 @@ static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) | |||
441 | pqh = list_entry(qh->node.prev, struct uhci_qh, node); | 442 | pqh = list_entry(qh->node.prev, struct uhci_qh, node); |
442 | qh->link = pqh->link; | 443 | qh->link = pqh->link; |
443 | wmb(); | 444 | wmb(); |
444 | pqh->link = LINK_TO_QH(qh); | 445 | pqh->link = LINK_TO_QH(uhci, qh); |
445 | } | 446 | } |
446 | 447 | ||
447 | /* | 448 | /* |
@@ -451,7 +452,7 @@ static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) | |||
451 | static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh) | 452 | static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh) |
452 | { | 453 | { |
453 | struct uhci_qh *pqh; | 454 | struct uhci_qh *pqh; |
454 | __le32 link_to_new_qh; | 455 | __hc32 link_to_new_qh; |
455 | 456 | ||
456 | /* Find the predecessor QH for our new one and insert it in the list. | 457 | /* Find the predecessor QH for our new one and insert it in the list. |
457 | * The list of QHs is expected to be short, so linear search won't | 458 | * The list of QHs is expected to be short, so linear search won't |
@@ -465,7 +466,7 @@ static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh) | |||
465 | /* Link it into the schedule */ | 466 | /* Link it into the schedule */ |
466 | qh->link = pqh->link; | 467 | qh->link = pqh->link; |
467 | wmb(); | 468 | wmb(); |
468 | link_to_new_qh = LINK_TO_QH(qh); | 469 | link_to_new_qh = LINK_TO_QH(uhci, qh); |
469 | pqh->link = link_to_new_qh; | 470 | pqh->link = link_to_new_qh; |
470 | 471 | ||
471 | /* If this is now the first FSBR QH, link the terminating skeleton | 472 | /* If this is now the first FSBR QH, link the terminating skeleton |
@@ -483,13 +484,13 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) | |||
483 | 484 | ||
484 | /* Set the element pointer if it isn't set already. | 485 | /* Set the element pointer if it isn't set already. |
485 | * This isn't needed for Isochronous queues, but it doesn't hurt. */ | 486 | * This isn't needed for Isochronous queues, but it doesn't hurt. */ |
486 | if (qh_element(qh) == UHCI_PTR_TERM) { | 487 | if (qh_element(qh) == UHCI_PTR_TERM(uhci)) { |
487 | struct urb_priv *urbp = list_entry(qh->queue.next, | 488 | struct urb_priv *urbp = list_entry(qh->queue.next, |
488 | struct urb_priv, node); | 489 | struct urb_priv, node); |
489 | struct uhci_td *td = list_entry(urbp->td_list.next, | 490 | struct uhci_td *td = list_entry(urbp->td_list.next, |
490 | struct uhci_td, list); | 491 | struct uhci_td, list); |
491 | 492 | ||
492 | qh->element = LINK_TO_TD(td); | 493 | qh->element = LINK_TO_TD(uhci, td); |
493 | } | 494 | } |
494 | 495 | ||
495 | /* Treat the queue as if it has just advanced */ | 496 | /* Treat the queue as if it has just advanced */ |
@@ -533,7 +534,7 @@ static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) | |||
533 | static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh) | 534 | static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh) |
534 | { | 535 | { |
535 | struct uhci_qh *pqh; | 536 | struct uhci_qh *pqh; |
536 | __le32 link_to_next_qh = qh->link; | 537 | __hc32 link_to_next_qh = qh->link; |
537 | 538 | ||
538 | pqh = list_entry(qh->node.prev, struct uhci_qh, node); | 539 | pqh = list_entry(qh->node.prev, struct uhci_qh, node); |
539 | pqh->link = link_to_next_qh; | 540 | pqh->link = link_to_next_qh; |
@@ -757,8 +758,8 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci, | |||
757 | /* | 758 | /* |
758 | * Map status to standard result codes | 759 | * Map status to standard result codes |
759 | * | 760 | * |
760 | * <status> is (td_status(td) & 0xF60000), a.k.a. | 761 | * <status> is (td_status(uhci, td) & 0xF60000), a.k.a. |
761 | * uhci_status_bits(td_status(td)). | 762 | * uhci_status_bits(td_status(uhci, td)). |
762 | * Note: <status> does not include the TD_CTRL_NAK bit. | 763 | * Note: <status> does not include the TD_CTRL_NAK bit. |
763 | * <dir_out> is True for output TDs and False for input TDs. | 764 | * <dir_out> is True for output TDs and False for input TDs. |
764 | */ | 765 | */ |
@@ -794,7 +795,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, | |||
794 | int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); | 795 | int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); |
795 | int len = urb->transfer_buffer_length; | 796 | int len = urb->transfer_buffer_length; |
796 | dma_addr_t data = urb->transfer_dma; | 797 | dma_addr_t data = urb->transfer_dma; |
797 | __le32 *plink; | 798 | __hc32 *plink; |
798 | struct urb_priv *urbp = urb->hcpriv; | 799 | struct urb_priv *urbp = urb->hcpriv; |
799 | int skel; | 800 | int skel; |
800 | 801 | ||
@@ -811,7 +812,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, | |||
811 | */ | 812 | */ |
812 | td = qh->dummy_td; | 813 | td = qh->dummy_td; |
813 | uhci_add_td_to_urbp(td, urbp); | 814 | uhci_add_td_to_urbp(td, urbp); |
814 | uhci_fill_td(td, status, destination | uhci_explen(8), | 815 | uhci_fill_td(uhci, td, status, destination | uhci_explen(8), |
815 | urb->setup_dma); | 816 | urb->setup_dma); |
816 | plink = &td->link; | 817 | plink = &td->link; |
817 | status |= TD_CTRL_ACTIVE; | 818 | status |= TD_CTRL_ACTIVE; |
@@ -844,14 +845,14 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, | |||
844 | td = uhci_alloc_td(uhci); | 845 | td = uhci_alloc_td(uhci); |
845 | if (!td) | 846 | if (!td) |
846 | goto nomem; | 847 | goto nomem; |
847 | *plink = LINK_TO_TD(td); | 848 | *plink = LINK_TO_TD(uhci, td); |
848 | 849 | ||
849 | /* Alternate Data0/1 (start with Data1) */ | 850 | /* Alternate Data0/1 (start with Data1) */ |
850 | destination ^= TD_TOKEN_TOGGLE; | 851 | destination ^= TD_TOKEN_TOGGLE; |
851 | 852 | ||
852 | uhci_add_td_to_urbp(td, urbp); | 853 | uhci_add_td_to_urbp(td, urbp); |
853 | uhci_fill_td(td, status, destination | uhci_explen(pktsze), | 854 | uhci_fill_td(uhci, td, status, |
854 | data); | 855 | destination | uhci_explen(pktsze), data); |
855 | plink = &td->link; | 856 | plink = &td->link; |
856 | 857 | ||
857 | data += pktsze; | 858 | data += pktsze; |
@@ -864,14 +865,14 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, | |||
864 | td = uhci_alloc_td(uhci); | 865 | td = uhci_alloc_td(uhci); |
865 | if (!td) | 866 | if (!td) |
866 | goto nomem; | 867 | goto nomem; |
867 | *plink = LINK_TO_TD(td); | 868 | *plink = LINK_TO_TD(uhci, td); |
868 | 869 | ||
869 | /* Change direction for the status transaction */ | 870 | /* Change direction for the status transaction */ |
870 | destination ^= (USB_PID_IN ^ USB_PID_OUT); | 871 | destination ^= (USB_PID_IN ^ USB_PID_OUT); |
871 | destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ | 872 | destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ |
872 | 873 | ||
873 | uhci_add_td_to_urbp(td, urbp); | 874 | uhci_add_td_to_urbp(td, urbp); |
874 | uhci_fill_td(td, status | TD_CTRL_IOC, | 875 | uhci_fill_td(uhci, td, status | TD_CTRL_IOC, |
875 | destination | uhci_explen(0), 0); | 876 | destination | uhci_explen(0), 0); |
876 | plink = &td->link; | 877 | plink = &td->link; |
877 | 878 | ||
@@ -881,11 +882,11 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, | |||
881 | td = uhci_alloc_td(uhci); | 882 | td = uhci_alloc_td(uhci); |
882 | if (!td) | 883 | if (!td) |
883 | goto nomem; | 884 | goto nomem; |
884 | *plink = LINK_TO_TD(td); | 885 | *plink = LINK_TO_TD(uhci, td); |
885 | 886 | ||
886 | uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); | 887 | uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0); |
887 | wmb(); | 888 | wmb(); |
888 | qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE); | 889 | qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE); |
889 | qh->dummy_td = td; | 890 | qh->dummy_td = td; |
890 | 891 | ||
891 | /* Low-speed transfers get a different queue, and won't hog the bus. | 892 | /* Low-speed transfers get a different queue, and won't hog the bus. |
@@ -921,7 +922,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, | |||
921 | int len = urb->transfer_buffer_length; | 922 | int len = urb->transfer_buffer_length; |
922 | int this_sg_len; | 923 | int this_sg_len; |
923 | dma_addr_t data; | 924 | dma_addr_t data; |
924 | __le32 *plink; | 925 | __hc32 *plink; |
925 | struct urb_priv *urbp = urb->hcpriv; | 926 | struct urb_priv *urbp = urb->hcpriv; |
926 | unsigned int toggle; | 927 | unsigned int toggle; |
927 | struct scatterlist *sg; | 928 | struct scatterlist *sg; |
@@ -974,10 +975,10 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, | |||
974 | td = uhci_alloc_td(uhci); | 975 | td = uhci_alloc_td(uhci); |
975 | if (!td) | 976 | if (!td) |
976 | goto nomem; | 977 | goto nomem; |
977 | *plink = LINK_TO_TD(td); | 978 | *plink = LINK_TO_TD(uhci, td); |
978 | } | 979 | } |
979 | uhci_add_td_to_urbp(td, urbp); | 980 | uhci_add_td_to_urbp(td, urbp); |
980 | uhci_fill_td(td, status, | 981 | uhci_fill_td(uhci, td, status, |
981 | destination | uhci_explen(pktsze) | | 982 | destination | uhci_explen(pktsze) | |
982 | (toggle << TD_TOKEN_TOGGLE_SHIFT), | 983 | (toggle << TD_TOKEN_TOGGLE_SHIFT), |
983 | data); | 984 | data); |
@@ -1010,10 +1011,10 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, | |||
1010 | td = uhci_alloc_td(uhci); | 1011 | td = uhci_alloc_td(uhci); |
1011 | if (!td) | 1012 | if (!td) |
1012 | goto nomem; | 1013 | goto nomem; |
1013 | *plink = LINK_TO_TD(td); | 1014 | *plink = LINK_TO_TD(uhci, td); |
1014 | 1015 | ||
1015 | uhci_add_td_to_urbp(td, urbp); | 1016 | uhci_add_td_to_urbp(td, urbp); |
1016 | uhci_fill_td(td, status, | 1017 | uhci_fill_td(uhci, td, status, |
1017 | destination | uhci_explen(0) | | 1018 | destination | uhci_explen(0) | |
1018 | (toggle << TD_TOKEN_TOGGLE_SHIFT), | 1019 | (toggle << TD_TOKEN_TOGGLE_SHIFT), |
1019 | data); | 1020 | data); |
@@ -1028,7 +1029,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, | |||
1028 | * fast side but not enough to justify delaying an interrupt | 1029 | * fast side but not enough to justify delaying an interrupt |
1029 | * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT | 1030 | * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT |
1030 | * flag setting. */ | 1031 | * flag setting. */ |
1031 | td->status |= cpu_to_le32(TD_CTRL_IOC); | 1032 | td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); |
1032 | 1033 | ||
1033 | /* | 1034 | /* |
1034 | * Build the new dummy TD and activate the old one | 1035 | * Build the new dummy TD and activate the old one |
@@ -1036,11 +1037,11 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, | |||
1036 | td = uhci_alloc_td(uhci); | 1037 | td = uhci_alloc_td(uhci); |
1037 | if (!td) | 1038 | if (!td) |
1038 | goto nomem; | 1039 | goto nomem; |
1039 | *plink = LINK_TO_TD(td); | 1040 | *plink = LINK_TO_TD(uhci, td); |
1040 | 1041 | ||
1041 | uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); | 1042 | uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0); |
1042 | wmb(); | 1043 | wmb(); |
1043 | qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE); | 1044 | qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE); |
1044 | qh->dummy_td = td; | 1045 | qh->dummy_td = td; |
1045 | 1046 | ||
1046 | usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), | 1047 | usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), |
@@ -1133,7 +1134,7 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci, | |||
1133 | * the queue at the status stage transaction, which is | 1134 | * the queue at the status stage transaction, which is |
1134 | * the last TD. */ | 1135 | * the last TD. */ |
1135 | WARN_ON(list_empty(&urbp->td_list)); | 1136 | WARN_ON(list_empty(&urbp->td_list)); |
1136 | qh->element = LINK_TO_TD(td); | 1137 | qh->element = LINK_TO_TD(uhci, td); |
1137 | tmp = td->list.prev; | 1138 | tmp = td->list.prev; |
1138 | ret = -EINPROGRESS; | 1139 | ret = -EINPROGRESS; |
1139 | 1140 | ||
@@ -1142,8 +1143,9 @@ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci, | |||
1142 | /* When a bulk/interrupt transfer is short, we have to | 1143 | /* When a bulk/interrupt transfer is short, we have to |
1143 | * fix up the toggles of the following URBs on the queue | 1144 | * fix up the toggles of the following URBs on the queue |
1144 | * before restarting the queue at the next URB. */ | 1145 | * before restarting the queue at the next URB. */ |
1145 | qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1; | 1146 | qh->initial_toggle = |
1146 | uhci_fixup_toggles(qh, 1); | 1147 | uhci_toggle(td_token(uhci, qh->post_td)) ^ 1; |
1148 | uhci_fixup_toggles(uhci, qh, 1); | ||
1147 | 1149 | ||
1148 | if (list_empty(&urbp->td_list)) | 1150 | if (list_empty(&urbp->td_list)) |
1149 | td = qh->post_td; | 1151 | td = qh->post_td; |
@@ -1178,7 +1180,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) | |||
1178 | unsigned int ctrlstat; | 1180 | unsigned int ctrlstat; |
1179 | int len; | 1181 | int len; |
1180 | 1182 | ||
1181 | ctrlstat = td_status(td); | 1183 | ctrlstat = td_status(uhci, td); |
1182 | status = uhci_status_bits(ctrlstat); | 1184 | status = uhci_status_bits(ctrlstat); |
1183 | if (status & TD_CTRL_ACTIVE) | 1185 | if (status & TD_CTRL_ACTIVE) |
1184 | return -EINPROGRESS; | 1186 | return -EINPROGRESS; |
@@ -1188,7 +1190,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) | |||
1188 | 1190 | ||
1189 | if (status) { | 1191 | if (status) { |
1190 | ret = uhci_map_status(status, | 1192 | ret = uhci_map_status(status, |
1191 | uhci_packetout(td_token(td))); | 1193 | uhci_packetout(td_token(uhci, td))); |
1192 | if ((debug == 1 && ret != -EPIPE) || debug > 1) { | 1194 | if ((debug == 1 && ret != -EPIPE) || debug > 1) { |
1193 | /* Some debugging code */ | 1195 | /* Some debugging code */ |
1194 | dev_dbg(&urb->dev->dev, | 1196 | dev_dbg(&urb->dev->dev, |
@@ -1204,7 +1206,7 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) | |||
1204 | } | 1206 | } |
1205 | 1207 | ||
1206 | /* Did we receive a short packet? */ | 1208 | /* Did we receive a short packet? */ |
1207 | } else if (len < uhci_expected_length(td_token(td))) { | 1209 | } else if (len < uhci_expected_length(td_token(uhci, td))) { |
1208 | 1210 | ||
1209 | /* For control transfers, go to the status TD if | 1211 | /* For control transfers, go to the status TD if |
1210 | * this isn't already the last data TD */ | 1212 | * this isn't already the last data TD */ |
@@ -1236,10 +1238,10 @@ err: | |||
1236 | if (ret < 0) { | 1238 | if (ret < 0) { |
1237 | /* Note that the queue has stopped and save | 1239 | /* Note that the queue has stopped and save |
1238 | * the next toggle value */ | 1240 | * the next toggle value */ |
1239 | qh->element = UHCI_PTR_TERM; | 1241 | qh->element = UHCI_PTR_TERM(uhci); |
1240 | qh->is_stopped = 1; | 1242 | qh->is_stopped = 1; |
1241 | qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL); | 1243 | qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL); |
1242 | qh->initial_toggle = uhci_toggle(td_token(td)) ^ | 1244 | qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^ |
1243 | (ret == -EREMOTEIO); | 1245 | (ret == -EREMOTEIO); |
1244 | 1246 | ||
1245 | } else /* Short packet received */ | 1247 | } else /* Short packet received */ |
@@ -1335,14 +1337,14 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, | |||
1335 | return -ENOMEM; | 1337 | return -ENOMEM; |
1336 | 1338 | ||
1337 | uhci_add_td_to_urbp(td, urbp); | 1339 | uhci_add_td_to_urbp(td, urbp); |
1338 | uhci_fill_td(td, status, destination | | 1340 | uhci_fill_td(uhci, td, status, destination | |
1339 | uhci_explen(urb->iso_frame_desc[i].length), | 1341 | uhci_explen(urb->iso_frame_desc[i].length), |
1340 | urb->transfer_dma + | 1342 | urb->transfer_dma + |
1341 | urb->iso_frame_desc[i].offset); | 1343 | urb->iso_frame_desc[i].offset); |
1342 | } | 1344 | } |
1343 | 1345 | ||
1344 | /* Set the interrupt-on-completion flag on the last packet. */ | 1346 | /* Set the interrupt-on-completion flag on the last packet. */ |
1345 | td->status |= cpu_to_le32(TD_CTRL_IOC); | 1347 | td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); |
1346 | 1348 | ||
1347 | /* Add the TDs to the frame list */ | 1349 | /* Add the TDs to the frame list */ |
1348 | frame = urb->start_frame; | 1350 | frame = urb->start_frame; |
@@ -1378,7 +1380,7 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb) | |||
1378 | 1380 | ||
1379 | uhci_remove_tds_from_frame(uhci, qh->iso_frame); | 1381 | uhci_remove_tds_from_frame(uhci, qh->iso_frame); |
1380 | 1382 | ||
1381 | ctrlstat = td_status(td); | 1383 | ctrlstat = td_status(uhci, td); |
1382 | if (ctrlstat & TD_CTRL_ACTIVE) { | 1384 | if (ctrlstat & TD_CTRL_ACTIVE) { |
1383 | status = -EXDEV; /* TD was added too late? */ | 1385 | status = -EXDEV; /* TD was added too late? */ |
1384 | } else { | 1386 | } else { |
@@ -1629,7 +1631,7 @@ restart: | |||
1629 | * queue, the QH can now be re-activated. */ | 1631 | * queue, the QH can now be re-activated. */ |
1630 | if (!list_empty(&qh->queue)) { | 1632 | if (!list_empty(&qh->queue)) { |
1631 | if (qh->needs_fixup) | 1633 | if (qh->needs_fixup) |
1632 | uhci_fixup_toggles(qh, 0); | 1634 | uhci_fixup_toggles(uhci, qh, 0); |
1633 | 1635 | ||
1634 | /* If the first URB on the queue wants FSBR but its time | 1636 | /* If the first URB on the queue wants FSBR but its time |
1635 | * limit has expired, set the next TD to interrupt on | 1637 | * limit has expired, set the next TD to interrupt on |
@@ -1639,7 +1641,7 @@ restart: | |||
1639 | struct uhci_td *td = list_entry(urbp->td_list.next, | 1641 | struct uhci_td *td = list_entry(urbp->td_list.next, |
1640 | struct uhci_td, list); | 1642 | struct uhci_td, list); |
1641 | 1643 | ||
1642 | td->status |= __cpu_to_le32(TD_CTRL_IOC); | 1644 | td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC); |
1643 | } | 1645 | } |
1644 | 1646 | ||
1645 | uhci_activate_qh(uhci, qh); | 1647 | uhci_activate_qh(uhci, qh); |
@@ -1686,7 +1688,7 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) | |||
1686 | } else { | 1688 | } else { |
1687 | urbp = list_entry(qh->queue.next, struct urb_priv, node); | 1689 | urbp = list_entry(qh->queue.next, struct urb_priv, node); |
1688 | td = list_entry(urbp->td_list.next, struct uhci_td, list); | 1690 | td = list_entry(urbp->td_list.next, struct uhci_td, list); |
1689 | status = td_status(td); | 1691 | status = td_status(uhci, td); |
1690 | if (!(status & TD_CTRL_ACTIVE)) { | 1692 | if (!(status & TD_CTRL_ACTIVE)) { |
1691 | 1693 | ||
1692 | /* We're okay, the queue has advanced */ | 1694 | /* We're okay, the queue has advanced */ |
@@ -1704,7 +1706,8 @@ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) | |||
1704 | if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) { | 1706 | if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) { |
1705 | 1707 | ||
1706 | /* Detect the Intel bug and work around it */ | 1708 | /* Detect the Intel bug and work around it */ |
1707 | if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) { | 1709 | if (qh->post_td && qh_element(qh) == |
1710 | LINK_TO_TD(uhci, qh->post_td)) { | ||
1708 | qh->element = qh->post_td->link; | 1711 | qh->element = qh->post_td->link; |
1709 | qh->advance_jiffies = jiffies; | 1712 | qh->advance_jiffies = jiffies; |
1710 | ret = 1; | 1713 | ret = 1; |
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 0231814a97a5..2e0486178dbe 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
@@ -147,7 +147,7 @@ static void xhci_print_op_regs(struct xhci_hcd *xhci) | |||
147 | 147 | ||
148 | static void xhci_print_ports(struct xhci_hcd *xhci) | 148 | static void xhci_print_ports(struct xhci_hcd *xhci) |
149 | { | 149 | { |
150 | u32 __iomem *addr; | 150 | __le32 __iomem *addr; |
151 | int i, j; | 151 | int i, j; |
152 | int ports; | 152 | int ports; |
153 | char *names[NUM_PORT_REGS] = { | 153 | char *names[NUM_PORT_REGS] = { |
@@ -253,27 +253,27 @@ void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
253 | void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | 253 | void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) |
254 | { | 254 | { |
255 | u64 address; | 255 | u64 address; |
256 | u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK; | 256 | u32 type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK; |
257 | 257 | ||
258 | switch (type) { | 258 | switch (type) { |
259 | case TRB_TYPE(TRB_LINK): | 259 | case TRB_TYPE(TRB_LINK): |
260 | xhci_dbg(xhci, "Link TRB:\n"); | 260 | xhci_dbg(xhci, "Link TRB:\n"); |
261 | xhci_print_trb_offsets(xhci, trb); | 261 | xhci_print_trb_offsets(xhci, trb); |
262 | 262 | ||
263 | address = trb->link.segment_ptr; | 263 | address = le64_to_cpu(trb->link.segment_ptr); |
264 | xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); | 264 | xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); |
265 | 265 | ||
266 | xhci_dbg(xhci, "Interrupter target = 0x%x\n", | 266 | xhci_dbg(xhci, "Interrupter target = 0x%x\n", |
267 | GET_INTR_TARGET(trb->link.intr_target)); | 267 | GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target))); |
268 | xhci_dbg(xhci, "Cycle bit = %u\n", | 268 | xhci_dbg(xhci, "Cycle bit = %u\n", |
269 | (unsigned int) (trb->link.control & TRB_CYCLE)); | 269 | (unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE)); |
270 | xhci_dbg(xhci, "Toggle cycle bit = %u\n", | 270 | xhci_dbg(xhci, "Toggle cycle bit = %u\n", |
271 | (unsigned int) (trb->link.control & LINK_TOGGLE)); | 271 | (unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE)); |
272 | xhci_dbg(xhci, "No Snoop bit = %u\n", | 272 | xhci_dbg(xhci, "No Snoop bit = %u\n", |
273 | (unsigned int) (trb->link.control & TRB_NO_SNOOP)); | 273 | (unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP)); |
274 | break; | 274 | break; |
275 | case TRB_TYPE(TRB_TRANSFER): | 275 | case TRB_TYPE(TRB_TRANSFER): |
276 | address = trb->trans_event.buffer; | 276 | address = le64_to_cpu(trb->trans_event.buffer); |
277 | /* | 277 | /* |
278 | * FIXME: look at flags to figure out if it's an address or if | 278 | * FIXME: look at flags to figure out if it's an address or if |
279 | * the data is directly in the buffer field. | 279 | * the data is directly in the buffer field. |
@@ -281,11 +281,12 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
281 | xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); | 281 | xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); |
282 | break; | 282 | break; |
283 | case TRB_TYPE(TRB_COMPLETION): | 283 | case TRB_TYPE(TRB_COMPLETION): |
284 | address = trb->event_cmd.cmd_trb; | 284 | address = le64_to_cpu(trb->event_cmd.cmd_trb); |
285 | xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); | 285 | xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); |
286 | xhci_dbg(xhci, "Completion status = %u\n", | 286 | xhci_dbg(xhci, "Completion status = %u\n", |
287 | (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); | 287 | (unsigned int) GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status))); |
288 | xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags); | 288 | xhci_dbg(xhci, "Flags = 0x%x\n", |
289 | (unsigned int) le32_to_cpu(trb->event_cmd.flags)); | ||
289 | break; | 290 | break; |
290 | default: | 291 | default: |
291 | xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n", | 292 | xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n", |
@@ -311,16 +312,16 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
311 | void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg) | 312 | void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg) |
312 | { | 313 | { |
313 | int i; | 314 | int i; |
314 | u32 addr = (u32) seg->dma; | 315 | u64 addr = seg->dma; |
315 | union xhci_trb *trb = seg->trbs; | 316 | union xhci_trb *trb = seg->trbs; |
316 | 317 | ||
317 | for (i = 0; i < TRBS_PER_SEGMENT; ++i) { | 318 | for (i = 0; i < TRBS_PER_SEGMENT; ++i) { |
318 | trb = &seg->trbs[i]; | 319 | trb = &seg->trbs[i]; |
319 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, | 320 | xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr, |
320 | lower_32_bits(trb->link.segment_ptr), | 321 | (u32)lower_32_bits(le64_to_cpu(trb->link.segment_ptr)), |
321 | upper_32_bits(trb->link.segment_ptr), | 322 | (u32)upper_32_bits(le64_to_cpu(trb->link.segment_ptr)), |
322 | (unsigned int) trb->link.intr_target, | 323 | (unsigned int) le32_to_cpu(trb->link.intr_target), |
323 | (unsigned int) trb->link.control); | 324 | (unsigned int) le32_to_cpu(trb->link.control)); |
324 | addr += sizeof(*trb); | 325 | addr += sizeof(*trb); |
325 | } | 326 | } |
326 | } | 327 | } |
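
From here on, the xHCI changes follow one rule: data structures the controller reads from host memory (TRBs, contexts, the ERST) are little-endian by specification, so their fields are declared __le32/__le64 and every CPU-side access goes through le32_to_cpu()/le64_to_cpu(), while MMIO registers keep xhci_readl()/xhci_writel() and only gain __le32 __iomem annotations for sparse. An illustrative decode of the same link-TRB fields used by the debug code above (the function name is made up for the example):

	/* sketch: find where a link TRB points, reading fields from host memory */
	static dma_addr_t xhci_example_link_target(union xhci_trb *trb)
	{
		u32 control = le32_to_cpu(trb->link.control);

		if ((control & TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
			return 0;
		/* segment_ptr is a 64-bit little-endian pointer in the ring */
		return le64_to_cpu(trb->link.segment_ptr);
	}
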
@@ -391,18 +392,18 @@ void xhci_dbg_ep_rings(struct xhci_hcd *xhci, | |||
391 | 392 | ||
392 | void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) | 393 | void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) |
393 | { | 394 | { |
394 | u32 addr = (u32) erst->erst_dma_addr; | 395 | u64 addr = erst->erst_dma_addr; |
395 | int i; | 396 | int i; |
396 | struct xhci_erst_entry *entry; | 397 | struct xhci_erst_entry *entry; |
397 | 398 | ||
398 | for (i = 0; i < erst->num_entries; ++i) { | 399 | for (i = 0; i < erst->num_entries; ++i) { |
399 | entry = &erst->entries[i]; | 400 | entry = &erst->entries[i]; |
400 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", | 401 | xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", |
401 | (unsigned int) addr, | 402 | addr, |
402 | lower_32_bits(entry->seg_addr), | 403 | lower_32_bits(le64_to_cpu(entry->seg_addr)), |
403 | upper_32_bits(entry->seg_addr), | 404 | upper_32_bits(le64_to_cpu(entry->seg_addr)), |
404 | (unsigned int) entry->seg_size, | 405 | (unsigned int) le32_to_cpu(entry->seg_size), |
405 | (unsigned int) entry->rsvd); | 406 | (unsigned int) le32_to_cpu(entry->rsvd)); |
406 | addr += sizeof(*entry); | 407 | addr += sizeof(*entry); |
407 | } | 408 | } |
408 | } | 409 | } |
@@ -436,7 +437,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, | |||
436 | { | 437 | { |
437 | struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); | 438 | struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); |
438 | 439 | ||
439 | switch (GET_SLOT_STATE(slot_ctx->dev_state)) { | 440 | switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) { |
440 | case 0: | 441 | case 0: |
441 | return "enabled/disabled"; | 442 | return "enabled/disabled"; |
442 | case 1: | 443 | case 1: |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 73f75d26436c..0be788cc2fdb 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -50,7 +50,7 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci, | |||
50 | temp |= 0x0008; | 50 | temp |= 0x0008; |
51 | /* Bits 6:5 - no TTs in root ports */ | 51 | /* Bits 6:5 - no TTs in root ports */ |
52 | /* Bit 7 - no port indicators */ | 52 | /* Bit 7 - no port indicators */ |
53 | desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp); | 53 | desc->wHubCharacteristics = cpu_to_le16(temp); |
54 | } | 54 | } |
55 | 55 | ||
56 | /* Fill in the USB 2.0 roothub descriptor */ | 56 | /* Fill in the USB 2.0 roothub descriptor */ |
@@ -314,7 +314,7 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id) | |||
314 | } | 314 | } |
315 | 315 | ||
316 | static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, | 316 | static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, |
317 | u16 wIndex, u32 __iomem *addr, u32 port_status) | 317 | u16 wIndex, __le32 __iomem *addr, u32 port_status) |
318 | { | 318 | { |
319 | /* Don't allow the USB core to disable SuperSpeed ports. */ | 319 | /* Don't allow the USB core to disable SuperSpeed ports. */ |
320 | if (hcd->speed == HCD_USB3) { | 320 | if (hcd->speed == HCD_USB3) { |
@@ -331,7 +331,7 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, | |||
331 | } | 331 | } |
332 | 332 | ||
333 | static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, | 333 | static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, |
334 | u16 wIndex, u32 __iomem *addr, u32 port_status) | 334 | u16 wIndex, __le32 __iomem *addr, u32 port_status) |
335 | { | 335 | { |
336 | char *port_change_bit; | 336 | char *port_change_bit; |
337 | u32 status; | 337 | u32 status; |
@@ -341,6 +341,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, | |||
341 | status = PORT_RC; | 341 | status = PORT_RC; |
342 | port_change_bit = "reset"; | 342 | port_change_bit = "reset"; |
343 | break; | 343 | break; |
344 | case USB_PORT_FEAT_C_BH_PORT_RESET: | ||
345 | status = PORT_WRC; | ||
346 | port_change_bit = "warm(BH) reset"; | ||
347 | break; | ||
344 | case USB_PORT_FEAT_C_CONNECTION: | 348 | case USB_PORT_FEAT_C_CONNECTION: |
345 | status = PORT_CSC; | 349 | status = PORT_CSC; |
346 | port_change_bit = "connect"; | 350 | port_change_bit = "connect"; |
@@ -357,6 +361,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, | |||
357 | status = PORT_PLC; | 361 | status = PORT_PLC; |
358 | port_change_bit = "suspend/resume"; | 362 | port_change_bit = "suspend/resume"; |
359 | break; | 363 | break; |
364 | case USB_PORT_FEAT_C_PORT_LINK_STATE: | ||
365 | status = PORT_PLC; | ||
366 | port_change_bit = "link state"; | ||
367 | break; | ||
360 | default: | 368 | default: |
361 | /* Should never happen */ | 369 | /* Should never happen */ |
362 | return; | 370 | return; |
@@ -368,25 +376,36 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, | |||
368 | port_change_bit, wIndex, port_status); | 376 | port_change_bit, wIndex, port_status); |
369 | } | 377 | } |
370 | 378 | ||
379 | static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array) | ||
380 | { | ||
381 | int max_ports; | ||
382 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
383 | |||
384 | if (hcd->speed == HCD_USB3) { | ||
385 | max_ports = xhci->num_usb3_ports; | ||
386 | *port_array = xhci->usb3_ports; | ||
387 | } else { | ||
388 | max_ports = xhci->num_usb2_ports; | ||
389 | *port_array = xhci->usb2_ports; | ||
390 | } | ||
391 | |||
392 | return max_ports; | ||
393 | } | ||
394 | |||
371 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | 395 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, |
372 | u16 wIndex, char *buf, u16 wLength) | 396 | u16 wIndex, char *buf, u16 wLength) |
373 | { | 397 | { |
374 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 398 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
375 | int ports; | 399 | int max_ports; |
376 | unsigned long flags; | 400 | unsigned long flags; |
377 | u32 temp, temp1, status; | 401 | u32 temp, temp1, status; |
378 | int retval = 0; | 402 | int retval = 0; |
379 | u32 __iomem **port_array; | 403 | __le32 __iomem **port_array; |
380 | int slot_id; | 404 | int slot_id; |
381 | struct xhci_bus_state *bus_state; | 405 | struct xhci_bus_state *bus_state; |
406 | u16 link_state = 0; | ||
382 | 407 | ||
383 | if (hcd->speed == HCD_USB3) { | 408 | max_ports = xhci_get_ports(hcd, &port_array); |
384 | ports = xhci->num_usb3_ports; | ||
385 | port_array = xhci->usb3_ports; | ||
386 | } else { | ||
387 | ports = xhci->num_usb2_ports; | ||
388 | port_array = xhci->usb2_ports; | ||
389 | } | ||
390 | bus_state = &xhci->bus_state[hcd_index(hcd)]; | 409 | bus_state = &xhci->bus_state[hcd_index(hcd)]; |
391 | 410 | ||
392 | spin_lock_irqsave(&xhci->lock, flags); | 411 | spin_lock_irqsave(&xhci->lock, flags); |
@@ -411,7 +430,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
411 | (struct usb_hub_descriptor *) buf); | 430 | (struct usb_hub_descriptor *) buf); |
412 | break; | 431 | break; |
413 | case GetPortStatus: | 432 | case GetPortStatus: |
414 | if (!wIndex || wIndex > ports) | 433 | if (!wIndex || wIndex > max_ports) |
415 | goto error; | 434 | goto error; |
416 | wIndex--; | 435 | wIndex--; |
417 | status = 0; | 436 | status = 0; |
@@ -422,9 +441,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
422 | } | 441 | } |
423 | xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp); | 442 | xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp); |
424 | 443 | ||
425 | /* FIXME - should we return a port status value like the USB | ||
426 | * 3.0 external hubs do? | ||
427 | */ | ||
428 | /* wPortChange bits */ | 444 | /* wPortChange bits */ |
429 | if (temp & PORT_CSC) | 445 | if (temp & PORT_CSC) |
430 | status |= USB_PORT_STAT_C_CONNECTION << 16; | 446 | status |= USB_PORT_STAT_C_CONNECTION << 16; |
@@ -432,13 +448,21 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
432 | status |= USB_PORT_STAT_C_ENABLE << 16; | 448 | status |= USB_PORT_STAT_C_ENABLE << 16; |
433 | if ((temp & PORT_OCC)) | 449 | if ((temp & PORT_OCC)) |
434 | status |= USB_PORT_STAT_C_OVERCURRENT << 16; | 450 | status |= USB_PORT_STAT_C_OVERCURRENT << 16; |
435 | /* | 451 | if ((temp & PORT_RC)) |
436 | * FIXME ignoring reset and USB 2.1/3.0 specific | 452 | status |= USB_PORT_STAT_C_RESET << 16; |
437 | * changes | 453 | /* USB3.0 only */ |
438 | */ | 454 | if (hcd->speed == HCD_USB3) { |
439 | if ((temp & PORT_PLS_MASK) == XDEV_U3 | 455 | if ((temp & PORT_PLC)) |
440 | && (temp & PORT_POWER)) | 456 | status |= USB_PORT_STAT_C_LINK_STATE << 16; |
441 | status |= 1 << USB_PORT_FEAT_SUSPEND; | 457 | if ((temp & PORT_WRC)) |
458 | status |= USB_PORT_STAT_C_BH_RESET << 16; | ||
459 | } | ||
460 | |||
461 | if (hcd->speed != HCD_USB3) { | ||
462 | if ((temp & PORT_PLS_MASK) == XDEV_U3 | ||
463 | && (temp & PORT_POWER)) | ||
464 | status |= USB_PORT_STAT_SUSPEND; | ||
465 | } | ||
442 | if ((temp & PORT_PLS_MASK) == XDEV_RESUME) { | 466 | if ((temp & PORT_PLS_MASK) == XDEV_RESUME) { |
443 | if ((temp & PORT_RESET) || !(temp & PORT_PE)) | 467 | if ((temp & PORT_RESET) || !(temp & PORT_PE)) |
444 | goto error; | 468 | goto error; |
@@ -469,7 +493,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
469 | && (temp & PORT_POWER) | 493 | && (temp & PORT_POWER) |
470 | && (bus_state->suspended_ports & (1 << wIndex))) { | 494 | && (bus_state->suspended_ports & (1 << wIndex))) { |
471 | bus_state->suspended_ports &= ~(1 << wIndex); | 495 | bus_state->suspended_ports &= ~(1 << wIndex); |
472 | bus_state->port_c_suspend |= 1 << wIndex; | 496 | if (hcd->speed != HCD_USB3) |
497 | bus_state->port_c_suspend |= 1 << wIndex; | ||
473 | } | 498 | } |
474 | if (temp & PORT_CONNECT) { | 499 | if (temp & PORT_CONNECT) { |
475 | status |= USB_PORT_STAT_CONNECTION; | 500 | status |= USB_PORT_STAT_CONNECTION; |
@@ -481,16 +506,30 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
481 | status |= USB_PORT_STAT_OVERCURRENT; | 506 | status |= USB_PORT_STAT_OVERCURRENT; |
482 | if (temp & PORT_RESET) | 507 | if (temp & PORT_RESET) |
483 | status |= USB_PORT_STAT_RESET; | 508 | status |= USB_PORT_STAT_RESET; |
484 | if (temp & PORT_POWER) | 509 | if (temp & PORT_POWER) { |
485 | status |= USB_PORT_STAT_POWER; | 510 | if (hcd->speed == HCD_USB3) |
511 | status |= USB_SS_PORT_STAT_POWER; | ||
512 | else | ||
513 | status |= USB_PORT_STAT_POWER; | ||
514 | } | ||
515 | /* Port Link State */ | ||
516 | if (hcd->speed == HCD_USB3) { | ||
517 | /* The Resume state is an xHCI-internal state. | ||
518 | * Do not report it to the USB core. | ||
519 | */ | ||
520 | if ((temp & PORT_PLS_MASK) != XDEV_RESUME) | ||
521 | status |= (temp & PORT_PLS_MASK); | ||
522 | } | ||
486 | if (bus_state->port_c_suspend & (1 << wIndex)) | 523 | if (bus_state->port_c_suspend & (1 << wIndex)) |
487 | status |= 1 << USB_PORT_FEAT_C_SUSPEND; | 524 | status |= 1 << USB_PORT_FEAT_C_SUSPEND; |
488 | xhci_dbg(xhci, "Get port status returned 0x%x\n", status); | 525 | xhci_dbg(xhci, "Get port status returned 0x%x\n", status); |
489 | put_unaligned(cpu_to_le32(status), (__le32 *) buf); | 526 | put_unaligned(cpu_to_le32(status), (__le32 *) buf); |
490 | break; | 527 | break; |
491 | case SetPortFeature: | 528 | case SetPortFeature: |
529 | if (wValue == USB_PORT_FEAT_LINK_STATE) | ||
530 | link_state = (wIndex & 0xff00) >> 3; | ||
492 | wIndex &= 0xff; | 531 | wIndex &= 0xff; |
493 | if (!wIndex || wIndex > ports) | 532 | if (!wIndex || wIndex > max_ports) |
494 | goto error; | 533 | goto error; |
495 | wIndex--; | 534 | wIndex--; |
496 | temp = xhci_readl(xhci, port_array[wIndex]); | 535 | temp = xhci_readl(xhci, port_array[wIndex]); |
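
In the GetPortStatus case above, the returned u32 packs wPortStatus into the low 16 bits and wPortChange into the high 16 bits before being written out little-endian with put_unaligned(). A worked example, assuming a powered SuperSpeed port sitting in U0 with an unacknowledged connect change:

	status = USB_PORT_STAT_CONNECTION		/* wPortStatus: connected */
		| USB_SS_PORT_STAT_POWER		/* powered, SS encoding */
		| (temp & PORT_PLS_MASK)		/* link state, U0 here */
		| (USB_PORT_STAT_C_CONNECTION << 16);	/* wPortChange bits */
	put_unaligned(cpu_to_le32(status), (__le32 *) buf);

PORTSC's PLS field and the USB 3.0 wPortStatus link-state field both occupy bits 8:5, which is why the masked value can be OR'd in directly.
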
@@ -537,6 +576,44 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
537 | temp = xhci_readl(xhci, port_array[wIndex]); | 576 | temp = xhci_readl(xhci, port_array[wIndex]); |
538 | bus_state->suspended_ports |= 1 << wIndex; | 577 | bus_state->suspended_ports |= 1 << wIndex; |
539 | break; | 578 | break; |
579 | case USB_PORT_FEAT_LINK_STATE: | ||
580 | temp = xhci_readl(xhci, port_array[wIndex]); | ||
581 | /* Software should not attempt to set a | ||
582 | * port link state above '5' (Rx.Detect), | ||
583 | * and the port must be enabled. | ||
584 | */ | ||
585 | if ((temp & PORT_PE) == 0 || | ||
586 | (link_state > USB_SS_PORT_LS_RX_DETECT)) { | ||
587 | xhci_warn(xhci, "Cannot set link state.\n"); | ||
588 | goto error; | ||
589 | } | ||
590 | |||
591 | if (link_state == USB_SS_PORT_LS_U3) { | ||
592 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, | ||
593 | wIndex + 1); | ||
594 | if (slot_id) { | ||
595 | /* unlock to execute stop endpoint | ||
596 | * commands */ | ||
597 | spin_unlock_irqrestore(&xhci->lock, | ||
598 | flags); | ||
599 | xhci_stop_device(xhci, slot_id, 1); | ||
600 | spin_lock_irqsave(&xhci->lock, flags); | ||
601 | } | ||
602 | } | ||
603 | |||
604 | temp = xhci_port_state_to_neutral(temp); | ||
605 | temp &= ~PORT_PLS_MASK; | ||
606 | temp |= PORT_LINK_STROBE | link_state; | ||
607 | xhci_writel(xhci, temp, port_array[wIndex]); | ||
608 | |||
609 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
610 | msleep(20); /* wait for the device to enter the requested link state */ | ||
611 | spin_lock_irqsave(&xhci->lock, flags); | ||
612 | |||
613 | temp = xhci_readl(xhci, port_array[wIndex]); | ||
614 | if (link_state == USB_SS_PORT_LS_U3) | ||
615 | bus_state->suspended_ports |= 1 << wIndex; | ||
616 | break; | ||
540 | case USB_PORT_FEAT_POWER: | 617 | case USB_PORT_FEAT_POWER: |
541 | /* | 618 | /* |
542 | * Turn on ports, even if there isn't per-port switching. | 619 | * Turn on ports, even if there isn't per-port switching. |
@@ -557,6 +634,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
557 | temp = xhci_readl(xhci, port_array[wIndex]); | 634 | temp = xhci_readl(xhci, port_array[wIndex]); |
558 | xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp); | 635 | xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp); |
559 | break; | 636 | break; |
637 | case USB_PORT_FEAT_BH_PORT_RESET: | ||
638 | temp |= PORT_WR; | ||
639 | xhci_writel(xhci, temp, port_array[wIndex]); | ||
640 | |||
641 | temp = xhci_readl(xhci, port_array[wIndex]); | ||
642 | break; | ||
560 | default: | 643 | default: |
561 | goto error; | 644 | goto error; |
562 | } | 645 | } |
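
The "(wIndex & 0xff00) >> 3" in the USB_PORT_FEAT_LINK_STATE case deserves a second look: the hub request carries the target link state in the high byte of wIndex, and PORTSC's PLS field occupies bits 8:5, so shifting right by 3 rather than 8 lands the value exactly where "temp |= PORT_LINK_STROBE | link_state" needs it. A quick worked example with hypothetical values:

	/* request U3 (link state 3) on port 4 */
	u16 wIndex     = (3 << 8) | 4;			/* 0x0304 */
	u16 link_state = (wIndex & 0xff00) >> 3;	/* 0x0300 >> 3 = 0x60 = 3 << 5 */
	/* 3 << 5 is XDEV_U3, already aligned with PORT_PLS_MASK (bits 8:5) */
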
@@ -564,7 +647,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
564 | temp = xhci_readl(xhci, port_array[wIndex]); | 647 | temp = xhci_readl(xhci, port_array[wIndex]); |
565 | break; | 648 | break; |
566 | case ClearPortFeature: | 649 | case ClearPortFeature: |
567 | if (!wIndex || wIndex > ports) | 650 | if (!wIndex || wIndex > max_ports) |
568 | goto error; | 651 | goto error; |
569 | wIndex--; | 652 | wIndex--; |
570 | temp = xhci_readl(xhci, port_array[wIndex]); | 653 | temp = xhci_readl(xhci, port_array[wIndex]); |
@@ -584,35 +667,27 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
584 | if (temp & XDEV_U3) { | 667 | if (temp & XDEV_U3) { |
585 | if ((temp & PORT_PE) == 0) | 668 | if ((temp & PORT_PE) == 0) |
586 | goto error; | 669 | goto error; |
587 | if (DEV_SUPERSPEED(temp)) { | ||
588 | temp = xhci_port_state_to_neutral(temp); | ||
589 | temp &= ~PORT_PLS_MASK; | ||
590 | temp |= PORT_LINK_STROBE | XDEV_U0; | ||
591 | xhci_writel(xhci, temp, | ||
592 | port_array[wIndex]); | ||
593 | xhci_readl(xhci, port_array[wIndex]); | ||
594 | } else { | ||
595 | temp = xhci_port_state_to_neutral(temp); | ||
596 | temp &= ~PORT_PLS_MASK; | ||
597 | temp |= PORT_LINK_STROBE | XDEV_RESUME; | ||
598 | xhci_writel(xhci, temp, | ||
599 | port_array[wIndex]); | ||
600 | 670 | ||
601 | spin_unlock_irqrestore(&xhci->lock, | 671 | temp = xhci_port_state_to_neutral(temp); |
602 | flags); | 672 | temp &= ~PORT_PLS_MASK; |
603 | msleep(20); | 673 | temp |= PORT_LINK_STROBE | XDEV_RESUME; |
604 | spin_lock_irqsave(&xhci->lock, flags); | 674 | xhci_writel(xhci, temp, |
675 | port_array[wIndex]); | ||
605 | 676 | ||
606 | temp = xhci_readl(xhci, | 677 | spin_unlock_irqrestore(&xhci->lock, |
607 | port_array[wIndex]); | 678 | flags); |
608 | temp = xhci_port_state_to_neutral(temp); | 679 | msleep(20); |
609 | temp &= ~PORT_PLS_MASK; | 680 | spin_lock_irqsave(&xhci->lock, flags); |
610 | temp |= PORT_LINK_STROBE | XDEV_U0; | 681 | |
611 | xhci_writel(xhci, temp, | 682 | temp = xhci_readl(xhci, |
612 | port_array[wIndex]); | 683 | port_array[wIndex]); |
613 | } | 684 | temp = xhci_port_state_to_neutral(temp); |
614 | bus_state->port_c_suspend |= 1 << wIndex; | 685 | temp &= ~PORT_PLS_MASK; |
686 | temp |= PORT_LINK_STROBE | XDEV_U0; | ||
687 | xhci_writel(xhci, temp, | ||
688 | port_array[wIndex]); | ||
615 | } | 689 | } |
690 | bus_state->port_c_suspend |= 1 << wIndex; | ||
616 | 691 | ||
617 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, | 692 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, |
618 | wIndex + 1); | 693 | wIndex + 1); |
@@ -625,9 +700,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
625 | case USB_PORT_FEAT_C_SUSPEND: | 700 | case USB_PORT_FEAT_C_SUSPEND: |
626 | bus_state->port_c_suspend &= ~(1 << wIndex); | 701 | bus_state->port_c_suspend &= ~(1 << wIndex); |
627 | case USB_PORT_FEAT_C_RESET: | 702 | case USB_PORT_FEAT_C_RESET: |
703 | case USB_PORT_FEAT_C_BH_PORT_RESET: | ||
628 | case USB_PORT_FEAT_C_CONNECTION: | 704 | case USB_PORT_FEAT_C_CONNECTION: |
629 | case USB_PORT_FEAT_C_OVER_CURRENT: | 705 | case USB_PORT_FEAT_C_OVER_CURRENT: |
630 | case USB_PORT_FEAT_C_ENABLE: | 706 | case USB_PORT_FEAT_C_ENABLE: |
707 | case USB_PORT_FEAT_C_PORT_LINK_STATE: | ||
631 | xhci_clear_port_change_bit(xhci, wValue, wIndex, | 708 | xhci_clear_port_change_bit(xhci, wValue, wIndex, |
632 | port_array[wIndex], temp); | 709 | port_array[wIndex], temp); |
633 | break; | 710 | break; |
@@ -663,29 +740,23 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
663 | u32 mask; | 740 | u32 mask; |
664 | int i, retval; | 741 | int i, retval; |
665 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 742 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
666 | int ports; | 743 | int max_ports; |
667 | u32 __iomem **port_array; | 744 | __le32 __iomem **port_array; |
668 | struct xhci_bus_state *bus_state; | 745 | struct xhci_bus_state *bus_state; |
669 | 746 | ||
670 | if (hcd->speed == HCD_USB3) { | 747 | max_ports = xhci_get_ports(hcd, &port_array); |
671 | ports = xhci->num_usb3_ports; | ||
672 | port_array = xhci->usb3_ports; | ||
673 | } else { | ||
674 | ports = xhci->num_usb2_ports; | ||
675 | port_array = xhci->usb2_ports; | ||
676 | } | ||
677 | bus_state = &xhci->bus_state[hcd_index(hcd)]; | 748 | bus_state = &xhci->bus_state[hcd_index(hcd)]; |
678 | 749 | ||
679 | /* Initial status is no changes */ | 750 | /* Initial status is no changes */ |
680 | retval = (ports + 8) / 8; | 751 | retval = (max_ports + 8) / 8; |
681 | memset(buf, 0, retval); | 752 | memset(buf, 0, retval); |
682 | status = 0; | 753 | status = 0; |
683 | 754 | ||
684 | mask = PORT_CSC | PORT_PEC | PORT_OCC; | 755 | mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; |
685 | 756 | ||
686 | spin_lock_irqsave(&xhci->lock, flags); | 757 | spin_lock_irqsave(&xhci->lock, flags); |
687 | /* For each port, did anything change? If so, set that bit in buf. */ | 758 | /* For each port, did anything change? If so, set that bit in buf. */ |
688 | for (i = 0; i < ports; i++) { | 759 | for (i = 0; i < max_ports; i++) { |
689 | temp = xhci_readl(xhci, port_array[i]); | 760 | temp = xhci_readl(xhci, port_array[i]); |
690 | if (temp == 0xffffffff) { | 761 | if (temp == 0xffffffff) { |
691 | retval = -ENODEV; | 762 | retval = -ENODEV; |
@@ -709,19 +780,11 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
709 | { | 780 | { |
710 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 781 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
711 | int max_ports, port_index; | 782 | int max_ports, port_index; |
712 | u32 __iomem **port_array; | 783 | __le32 __iomem **port_array; |
713 | struct xhci_bus_state *bus_state; | 784 | struct xhci_bus_state *bus_state; |
714 | unsigned long flags; | 785 | unsigned long flags; |
715 | 786 | ||
716 | if (hcd->speed == HCD_USB3) { | 787 | max_ports = xhci_get_ports(hcd, &port_array); |
717 | max_ports = xhci->num_usb3_ports; | ||
718 | port_array = xhci->usb3_ports; | ||
719 | xhci_dbg(xhci, "suspend USB 3.0 root hub\n"); | ||
720 | } else { | ||
721 | max_ports = xhci->num_usb2_ports; | ||
722 | port_array = xhci->usb2_ports; | ||
723 | xhci_dbg(xhci, "suspend USB 2.0 root hub\n"); | ||
724 | } | ||
725 | bus_state = &xhci->bus_state[hcd_index(hcd)]; | 788 | bus_state = &xhci->bus_state[hcd_index(hcd)]; |
726 | 789 | ||
727 | spin_lock_irqsave(&xhci->lock, flags); | 790 | spin_lock_irqsave(&xhci->lock, flags); |
@@ -779,7 +842,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
779 | 842 | ||
780 | if (hcd->speed != HCD_USB3) { | 843 | if (hcd->speed != HCD_USB3) { |
781 | /* enable remote wake up for USB 2.0 */ | 844 | /* enable remote wake up for USB 2.0 */ |
782 | u32 __iomem *addr; | 845 | __le32 __iomem *addr; |
783 | u32 tmp; | 846 | u32 tmp; |
784 | 847 | ||
785 | /* Add one to the port status register address to get | 848 | /* Add one to the port status register address to get |
@@ -801,20 +864,12 @@ int xhci_bus_resume(struct usb_hcd *hcd) | |||
801 | { | 864 | { |
802 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 865 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
803 | int max_ports, port_index; | 866 | int max_ports, port_index; |
804 | u32 __iomem **port_array; | 867 | __le32 __iomem **port_array; |
805 | struct xhci_bus_state *bus_state; | 868 | struct xhci_bus_state *bus_state; |
806 | u32 temp; | 869 | u32 temp; |
807 | unsigned long flags; | 870 | unsigned long flags; |
808 | 871 | ||
809 | if (hcd->speed == HCD_USB3) { | 872 | max_ports = xhci_get_ports(hcd, &port_array); |
810 | max_ports = xhci->num_usb3_ports; | ||
811 | port_array = xhci->usb3_ports; | ||
812 | xhci_dbg(xhci, "resume USB 3.0 root hub\n"); | ||
813 | } else { | ||
814 | max_ports = xhci->num_usb2_ports; | ||
815 | port_array = xhci->usb2_ports; | ||
816 | xhci_dbg(xhci, "resume USB 2.0 root hub\n"); | ||
817 | } | ||
818 | bus_state = &xhci->bus_state[hcd_index(hcd)]; | 873 | bus_state = &xhci->bus_state[hcd_index(hcd)]; |
819 | 874 | ||
820 | if (time_before(jiffies, bus_state->next_statechange)) | 875 | if (time_before(jiffies, bus_state->next_statechange)) |
@@ -890,7 +945,7 @@ int xhci_bus_resume(struct usb_hcd *hcd) | |||
890 | 945 | ||
891 | if (hcd->speed != HCD_USB3) { | 946 | if (hcd->speed != HCD_USB3) { |
892 | /* disable remote wake up for USB 2.0 */ | 947 | /* disable remote wake up for USB 2.0 */ |
893 | u32 __iomem *addr; | 948 | __le32 __iomem *addr; |
894 | u32 tmp; | 949 | u32 tmp; |
895 | 950 | ||
896 | /* Add one to the port status register address to get | 951 | /* Add one to the port status register address to get |
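[Editorial note] The xhci-hub.c hunks above replace the open-coded "if (hcd->speed == HCD_USB3)" blocks with a single xhci_get_ports() helper. The helper itself is not part of this excerpt; the sketch below is only inferred from the call sites shown (return the root-hub port count and hand back the matching __le32 register array) and may differ from the actual code in xhci-hub.c, which for instance may keep the per-bus debug messages dropped from the suspend/resume paths.

#include "xhci.h"	/* struct xhci_hcd, hcd_to_xhci(); sketch only */

/* Inferred shape of the helper used by the hunks above -- not a verbatim
 * copy of the real xhci_get_ports().
 */
static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int max_ports;

	if (hcd->speed == HCD_USB3) {
		max_ports = xhci->num_usb3_ports;
		*port_array = xhci->usb3_ports;
	} else {
		max_ports = xhci->num_usb2_ports;
		*port_array = xhci->usb2_ports;
	}
	return max_ports;
}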
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 627f3438028c..26caba4c1950 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -89,16 +89,17 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |||
89 | return; | 89 | return; |
90 | prev->next = next; | 90 | prev->next = next; |
91 | if (link_trbs) { | 91 | if (link_trbs) { |
92 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; | 92 | prev->trbs[TRBS_PER_SEGMENT-1].link. |
93 | segment_ptr = cpu_to_le64(next->dma); | ||
93 | 94 | ||
94 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | 95 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ |
95 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | 96 | val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); |
96 | val &= ~TRB_TYPE_BITMASK; | 97 | val &= ~TRB_TYPE_BITMASK; |
97 | val |= TRB_TYPE(TRB_LINK); | 98 | val |= TRB_TYPE(TRB_LINK); |
98 | /* Always set the chain bit with 0.95 hardware */ | 99 | /* Always set the chain bit with 0.95 hardware */ |
99 | if (xhci_link_trb_quirk(xhci)) | 100 | if (xhci_link_trb_quirk(xhci)) |
100 | val |= TRB_CHAIN; | 101 | val |= TRB_CHAIN; |
101 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; | 102 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); |
102 | } | 103 | } |
103 | xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", | 104 | xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", |
104 | (unsigned long long)prev->dma, | 105 | (unsigned long long)prev->dma, |
@@ -186,7 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, | |||
186 | 187 | ||
187 | if (link_trbs) { | 188 | if (link_trbs) { |
188 | /* See section 4.9.2.1 and 6.4.4.1 */ | 189 | /* See section 4.9.2.1 and 6.4.4.1 */ |
189 | prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); | 190 | prev->trbs[TRBS_PER_SEGMENT-1].link. |
191 | control |= cpu_to_le32(LINK_TOGGLE); | ||
190 | xhci_dbg(xhci, "Wrote link toggle flag to" | 192 | xhci_dbg(xhci, "Wrote link toggle flag to" |
191 | " segment %p (virtual), 0x%llx (DMA)\n", | 193 | " segment %p (virtual), 0x%llx (DMA)\n", |
192 | prev, (unsigned long long)prev->dma); | 194 | prev, (unsigned long long)prev->dma); |
@@ -207,14 +209,13 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, | |||
207 | 209 | ||
208 | rings_cached = virt_dev->num_rings_cached; | 210 | rings_cached = virt_dev->num_rings_cached; |
209 | if (rings_cached < XHCI_MAX_RINGS_CACHED) { | 211 | if (rings_cached < XHCI_MAX_RINGS_CACHED) { |
210 | virt_dev->num_rings_cached++; | ||
211 | rings_cached = virt_dev->num_rings_cached; | ||
212 | virt_dev->ring_cache[rings_cached] = | 212 | virt_dev->ring_cache[rings_cached] = |
213 | virt_dev->eps[ep_index].ring; | 213 | virt_dev->eps[ep_index].ring; |
214 | virt_dev->num_rings_cached++; | ||
214 | xhci_dbg(xhci, "Cached old ring, " | 215 | xhci_dbg(xhci, "Cached old ring, " |
215 | "%d ring%s cached\n", | 216 | "%d ring%s cached\n", |
216 | rings_cached, | 217 | virt_dev->num_rings_cached, |
217 | (rings_cached > 1) ? "s" : ""); | 218 | (virt_dev->num_rings_cached > 1) ? "s" : ""); |
218 | } else { | 219 | } else { |
219 | xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); | 220 | xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); |
220 | xhci_dbg(xhci, "Ring cache full (%d rings), " | 221 | xhci_dbg(xhci, "Ring cache full (%d rings), " |
@@ -548,7 +549,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, | |||
548 | addr = cur_ring->first_seg->dma | | 549 | addr = cur_ring->first_seg->dma | |
549 | SCT_FOR_CTX(SCT_PRI_TR) | | 550 | SCT_FOR_CTX(SCT_PRI_TR) | |
550 | cur_ring->cycle_state; | 551 | cur_ring->cycle_state; |
551 | stream_info->stream_ctx_array[cur_stream].stream_ring = addr; | 552 | stream_info->stream_ctx_array[cur_stream]. |
553 | stream_ring = cpu_to_le64(addr); | ||
552 | xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", | 554 | xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", |
553 | cur_stream, (unsigned long long) addr); | 555 | cur_stream, (unsigned long long) addr); |
554 | 556 | ||
@@ -614,10 +616,10 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, | |||
614 | max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; | 616 | max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; |
615 | xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", | 617 | xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", |
616 | 1 << (max_primary_streams + 1)); | 618 | 1 << (max_primary_streams + 1)); |
617 | ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK; | 619 | ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); |
618 | ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams); | 620 | ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) |
619 | ep_ctx->ep_info |= EP_HAS_LSA; | 621 | | EP_HAS_LSA); |
620 | ep_ctx->deq = stream_info->ctx_array_dma; | 622 | ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); |
621 | } | 623 | } |
622 | 624 | ||
623 | /* | 625 | /* |
@@ -630,10 +632,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, | |||
630 | struct xhci_virt_ep *ep) | 632 | struct xhci_virt_ep *ep) |
631 | { | 633 | { |
632 | dma_addr_t addr; | 634 | dma_addr_t addr; |
633 | ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK; | 635 | ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); |
634 | ep_ctx->ep_info &= ~EP_HAS_LSA; | ||
635 | addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); | 636 | addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); |
636 | ep_ctx->deq = addr | ep->ring->cycle_state; | 637 | ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); |
637 | } | 638 | } |
638 | 639 | ||
639 | /* Frees all stream contexts associated with the endpoint, | 640 | /* Frees all stream contexts associated with the endpoint, |
@@ -781,11 +782,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
781 | dev->udev = udev; | 782 | dev->udev = udev; |
782 | 783 | ||
783 | /* Point to output device context in dcbaa. */ | 784 | /* Point to output device context in dcbaa. */ |
784 | xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; | 785 | xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); |
785 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", | 786 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", |
786 | slot_id, | 787 | slot_id, |
787 | &xhci->dcbaa->dev_context_ptrs[slot_id], | 788 | &xhci->dcbaa->dev_context_ptrs[slot_id], |
788 | (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); | 789 | (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); |
789 | 790 | ||
790 | return 1; | 791 | return 1; |
791 | fail: | 792 | fail: |
@@ -810,8 +811,9 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, | |||
810 | * configured device has reset, so all control transfers should have | 811 | * configured device has reset, so all control transfers should have |
811 | * been completed or cancelled before the reset. | 812 | * been completed or cancelled before the reset. |
812 | */ | 813 | */ |
813 | ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue); | 814 | ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, |
814 | ep0_ctx->deq |= ep_ring->cycle_state; | 815 | ep_ring->enqueue) |
816 | | ep_ring->cycle_state); | ||
815 | } | 817 | } |
816 | 818 | ||
817 | /* | 819 | /* |
@@ -885,24 +887,22 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
885 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); | 887 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); |
886 | 888 | ||
887 | /* 2) New slot context and endpoint 0 context are valid*/ | 889 | /* 2) New slot context and endpoint 0 context are valid*/ |
888 | ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; | 890 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); |
889 | 891 | ||
890 | /* 3) Only the control endpoint is valid - one endpoint context */ | 892 | /* 3) Only the control endpoint is valid - one endpoint context */ |
891 | slot_ctx->dev_info |= LAST_CTX(1); | 893 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route); |
892 | |||
893 | slot_ctx->dev_info |= (u32) udev->route; | ||
894 | switch (udev->speed) { | 894 | switch (udev->speed) { |
895 | case USB_SPEED_SUPER: | 895 | case USB_SPEED_SUPER: |
896 | slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; | 896 | slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS); |
897 | break; | 897 | break; |
898 | case USB_SPEED_HIGH: | 898 | case USB_SPEED_HIGH: |
899 | slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; | 899 | slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS); |
900 | break; | 900 | break; |
901 | case USB_SPEED_FULL: | 901 | case USB_SPEED_FULL: |
902 | slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; | 902 | slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS); |
903 | break; | 903 | break; |
904 | case USB_SPEED_LOW: | 904 | case USB_SPEED_LOW: |
905 | slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; | 905 | slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS); |
906 | break; | 906 | break; |
907 | case USB_SPEED_WIRELESS: | 907 | case USB_SPEED_WIRELESS: |
908 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | 908 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); |
@@ -916,7 +916,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
916 | port_num = xhci_find_real_port_number(xhci, udev); | 916 | port_num = xhci_find_real_port_number(xhci, udev); |
917 | if (!port_num) | 917 | if (!port_num) |
918 | return -EINVAL; | 918 | return -EINVAL; |
919 | slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(port_num); | 919 | slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num)); |
920 | /* Set the port number in the virtual_device to the faked port number */ | 920 | /* Set the port number in the virtual_device to the faked port number */ |
921 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; | 921 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; |
922 | top_dev = top_dev->parent) | 922 | top_dev = top_dev->parent) |
@@ -927,31 +927,31 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
927 | 927 | ||
928 | /* Is this a LS/FS device under an external HS hub? */ | 928 | /* Is this a LS/FS device under an external HS hub? */ |
929 | if (udev->tt && udev->tt->hub->parent) { | 929 | if (udev->tt && udev->tt->hub->parent) { |
930 | slot_ctx->tt_info = udev->tt->hub->slot_id; | 930 | slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id | |
931 | slot_ctx->tt_info |= udev->ttport << 8; | 931 | (udev->ttport << 8)); |
932 | if (udev->tt->multi) | 932 | if (udev->tt->multi) |
933 | slot_ctx->dev_info |= DEV_MTT; | 933 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
934 | } | 934 | } |
935 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); | 935 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); |
936 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); | 936 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); |
937 | 937 | ||
938 | /* Step 4 - ring already allocated */ | 938 | /* Step 4 - ring already allocated */ |
939 | /* Step 5 */ | 939 | /* Step 5 */ |
940 | ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); | 940 | ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP)); |
941 | /* | 941 | /* |
942 | * XXX: Not sure about wireless USB devices. | 942 | * XXX: Not sure about wireless USB devices. |
943 | */ | 943 | */ |
944 | switch (udev->speed) { | 944 | switch (udev->speed) { |
945 | case USB_SPEED_SUPER: | 945 | case USB_SPEED_SUPER: |
946 | ep0_ctx->ep_info2 |= MAX_PACKET(512); | 946 | ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512)); |
947 | break; | 947 | break; |
948 | case USB_SPEED_HIGH: | 948 | case USB_SPEED_HIGH: |
949 | /* USB core guesses at a 64-byte max packet first for FS devices */ | 949 | /* USB core guesses at a 64-byte max packet first for FS devices */ |
950 | case USB_SPEED_FULL: | 950 | case USB_SPEED_FULL: |
951 | ep0_ctx->ep_info2 |= MAX_PACKET(64); | 951 | ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64)); |
952 | break; | 952 | break; |
953 | case USB_SPEED_LOW: | 953 | case USB_SPEED_LOW: |
954 | ep0_ctx->ep_info2 |= MAX_PACKET(8); | 954 | ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8)); |
955 | break; | 955 | break; |
956 | case USB_SPEED_WIRELESS: | 956 | case USB_SPEED_WIRELESS: |
957 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | 957 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); |
@@ -962,12 +962,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
962 | BUG(); | 962 | BUG(); |
963 | } | 963 | } |
964 | /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ | 964 | /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ |
965 | ep0_ctx->ep_info2 |= MAX_BURST(0); | 965 | ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3)); |
966 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); | ||
967 | 966 | ||
968 | ep0_ctx->deq = | 967 | ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | |
969 | dev->eps[0].ring->first_seg->dma; | 968 | dev->eps[0].ring->cycle_state); |
970 | ep0_ctx->deq |= dev->eps[0].ring->cycle_state; | ||
971 | 969 | ||
972 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ | 970 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ |
973 | 971 | ||
@@ -1046,12 +1044,12 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | |||
1046 | break; | 1044 | break; |
1047 | 1045 | ||
1048 | case USB_SPEED_FULL: | 1046 | case USB_SPEED_FULL: |
1049 | if (usb_endpoint_xfer_int(&ep->desc)) { | 1047 | if (usb_endpoint_xfer_isoc(&ep->desc)) { |
1050 | interval = xhci_parse_exponent_interval(udev, ep); | 1048 | interval = xhci_parse_exponent_interval(udev, ep); |
1051 | break; | 1049 | break; |
1052 | } | 1050 | } |
1053 | /* | 1051 | /* |
1054 | * Fall through for isochronous endpoint interval decoding | 1052 | * Fall through for interrupt endpoint interval decoding |
1055 | * since it uses the same rules as low speed interrupt | 1053 | * since it uses the same rules as low speed interrupt |
1056 | * endpoints. | 1054 | * endpoints. |
1057 | */ | 1055 | */ |
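[Editorial note] The hunk above swaps which full-speed transfer type gets the exponent decoding: per the USB 2.0 spec (section 9.6.6), bInterval is exponent-encoded for full-speed isochronous endpoints but a plain frame count for full-speed interrupt endpoints, which follow the low-speed rules as the updated comment says. A standalone illustration of the difference (not kernel code; helper names are made up):

static unsigned int fs_isoc_interval_frames(unsigned int bInterval)
{
	return 1U << (bInterval - 1);	/* exponent encoding: 4 -> 8 frames */
}

static unsigned int fs_int_interval_frames(unsigned int bInterval)
{
	return bInterval;		/* frame count: 4 -> 4 frames */
}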
@@ -1131,10 +1129,10 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, | |||
1131 | return 0; | 1129 | return 0; |
1132 | 1130 | ||
1133 | if (udev->speed == USB_SPEED_SUPER) | 1131 | if (udev->speed == USB_SPEED_SUPER) |
1134 | return ep->ss_ep_comp.wBytesPerInterval; | 1132 | return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); |
1135 | 1133 | ||
1136 | max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); | 1134 | max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize)); |
1137 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; | 1135 | max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11; |
1138 | /* A 0 in max burst means 1 transfer per ESIT */ | 1136 | /* A 0 in max burst means 1 transfer per ESIT */ |
1139 | return max_packet * (max_burst + 1); | 1137 | return max_packet * (max_burst + 1); |
1140 | } | 1138 | } |
@@ -1183,33 +1181,33 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
1183 | } | 1181 | } |
1184 | virt_dev->eps[ep_index].skip = false; | 1182 | virt_dev->eps[ep_index].skip = false; |
1185 | ep_ring = virt_dev->eps[ep_index].new_ring; | 1183 | ep_ring = virt_dev->eps[ep_index].new_ring; |
1186 | ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; | 1184 | ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state); |
1187 | 1185 | ||
1188 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); | 1186 | ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep) |
1189 | ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep)); | 1187 | | EP_MULT(xhci_get_endpoint_mult(udev, ep))); |
1190 | 1188 | ||
1191 | /* FIXME dig Mult and streams info out of ep companion desc */ | 1189 | /* FIXME dig Mult and streams info out of ep companion desc */ |
1192 | 1190 | ||
1193 | /* Allow 3 retries for everything but isoc; | 1191 | /* Allow 3 retries for everything but isoc; |
1194 | * error count = 0 means infinite retries. | 1192 | * CErr shall be set to 0 for Isoch endpoints. |
1195 | */ | 1193 | */ |
1196 | if (!usb_endpoint_xfer_isoc(&ep->desc)) | 1194 | if (!usb_endpoint_xfer_isoc(&ep->desc)) |
1197 | ep_ctx->ep_info2 = ERROR_COUNT(3); | 1195 | ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3)); |
1198 | else | 1196 | else |
1199 | ep_ctx->ep_info2 = ERROR_COUNT(1); | 1197 | ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0)); |
1200 | 1198 | ||
1201 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); | 1199 | ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); |
1202 | 1200 | ||
1203 | /* Set the max packet size and max burst */ | 1201 | /* Set the max packet size and max burst */ |
1204 | switch (udev->speed) { | 1202 | switch (udev->speed) { |
1205 | case USB_SPEED_SUPER: | 1203 | case USB_SPEED_SUPER: |
1206 | max_packet = ep->desc.wMaxPacketSize; | 1204 | max_packet = le16_to_cpu(ep->desc.wMaxPacketSize); |
1207 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | 1205 | ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); |
1208 | /* dig out max burst from ep companion desc */ | 1206 | /* dig out max burst from ep companion desc */ |
1209 | max_packet = ep->ss_ep_comp.bMaxBurst; | 1207 | max_packet = ep->ss_ep_comp.bMaxBurst; |
1210 | if (!max_packet) | 1208 | if (!max_packet) |
1211 | xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n"); | 1209 | xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n"); |
1212 | ep_ctx->ep_info2 |= MAX_BURST(max_packet); | 1210 | ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); |
1213 | break; | 1211 | break; |
1214 | case USB_SPEED_HIGH: | 1212 | case USB_SPEED_HIGH: |
1215 | /* bits 11:12 specify the number of additional transaction | 1213 | /* bits 11:12 specify the number of additional transaction |
@@ -1217,20 +1215,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
1217 | */ | 1215 | */ |
1218 | if (usb_endpoint_xfer_isoc(&ep->desc) || | 1216 | if (usb_endpoint_xfer_isoc(&ep->desc) || |
1219 | usb_endpoint_xfer_int(&ep->desc)) { | 1217 | usb_endpoint_xfer_int(&ep->desc)) { |
1220 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; | 1218 | max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) |
1221 | ep_ctx->ep_info2 |= MAX_BURST(max_burst); | 1219 | & 0x1800) >> 11; |
1220 | ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); | ||
1222 | } | 1221 | } |
1223 | /* Fall through */ | 1222 | /* Fall through */ |
1224 | case USB_SPEED_FULL: | 1223 | case USB_SPEED_FULL: |
1225 | case USB_SPEED_LOW: | 1224 | case USB_SPEED_LOW: |
1226 | max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); | 1225 | max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize)); |
1227 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | 1226 | ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); |
1228 | break; | 1227 | break; |
1229 | default: | 1228 | default: |
1230 | BUG(); | 1229 | BUG(); |
1231 | } | 1230 | } |
1232 | max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); | 1231 | max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); |
1233 | ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload); | 1232 | ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); |
1234 | 1233 | ||
1235 | /* | 1234 | /* |
1236 | * XXX no idea how to calculate the average TRB buffer length for bulk | 1235 | * XXX no idea how to calculate the average TRB buffer length for bulk |
@@ -1246,8 +1245,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
1246 | * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't | 1245 | * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't |
1247 | * use Event Data TRBs, and we don't chain in a link TRB on short | 1246 | * use Event Data TRBs, and we don't chain in a link TRB on short |
1248 | * transfers, we're basically dividing by 1. | 1247 | * transfers, we're basically dividing by 1. |
1248 | * | ||
1249 | * xHCI 1.0 specification indicates that the Average TRB Length should | ||
1250 | * be set to 8 for control endpoints. | ||
1249 | */ | 1251 | */ |
1250 | ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload); | 1252 | if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100) |
1253 | ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8)); | ||
1254 | else | ||
1255 | ep_ctx->tx_info |= | ||
1256 | cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload)); | ||
1251 | 1257 | ||
1252 | /* FIXME Debug endpoint context */ | 1258 | /* FIXME Debug endpoint context */ |
1253 | return 0; | 1259 | return 0; |
@@ -1347,7 +1353,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | |||
1347 | if (!xhci->scratchpad->sp_dma_buffers) | 1353 | if (!xhci->scratchpad->sp_dma_buffers) |
1348 | goto fail_sp4; | 1354 | goto fail_sp4; |
1349 | 1355 | ||
1350 | xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; | 1356 | xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); |
1351 | for (i = 0; i < num_sp; i++) { | 1357 | for (i = 0; i < num_sp; i++) { |
1352 | dma_addr_t dma; | 1358 | dma_addr_t dma; |
1353 | void *buf = pci_alloc_consistent(to_pci_dev(dev), | 1359 | void *buf = pci_alloc_consistent(to_pci_dev(dev), |
@@ -1724,7 +1730,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | |||
1724 | } | 1730 | } |
1725 | 1731 | ||
1726 | static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, | 1732 | static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, |
1727 | u32 __iomem *addr, u8 major_revision) | 1733 | __le32 __iomem *addr, u8 major_revision) |
1728 | { | 1734 | { |
1729 | u32 temp, port_offset, port_count; | 1735 | u32 temp, port_offset, port_count; |
1730 | int i; | 1736 | int i; |
@@ -1789,7 +1795,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, | |||
1789 | */ | 1795 | */ |
1790 | static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) | 1796 | static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) |
1791 | { | 1797 | { |
1792 | u32 __iomem *addr; | 1798 | __le32 __iomem *addr; |
1793 | u32 offset; | 1799 | u32 offset; |
1794 | unsigned int num_ports; | 1800 | unsigned int num_ports; |
1795 | int i, port_index; | 1801 | int i, port_index; |
@@ -2042,8 +2048,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2042 | /* set ring base address and size for each segment table entry */ | 2048 | /* set ring base address and size for each segment table entry */ |
2043 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { | 2049 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { |
2044 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; | 2050 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; |
2045 | entry->seg_addr = seg->dma; | 2051 | entry->seg_addr = cpu_to_le64(seg->dma); |
2046 | entry->seg_size = TRBS_PER_SEGMENT; | 2052 | entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); |
2047 | entry->rsvd = 0; | 2053 | entry->rsvd = 0; |
2048 | seg = seg->next; | 2054 | seg = seg->next; |
2049 | } | 2055 | } |
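[Editorial note] Nearly every other change in xhci-mem.c above, and in xhci-ring.c below, follows one pattern: xHCI contexts and TRBs are little-endian in host memory, so their fields are now annotated __le32/__le64 and every access converts explicitly with cpu_to_le32()/le32_to_cpu() (or the 64-bit variants). A minimal sketch of the read-modify-write form, using placeholder names rather than any real xHCI field:

#include <linux/types.h>	/* __le32, u32 */
#include <asm/byteorder.h>	/* le32_to_cpu(), cpu_to_le32() */

/* 'field', 'clear' and 'set' are placeholders, not xHCI definitions.
 * The arithmetic happens on a CPU-endian temporary and the result is
 * stored back as __le32, which sparse can then check against the
 * annotated struct members.
 */
static inline void update_le32_field(__le32 *field, u32 clear, u32 set)
{
	u32 val = le32_to_cpu(*field);	/* device order -> CPU order */

	val &= ~clear;
	val |= set;
	*field = cpu_to_le32(val);	/* CPU order -> device order */
}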
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index a10494c2f3c7..cbc4d491e626 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -21,6 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/slab.h> | ||
24 | 25 | ||
25 | #include "xhci.h" | 26 | #include "xhci.h" |
26 | 27 | ||
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 7437386a9a50..237a765f8d18 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -100,7 +100,7 @@ static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
100 | return (trb == &seg->trbs[TRBS_PER_SEGMENT]) && | 100 | return (trb == &seg->trbs[TRBS_PER_SEGMENT]) && |
101 | (seg->next == xhci->event_ring->first_seg); | 101 | (seg->next == xhci->event_ring->first_seg); |
102 | else | 102 | else |
103 | return trb->link.control & LINK_TOGGLE; | 103 | return le32_to_cpu(trb->link.control) & LINK_TOGGLE; |
104 | } | 104 | } |
105 | 105 | ||
106 | /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring | 106 | /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring |
@@ -113,13 +113,15 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
113 | if (ring == xhci->event_ring) | 113 | if (ring == xhci->event_ring) |
114 | return trb == &seg->trbs[TRBS_PER_SEGMENT]; | 114 | return trb == &seg->trbs[TRBS_PER_SEGMENT]; |
115 | else | 115 | else |
116 | return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); | 116 | return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK) |
117 | == TRB_TYPE(TRB_LINK); | ||
117 | } | 118 | } |
118 | 119 | ||
119 | static int enqueue_is_link_trb(struct xhci_ring *ring) | 120 | static int enqueue_is_link_trb(struct xhci_ring *ring) |
120 | { | 121 | { |
121 | struct xhci_link_trb *link = &ring->enqueue->link; | 122 | struct xhci_link_trb *link = &ring->enqueue->link; |
122 | return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); | 123 | return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) == |
124 | TRB_TYPE(TRB_LINK)); | ||
123 | } | 125 | } |
124 | 126 | ||
125 | /* Updates trb to point to the next TRB in the ring, and updates seg if the next | 127 | /* Updates trb to point to the next TRB in the ring, and updates seg if the next |
@@ -197,7 +199,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
197 | union xhci_trb *next; | 199 | union xhci_trb *next; |
198 | unsigned long long addr; | 200 | unsigned long long addr; |
199 | 201 | ||
200 | chain = ring->enqueue->generic.field[3] & TRB_CHAIN; | 202 | chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; |
201 | next = ++(ring->enqueue); | 203 | next = ++(ring->enqueue); |
202 | 204 | ||
203 | ring->enq_updates++; | 205 | ring->enq_updates++; |
@@ -223,12 +225,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
223 | * (which may mean the chain bit is cleared). | 225 | * (which may mean the chain bit is cleared). |
224 | */ | 226 | */ |
225 | if (!xhci_link_trb_quirk(xhci)) { | 227 | if (!xhci_link_trb_quirk(xhci)) { |
226 | next->link.control &= ~TRB_CHAIN; | 228 | next->link.control &= |
227 | next->link.control |= chain; | 229 | cpu_to_le32(~TRB_CHAIN); |
230 | next->link.control |= | ||
231 | cpu_to_le32(chain); | ||
228 | } | 232 | } |
229 | /* Give this link TRB to the hardware */ | 233 | /* Give this link TRB to the hardware */ |
230 | wmb(); | 234 | wmb(); |
231 | next->link.control ^= TRB_CYCLE; | 235 | next->link.control ^= cpu_to_le32(TRB_CYCLE); |
232 | } | 236 | } |
233 | /* Toggle the cycle bit after the last ring segment. */ | 237 | /* Toggle the cycle bit after the last ring segment. */ |
234 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { | 238 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { |
@@ -319,7 +323,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, | |||
319 | unsigned int ep_index, | 323 | unsigned int ep_index, |
320 | unsigned int stream_id) | 324 | unsigned int stream_id) |
321 | { | 325 | { |
322 | __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; | 326 | __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; |
323 | struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; | 327 | struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; |
324 | unsigned int ep_state = ep->ep_state; | 328 | unsigned int ep_state = ep->ep_state; |
325 | 329 | ||
@@ -380,7 +384,7 @@ static struct xhci_segment *find_trb_seg( | |||
380 | while (cur_seg->trbs > trb || | 384 | while (cur_seg->trbs > trb || |
381 | &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { | 385 | &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { |
382 | generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; | 386 | generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; |
383 | if (generic_trb->field[3] & LINK_TOGGLE) | 387 | if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE) |
384 | *cycle_state ^= 0x1; | 388 | *cycle_state ^= 0x1; |
385 | cur_seg = cur_seg->next; | 389 | cur_seg = cur_seg->next; |
386 | if (cur_seg == start_seg) | 390 | if (cur_seg == start_seg) |
@@ -447,6 +451,10 @@ static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, | |||
447 | * any link TRBs with the toggle cycle bit set. | 451 | * any link TRBs with the toggle cycle bit set. |
448 | * - Finally we move the dequeue state one TRB further, toggling the cycle bit | 452 | * - Finally we move the dequeue state one TRB further, toggling the cycle bit |
449 | * if we've moved it past a link TRB with the toggle cycle bit set. | 453 | * if we've moved it past a link TRB with the toggle cycle bit set. |
454 | * | ||
455 | * Some of the uses of xhci_generic_trb are grotty, but if they're done | ||
456 | * with correct __le32 accesses they should work fine. Only users of this are | ||
457 | * in here. | ||
450 | */ | 458 | */ |
451 | void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | 459 | void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, |
452 | unsigned int slot_id, unsigned int ep_index, | 460 | unsigned int slot_id, unsigned int ep_index, |
@@ -480,7 +488,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
480 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ | 488 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ |
481 | xhci_dbg(xhci, "Finding endpoint context\n"); | 489 | xhci_dbg(xhci, "Finding endpoint context\n"); |
482 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | 490 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); |
483 | state->new_cycle_state = 0x1 & ep_ctx->deq; | 491 | state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); |
484 | 492 | ||
485 | state->new_deq_ptr = cur_td->last_trb; | 493 | state->new_deq_ptr = cur_td->last_trb; |
486 | xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); | 494 | xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); |
@@ -493,8 +501,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
493 | } | 501 | } |
494 | 502 | ||
495 | trb = &state->new_deq_ptr->generic; | 503 | trb = &state->new_deq_ptr->generic; |
496 | if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && | 504 | if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) == |
497 | (trb->field[3] & LINK_TOGGLE)) | 505 | TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE)) |
498 | state->new_cycle_state ^= 0x1; | 506 | state->new_cycle_state ^= 0x1; |
499 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); | 507 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); |
500 | 508 | ||
@@ -529,12 +537,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
529 | for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; | 537 | for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; |
530 | true; | 538 | true; |
531 | next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { | 539 | next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { |
532 | if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) == | 540 | if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK) |
533 | TRB_TYPE(TRB_LINK)) { | 541 | == TRB_TYPE(TRB_LINK)) { |
534 | /* Unchain any chained Link TRBs, but | 542 | /* Unchain any chained Link TRBs, but |
535 | * leave the pointers intact. | 543 | * leave the pointers intact. |
536 | */ | 544 | */ |
537 | cur_trb->generic.field[3] &= ~TRB_CHAIN; | 545 | cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); |
538 | xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); | 546 | xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); |
539 | xhci_dbg(xhci, "Address = %p (0x%llx dma); " | 547 | xhci_dbg(xhci, "Address = %p (0x%llx dma); " |
540 | "in seg %p (0x%llx dma)\n", | 548 | "in seg %p (0x%llx dma)\n", |
@@ -547,8 +555,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
547 | cur_trb->generic.field[1] = 0; | 555 | cur_trb->generic.field[1] = 0; |
548 | cur_trb->generic.field[2] = 0; | 556 | cur_trb->generic.field[2] = 0; |
549 | /* Preserve only the cycle bit of this TRB */ | 557 | /* Preserve only the cycle bit of this TRB */ |
550 | cur_trb->generic.field[3] &= TRB_CYCLE; | 558 | cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); |
551 | cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP); | 559 | cur_trb->generic.field[3] |= cpu_to_le32( |
560 | TRB_TYPE(TRB_TR_NOOP)); | ||
552 | xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " | 561 | xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " |
553 | "in seg %p (0x%llx dma)\n", | 562 | "in seg %p (0x%llx dma)\n", |
554 | cur_trb, | 563 | cur_trb, |
@@ -662,9 +671,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
662 | struct xhci_dequeue_state deq_state; | 671 | struct xhci_dequeue_state deq_state; |
663 | 672 | ||
664 | if (unlikely(TRB_TO_SUSPEND_PORT( | 673 | if (unlikely(TRB_TO_SUSPEND_PORT( |
665 | xhci->cmd_ring->dequeue->generic.field[3]))) { | 674 | le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) { |
666 | slot_id = TRB_TO_SLOT_ID( | 675 | slot_id = TRB_TO_SLOT_ID( |
667 | xhci->cmd_ring->dequeue->generic.field[3]); | 676 | le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); |
668 | virt_dev = xhci->devs[slot_id]; | 677 | virt_dev = xhci->devs[slot_id]; |
669 | if (virt_dev) | 678 | if (virt_dev) |
670 | handle_cmd_in_cmd_wait_list(xhci, virt_dev, | 679 | handle_cmd_in_cmd_wait_list(xhci, virt_dev, |
@@ -677,8 +686,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
677 | } | 686 | } |
678 | 687 | ||
679 | memset(&deq_state, 0, sizeof(deq_state)); | 688 | memset(&deq_state, 0, sizeof(deq_state)); |
680 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 689 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); |
681 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 690 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
682 | ep = &xhci->devs[slot_id]->eps[ep_index]; | 691 | ep = &xhci->devs[slot_id]->eps[ep_index]; |
683 | 692 | ||
684 | if (list_empty(&ep->cancelled_td_list)) { | 693 | if (list_empty(&ep->cancelled_td_list)) { |
@@ -910,9 +919,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
910 | struct xhci_ep_ctx *ep_ctx; | 919 | struct xhci_ep_ctx *ep_ctx; |
911 | struct xhci_slot_ctx *slot_ctx; | 920 | struct xhci_slot_ctx *slot_ctx; |
912 | 921 | ||
913 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 922 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); |
914 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 923 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
915 | stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]); | 924 | stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); |
916 | dev = xhci->devs[slot_id]; | 925 | dev = xhci->devs[slot_id]; |
917 | 926 | ||
918 | ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); | 927 | ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); |
@@ -928,11 +937,11 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
928 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | 937 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); |
929 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); | 938 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); |
930 | 939 | ||
931 | if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { | 940 | if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) { |
932 | unsigned int ep_state; | 941 | unsigned int ep_state; |
933 | unsigned int slot_state; | 942 | unsigned int slot_state; |
934 | 943 | ||
935 | switch (GET_COMP_CODE(event->status)) { | 944 | switch (GET_COMP_CODE(le32_to_cpu(event->status))) { |
936 | case COMP_TRB_ERR: | 945 | case COMP_TRB_ERR: |
937 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " | 946 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " |
938 | "of stream ID configuration\n"); | 947 | "of stream ID configuration\n"); |
@@ -940,9 +949,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
940 | case COMP_CTX_STATE: | 949 | case COMP_CTX_STATE: |
941 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " | 950 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " |
942 | "to incorrect slot or ep state.\n"); | 951 | "to incorrect slot or ep state.\n"); |
943 | ep_state = ep_ctx->ep_info; | 952 | ep_state = le32_to_cpu(ep_ctx->ep_info); |
944 | ep_state &= EP_STATE_MASK; | 953 | ep_state &= EP_STATE_MASK; |
945 | slot_state = slot_ctx->dev_state; | 954 | slot_state = le32_to_cpu(slot_ctx->dev_state); |
946 | slot_state = GET_SLOT_STATE(slot_state); | 955 | slot_state = GET_SLOT_STATE(slot_state); |
947 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", | 956 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", |
948 | slot_state, ep_state); | 957 | slot_state, ep_state); |
@@ -954,7 +963,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
954 | default: | 963 | default: |
955 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " | 964 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " |
956 | "completion code of %u.\n", | 965 | "completion code of %u.\n", |
957 | GET_COMP_CODE(event->status)); | 966 | GET_COMP_CODE(le32_to_cpu(event->status))); |
958 | break; | 967 | break; |
959 | } | 968 | } |
960 | /* OK what do we do now? The endpoint state is hosed, and we | 969 | /* OK what do we do now? The endpoint state is hosed, and we |
@@ -965,10 +974,10 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
965 | */ | 974 | */ |
966 | } else { | 975 | } else { |
967 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", | 976 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", |
968 | ep_ctx->deq); | 977 | le64_to_cpu(ep_ctx->deq)); |
969 | if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, | 978 | if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, |
970 | dev->eps[ep_index].queued_deq_ptr) == | 979 | dev->eps[ep_index].queued_deq_ptr) == |
971 | (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { | 980 | (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) { |
972 | /* Update the ring's dequeue segment and dequeue pointer | 981 | /* Update the ring's dequeue segment and dequeue pointer |
973 | * to reflect the new position. | 982 | * to reflect the new position. |
974 | */ | 983 | */ |
@@ -997,13 +1006,13 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, | |||
997 | int slot_id; | 1006 | int slot_id; |
998 | unsigned int ep_index; | 1007 | unsigned int ep_index; |
999 | 1008 | ||
1000 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 1009 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); |
1001 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 1010 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
1002 | /* This command will only fail if the endpoint wasn't halted, | 1011 | /* This command will only fail if the endpoint wasn't halted, |
1003 | * but we don't care. | 1012 | * but we don't care. |
1004 | */ | 1013 | */ |
1005 | xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", | 1014 | xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", |
1006 | (unsigned int) GET_COMP_CODE(event->status)); | 1015 | (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status))); |
1007 | 1016 | ||
1008 | /* HW with the reset endpoint quirk needs to have a configure endpoint | 1017 | /* HW with the reset endpoint quirk needs to have a configure endpoint |
1009 | * command complete before the endpoint can be used. Queue that here | 1018 | * command complete before the endpoint can be used. Queue that here |
@@ -1040,8 +1049,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, | |||
1040 | if (xhci->cmd_ring->dequeue != command->command_trb) | 1049 | if (xhci->cmd_ring->dequeue != command->command_trb) |
1041 | return 0; | 1050 | return 0; |
1042 | 1051 | ||
1043 | command->status = | 1052 | command->status = GET_COMP_CODE(le32_to_cpu(event->status)); |
1044 | GET_COMP_CODE(event->status); | ||
1045 | list_del(&command->cmd_list); | 1053 | list_del(&command->cmd_list); |
1046 | if (command->completion) | 1054 | if (command->completion) |
1047 | complete(command->completion); | 1055 | complete(command->completion); |
@@ -1053,7 +1061,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, | |||
1053 | static void handle_cmd_completion(struct xhci_hcd *xhci, | 1061 | static void handle_cmd_completion(struct xhci_hcd *xhci, |
1054 | struct xhci_event_cmd *event) | 1062 | struct xhci_event_cmd *event) |
1055 | { | 1063 | { |
1056 | int slot_id = TRB_TO_SLOT_ID(event->flags); | 1064 | int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
1057 | u64 cmd_dma; | 1065 | u64 cmd_dma; |
1058 | dma_addr_t cmd_dequeue_dma; | 1066 | dma_addr_t cmd_dequeue_dma; |
1059 | struct xhci_input_control_ctx *ctrl_ctx; | 1067 | struct xhci_input_control_ctx *ctrl_ctx; |
@@ -1062,7 +1070,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1062 | struct xhci_ring *ep_ring; | 1070 | struct xhci_ring *ep_ring; |
1063 | unsigned int ep_state; | 1071 | unsigned int ep_state; |
1064 | 1072 | ||
1065 | cmd_dma = event->cmd_trb; | 1073 | cmd_dma = le64_to_cpu(event->cmd_trb); |
1066 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | 1074 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, |
1067 | xhci->cmd_ring->dequeue); | 1075 | xhci->cmd_ring->dequeue); |
1068 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ | 1076 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ |
@@ -1075,9 +1083,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1075 | xhci->error_bitmask |= 1 << 5; | 1083 | xhci->error_bitmask |= 1 << 5; |
1076 | return; | 1084 | return; |
1077 | } | 1085 | } |
1078 | switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) { | 1086 | switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) |
1087 | & TRB_TYPE_BITMASK) { | ||
1079 | case TRB_TYPE(TRB_ENABLE_SLOT): | 1088 | case TRB_TYPE(TRB_ENABLE_SLOT): |
1080 | if (GET_COMP_CODE(event->status) == COMP_SUCCESS) | 1089 | if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS) |
1081 | xhci->slot_id = slot_id; | 1090 | xhci->slot_id = slot_id; |
1082 | else | 1091 | else |
1083 | xhci->slot_id = 0; | 1092 | xhci->slot_id = 0; |
@@ -1102,7 +1111,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1102 | ctrl_ctx = xhci_get_input_control_ctx(xhci, | 1111 | ctrl_ctx = xhci_get_input_control_ctx(xhci, |
1103 | virt_dev->in_ctx); | 1112 | virt_dev->in_ctx); |
1104 | /* Input ctx add_flags are the endpoint index plus one */ | 1113 | /* Input ctx add_flags are the endpoint index plus one */ |
1105 | ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; | 1114 | ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1; |
1106 | /* A usb_set_interface() call directly after clearing a halted | 1115 | /* A usb_set_interface() call directly after clearing a halted |
1107 | * condition may race on this quirky hardware. Not worth | 1116 | * condition may race on this quirky hardware. Not worth |
1108 | * worrying about, since this is prototype hardware. Not sure | 1117 | * worrying about, since this is prototype hardware. Not sure |
@@ -1111,8 +1120,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1111 | */ | 1120 | */ |
1112 | if (xhci->quirks & XHCI_RESET_EP_QUIRK && | 1121 | if (xhci->quirks & XHCI_RESET_EP_QUIRK && |
1113 | ep_index != (unsigned int) -1 && | 1122 | ep_index != (unsigned int) -1 && |
1114 | ctrl_ctx->add_flags - SLOT_FLAG == | 1123 | le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG == |
1115 | ctrl_ctx->drop_flags) { | 1124 | le32_to_cpu(ctrl_ctx->drop_flags)) { |
1116 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; | 1125 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; |
1117 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | 1126 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
1118 | if (!(ep_state & EP_HALTED)) | 1127 | if (!(ep_state & EP_HALTED)) |
@@ -1129,18 +1138,18 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1129 | bandwidth_change: | 1138 | bandwidth_change: |
1130 | xhci_dbg(xhci, "Completed config ep cmd\n"); | 1139 | xhci_dbg(xhci, "Completed config ep cmd\n"); |
1131 | xhci->devs[slot_id]->cmd_status = | 1140 | xhci->devs[slot_id]->cmd_status = |
1132 | GET_COMP_CODE(event->status); | 1141 | GET_COMP_CODE(le32_to_cpu(event->status)); |
1133 | complete(&xhci->devs[slot_id]->cmd_completion); | 1142 | complete(&xhci->devs[slot_id]->cmd_completion); |
1134 | break; | 1143 | break; |
1135 | case TRB_TYPE(TRB_EVAL_CONTEXT): | 1144 | case TRB_TYPE(TRB_EVAL_CONTEXT): |
1136 | virt_dev = xhci->devs[slot_id]; | 1145 | virt_dev = xhci->devs[slot_id]; |
1137 | if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) | 1146 | if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) |
1138 | break; | 1147 | break; |
1139 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); | 1148 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); |
1140 | complete(&xhci->devs[slot_id]->cmd_completion); | 1149 | complete(&xhci->devs[slot_id]->cmd_completion); |
1141 | break; | 1150 | break; |
1142 | case TRB_TYPE(TRB_ADDR_DEV): | 1151 | case TRB_TYPE(TRB_ADDR_DEV): |
1143 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); | 1152 | xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); |
1144 | complete(&xhci->addr_dev); | 1153 | complete(&xhci->addr_dev); |
1145 | break; | 1154 | break; |
1146 | case TRB_TYPE(TRB_STOP_RING): | 1155 | case TRB_TYPE(TRB_STOP_RING): |
@@ -1157,7 +1166,7 @@ bandwidth_change: | |||
1157 | case TRB_TYPE(TRB_RESET_DEV): | 1166 | case TRB_TYPE(TRB_RESET_DEV): |
1158 | xhci_dbg(xhci, "Completed reset device command.\n"); | 1167 | xhci_dbg(xhci, "Completed reset device command.\n"); |
1159 | slot_id = TRB_TO_SLOT_ID( | 1168 | slot_id = TRB_TO_SLOT_ID( |
1160 | xhci->cmd_ring->dequeue->generic.field[3]); | 1169 | le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); |
1161 | virt_dev = xhci->devs[slot_id]; | 1170 | virt_dev = xhci->devs[slot_id]; |
1162 | if (virt_dev) | 1171 | if (virt_dev) |
1163 | handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); | 1172 | handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); |
@@ -1171,8 +1180,8 @@ bandwidth_change: | |||
1171 | break; | 1180 | break; |
1172 | } | 1181 | } |
1173 | xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", | 1182 | xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", |
1174 | NEC_FW_MAJOR(event->status), | 1183 | NEC_FW_MAJOR(le32_to_cpu(event->status)), |
1175 | NEC_FW_MINOR(event->status)); | 1184 | NEC_FW_MINOR(le32_to_cpu(event->status))); |
1176 | break; | 1185 | break; |
1177 | default: | 1186 | default: |
1178 | /* Skip over unknown commands on the event ring */ | 1187 | /* Skip over unknown commands on the event ring */ |
@@ -1187,7 +1196,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci, | |||
1187 | { | 1196 | { |
1188 | u32 trb_type; | 1197 | u32 trb_type; |
1189 | 1198 | ||
1190 | trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]); | 1199 | trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3])); |
1191 | xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); | 1200 | xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); |
1192 | if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) | 1201 | if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) |
1193 | handle_cmd_completion(xhci, &event->event_cmd); | 1202 | handle_cmd_completion(xhci, &event->event_cmd); |
@@ -1241,15 +1250,15 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
1241 | unsigned int faked_port_index; | 1250 | unsigned int faked_port_index; |
1242 | u8 major_revision; | 1251 | u8 major_revision; |
1243 | struct xhci_bus_state *bus_state; | 1252 | struct xhci_bus_state *bus_state; |
1244 | u32 __iomem **port_array; | 1253 | __le32 __iomem **port_array; |
1245 | bool bogus_port_status = false; | 1254 | bool bogus_port_status = false; |
1246 | 1255 | ||
1247 | /* Port status change events always have a successful completion code */ | 1256 | /* Port status change events always have a successful completion code */ |
1248 | if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { | 1257 | if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) { |
1249 | xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); | 1258 | xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); |
1250 | xhci->error_bitmask |= 1 << 8; | 1259 | xhci->error_bitmask |= 1 << 8; |
1251 | } | 1260 | } |
1252 | port_id = GET_PORT_ID(event->generic.field[0]); | 1261 | port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); |
1253 | xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); | 1262 | xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); |
1254 | 1263 | ||
1255 | max_ports = HCS_MAX_PORTS(xhci->hcs_params1); | 1264 | max_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
@@ -1456,7 +1465,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, | |||
1456 | * endpoint anyway. Check if a babble halted the | 1465 | * endpoint anyway. Check if a babble halted the |
1457 | * endpoint. | 1466 | * endpoint. |
1458 | */ | 1467 | */ |
1459 | if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED) | 1468 | if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED) |
1460 | return 1; | 1469 | return 1; |
1461 | 1470 | ||
1462 | return 0; | 1471 | return 0; |
@@ -1494,12 +1503,12 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1494 | struct urb_priv *urb_priv; | 1503 | struct urb_priv *urb_priv; |
1495 | u32 trb_comp_code; | 1504 | u32 trb_comp_code; |
1496 | 1505 | ||
1497 | slot_id = TRB_TO_SLOT_ID(event->flags); | 1506 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
1498 | xdev = xhci->devs[slot_id]; | 1507 | xdev = xhci->devs[slot_id]; |
1499 | ep_index = TRB_TO_EP_ID(event->flags) - 1; | 1508 | ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; |
1500 | ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); | 1509 | ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); |
1501 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | 1510 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
1502 | trb_comp_code = GET_COMP_CODE(event->transfer_len); | 1511 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
1503 | 1512 | ||
1504 | if (skip) | 1513 | if (skip) |
1505 | goto td_cleanup; | 1514 | goto td_cleanup; |
@@ -1602,12 +1611,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1602 | struct xhci_ep_ctx *ep_ctx; | 1611 | struct xhci_ep_ctx *ep_ctx; |
1603 | u32 trb_comp_code; | 1612 | u32 trb_comp_code; |
1604 | 1613 | ||
1605 | slot_id = TRB_TO_SLOT_ID(event->flags); | 1614 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
1606 | xdev = xhci->devs[slot_id]; | 1615 | xdev = xhci->devs[slot_id]; |
1607 | ep_index = TRB_TO_EP_ID(event->flags) - 1; | 1616 | ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; |
1608 | ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); | 1617 | ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); |
1609 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | 1618 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
1610 | trb_comp_code = GET_COMP_CODE(event->transfer_len); | 1619 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
1611 | 1620 | ||
1612 | xhci_debug_trb(xhci, xhci->event_ring->dequeue); | 1621 | xhci_debug_trb(xhci, xhci->event_ring->dequeue); |
1613 | switch (trb_comp_code) { | 1622 | switch (trb_comp_code) { |
@@ -1632,6 +1641,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1632 | else | 1641 | else |
1633 | *status = 0; | 1642 | *status = 0; |
1634 | break; | 1643 | break; |
1644 | case COMP_STOP_INVAL: | ||
1645 | case COMP_STOP: | ||
1646 | return finish_td(xhci, td, event_trb, event, ep, status, false); | ||
1635 | default: | 1647 | default: |
1636 | if (!xhci_requires_manual_halt_cleanup(xhci, | 1648 | if (!xhci_requires_manual_halt_cleanup(xhci, |
1637 | ep_ctx, trb_comp_code)) | 1649 | ep_ctx, trb_comp_code)) |
@@ -1646,7 +1658,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1646 | event_trb != td->last_trb) | 1658 | event_trb != td->last_trb) |
1647 | td->urb->actual_length = | 1659 | td->urb->actual_length = |
1648 | td->urb->transfer_buffer_length | 1660 | td->urb->transfer_buffer_length |
1649 | - TRB_LEN(event->transfer_len); | 1661 | - TRB_LEN(le32_to_cpu(event->transfer_len)); |
1650 | else | 1662 | else |
1651 | td->urb->actual_length = 0; | 1663 | td->urb->actual_length = 0; |
1652 | 1664 | ||
@@ -1676,15 +1688,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1676 | } | 1688 | } |
1677 | } else { | 1689 | } else { |
1678 | /* Maybe the event was for the data stage? */ | 1690 | /* Maybe the event was for the data stage? */ |
1679 | if (trb_comp_code != COMP_STOP_INVAL) { | 1691 | td->urb->actual_length = |
1680 | /* We didn't stop on a link TRB in the middle */ | 1692 | td->urb->transfer_buffer_length - |
1681 | td->urb->actual_length = | 1693 | TRB_LEN(le32_to_cpu(event->transfer_len)); |
1682 | td->urb->transfer_buffer_length - | 1694 | xhci_dbg(xhci, "Waiting for status " |
1683 | TRB_LEN(event->transfer_len); | 1695 | "stage event\n"); |
1684 | xhci_dbg(xhci, "Waiting for status " | 1696 | return 0; |
1685 | "stage event\n"); | ||
1686 | return 0; | ||
1687 | } | ||
1688 | } | 1697 | } |
1689 | } | 1698 | } |
1690 | 1699 | ||
@@ -1708,8 +1717,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1708 | u32 trb_comp_code; | 1717 | u32 trb_comp_code; |
1709 | bool skip_td = false; | 1718 | bool skip_td = false; |
1710 | 1719 | ||
1711 | ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); | 1720 | ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); |
1712 | trb_comp_code = GET_COMP_CODE(event->transfer_len); | 1721 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
1713 | urb_priv = td->urb->hcpriv; | 1722 | urb_priv = td->urb->hcpriv; |
1714 | idx = urb_priv->td_cnt; | 1723 | idx = urb_priv->td_cnt; |
1715 | frame = &td->urb->iso_frame_desc[idx]; | 1724 | frame = &td->urb->iso_frame_desc[idx]; |
@@ -1752,15 +1761,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1752 | for (cur_trb = ep_ring->dequeue, | 1761 | for (cur_trb = ep_ring->dequeue, |
1753 | cur_seg = ep_ring->deq_seg; cur_trb != event_trb; | 1762 | cur_seg = ep_ring->deq_seg; cur_trb != event_trb; |
1754 | next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { | 1763 | next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { |
1755 | if ((cur_trb->generic.field[3] & | 1764 | if ((le32_to_cpu(cur_trb->generic.field[3]) & |
1756 | TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && | 1765 | TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && |
1757 | (cur_trb->generic.field[3] & | 1766 | (le32_to_cpu(cur_trb->generic.field[3]) & |
1758 | TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) | 1767 | TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) |
1759 | len += | 1768 | len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); |
1760 | TRB_LEN(cur_trb->generic.field[2]); | ||
1761 | } | 1769 | } |
1762 | len += TRB_LEN(cur_trb->generic.field[2]) - | 1770 | len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - |
1763 | TRB_LEN(event->transfer_len); | 1771 | TRB_LEN(le32_to_cpu(event->transfer_len)); |
1764 | 1772 | ||
1765 | if (trb_comp_code != COMP_STOP_INVAL) { | 1773 | if (trb_comp_code != COMP_STOP_INVAL) { |
1766 | frame->actual_length = len; | 1774 | frame->actual_length = len; |
@@ -1815,8 +1823,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1815 | struct xhci_segment *cur_seg; | 1823 | struct xhci_segment *cur_seg; |
1816 | u32 trb_comp_code; | 1824 | u32 trb_comp_code; |
1817 | 1825 | ||
1818 | ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); | 1826 | ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); |
1819 | trb_comp_code = GET_COMP_CODE(event->transfer_len); | 1827 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
1820 | 1828 | ||
1821 | switch (trb_comp_code) { | 1829 | switch (trb_comp_code) { |
1822 | case COMP_SUCCESS: | 1830 | case COMP_SUCCESS: |
@@ -1852,18 +1860,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1852 | "%d bytes untransferred\n", | 1860 | "%d bytes untransferred\n", |
1853 | td->urb->ep->desc.bEndpointAddress, | 1861 | td->urb->ep->desc.bEndpointAddress, |
1854 | td->urb->transfer_buffer_length, | 1862 | td->urb->transfer_buffer_length, |
1855 | TRB_LEN(event->transfer_len)); | 1863 | TRB_LEN(le32_to_cpu(event->transfer_len))); |
1856 | /* Fast path - was this the last TRB in the TD for this URB? */ | 1864 | /* Fast path - was this the last TRB in the TD for this URB? */ |
1857 | if (event_trb == td->last_trb) { | 1865 | if (event_trb == td->last_trb) { |
1858 | if (TRB_LEN(event->transfer_len) != 0) { | 1866 | if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { |
1859 | td->urb->actual_length = | 1867 | td->urb->actual_length = |
1860 | td->urb->transfer_buffer_length - | 1868 | td->urb->transfer_buffer_length - |
1861 | TRB_LEN(event->transfer_len); | 1869 | TRB_LEN(le32_to_cpu(event->transfer_len)); |
1862 | if (td->urb->transfer_buffer_length < | 1870 | if (td->urb->transfer_buffer_length < |
1863 | td->urb->actual_length) { | 1871 | td->urb->actual_length) { |
1864 | xhci_warn(xhci, "HC gave bad length " | 1872 | xhci_warn(xhci, "HC gave bad length " |
1865 | "of %d bytes left\n", | 1873 | "of %d bytes left\n", |
1866 | TRB_LEN(event->transfer_len)); | 1874 | TRB_LEN(le32_to_cpu(event->transfer_len))); |
1867 | td->urb->actual_length = 0; | 1875 | td->urb->actual_length = 0; |
1868 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | 1876 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) |
1869 | *status = -EREMOTEIO; | 1877 | *status = -EREMOTEIO; |
@@ -1894,20 +1902,20 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1894 | for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; | 1902 | for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; |
1895 | cur_trb != event_trb; | 1903 | cur_trb != event_trb; |
1896 | next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { | 1904 | next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { |
1897 | if ((cur_trb->generic.field[3] & | 1905 | if ((le32_to_cpu(cur_trb->generic.field[3]) & |
1898 | TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && | 1906 | TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && |
1899 | (cur_trb->generic.field[3] & | 1907 | (le32_to_cpu(cur_trb->generic.field[3]) & |
1900 | TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) | 1908 | TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) |
1901 | td->urb->actual_length += | 1909 | td->urb->actual_length += |
1902 | TRB_LEN(cur_trb->generic.field[2]); | 1910 | TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); |
1903 | } | 1911 | } |
1904 | /* If the ring didn't stop on a Link or No-op TRB, add | 1912 | /* If the ring didn't stop on a Link or No-op TRB, add |
1905 | * in the actual bytes transferred from the Normal TRB | 1913 | * in the actual bytes transferred from the Normal TRB |
1906 | */ | 1914 | */ |
1907 | if (trb_comp_code != COMP_STOP_INVAL) | 1915 | if (trb_comp_code != COMP_STOP_INVAL) |
1908 | td->urb->actual_length += | 1916 | td->urb->actual_length += |
1909 | TRB_LEN(cur_trb->generic.field[2]) - | 1917 | TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - |
1910 | TRB_LEN(event->transfer_len); | 1918 | TRB_LEN(le32_to_cpu(event->transfer_len)); |
1911 | } | 1919 | } |
1912 | 1920 | ||
1913 | return finish_td(xhci, td, event_trb, event, ep, status, false); | 1921 | return finish_td(xhci, td, event_trb, event, ep, status, false); |
@@ -1937,7 +1945,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
1937 | u32 trb_comp_code; | 1945 | u32 trb_comp_code; |
1938 | int ret = 0; | 1946 | int ret = 0; |
1939 | 1947 | ||
1940 | slot_id = TRB_TO_SLOT_ID(event->flags); | 1948 | slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); |
1941 | xdev = xhci->devs[slot_id]; | 1949 | xdev = xhci->devs[slot_id]; |
1942 | if (!xdev) { | 1950 | if (!xdev) { |
1943 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); | 1951 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); |
@@ -1945,20 +1953,21 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
1945 | } | 1953 | } |
1946 | 1954 | ||
1947 | /* Endpoint ID is 1 based, our index is zero based */ | 1955 | /* Endpoint ID is 1 based, our index is zero based */ |
1948 | ep_index = TRB_TO_EP_ID(event->flags) - 1; | 1956 | ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; |
1949 | xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); | 1957 | xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); |
1950 | ep = &xdev->eps[ep_index]; | 1958 | ep = &xdev->eps[ep_index]; |
1951 | ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); | 1959 | ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); |
1952 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | 1960 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
1953 | if (!ep_ring || | 1961 | if (!ep_ring || |
1954 | (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | 1962 | (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == |
1963 | EP_STATE_DISABLED) { | ||
1955 | xhci_err(xhci, "ERROR Transfer event for disabled endpoint " | 1964 | xhci_err(xhci, "ERROR Transfer event for disabled endpoint " |
1956 | "or incorrect stream ring\n"); | 1965 | "or incorrect stream ring\n"); |
1957 | return -ENODEV; | 1966 | return -ENODEV; |
1958 | } | 1967 | } |
1959 | 1968 | ||
1960 | event_dma = event->buffer; | 1969 | event_dma = le64_to_cpu(event->buffer); |
1961 | trb_comp_code = GET_COMP_CODE(event->transfer_len); | 1970 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
1962 | /* Look for common error cases */ | 1971 | /* Look for common error cases */ |
1963 | switch (trb_comp_code) { | 1972 | switch (trb_comp_code) { |
1964 | /* Skip codes that require special handling depending on | 1973 | /* Skip codes that require special handling depending on |
@@ -2011,14 +2020,16 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
2011 | if (!list_empty(&ep_ring->td_list)) | 2020 | if (!list_empty(&ep_ring->td_list)) |
2012 | xhci_dbg(xhci, "Underrun Event for slot %d ep %d " | 2021 | xhci_dbg(xhci, "Underrun Event for slot %d ep %d " |
2013 | "still with TDs queued?\n", | 2022 | "still with TDs queued?\n", |
2014 | TRB_TO_SLOT_ID(event->flags), ep_index); | 2023 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2024 | ep_index); | ||
2015 | goto cleanup; | 2025 | goto cleanup; |
2016 | case COMP_OVERRUN: | 2026 | case COMP_OVERRUN: |
2017 | xhci_dbg(xhci, "overrun event on endpoint\n"); | 2027 | xhci_dbg(xhci, "overrun event on endpoint\n"); |
2018 | if (!list_empty(&ep_ring->td_list)) | 2028 | if (!list_empty(&ep_ring->td_list)) |
2019 | xhci_dbg(xhci, "Overrun Event for slot %d ep %d " | 2029 | xhci_dbg(xhci, "Overrun Event for slot %d ep %d " |
2020 | "still with TDs queued?\n", | 2030 | "still with TDs queued?\n", |
2021 | TRB_TO_SLOT_ID(event->flags), ep_index); | 2031 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2032 | ep_index); | ||
2022 | goto cleanup; | 2033 | goto cleanup; |
2023 | case COMP_MISSED_INT: | 2034 | case COMP_MISSED_INT: |
2024 | /* | 2035 | /* |
@@ -2047,9 +2058,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
2047 | if (list_empty(&ep_ring->td_list)) { | 2058 | if (list_empty(&ep_ring->td_list)) { |
2048 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " | 2059 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " |
2049 | "with no TDs queued?\n", | 2060 | "with no TDs queued?\n", |
2050 | TRB_TO_SLOT_ID(event->flags), ep_index); | 2061 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), |
2062 | ep_index); | ||
2051 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", | 2063 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", |
2052 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); | 2064 | (unsigned int) (le32_to_cpu(event->flags) |
2065 | & TRB_TYPE_BITMASK)>>10); | ||
2053 | xhci_print_trb_offsets(xhci, (union xhci_trb *) event); | 2066 | xhci_print_trb_offsets(xhci, (union xhci_trb *) event); |
2054 | if (ep->skip) { | 2067 | if (ep->skip) { |
2055 | ep->skip = false; | 2068 | ep->skip = false; |
@@ -2092,7 +2105,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
2092 | * corresponding TD has been cancelled. Just ignore | 2105 | * corresponding TD has been cancelled. Just ignore |
2093 | * the TD. | 2106 | * the TD. |
2094 | */ | 2107 | */ |
2095 | if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) | 2108 | if ((le32_to_cpu(event_trb->generic.field[3]) |
2109 | & TRB_TYPE_BITMASK) | ||
2096 | == TRB_TYPE(TRB_TR_NOOP)) { | 2110 | == TRB_TYPE(TRB_TR_NOOP)) { |
2097 | xhci_dbg(xhci, | 2111 | xhci_dbg(xhci, |
2098 | "event_trb is a no-op TRB. Skip it\n"); | 2112 | "event_trb is a no-op TRB. Skip it\n"); |
@@ -2157,8 +2171,10 @@ cleanup: | |||
2157 | /* | 2171 | /* |
2158 | * This function handles all OS-owned events on the event ring. It may drop | 2172 | * This function handles all OS-owned events on the event ring. It may drop |
2159 | * xhci->lock between event processing (e.g. to pass up port status changes). | 2173 | * xhci->lock between event processing (e.g. to pass up port status changes). |
2174 | * Returns >0 for "possibly more events to process" (caller should call again), | ||
2175 | * otherwise 0 if done. In future, <0 returns should indicate error code. | ||
2160 | */ | 2176 | */ |
2161 | static void xhci_handle_event(struct xhci_hcd *xhci) | 2177 | static int xhci_handle_event(struct xhci_hcd *xhci) |
2162 | { | 2178 | { |
2163 | union xhci_trb *event; | 2179 | union xhci_trb *event; |
2164 | int update_ptrs = 1; | 2180 | int update_ptrs = 1; |
@@ -2167,20 +2183,25 @@ static void xhci_handle_event(struct xhci_hcd *xhci) | |||
2167 | xhci_dbg(xhci, "In %s\n", __func__); | 2183 | xhci_dbg(xhci, "In %s\n", __func__); |
2168 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { | 2184 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { |
2169 | xhci->error_bitmask |= 1 << 1; | 2185 | xhci->error_bitmask |= 1 << 1; |
2170 | return; | 2186 | return 0; |
2171 | } | 2187 | } |
2172 | 2188 | ||
2173 | event = xhci->event_ring->dequeue; | 2189 | event = xhci->event_ring->dequeue; |
2174 | /* Does the HC or OS own the TRB? */ | 2190 | /* Does the HC or OS own the TRB? */ |
2175 | if ((event->event_cmd.flags & TRB_CYCLE) != | 2191 | if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != |
2176 | xhci->event_ring->cycle_state) { | 2192 | xhci->event_ring->cycle_state) { |
2177 | xhci->error_bitmask |= 1 << 2; | 2193 | xhci->error_bitmask |= 1 << 2; |
2178 | return; | 2194 | return 0; |
2179 | } | 2195 | } |
2180 | xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); | 2196 | xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); |
2181 | 2197 | ||
2198 | /* | ||
2199 | * Barrier between reading the TRB_CYCLE (valid) flag above and any | ||
2200 | * speculative reads of the event's flags/data below. | ||
2201 | */ | ||
2202 | rmb(); | ||
2182 | /* FIXME: Handle more event types. */ | 2203 | /* FIXME: Handle more event types. */ |
2183 | switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { | 2204 | switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { |
2184 | case TRB_TYPE(TRB_COMPLETION): | 2205 | case TRB_TYPE(TRB_COMPLETION): |
2185 | xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); | 2206 | xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); |
2186 | handle_cmd_completion(xhci, &event->event_cmd); | 2207 | handle_cmd_completion(xhci, &event->event_cmd); |
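The rmb() added in the hunk above orders the read of the event TRB's cycle bit before reads of the event's other fields, so a speculative load can never pick up stale data from the previous lap of the ring. A minimal userspace C11 sketch of the same consumer-side ordering — struct evt, ring_consume() and the "bit 0 = cycle bit" layout are illustrative assumptions, not xHCI structures, and an acquire load stands in for the rmb():

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>

    struct evt {
            uint32_t data[3];
            _Atomic uint32_t flags;         /* bit 0 stands in for the cycle bit */
    };

    /* Copy the event out only if its cycle bit matches our cycle state. */
    static int ring_consume(struct evt *e, uint32_t cycle_state, uint32_t out[3])
    {
            /* Acquire load of the flag word plays the role of the rmb(). */
            uint32_t flags = atomic_load_explicit(&e->flags, memory_order_acquire);

            if ((flags & 1) != (cycle_state & 1))
                    return 0;               /* still owned by the producer */

            for (size_t i = 0; i < 3; i++)
                    out[i] = e->data[i];    /* ordered after the flag read */
            return 1;
    }

    int main(void)
    {
            struct evt e;
            uint32_t out[3];

            e.data[0] = 1; e.data[1] = 2; e.data[2] = 3;
            atomic_store_explicit(&e.flags, 1, memory_order_release); /* producer side */

            return ring_consume(&e, 1, out) ? 0 : 1;
    }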
@@ -2202,7 +2223,8 @@ static void xhci_handle_event(struct xhci_hcd *xhci) | |||
2202 | update_ptrs = 0; | 2223 | update_ptrs = 0; |
2203 | break; | 2224 | break; |
2204 | default: | 2225 | default: |
2205 | if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48)) | 2226 | if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= |
2227 | TRB_TYPE(48)) | ||
2206 | handle_vendor_event(xhci, event); | 2228 | handle_vendor_event(xhci, event); |
2207 | else | 2229 | else |
2208 | xhci->error_bitmask |= 1 << 3; | 2230 | xhci->error_bitmask |= 1 << 3; |
@@ -2213,15 +2235,17 @@ static void xhci_handle_event(struct xhci_hcd *xhci) | |||
2213 | if (xhci->xhc_state & XHCI_STATE_DYING) { | 2235 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
2214 | xhci_dbg(xhci, "xHCI host dying, returning from " | 2236 | xhci_dbg(xhci, "xHCI host dying, returning from " |
2215 | "event handler.\n"); | 2237 | "event handler.\n"); |
2216 | return; | 2238 | return 0; |
2217 | } | 2239 | } |
2218 | 2240 | ||
2219 | if (update_ptrs) | 2241 | if (update_ptrs) |
2220 | /* Update SW event ring dequeue pointer */ | 2242 | /* Update SW event ring dequeue pointer */ |
2221 | inc_deq(xhci, xhci->event_ring, true); | 2243 | inc_deq(xhci, xhci->event_ring, true); |
2222 | 2244 | ||
2223 | /* Are there more items on the event ring? */ | 2245 | /* Are there more items on the event ring? Caller will call us again to |
2224 | xhci_handle_event(xhci); | 2246 | * check. |
2247 | */ | ||
2248 | return 1; | ||
2225 | } | 2249 | } |
2226 | 2250 | ||
2227 | /* | 2251 | /* |
@@ -2252,12 +2276,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) | |||
2252 | xhci_dbg(xhci, "op reg status = %08x\n", status); | 2276 | xhci_dbg(xhci, "op reg status = %08x\n", status); |
2253 | xhci_dbg(xhci, "Event ring dequeue ptr:\n"); | 2277 | xhci_dbg(xhci, "Event ring dequeue ptr:\n"); |
2254 | xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", | 2278 | xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", |
2255 | (unsigned long long) | 2279 | (unsigned long long) |
2256 | xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), | 2280 | xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), |
2257 | lower_32_bits(trb->link.segment_ptr), | 2281 | lower_32_bits(le64_to_cpu(trb->link.segment_ptr)), |
2258 | upper_32_bits(trb->link.segment_ptr), | 2282 | upper_32_bits(le64_to_cpu(trb->link.segment_ptr)), |
2259 | (unsigned int) trb->link.intr_target, | 2283 | (unsigned int) le32_to_cpu(trb->link.intr_target), |
2260 | (unsigned int) trb->link.control); | 2284 | (unsigned int) le32_to_cpu(trb->link.control)); |
2261 | 2285 | ||
2262 | if (status & STS_FATAL) { | 2286 | if (status & STS_FATAL) { |
2263 | xhci_warn(xhci, "WARNING: Host System Error\n"); | 2287 | xhci_warn(xhci, "WARNING: Host System Error\n"); |
@@ -2303,7 +2327,7 @@ hw_died: | |||
2303 | /* FIXME this should be a delayed service routine | 2327 | /* FIXME this should be a delayed service routine |
2304 | * that clears the EHB. | 2328 | * that clears the EHB. |
2305 | */ | 2329 | */ |
2306 | xhci_handle_event(xhci); | 2330 | while (xhci_handle_event(xhci) > 0) {} |
2307 | 2331 | ||
2308 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); | 2332 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
2309 | /* If necessary, update the HW's version of the event ring deq ptr. */ | 2333 | /* If necessary, update the HW's version of the event ring deq ptr. */ |
@@ -2358,10 +2382,10 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
2358 | struct xhci_generic_trb *trb; | 2382 | struct xhci_generic_trb *trb; |
2359 | 2383 | ||
2360 | trb = &ring->enqueue->generic; | 2384 | trb = &ring->enqueue->generic; |
2361 | trb->field[0] = field1; | 2385 | trb->field[0] = cpu_to_le32(field1); |
2362 | trb->field[1] = field2; | 2386 | trb->field[1] = cpu_to_le32(field2); |
2363 | trb->field[2] = field3; | 2387 | trb->field[2] = cpu_to_le32(field3); |
2364 | trb->field[3] = field4; | 2388 | trb->field[3] = cpu_to_le32(field4); |
2365 | inc_enq(xhci, ring, consumer, more_trbs_coming); | 2389 | inc_enq(xhci, ring, consumer, more_trbs_coming); |
2366 | } | 2390 | } |
2367 | 2391 | ||
@@ -2414,17 +2438,16 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
2414 | next = ring->enqueue; | 2438 | next = ring->enqueue; |
2415 | 2439 | ||
2416 | while (last_trb(xhci, ring, ring->enq_seg, next)) { | 2440 | while (last_trb(xhci, ring, ring->enq_seg, next)) { |
2417 | |||
2418 | /* If we're not dealing with 0.95 hardware, | 2441 | /* If we're not dealing with 0.95 hardware, |
2419 | * clear the chain bit. | 2442 | * clear the chain bit. |
2420 | */ | 2443 | */ |
2421 | if (!xhci_link_trb_quirk(xhci)) | 2444 | if (!xhci_link_trb_quirk(xhci)) |
2422 | next->link.control &= ~TRB_CHAIN; | 2445 | next->link.control &= cpu_to_le32(~TRB_CHAIN); |
2423 | else | 2446 | else |
2424 | next->link.control |= TRB_CHAIN; | 2447 | next->link.control |= cpu_to_le32(TRB_CHAIN); |
2425 | 2448 | ||
2426 | wmb(); | 2449 | wmb(); |
2427 | next->link.control ^= (u32) TRB_CYCLE; | 2450 | next->link.control ^= cpu_to_le32((u32) TRB_CYCLE); |
2428 | 2451 | ||
2429 | /* Toggle the cycle bit after the last ring segment. */ | 2452 | /* Toggle the cycle bit after the last ring segment. */ |
2430 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { | 2453 | if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { |
@@ -2467,8 +2490,8 @@ static int prepare_transfer(struct xhci_hcd *xhci, | |||
2467 | } | 2490 | } |
2468 | 2491 | ||
2469 | ret = prepare_ring(xhci, ep_ring, | 2492 | ret = prepare_ring(xhci, ep_ring, |
2470 | ep_ctx->ep_info & EP_STATE_MASK, | 2493 | le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, |
2471 | num_trbs, mem_flags); | 2494 | num_trbs, mem_flags); |
2472 | if (ret) | 2495 | if (ret) |
2473 | return ret; | 2496 | return ret; |
2474 | 2497 | ||
@@ -2570,9 +2593,9 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, | |||
2570 | */ | 2593 | */ |
2571 | wmb(); | 2594 | wmb(); |
2572 | if (start_cycle) | 2595 | if (start_cycle) |
2573 | start_trb->field[3] |= start_cycle; | 2596 | start_trb->field[3] |= cpu_to_le32(start_cycle); |
2574 | else | 2597 | else |
2575 | start_trb->field[3] &= ~0x1; | 2598 | start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); |
2576 | xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); | 2599 | xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); |
2577 | } | 2600 | } |
2578 | 2601 | ||
@@ -2590,7 +2613,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2590 | int xhci_interval; | 2613 | int xhci_interval; |
2591 | int ep_interval; | 2614 | int ep_interval; |
2592 | 2615 | ||
2593 | xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); | 2616 | xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); |
2594 | ep_interval = urb->interval; | 2617 | ep_interval = urb->interval; |
2595 | /* Convert to microframes */ | 2618 | /* Convert to microframes */ |
2596 | if (urb->dev->speed == USB_SPEED_LOW || | 2619 | if (urb->dev->speed == USB_SPEED_LOW || |
@@ -2632,6 +2655,35 @@ static u32 xhci_td_remainder(unsigned int remainder) | |||
2632 | return (remainder >> 10) << 17; | 2655 | return (remainder >> 10) << 17; |
2633 | } | 2656 | } |
2634 | 2657 | ||
2658 | /* | ||
2659 | * For xHCI 1.0 host controllers, TD size is the number of packets remaining in | ||
2660 | * the TD (*not* including this TRB). | ||
2661 | * | ||
2662 | * Total TD packet count = total_packet_count = | ||
2663 | * roundup(TD size in bytes / wMaxPacketSize) | ||
2664 | * | ||
2665 | * Packets transferred up to and including this TRB = packets_transferred = | ||
2666 | * rounddown(total bytes transferred including this TRB / wMaxPacketSize) | ||
2667 | * | ||
2668 | * TD size = total_packet_count - packets_transferred | ||
2669 | * | ||
2670 | * It must fit in bits 21:17, so it can't be bigger than 31. | ||
2671 | */ | ||
2672 | |||
2673 | static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, | ||
2674 | unsigned int total_packet_count, struct urb *urb) | ||
2675 | { | ||
2676 | int packets_transferred; | ||
2677 | |||
2678 | /* All the TRB queueing functions don't count the current TRB in | ||
2679 | * running_total. | ||
2680 | */ | ||
2681 | packets_transferred = (running_total + trb_buff_len) / | ||
2682 | le16_to_cpu(urb->ep->desc.wMaxPacketSize); | ||
2683 | |||
2684 | return xhci_td_remainder(total_packet_count - packets_transferred); | ||
2685 | } | ||
2686 | |||
2635 | static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | 2687 | static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
2636 | struct urb *urb, int slot_id, unsigned int ep_index) | 2688 | struct urb *urb, int slot_id, unsigned int ep_index) |
2637 | { | 2689 | { |
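Worked numbers for the arithmetic the new comment block above describes, on a hypothetical transfer: a 3072-byte TD on a 512-byte wMaxPacketSize endpoint, split into TRBs of 2048 and 1024 bytes. This follows the formula stated in the comment, not the exact bit encoding returned by the helper:

    #include <stdio.h>

    int main(void)
    {
            unsigned int maxp = 512;                        /* wMaxPacketSize */
            unsigned int td_len = 3072;                     /* whole TD, in bytes */
            unsigned int trb_len[] = { 2048, 1024 };        /* TRBs covering the TD */
            unsigned int total_packets = (td_len + maxp - 1) / maxp;       /* 6 */
            unsigned int running = 0;

            for (int i = 0; i < 2; i++) {
                    unsigned int transferred = (running + trb_len[i]) / maxp;
                    unsigned int td_size = total_packets - transferred;

                    /* TRB 0: 6 - 4 = 2 packets still to go; TRB 1: 6 - 6 = 0 */
                    printf("TRB %d: TD size = %u\n", i, td_size);
                    running += trb_len[i];
            }
            return 0;
    }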
@@ -2642,6 +2694,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2642 | struct scatterlist *sg; | 2694 | struct scatterlist *sg; |
2643 | int num_sgs; | 2695 | int num_sgs; |
2644 | int trb_buff_len, this_sg_len, running_total; | 2696 | int trb_buff_len, this_sg_len, running_total; |
2697 | unsigned int total_packet_count; | ||
2645 | bool first_trb; | 2698 | bool first_trb; |
2646 | u64 addr; | 2699 | u64 addr; |
2647 | bool more_trbs_coming; | 2700 | bool more_trbs_coming; |
@@ -2655,6 +2708,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2655 | 2708 | ||
2656 | num_trbs = count_sg_trbs_needed(xhci, urb); | 2709 | num_trbs = count_sg_trbs_needed(xhci, urb); |
2657 | num_sgs = urb->num_sgs; | 2710 | num_sgs = urb->num_sgs; |
2711 | total_packet_count = roundup(urb->transfer_buffer_length, | ||
2712 | le16_to_cpu(urb->ep->desc.wMaxPacketSize)); | ||
2658 | 2713 | ||
2659 | trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], | 2714 | trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], |
2660 | ep_index, urb->stream_id, | 2715 | ep_index, urb->stream_id, |
@@ -2718,6 +2773,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2718 | td->last_trb = ep_ring->enqueue; | 2773 | td->last_trb = ep_ring->enqueue; |
2719 | field |= TRB_IOC; | 2774 | field |= TRB_IOC; |
2720 | } | 2775 | } |
2776 | |||
2777 | /* Only set interrupt on short packet for IN endpoints */ | ||
2778 | if (usb_urb_dir_in(urb)) | ||
2779 | field |= TRB_ISP; | ||
2780 | |||
2721 | xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), " | 2781 | xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), " |
2722 | "64KB boundary at %#x, end dma = %#x\n", | 2782 | "64KB boundary at %#x, end dma = %#x\n", |
2723 | (unsigned int) addr, trb_buff_len, trb_buff_len, | 2783 | (unsigned int) addr, trb_buff_len, trb_buff_len, |
@@ -2730,11 +2790,20 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2730 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), | 2790 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), |
2731 | (unsigned int) addr + trb_buff_len); | 2791 | (unsigned int) addr + trb_buff_len); |
2732 | } | 2792 | } |
2733 | remainder = xhci_td_remainder(urb->transfer_buffer_length - | 2793 | |
2734 | running_total) ; | 2794 | /* Set the TRB length, TD size, and interrupter fields. */ |
2795 | if (xhci->hci_version < 0x100) { | ||
2796 | remainder = xhci_td_remainder( | ||
2797 | urb->transfer_buffer_length - | ||
2798 | running_total); | ||
2799 | } else { | ||
2800 | remainder = xhci_v1_0_td_remainder(running_total, | ||
2801 | trb_buff_len, total_packet_count, urb); | ||
2802 | } | ||
2735 | length_field = TRB_LEN(trb_buff_len) | | 2803 | length_field = TRB_LEN(trb_buff_len) | |
2736 | remainder | | 2804 | remainder | |
2737 | TRB_INTR_TARGET(0); | 2805 | TRB_INTR_TARGET(0); |
2806 | |||
2738 | if (num_trbs > 1) | 2807 | if (num_trbs > 1) |
2739 | more_trbs_coming = true; | 2808 | more_trbs_coming = true; |
2740 | else | 2809 | else |
@@ -2743,12 +2812,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2743 | lower_32_bits(addr), | 2812 | lower_32_bits(addr), |
2744 | upper_32_bits(addr), | 2813 | upper_32_bits(addr), |
2745 | length_field, | 2814 | length_field, |
2746 | /* We always want to know if the TRB was short, | 2815 | field | TRB_TYPE(TRB_NORMAL)); |
2747 | * or we won't get an event when it completes. | ||
2748 | * (Unless we use event data TRBs, which are a | ||
2749 | * waste of space and HC resources.) | ||
2750 | */ | ||
2751 | field | TRB_ISP | TRB_TYPE(TRB_NORMAL)); | ||
2752 | --num_trbs; | 2816 | --num_trbs; |
2753 | running_total += trb_buff_len; | 2817 | running_total += trb_buff_len; |
2754 | 2818 | ||
@@ -2796,6 +2860,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2796 | u32 field, length_field; | 2860 | u32 field, length_field; |
2797 | 2861 | ||
2798 | int running_total, trb_buff_len, ret; | 2862 | int running_total, trb_buff_len, ret; |
2863 | unsigned int total_packet_count; | ||
2799 | u64 addr; | 2864 | u64 addr; |
2800 | 2865 | ||
2801 | if (urb->num_sgs) | 2866 | if (urb->num_sgs) |
@@ -2850,6 +2915,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2850 | start_cycle = ep_ring->cycle_state; | 2915 | start_cycle = ep_ring->cycle_state; |
2851 | 2916 | ||
2852 | running_total = 0; | 2917 | running_total = 0; |
2918 | total_packet_count = roundup(urb->transfer_buffer_length, | ||
2919 | le16_to_cpu(urb->ep->desc.wMaxPacketSize)); | ||
2853 | /* How much data is in the first TRB? */ | 2920 | /* How much data is in the first TRB? */ |
2854 | addr = (u64) urb->transfer_dma; | 2921 | addr = (u64) urb->transfer_dma; |
2855 | trb_buff_len = TRB_MAX_BUFF_SIZE - | 2922 | trb_buff_len = TRB_MAX_BUFF_SIZE - |
@@ -2882,11 +2949,24 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2882 | td->last_trb = ep_ring->enqueue; | 2949 | td->last_trb = ep_ring->enqueue; |
2883 | field |= TRB_IOC; | 2950 | field |= TRB_IOC; |
2884 | } | 2951 | } |
2885 | remainder = xhci_td_remainder(urb->transfer_buffer_length - | 2952 | |
2886 | running_total); | 2953 | /* Only set interrupt on short packet for IN endpoints */ |
2954 | if (usb_urb_dir_in(urb)) | ||
2955 | field |= TRB_ISP; | ||
2956 | |||
2957 | /* Set the TRB length, TD size, and interrupter fields. */ | ||
2958 | if (xhci->hci_version < 0x100) { | ||
2959 | remainder = xhci_td_remainder( | ||
2960 | urb->transfer_buffer_length - | ||
2961 | running_total); | ||
2962 | } else { | ||
2963 | remainder = xhci_v1_0_td_remainder(running_total, | ||
2964 | trb_buff_len, total_packet_count, urb); | ||
2965 | } | ||
2887 | length_field = TRB_LEN(trb_buff_len) | | 2966 | length_field = TRB_LEN(trb_buff_len) | |
2888 | remainder | | 2967 | remainder | |
2889 | TRB_INTR_TARGET(0); | 2968 | TRB_INTR_TARGET(0); |
2969 | |||
2890 | if (num_trbs > 1) | 2970 | if (num_trbs > 1) |
2891 | more_trbs_coming = true; | 2971 | more_trbs_coming = true; |
2892 | else | 2972 | else |
@@ -2895,12 +2975,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2895 | lower_32_bits(addr), | 2975 | lower_32_bits(addr), |
2896 | upper_32_bits(addr), | 2976 | upper_32_bits(addr), |
2897 | length_field, | 2977 | length_field, |
2898 | /* We always want to know if the TRB was short, | 2978 | field | TRB_TYPE(TRB_NORMAL)); |
2899 | * or we won't get an event when it completes. | ||
2900 | * (Unless we use event data TRBs, which are a | ||
2901 | * waste of space and HC resources.) | ||
2902 | */ | ||
2903 | field | TRB_ISP | TRB_TYPE(TRB_NORMAL)); | ||
2904 | --num_trbs; | 2979 | --num_trbs; |
2905 | running_total += trb_buff_len; | 2980 | running_total += trb_buff_len; |
2906 | 2981 | ||
@@ -2978,16 +3053,31 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2978 | field |= TRB_IDT | TRB_TYPE(TRB_SETUP); | 3053 | field |= TRB_IDT | TRB_TYPE(TRB_SETUP); |
2979 | if (start_cycle == 0) | 3054 | if (start_cycle == 0) |
2980 | field |= 0x1; | 3055 | field |= 0x1; |
3056 | |||
3057 | /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ | ||
3058 | if (xhci->hci_version == 0x100) { | ||
3059 | if (urb->transfer_buffer_length > 0) { | ||
3060 | if (setup->bRequestType & USB_DIR_IN) | ||
3061 | field |= TRB_TX_TYPE(TRB_DATA_IN); | ||
3062 | else | ||
3063 | field |= TRB_TX_TYPE(TRB_DATA_OUT); | ||
3064 | } | ||
3065 | } | ||
3066 | |||
2981 | queue_trb(xhci, ep_ring, false, true, | 3067 | queue_trb(xhci, ep_ring, false, true, |
2982 | /* FIXME endianness is probably going to bite my ass here. */ | 3068 | setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, |
2983 | setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, | 3069 | le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, |
2984 | setup->wIndex | setup->wLength << 16, | 3070 | TRB_LEN(8) | TRB_INTR_TARGET(0), |
2985 | TRB_LEN(8) | TRB_INTR_TARGET(0), | 3071 | /* Immediate data in pointer */ |
2986 | /* Immediate data in pointer */ | 3072 | field); |
2987 | field); | ||
2988 | 3073 | ||
2989 | /* If there's data, queue data TRBs */ | 3074 | /* If there's data, queue data TRBs */ |
2990 | field = 0; | 3075 | /* Only set interrupt on short packet for IN endpoints */ |
3076 | if (usb_urb_dir_in(urb)) | ||
3077 | field = TRB_ISP | TRB_TYPE(TRB_DATA); | ||
3078 | else | ||
3079 | field = TRB_TYPE(TRB_DATA); | ||
3080 | |||
2991 | length_field = TRB_LEN(urb->transfer_buffer_length) | | 3081 | length_field = TRB_LEN(urb->transfer_buffer_length) | |
2992 | xhci_td_remainder(urb->transfer_buffer_length) | | 3082 | xhci_td_remainder(urb->transfer_buffer_length) | |
2993 | TRB_INTR_TARGET(0); | 3083 | TRB_INTR_TARGET(0); |
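The rewritten Setup Stage TRB above resolves the old "FIXME endianness" by byte-swapping wValue/wIndex/wLength before packing and leaving the final cpu_to_le32() to queue_trb(). A standalone sketch of that packing; the struct and the GET_DESCRIPTOR values are hypothetical stand-ins for the urb's struct usb_ctrlrequest:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct setup_pkt {              /* 8-byte USB SETUP packet, fields LE on the wire */
            uint8_t  bRequestType;
            uint8_t  bRequest;
            uint16_t wValue;
            uint16_t wIndex;
            uint16_t wLength;
    };

    int main(void)
    {
            /* GET_DESCRIPTOR(DEVICE), 18 bytes -- example values only. */
            struct setup_pkt s = {
                    0x80, 0x06, htole16(0x0100), htole16(0), htole16(18)
            };

            /* The two immediate-data words of the Setup Stage TRB, in CPU order;
             * queue_trb() then stores them with cpu_to_le32().
             */
            uint32_t w0 = s.bRequestType | (uint32_t)s.bRequest << 8 |
                          (uint32_t)le16toh(s.wValue) << 16;
            uint32_t w1 = le16toh(s.wIndex) | (uint32_t)le16toh(s.wLength) << 16;

            printf("TRB words: %#010x %#010x\n", (unsigned)w0, (unsigned)w1);
            return 0;
    }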
@@ -2998,8 +3088,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2998 | lower_32_bits(urb->transfer_dma), | 3088 | lower_32_bits(urb->transfer_dma), |
2999 | upper_32_bits(urb->transfer_dma), | 3089 | upper_32_bits(urb->transfer_dma), |
3000 | length_field, | 3090 | length_field, |
3001 | /* Event on short tx */ | 3091 | field | ep_ring->cycle_state); |
3002 | field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); | ||
3003 | } | 3092 | } |
3004 | 3093 | ||
3005 | /* Save the DMA address of the last TRB in the TD */ | 3094 | /* Save the DMA address of the last TRB in the TD */ |
@@ -3045,6 +3134,63 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci, | |||
3045 | return num_trbs; | 3134 | return num_trbs; |
3046 | } | 3135 | } |
3047 | 3136 | ||
3137 | /* | ||
3138 | * The transfer burst count field of the isochronous TRB defines the number of | ||
3139 | * bursts that are required to move all packets in this TD. Only SuperSpeed | ||
3140 | * devices can burst up to bMaxBurst number of packets per service interval. | ||
3141 | * This field is zero based, meaning a value of zero in the field means one | ||
3142 | * burst. Basically, for everything but SuperSpeed devices, this field will be | ||
3143 | * zero. Only xHCI 1.0 host controllers support this field. | ||
3144 | */ | ||
3145 | static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, | ||
3146 | struct usb_device *udev, | ||
3147 | struct urb *urb, unsigned int total_packet_count) | ||
3148 | { | ||
3149 | unsigned int max_burst; | ||
3150 | |||
3151 | if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER) | ||
3152 | return 0; | ||
3153 | |||
3154 | max_burst = urb->ep->ss_ep_comp.bMaxBurst; | ||
3155 | return roundup(total_packet_count, max_burst + 1) - 1; | ||
3156 | } | ||
3157 | |||
3158 | /* | ||
3159 | * Returns the number of packets in the last "burst" of packets. This field is | ||
3160 | * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so | ||
3161 | * the last burst packet count is equal to the total number of packets in the | ||
3162 | * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst | ||
3163 | * must contain (bMaxBurst + 1) number of packets, but the last burst can | ||
3164 | * contain 1 to (bMaxBurst + 1) packets. | ||
3165 | */ | ||
3166 | static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, | ||
3167 | struct usb_device *udev, | ||
3168 | struct urb *urb, unsigned int total_packet_count) | ||
3169 | { | ||
3170 | unsigned int max_burst; | ||
3171 | unsigned int residue; | ||
3172 | |||
3173 | if (xhci->hci_version < 0x100) | ||
3174 | return 0; | ||
3175 | |||
3176 | switch (udev->speed) { | ||
3177 | case USB_SPEED_SUPER: | ||
3178 | /* bMaxBurst is zero based: 0 means 1 packet per burst */ | ||
3179 | max_burst = urb->ep->ss_ep_comp.bMaxBurst; | ||
3180 | residue = total_packet_count % (max_burst + 1); | ||
3181 | /* If residue is zero, the last burst contains (max_burst + 1) | ||
3182 | * number of packets, but the TLBPC field is zero-based. | ||
3183 | */ | ||
3184 | if (residue == 0) | ||
3185 | return max_burst; | ||
3186 | return residue - 1; | ||
3187 | default: | ||
3188 | if (total_packet_count == 0) | ||
3189 | return 0; | ||
3190 | return total_packet_count - 1; | ||
3191 | } | ||
3192 | } | ||
3193 | |||
3048 | /* This is for isoc transfer */ | 3194 | /* This is for isoc transfer */ |
3049 | static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | 3195 | static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
3050 | struct urb *urb, int slot_id, unsigned int ep_index) | 3196 | struct urb *urb, int slot_id, unsigned int ep_index) |
@@ -3085,12 +3231,22 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3085 | 3231 | ||
3086 | /* Queue the first TRB, even if it's zero-length */ | 3232 | /* Queue the first TRB, even if it's zero-length */ |
3087 | for (i = 0; i < num_tds; i++) { | 3233 | for (i = 0; i < num_tds; i++) { |
3088 | first_trb = true; | 3234 | unsigned int total_packet_count; |
3235 | unsigned int burst_count; | ||
3236 | unsigned int residue; | ||
3089 | 3237 | ||
3238 | first_trb = true; | ||
3090 | running_total = 0; | 3239 | running_total = 0; |
3091 | addr = start_addr + urb->iso_frame_desc[i].offset; | 3240 | addr = start_addr + urb->iso_frame_desc[i].offset; |
3092 | td_len = urb->iso_frame_desc[i].length; | 3241 | td_len = urb->iso_frame_desc[i].length; |
3093 | td_remain_len = td_len; | 3242 | td_remain_len = td_len; |
3243 | /* FIXME: Ignoring zero-length packets, can those happen? */ | ||
3244 | total_packet_count = roundup(td_len, | ||
3245 | le16_to_cpu(urb->ep->desc.wMaxPacketSize)); | ||
3246 | burst_count = xhci_get_burst_count(xhci, urb->dev, urb, | ||
3247 | total_packet_count); | ||
3248 | residue = xhci_get_last_burst_packet_count(xhci, | ||
3249 | urb->dev, urb, total_packet_count); | ||
3094 | 3250 | ||
3095 | trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); | 3251 | trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); |
3096 | 3252 | ||
@@ -3104,7 +3260,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3104 | 3260 | ||
3105 | for (j = 0; j < trbs_per_td; j++) { | 3261 | for (j = 0; j < trbs_per_td; j++) { |
3106 | u32 remainder = 0; | 3262 | u32 remainder = 0; |
3107 | field = 0; | 3263 | field = TRB_TBC(burst_count) | TRB_TLBPC(residue); |
3108 | 3264 | ||
3109 | if (first_trb) { | 3265 | if (first_trb) { |
3110 | /* Queue the isoc TRB */ | 3266 | /* Queue the isoc TRB */ |
@@ -3123,6 +3279,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3123 | field |= ep_ring->cycle_state; | 3279 | field |= ep_ring->cycle_state; |
3124 | } | 3280 | } |
3125 | 3281 | ||
3282 | /* Only set interrupt on short packet for IN EPs */ | ||
3283 | if (usb_urb_dir_in(urb)) | ||
3284 | field |= TRB_ISP; | ||
3285 | |||
3126 | /* Chain all the TRBs together; clear the chain bit in | 3286 | /* Chain all the TRBs together; clear the chain bit in |
3127 | * the last TRB to indicate it's the last TRB in the | 3287 | * the last TRB to indicate it's the last TRB in the |
3128 | * chain. | 3288 | * chain. |
@@ -3133,6 +3293,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3133 | } else { | 3293 | } else { |
3134 | td->last_trb = ep_ring->enqueue; | 3294 | td->last_trb = ep_ring->enqueue; |
3135 | field |= TRB_IOC; | 3295 | field |= TRB_IOC; |
3296 | if (xhci->hci_version == 0x100) { | ||
3297 | /* Set BEI bit except for the last td */ | ||
3298 | if (i < num_tds - 1) | ||
3299 | field |= TRB_BEI; | ||
3300 | } | ||
3136 | more_trbs_coming = false; | 3301 | more_trbs_coming = false; |
3137 | } | 3302 | } |
3138 | 3303 | ||
@@ -3142,20 +3307,24 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3142 | if (trb_buff_len > td_remain_len) | 3307 | if (trb_buff_len > td_remain_len) |
3143 | trb_buff_len = td_remain_len; | 3308 | trb_buff_len = td_remain_len; |
3144 | 3309 | ||
3145 | remainder = xhci_td_remainder(td_len - running_total); | 3310 | /* Set the TRB length, TD size, & interrupter fields. */ |
3311 | if (xhci->hci_version < 0x100) { | ||
3312 | remainder = xhci_td_remainder( | ||
3313 | td_len - running_total); | ||
3314 | } else { | ||
3315 | remainder = xhci_v1_0_td_remainder( | ||
3316 | running_total, trb_buff_len, | ||
3317 | total_packet_count, urb); | ||
3318 | } | ||
3146 | length_field = TRB_LEN(trb_buff_len) | | 3319 | length_field = TRB_LEN(trb_buff_len) | |
3147 | remainder | | 3320 | remainder | |
3148 | TRB_INTR_TARGET(0); | 3321 | TRB_INTR_TARGET(0); |
3322 | |||
3149 | queue_trb(xhci, ep_ring, false, more_trbs_coming, | 3323 | queue_trb(xhci, ep_ring, false, more_trbs_coming, |
3150 | lower_32_bits(addr), | 3324 | lower_32_bits(addr), |
3151 | upper_32_bits(addr), | 3325 | upper_32_bits(addr), |
3152 | length_field, | 3326 | length_field, |
3153 | /* We always want to know if the TRB was short, | 3327 | field); |
3154 | * or we won't get an event when it completes. | ||
3155 | * (Unless we use event data TRBs, which are a | ||
3156 | * waste of space and HC resources.) | ||
3157 | */ | ||
3158 | field | TRB_ISP); | ||
3159 | running_total += trb_buff_len; | 3328 | running_total += trb_buff_len; |
3160 | 3329 | ||
3161 | addr += trb_buff_len; | 3330 | addr += trb_buff_len; |
@@ -3211,8 +3380,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3211 | /* Check the ring to guarantee there is enough room for the whole urb. | 3380 | /* Check the ring to guarantee there is enough room for the whole urb. |
3212 | * Do not insert any td of the urb to the ring if the check failed. | 3381 | * Do not insert any td of the urb to the ring if the check failed. |
3213 | */ | 3382 | */ |
3214 | ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK, | 3383 | ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, |
3215 | num_trbs, mem_flags); | 3384 | num_trbs, mem_flags); |
3216 | if (ret) | 3385 | if (ret) |
3217 | return ret; | 3386 | return ret; |
3218 | 3387 | ||
@@ -3224,7 +3393,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3224 | urb->dev->speed == USB_SPEED_FULL) | 3393 | urb->dev->speed == USB_SPEED_FULL) |
3225 | urb->start_frame >>= 3; | 3394 | urb->start_frame >>= 3; |
3226 | 3395 | ||
3227 | xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); | 3396 | xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); |
3228 | ep_interval = urb->interval; | 3397 | ep_interval = urb->interval; |
3229 | /* Convert to microframes */ | 3398 | /* Convert to microframes */ |
3230 | if (urb->dev->speed == USB_SPEED_LOW || | 3399 | if (urb->dev->speed == USB_SPEED_LOW || |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 81b976e45880..8f2a56ece44f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -973,8 +973,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, | |||
973 | 973 | ||
974 | out_ctx = xhci->devs[slot_id]->out_ctx; | 974 | out_ctx = xhci->devs[slot_id]->out_ctx; |
975 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); | 975 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
976 | hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2); | 976 | hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); |
977 | max_packet_size = urb->dev->ep0.desc.wMaxPacketSize; | 977 | max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize); |
978 | if (hw_max_packet_size != max_packet_size) { | 978 | if (hw_max_packet_size != max_packet_size) { |
979 | xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); | 979 | xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); |
980 | xhci_dbg(xhci, "Max packet size in usb_device = %d\n", | 980 | xhci_dbg(xhci, "Max packet size in usb_device = %d\n", |
@@ -988,15 +988,15 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, | |||
988 | xhci->devs[slot_id]->out_ctx, ep_index); | 988 | xhci->devs[slot_id]->out_ctx, ep_index); |
989 | in_ctx = xhci->devs[slot_id]->in_ctx; | 989 | in_ctx = xhci->devs[slot_id]->in_ctx; |
990 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | 990 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); |
991 | ep_ctx->ep_info2 &= ~MAX_PACKET_MASK; | 991 | ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); |
992 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size); | 992 | ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); |
993 | 993 | ||
994 | /* Set up the input context flags for the command */ | 994 | /* Set up the input context flags for the command */ |
995 | /* FIXME: This won't work if a non-default control endpoint | 995 | /* FIXME: This won't work if a non-default control endpoint |
996 | * changes max packet sizes. | 996 | * changes max packet sizes. |
997 | */ | 997 | */ |
998 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | 998 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
999 | ctrl_ctx->add_flags = EP0_FLAG; | 999 | ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); |
1000 | ctrl_ctx->drop_flags = 0; | 1000 | ctrl_ctx->drop_flags = 0; |
1001 | 1001 | ||
1002 | xhci_dbg(xhci, "Slot %d input context\n", slot_id); | 1002 | xhci_dbg(xhci, "Slot %d input context\n", slot_id); |
@@ -1010,7 +1010,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, | |||
1010 | /* Clean up the input context for later use by bandwidth | 1010 | /* Clean up the input context for later use by bandwidth |
1011 | * functions. | 1011 | * functions. |
1012 | */ | 1012 | */ |
1013 | ctrl_ctx->add_flags = SLOT_FLAG; | 1013 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); |
1014 | } | 1014 | } |
1015 | return ret; | 1015 | return ret; |
1016 | } | 1016 | } |
@@ -1331,27 +1331,30 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
1331 | /* If the HC already knows the endpoint is disabled, | 1331 | /* If the HC already knows the endpoint is disabled, |
1332 | * or the HCD has noted it is disabled, ignore this request | 1332 | * or the HCD has noted it is disabled, ignore this request |
1333 | */ | 1333 | */ |
1334 | if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || | 1334 | if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == |
1335 | ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { | 1335 | EP_STATE_DISABLED || |
1336 | le32_to_cpu(ctrl_ctx->drop_flags) & | ||
1337 | xhci_get_endpoint_flag(&ep->desc)) { | ||
1336 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", | 1338 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
1337 | __func__, ep); | 1339 | __func__, ep); |
1338 | return 0; | 1340 | return 0; |
1339 | } | 1341 | } |
1340 | 1342 | ||
1341 | ctrl_ctx->drop_flags |= drop_flag; | 1343 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
1342 | new_drop_flags = ctrl_ctx->drop_flags; | 1344 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
1343 | 1345 | ||
1344 | ctrl_ctx->add_flags &= ~drop_flag; | 1346 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
1345 | new_add_flags = ctrl_ctx->add_flags; | 1347 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1346 | 1348 | ||
1347 | last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); | 1349 | last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)); |
1348 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); | 1350 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
1349 | /* Update the last valid endpoint context, if we deleted the last one */ | 1351 | /* Update the last valid endpoint context, if we deleted the last one */ |
1350 | if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { | 1352 | if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) > |
1351 | slot_ctx->dev_info &= ~LAST_CTX_MASK; | 1353 | LAST_CTX(last_ctx)) { |
1352 | slot_ctx->dev_info |= LAST_CTX(last_ctx); | 1354 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
1355 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); | ||
1353 | } | 1356 | } |
1354 | new_slot_info = slot_ctx->dev_info; | 1357 | new_slot_info = le32_to_cpu(slot_ctx->dev_info); |
1355 | 1358 | ||
1356 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); | 1359 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
1357 | 1360 | ||
@@ -1419,7 +1422,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
1419 | /* If the HCD has already noted the endpoint is enabled, | 1422 | /* If the HCD has already noted the endpoint is enabled, |
1420 | * ignore this request. | 1423 | * ignore this request. |
1421 | */ | 1424 | */ |
1422 | if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { | 1425 | if (le32_to_cpu(ctrl_ctx->add_flags) & |
1426 | xhci_get_endpoint_flag(&ep->desc)) { | ||
1423 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", | 1427 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
1424 | __func__, ep); | 1428 | __func__, ep); |
1425 | return 0; | 1429 | return 0; |
@@ -1437,8 +1441,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
1437 | return -ENOMEM; | 1441 | return -ENOMEM; |
1438 | } | 1442 | } |
1439 | 1443 | ||
1440 | ctrl_ctx->add_flags |= added_ctxs; | 1444 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
1441 | new_add_flags = ctrl_ctx->add_flags; | 1445 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
1442 | 1446 | ||
1443 | /* If xhci_endpoint_disable() was called for this endpoint, but the | 1447 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
1444 | * xHC hasn't been notified yet through the check_bandwidth() call, | 1448 | * xHC hasn't been notified yet through the check_bandwidth() call, |
@@ -1446,15 +1450,16 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
1446 | * descriptors. We must drop and re-add this endpoint, so we leave the | 1450 | * descriptors. We must drop and re-add this endpoint, so we leave the |
1447 | * drop flags alone. | 1451 | * drop flags alone. |
1448 | */ | 1452 | */ |
1449 | new_drop_flags = ctrl_ctx->drop_flags; | 1453 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
1450 | 1454 | ||
1451 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); | 1455 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
1452 | /* Update the last valid endpoint context, if we just added one past */ | 1456 | /* Update the last valid endpoint context, if we just added one past */ |
1453 | if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { | 1457 | if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) < |
1454 | slot_ctx->dev_info &= ~LAST_CTX_MASK; | 1458 | LAST_CTX(last_ctx)) { |
1455 | slot_ctx->dev_info |= LAST_CTX(last_ctx); | 1459 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
1460 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); | ||
1456 | } | 1461 | } |
1457 | new_slot_info = slot_ctx->dev_info; | 1462 | new_slot_info = le32_to_cpu(slot_ctx->dev_info); |
1458 | 1463 | ||
1459 | /* Store the usb_device pointer for later use */ | 1464 | /* Store the usb_device pointer for later use */ |
1460 | ep->hcpriv = udev; | 1465 | ep->hcpriv = udev; |
@@ -1484,9 +1489,9 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir | |||
1484 | ctrl_ctx->drop_flags = 0; | 1489 | ctrl_ctx->drop_flags = 0; |
1485 | ctrl_ctx->add_flags = 0; | 1490 | ctrl_ctx->add_flags = 0; |
1486 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | 1491 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
1487 | slot_ctx->dev_info &= ~LAST_CTX_MASK; | 1492 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
1488 | /* Endpoint 0 is always valid */ | 1493 | /* Endpoint 0 is always valid */ |
1489 | slot_ctx->dev_info |= LAST_CTX(1); | 1494 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
1490 | for (i = 1; i < 31; ++i) { | 1495 | for (i = 1; i < 31; ++i) { |
1491 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); | 1496 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
1492 | ep_ctx->ep_info = 0; | 1497 | ep_ctx->ep_info = 0; |
@@ -1497,7 +1502,7 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir | |||
1497 | } | 1502 | } |
1498 | 1503 | ||
1499 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, | 1504 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
1500 | struct usb_device *udev, int *cmd_status) | 1505 | struct usb_device *udev, u32 *cmd_status) |
1501 | { | 1506 | { |
1502 | int ret; | 1507 | int ret; |
1503 | 1508 | ||
@@ -1535,7 +1540,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, | |||
1535 | } | 1540 | } |
1536 | 1541 | ||
1537 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | 1542 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, |
1538 | struct usb_device *udev, int *cmd_status) | 1543 | struct usb_device *udev, u32 *cmd_status) |
1539 | { | 1544 | { |
1540 | int ret; | 1545 | int ret; |
1541 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; | 1546 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; |
@@ -1555,6 +1560,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | |||
1555 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); | 1560 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); |
1556 | ret = -EINVAL; | 1561 | ret = -EINVAL; |
1557 | break; | 1562 | break; |
1563 | case COMP_MEL_ERR: | ||
1564 | /* Max Exit Latency too large error */ | ||
1565 | dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); | ||
1566 | ret = -EINVAL; | ||
1567 | break; | ||
1558 | case COMP_SUCCESS: | 1568 | case COMP_SUCCESS: |
1559 | dev_dbg(&udev->dev, "Successful evaluate context command\n"); | 1569 | dev_dbg(&udev->dev, "Successful evaluate context command\n"); |
1560 | ret = 0; | 1570 | ret = 0; |
@@ -1581,7 +1591,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
1581 | unsigned long flags; | 1591 | unsigned long flags; |
1582 | struct xhci_container_ctx *in_ctx; | 1592 | struct xhci_container_ctx *in_ctx; |
1583 | struct completion *cmd_completion; | 1593 | struct completion *cmd_completion; |
1584 | int *cmd_status; | 1594 | u32 *cmd_status; |
1585 | struct xhci_virt_device *virt_dev; | 1595 | struct xhci_virt_device *virt_dev; |
1586 | 1596 | ||
1587 | spin_lock_irqsave(&xhci->lock, flags); | 1597 | spin_lock_irqsave(&xhci->lock, flags); |
@@ -1595,8 +1605,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
1595 | /* Enqueue pointer can be left pointing to the link TRB, | 1605 | /* Enqueue pointer can be left pointing to the link TRB, |
1596 | * we must handle that | 1606 | * we must handle that |
1597 | */ | 1607 | */ |
1598 | if ((command->command_trb->link.control & TRB_TYPE_BITMASK) | 1608 | if ((le32_to_cpu(command->command_trb->link.control) |
1599 | == TRB_TYPE(TRB_LINK)) | 1609 | & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) |
1600 | command->command_trb = | 1610 | command->command_trb = |
1601 | xhci->cmd_ring->enq_seg->next->trbs; | 1611 | xhci->cmd_ring->enq_seg->next->trbs; |
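Both this hunk and the reset-device one further down test the TRB type field, now on the le32_to_cpu()-converted control word, to detect an enqueue pointer parked on a Link TRB. The type field sits in bits 15:10 of that word; a tiny sketch of the decode, with the mask and the Link type value 6 following the xHCI encoding behind the driver's TRB_TYPE macros:

    #include <stdint.h>
    #include <stdio.h>

    #define TRB_TYPE_BITMASK  0xfc00u       /* bits 15:10 of the control word */
    #define TRB_TYPE(t)       ((uint32_t)(t) << 10)
    #define TRB_LINK          6

    int main(void)
    {
            uint32_t control = TRB_TYPE(TRB_LINK) | 0x1;    /* hypothetical word */

            if ((control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
                    printf("parked on a link TRB (type %u)\n",
                           (control & TRB_TYPE_BITMASK) >> 10);
            return 0;
    }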
1602 | 1612 | ||
@@ -1672,14 +1682,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
1672 | 1682 | ||
1673 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ | 1683 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
1674 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); | 1684 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
1675 | ctrl_ctx->add_flags |= SLOT_FLAG; | 1685 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
1676 | ctrl_ctx->add_flags &= ~EP0_FLAG; | 1686 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
1677 | ctrl_ctx->drop_flags &= ~SLOT_FLAG; | 1687 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
1678 | ctrl_ctx->drop_flags &= ~EP0_FLAG; | ||
1679 | xhci_dbg(xhci, "New Input Control Context:\n"); | 1688 | xhci_dbg(xhci, "New Input Control Context:\n"); |
1680 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | 1689 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
1681 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, | 1690 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
1682 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | 1691 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
1683 | 1692 | ||
1684 | ret = xhci_configure_endpoint(xhci, udev, NULL, | 1693 | ret = xhci_configure_endpoint(xhci, udev, NULL, |
1685 | false, false); | 1694 | false, false); |
@@ -1690,10 +1699,19 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
1690 | 1699 | ||
1691 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); | 1700 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); |
1692 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, | 1701 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
1693 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | 1702 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
1694 | 1703 | ||
1704 | /* Free any rings that were dropped, but not changed. */ | ||
1705 | for (i = 1; i < 31; ++i) { | ||
1706 | if ((ctrl_ctx->drop_flags & (1 << (i + 1))) && | ||
1707 | !(ctrl_ctx->add_flags & (1 << (i + 1)))) | ||
1708 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | ||
1709 | } | ||
1695 | xhci_zero_in_ctx(xhci, virt_dev); | 1710 | xhci_zero_in_ctx(xhci, virt_dev); |
1696 | /* Install new rings and free or cache any old rings */ | 1711 | /* |
1712 | * Install any rings for completely new endpoints or changed endpoints, | ||
1713 | * and free or cache any old rings from changed endpoints. | ||
1714 | */ | ||
1697 | for (i = 1; i < 31; ++i) { | 1715 | for (i = 1; i < 31; ++i) { |
1698 | if (!virt_dev->eps[i].new_ring) | 1716 | if (!virt_dev->eps[i].new_ring) |
1699 | continue; | 1717 | continue; |
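The dropped-ring loop above depends on the layout of the add/drop flag words: bit 0 is the slot context (SLOT_FLAG), bit 1 is endpoint 0 (EP0_FLAG), and the flag for endpoint context index i is bit i + 1, which is what xhci_get_endpoint_flag_from_index() returns. A sketch of that mapping; the helper name is illustrative only:

	/* Illustrative: map an endpoint context index (1..30) to its
	 * add/drop flag bit.  Index 0 is EP0, whose flag is bit 1;
	 * bit 0 is reserved for the slot context. */
	static inline u32 ep_index_to_ctx_flag(unsigned int ep_index)
	{
		return 1u << (ep_index + 1);
	}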
@@ -1740,10 +1758,10 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, | |||
1740 | { | 1758 | { |
1741 | struct xhci_input_control_ctx *ctrl_ctx; | 1759 | struct xhci_input_control_ctx *ctrl_ctx; |
1742 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | 1760 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
1743 | ctrl_ctx->add_flags = add_flags; | 1761 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
1744 | ctrl_ctx->drop_flags = drop_flags; | 1762 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); |
1745 | xhci_slot_copy(xhci, in_ctx, out_ctx); | 1763 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
1746 | ctrl_ctx->add_flags |= SLOT_FLAG; | 1764 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
1747 | 1765 | ||
1748 | xhci_dbg(xhci, "Input Context:\n"); | 1766 | xhci_dbg(xhci, "Input Context:\n"); |
1749 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); | 1767 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); |
@@ -1772,7 +1790,7 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, | |||
1772 | deq_state->new_deq_ptr); | 1790 | deq_state->new_deq_ptr); |
1773 | return; | 1791 | return; |
1774 | } | 1792 | } |
1775 | ep_ctx->deq = addr | deq_state->new_cycle_state; | 1793 | ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); |
1776 | 1794 | ||
1777 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); | 1795 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); |
1778 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, | 1796 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, |
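The dequeue-pointer write above packs two things into one __le64: the 16-byte-aligned TRB address and, in bit 0, the new Dequeue Cycle State. A sketch of that composition under those assumptions; addr is the dma_addr_t computed just above:

	/* Sketch: TRB addresses are at least 16-byte aligned, so the low bit
	 * of the 64-bit dequeue field is free to carry the cycle state; the
	 * combined value is stored little-endian. */
	u64 deq = (u64)addr | deq_state->new_cycle_state;
	ep_ctx->deq = cpu_to_le64(deq);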
@@ -2327,8 +2345,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2327 | /* Enqueue pointer can be left pointing to the link TRB, | 2345 | /* Enqueue pointer can be left pointing to the link TRB, |
2328 | * we must handle that | 2346 | * we must handle that |
2329 | */ | 2347 | */ |
2330 | if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK) | 2348 | if ((le32_to_cpu(reset_device_cmd->command_trb->link.control) |
2331 | == TRB_TYPE(TRB_LINK)) | 2349 | & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) |
2332 | reset_device_cmd->command_trb = | 2350 | reset_device_cmd->command_trb = |
2333 | xhci->cmd_ring->enq_seg->next->trbs; | 2351 | xhci->cmd_ring->enq_seg->next->trbs; |
2334 | 2352 | ||
@@ -2542,6 +2560,17 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2542 | 2560 | ||
2543 | virt_dev = xhci->devs[udev->slot_id]; | 2561 | virt_dev = xhci->devs[udev->slot_id]; |
2544 | 2562 | ||
2563 | if (WARN_ON(!virt_dev)) { | ||
2564 | /* | ||
2565 | * In a plug/unplug torture test with an NEC controller, | ||
2566 | * a NULL-pointer dereference was observed once because virt_dev | ||
2567 | * was NULL. Print useful debug rather than crash if it happens again! | ||
2568 | */ | ||
2569 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", | ||
2570 | udev->slot_id); | ||
2571 | return -EINVAL; | ||
2572 | } | ||
2573 | |||
2545 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | 2574 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
2546 | /* | 2575 | /* |
2547 | * If this is the first Set Address since device plug-in or | 2576 | * If this is the first Set Address since device plug-in or |
@@ -2609,10 +2638,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2609 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); | 2638 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
2610 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); | 2639 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); |
2611 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", | 2640 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", |
2612 | udev->slot_id, | 2641 | udev->slot_id, |
2613 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], | 2642 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
2614 | (unsigned long long) | 2643 | (unsigned long long) |
2615 | xhci->dcbaa->dev_context_ptrs[udev->slot_id]); | 2644 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); |
2616 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", | 2645 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", |
2617 | (unsigned long long)virt_dev->out_ctx->dma); | 2646 | (unsigned long long)virt_dev->out_ctx->dma); |
2618 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | 2647 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
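The two 64-bit accesses above illustrate the split this series makes: the DCBAA pointer register is MMIO and is read through xhci_read_64(), which already returns a CPU-order value, while the DCBAA entry itself sits in DMA memory as a __le64 and must be converted with le64_to_cpu() before it is printed or compared. A condensed sketch:

	/* MMIO register: xhci_read_64() already returns CPU byte order */
	u64 reg   = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	/* DMA-visible memory: convert explicitly */
	u64 entry = le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]);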
@@ -2626,7 +2655,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2626 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | 2655 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
2627 | /* Use kernel assigned address for devices; store xHC assigned | 2656 | /* Use kernel assigned address for devices; store xHC assigned |
2628 | * address locally. */ | 2657 | * address locally. */ |
2629 | virt_dev->address = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; | 2658 | virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) |
2659 | + 1; | ||
2630 | /* Zero the input context control for later use */ | 2660 | /* Zero the input context control for later use */ |
2631 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); | 2661 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
2632 | ctrl_ctx->add_flags = 0; | 2662 | ctrl_ctx->add_flags = 0; |
@@ -2670,24 +2700,29 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, | |||
2670 | spin_lock_irqsave(&xhci->lock, flags); | 2700 | spin_lock_irqsave(&xhci->lock, flags); |
2671 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); | 2701 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
2672 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); | 2702 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); |
2673 | ctrl_ctx->add_flags |= SLOT_FLAG; | 2703 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
2674 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); | 2704 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
2675 | slot_ctx->dev_info |= DEV_HUB; | 2705 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
2676 | if (tt->multi) | 2706 | if (tt->multi) |
2677 | slot_ctx->dev_info |= DEV_MTT; | 2707 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
2678 | if (xhci->hci_version > 0x95) { | 2708 | if (xhci->hci_version > 0x95) { |
2679 | xhci_dbg(xhci, "xHCI version %x needs hub " | 2709 | xhci_dbg(xhci, "xHCI version %x needs hub " |
2680 | "TT think time and number of ports\n", | 2710 | "TT think time and number of ports\n", |
2681 | (unsigned int) xhci->hci_version); | 2711 | (unsigned int) xhci->hci_version); |
2682 | slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); | 2712 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
2683 | /* Set TT think time - convert from ns to FS bit times. | 2713 | /* Set TT think time - convert from ns to FS bit times. |
2684 | * 0 = 8 FS bit times, 1 = 16 FS bit times, | 2714 | * 0 = 8 FS bit times, 1 = 16 FS bit times, |
2685 | * 2 = 24 FS bit times, 3 = 32 FS bit times. | 2715 | * 2 = 24 FS bit times, 3 = 32 FS bit times. |
2716 | * | ||
2717 | * xHCI 1.0: this field shall be 0 if the device is not a | ||
2718 | * High-speed hub. | ||
2686 | */ | 2719 | */ |
2687 | think_time = tt->think_time; | 2720 | think_time = tt->think_time; |
2688 | if (think_time != 0) | 2721 | if (think_time != 0) |
2689 | think_time = (think_time / 666) - 1; | 2722 | think_time = (think_time / 666) - 1; |
2690 | slot_ctx->tt_info |= TT_THINK_TIME(think_time); | 2723 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
2724 | slot_ctx->tt_info |= | ||
2725 | cpu_to_le32(TT_THINK_TIME(think_time)); | ||
2691 | } else { | 2726 | } else { |
2692 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " | 2727 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " |
2693 | "TT think time or number of ports\n", | 2728 | "TT think time or number of ports\n", |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index ba1be6b7cc6d..e12db7cfb9bb 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -57,13 +57,13 @@ | |||
57 | * @run_regs_off: RTSOFF - Runtime register space offset | 57 | * @run_regs_off: RTSOFF - Runtime register space offset |
58 | */ | 58 | */ |
59 | struct xhci_cap_regs { | 59 | struct xhci_cap_regs { |
60 | u32 hc_capbase; | 60 | __le32 hc_capbase; |
61 | u32 hcs_params1; | 61 | __le32 hcs_params1; |
62 | u32 hcs_params2; | 62 | __le32 hcs_params2; |
63 | u32 hcs_params3; | 63 | __le32 hcs_params3; |
64 | u32 hcc_params; | 64 | __le32 hcc_params; |
65 | u32 db_off; | 65 | __le32 db_off; |
66 | u32 run_regs_off; | 66 | __le32 run_regs_off; |
67 | /* Reserved up to (CAPLENGTH - 0x1C) */ | 67 | /* Reserved up to (CAPLENGTH - 0x1C) */ |
68 | }; | 68 | }; |
69 | 69 | ||
@@ -155,26 +155,26 @@ struct xhci_cap_regs { | |||
155 | * devices. | 155 | * devices. |
156 | */ | 156 | */ |
157 | struct xhci_op_regs { | 157 | struct xhci_op_regs { |
158 | u32 command; | 158 | __le32 command; |
159 | u32 status; | 159 | __le32 status; |
160 | u32 page_size; | 160 | __le32 page_size; |
161 | u32 reserved1; | 161 | __le32 reserved1; |
162 | u32 reserved2; | 162 | __le32 reserved2; |
163 | u32 dev_notification; | 163 | __le32 dev_notification; |
164 | u64 cmd_ring; | 164 | __le64 cmd_ring; |
165 | /* rsvd: offset 0x20-2F */ | 165 | /* rsvd: offset 0x20-2F */ |
166 | u32 reserved3[4]; | 166 | __le32 reserved3[4]; |
167 | u64 dcbaa_ptr; | 167 | __le64 dcbaa_ptr; |
168 | u32 config_reg; | 168 | __le32 config_reg; |
169 | /* rsvd: offset 0x3C-3FF */ | 169 | /* rsvd: offset 0x3C-3FF */ |
170 | u32 reserved4[241]; | 170 | __le32 reserved4[241]; |
171 | /* port 1 registers, which serve as a base address for other ports */ | 171 | /* port 1 registers, which serve as a base address for other ports */ |
172 | u32 port_status_base; | 172 | __le32 port_status_base; |
173 | u32 port_power_base; | 173 | __le32 port_power_base; |
174 | u32 port_link_base; | 174 | __le32 port_link_base; |
175 | u32 reserved5; | 175 | __le32 reserved5; |
176 | /* registers for ports 2-255 */ | 176 | /* registers for ports 2-255 */ |
177 | u32 reserved6[NUM_PORT_REGS*254]; | 177 | __le32 reserved6[NUM_PORT_REGS*254]; |
178 | }; | 178 | }; |
179 | 179 | ||
180 | /* USBCMD - USB command - command bitmasks */ | 180 | /* USBCMD - USB command - command bitmasks */ |
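The register structures above become __le32/__le64 purely as annotation: MMIO accesses still go through readl()/writel() (via the xhci_readl()/xhci_writel() wrappers further down), which already handle byte order, whereas the DMA-visible contexts and TRBs converted elsewhere in this patch need explicit cpu_to_le32()/le32_to_cpu(). A sketch of the two cases:

	/* MMIO: the accessor does the swapping */
	u32 cmd = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_writel(xhci, cmd | CMD_RUN, &xhci->op_regs->command);

	/* DMA-visible memory: swap by hand */
	u32 info = le32_to_cpu(slot_ctx->dev_info);
	slot_ctx->dev_info = cpu_to_le32(info | DEV_HUB);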
@@ -382,12 +382,12 @@ struct xhci_op_regs { | |||
382 | * updates the dequeue pointer. | 382 | * updates the dequeue pointer. |
383 | */ | 383 | */ |
384 | struct xhci_intr_reg { | 384 | struct xhci_intr_reg { |
385 | u32 irq_pending; | 385 | __le32 irq_pending; |
386 | u32 irq_control; | 386 | __le32 irq_control; |
387 | u32 erst_size; | 387 | __le32 erst_size; |
388 | u32 rsvd; | 388 | __le32 rsvd; |
389 | u64 erst_base; | 389 | __le64 erst_base; |
390 | u64 erst_dequeue; | 390 | __le64 erst_dequeue; |
391 | }; | 391 | }; |
392 | 392 | ||
393 | /* irq_pending bitmasks */ | 393 | /* irq_pending bitmasks */ |
@@ -432,8 +432,8 @@ struct xhci_intr_reg { | |||
432 | * or larger accesses" | 432 | * or larger accesses" |
433 | */ | 433 | */ |
434 | struct xhci_run_regs { | 434 | struct xhci_run_regs { |
435 | u32 microframe_index; | 435 | __le32 microframe_index; |
436 | u32 rsvd[7]; | 436 | __le32 rsvd[7]; |
437 | struct xhci_intr_reg ir_set[128]; | 437 | struct xhci_intr_reg ir_set[128]; |
438 | }; | 438 | }; |
439 | 439 | ||
@@ -447,7 +447,7 @@ struct xhci_run_regs { | |||
447 | * Section 5.6 | 447 | * Section 5.6 |
448 | */ | 448 | */ |
449 | struct xhci_doorbell_array { | 449 | struct xhci_doorbell_array { |
450 | u32 doorbell[256]; | 450 | __le32 doorbell[256]; |
451 | }; | 451 | }; |
452 | 452 | ||
453 | #define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16)) | 453 | #define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16)) |
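DB_VALUE() packs the endpoint index plus one into the low byte and the stream ID into the bits from 16 up. Since doorbells are MMIO, a write like the following sketch (modelled on the ring-doorbell path; xhci->dba, slot_id, ep_index and stream_id are assumed from elsewhere in the driver) needs no explicit byte swap:

	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	/* ring the doorbell for endpoint 'ep_index' / stream 'stream_id' */
	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);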
@@ -504,12 +504,12 @@ struct xhci_container_ctx { | |||
504 | * reserved at the end of the slot context for HC internal use. | 504 | * reserved at the end of the slot context for HC internal use. |
505 | */ | 505 | */ |
506 | struct xhci_slot_ctx { | 506 | struct xhci_slot_ctx { |
507 | u32 dev_info; | 507 | __le32 dev_info; |
508 | u32 dev_info2; | 508 | __le32 dev_info2; |
509 | u32 tt_info; | 509 | __le32 tt_info; |
510 | u32 dev_state; | 510 | __le32 dev_state; |
511 | /* offset 0x10 to 0x1f reserved for HC internal use */ | 511 | /* offset 0x10 to 0x1f reserved for HC internal use */ |
512 | u32 reserved[4]; | 512 | __le32 reserved[4]; |
513 | }; | 513 | }; |
514 | 514 | ||
515 | /* dev_info bitmasks */ | 515 | /* dev_info bitmasks */ |
@@ -580,12 +580,12 @@ struct xhci_slot_ctx { | |||
580 | * reserved at the end of the endpoint context for HC internal use. | 580 | * reserved at the end of the endpoint context for HC internal use. |
581 | */ | 581 | */ |
582 | struct xhci_ep_ctx { | 582 | struct xhci_ep_ctx { |
583 | u32 ep_info; | 583 | __le32 ep_info; |
584 | u32 ep_info2; | 584 | __le32 ep_info2; |
585 | u64 deq; | 585 | __le64 deq; |
586 | u32 tx_info; | 586 | __le32 tx_info; |
587 | /* offset 0x14 - 0x1f reserved for HC internal use */ | 587 | /* offset 0x14 - 0x1f reserved for HC internal use */ |
588 | u32 reserved[3]; | 588 | __le32 reserved[3]; |
589 | }; | 589 | }; |
590 | 590 | ||
591 | /* ep_info bitmasks */ | 591 | /* ep_info bitmasks */ |
@@ -660,9 +660,9 @@ struct xhci_ep_ctx { | |||
660 | * @add_context: set the bit of the endpoint context you want to enable | 660 | * @add_context: set the bit of the endpoint context you want to enable |
661 | */ | 661 | */ |
662 | struct xhci_input_control_ctx { | 662 | struct xhci_input_control_ctx { |
663 | u32 drop_flags; | 663 | __le32 drop_flags; |
664 | u32 add_flags; | 664 | __le32 add_flags; |
665 | u32 rsvd2[6]; | 665 | __le32 rsvd2[6]; |
666 | }; | 666 | }; |
667 | 667 | ||
668 | /* Represents everything that is needed to issue a command on the command ring. | 668 | /* Represents everything that is needed to issue a command on the command ring. |
@@ -688,9 +688,9 @@ struct xhci_command { | |||
688 | 688 | ||
689 | struct xhci_stream_ctx { | 689 | struct xhci_stream_ctx { |
690 | /* 64-bit stream ring address, cycle state, and stream type */ | 690 | /* 64-bit stream ring address, cycle state, and stream type */ |
691 | u64 stream_ring; | 691 | __le64 stream_ring; |
692 | /* offset 0x14 - 0x1f reserved for HC internal use */ | 692 | /* offset 0x14 - 0x1f reserved for HC internal use */ |
693 | u32 reserved[2]; | 693 | __le32 reserved[2]; |
694 | }; | 694 | }; |
695 | 695 | ||
696 | /* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */ | 696 | /* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */ |
@@ -803,7 +803,7 @@ struct xhci_virt_device { | |||
803 | */ | 803 | */ |
804 | struct xhci_device_context_array { | 804 | struct xhci_device_context_array { |
805 | /* 64-bit device addresses; we only write 32-bit addresses */ | 805 | /* 64-bit device addresses; we only write 32-bit addresses */ |
806 | u64 dev_context_ptrs[MAX_HC_SLOTS]; | 806 | __le64 dev_context_ptrs[MAX_HC_SLOTS]; |
807 | /* private xHCD pointers */ | 807 | /* private xHCD pointers */ |
808 | dma_addr_t dma; | 808 | dma_addr_t dma; |
809 | }; | 809 | }; |
@@ -816,10 +816,10 @@ struct xhci_device_context_array { | |||
816 | 816 | ||
817 | struct xhci_transfer_event { | 817 | struct xhci_transfer_event { |
818 | /* 64-bit buffer address, or immediate data */ | 818 | /* 64-bit buffer address, or immediate data */ |
819 | u64 buffer; | 819 | __le64 buffer; |
820 | u32 transfer_len; | 820 | __le32 transfer_len; |
821 | /* This field is interpreted differently based on the type of TRB */ | 821 | /* This field is interpreted differently based on the type of TRB */ |
822 | u32 flags; | 822 | __le32 flags; |
823 | }; | 823 | }; |
824 | 824 | ||
825 | /** Transfer Event bit fields **/ | 825 | /** Transfer Event bit fields **/ |
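With the transfer-event fields declared __le32/__le64, the event handler's reads follow the same convert-then-mask pattern. A condensed sketch using the existing GET_COMP_CODE(), TRB_TO_SLOT_ID() and TRB_TO_EP_ID() helpers; illustrative, not a verbatim excerpt of handle_tx_event():

	u64 buffer    = le64_to_cpu(event->buffer);
	u32 status    = le32_to_cpu(event->transfer_len);
	u32 flags     = le32_to_cpu(event->flags);
	int comp_code = GET_COMP_CODE(status);
	int slot_id   = TRB_TO_SLOT_ID(flags);
	int ep_index  = TRB_TO_EP_ID(flags) - 1;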
@@ -881,7 +881,9 @@ struct xhci_transfer_event { | |||
881 | #define COMP_STOP_INVAL 27 | 881 | #define COMP_STOP_INVAL 27 |
882 | /* Control Abort Error - Debug Capability - control pipe aborted */ | 882 | /* Control Abort Error - Debug Capability - control pipe aborted */ |
883 | #define COMP_DBG_ABORT 28 | 883 | #define COMP_DBG_ABORT 28 |
884 | /* TRB type 29 and 30 reserved */ | 884 | /* Max Exit Latency Too Large Error */ |
885 | #define COMP_MEL_ERR 29 | ||
886 | /* TRB type 30 reserved */ | ||
885 | /* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */ | 887 | /* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */ |
886 | #define COMP_BUFF_OVER 31 | 888 | #define COMP_BUFF_OVER 31 |
887 | /* Event Lost Error - xHC has an "internal event overrun condition" */ | 889 | /* Event Lost Error - xHC has an "internal event overrun condition" */ |
@@ -898,9 +900,9 @@ struct xhci_transfer_event { | |||
898 | 900 | ||
899 | struct xhci_link_trb { | 901 | struct xhci_link_trb { |
900 | /* 64-bit segment pointer*/ | 902 | /* 64-bit segment pointer*/ |
901 | u64 segment_ptr; | 903 | __le64 segment_ptr; |
902 | u32 intr_target; | 904 | __le32 intr_target; |
903 | u32 control; | 905 | __le32 control; |
904 | }; | 906 | }; |
905 | 907 | ||
906 | /* control bitfields */ | 908 | /* control bitfields */ |
@@ -909,9 +911,9 @@ struct xhci_link_trb { | |||
909 | /* Command completion event TRB */ | 911 | /* Command completion event TRB */ |
910 | struct xhci_event_cmd { | 912 | struct xhci_event_cmd { |
911 | /* Pointer to command TRB, or the value passed by the event data trb */ | 913 | /* Pointer to command TRB, or the value passed by the event data trb */ |
912 | u64 cmd_trb; | 914 | __le64 cmd_trb; |
913 | u32 status; | 915 | __le32 status; |
914 | u32 flags; | 916 | __le32 flags; |
915 | }; | 917 | }; |
916 | 918 | ||
917 | /* flags bitmasks */ | 919 | /* flags bitmasks */ |
@@ -943,6 +945,8 @@ struct xhci_event_cmd { | |||
943 | /* Interrupter Target - which MSI-X vector to target the completion event at */ | 945 | /* Interrupter Target - which MSI-X vector to target the completion event at */ |
944 | #define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) | 946 | #define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) |
945 | #define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) | 947 | #define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) |
948 | #define TRB_TBC(p) (((p) & 0x3) << 7) | ||
949 | #define TRB_TLBPC(p) (((p) & 0xf) << 16) | ||
946 | 950 | ||
947 | /* Cycle bit - indicates TRB ownership by HC or HCD */ | 951 | /* Cycle bit - indicates TRB ownership by HC or HCD */ |
948 | #define TRB_CYCLE (1<<0) | 952 | #define TRB_CYCLE (1<<0) |
@@ -962,15 +966,20 @@ struct xhci_event_cmd { | |||
962 | /* The buffer pointer contains immediate data */ | 966 | /* The buffer pointer contains immediate data */ |
963 | #define TRB_IDT (1<<6) | 967 | #define TRB_IDT (1<<6) |
964 | 968 | ||
969 | /* Block Event Interrupt */ | ||
970 | #define TRB_BEI (1<<9) | ||
965 | 971 | ||
966 | /* Control transfer TRB specific fields */ | 972 | /* Control transfer TRB specific fields */ |
967 | #define TRB_DIR_IN (1<<16) | 973 | #define TRB_DIR_IN (1<<16) |
974 | #define TRB_TX_TYPE(p) ((p) << 16) | ||
975 | #define TRB_DATA_OUT 2 | ||
976 | #define TRB_DATA_IN 3 | ||
968 | 977 | ||
969 | /* Isochronous TRB specific fields */ | 978 | /* Isochronous TRB specific fields */ |
970 | #define TRB_SIA (1<<31) | 979 | #define TRB_SIA (1<<31) |
971 | 980 | ||
972 | struct xhci_generic_trb { | 981 | struct xhci_generic_trb { |
973 | u32 field[4]; | 982 | __le32 field[4]; |
974 | }; | 983 | }; |
975 | 984 | ||
976 | union xhci_trb { | 985 | union xhci_trb { |
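The new TRB_TX_TYPE()/TRB_DATA_* definitions cover the xHCI 1.0 requirement that a setup-stage TRB declare whether a data stage follows and in which direction. A sketch of how the control word might be assembled, assuming setup points at the urb's struct usb_ctrlrequest; the real queuing code builds the word in CPU order and converts once when the TRB is written:

	u32 field = TRB_TYPE(TRB_SETUP) | TRB_IDT;

	if (xhci->hci_version == 0x100 && urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_TX_TYPE(TRB_DATA_IN);
		else
			field |= TRB_TX_TYPE(TRB_DATA_OUT);
	}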
@@ -1118,10 +1127,10 @@ struct xhci_ring { | |||
1118 | 1127 | ||
1119 | struct xhci_erst_entry { | 1128 | struct xhci_erst_entry { |
1120 | /* 64-bit event ring segment address */ | 1129 | /* 64-bit event ring segment address */ |
1121 | u64 seg_addr; | 1130 | __le64 seg_addr; |
1122 | u32 seg_size; | 1131 | __le32 seg_size; |
1123 | /* Set to zero */ | 1132 | /* Set to zero */ |
1124 | u32 rsvd; | 1133 | __le32 rsvd; |
1125 | }; | 1134 | }; |
1126 | 1135 | ||
1127 | struct xhci_erst { | 1136 | struct xhci_erst { |
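Event-ring segment table entries live in DMA memory too, so once they are __le64/__le32 the setup code stores them with the usual conversions. Roughly, as a sketch rather than the xhci_mem.c hunk itself, assuming entry and seg point at an xhci_erst_entry and its xhci_segment:

	entry->seg_addr = cpu_to_le64(seg->dma);
	entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	entry->rsvd     = 0;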
@@ -1286,10 +1295,10 @@ struct xhci_hcd { | |||
1286 | /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ | 1295 | /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ |
1287 | u8 *port_array; | 1296 | u8 *port_array; |
1288 | /* Array of pointers to USB 3.0 PORTSC registers */ | 1297 | /* Array of pointers to USB 3.0 PORTSC registers */ |
1289 | u32 __iomem **usb3_ports; | 1298 | __le32 __iomem **usb3_ports; |
1290 | unsigned int num_usb3_ports; | 1299 | unsigned int num_usb3_ports; |
1291 | /* Array of pointers to USB 2.0 PORTSC registers */ | 1300 | /* Array of pointers to USB 2.0 PORTSC registers */ |
1292 | u32 __iomem **usb2_ports; | 1301 | __le32 __iomem **usb2_ports; |
1293 | unsigned int num_usb2_ports; | 1302 | unsigned int num_usb2_ports; |
1294 | }; | 1303 | }; |
1295 | 1304 | ||
@@ -1322,12 +1331,12 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) | |||
1322 | /* TODO: copied from ehci.h - can be refactored? */ | 1331 | /* TODO: copied from ehci.h - can be refactored? */ |
1323 | /* xHCI spec says all registers are little endian */ | 1332 | /* xHCI spec says all registers are little endian */ |
1324 | static inline unsigned int xhci_readl(const struct xhci_hcd *xhci, | 1333 | static inline unsigned int xhci_readl(const struct xhci_hcd *xhci, |
1325 | __u32 __iomem *regs) | 1334 | __le32 __iomem *regs) |
1326 | { | 1335 | { |
1327 | return readl(regs); | 1336 | return readl(regs); |
1328 | } | 1337 | } |
1329 | static inline void xhci_writel(struct xhci_hcd *xhci, | 1338 | static inline void xhci_writel(struct xhci_hcd *xhci, |
1330 | const unsigned int val, __u32 __iomem *regs) | 1339 | const unsigned int val, __le32 __iomem *regs) |
1331 | { | 1340 | { |
1332 | xhci_dbg(xhci, | 1341 | xhci_dbg(xhci, |
1333 | "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", | 1342 | "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", |
@@ -1345,7 +1354,7 @@ static inline void xhci_writel(struct xhci_hcd *xhci, | |||
1345 | * the high dword, and write order is irrelevant. | 1354 | * the high dword, and write order is irrelevant. |
1346 | */ | 1355 | */ |
1347 | static inline u64 xhci_read_64(const struct xhci_hcd *xhci, | 1356 | static inline u64 xhci_read_64(const struct xhci_hcd *xhci, |
1348 | __u64 __iomem *regs) | 1357 | __le64 __iomem *regs) |
1349 | { | 1358 | { |
1350 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | 1359 | __u32 __iomem *ptr = (__u32 __iomem *) regs; |
1351 | u64 val_lo = readl(ptr); | 1360 | u64 val_lo = readl(ptr); |
@@ -1353,7 +1362,7 @@ static inline u64 xhci_read_64(const struct xhci_hcd *xhci, | |||
1353 | return val_lo + (val_hi << 32); | 1362 | return val_lo + (val_hi << 32); |
1354 | } | 1363 | } |
1355 | static inline void xhci_write_64(struct xhci_hcd *xhci, | 1364 | static inline void xhci_write_64(struct xhci_hcd *xhci, |
1356 | const u64 val, __u64 __iomem *regs) | 1365 | const u64 val, __le64 __iomem *regs) |
1357 | { | 1366 | { |
1358 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | 1367 | __u32 __iomem *ptr = (__u32 __iomem *) regs; |
1359 | u32 val_lo = lower_32_bits(val); | 1368 | u32 val_lo = lower_32_bits(val); |