author		Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 16:06:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 16:06:10 -0400
commit		e1f5b94fd0c93c3e27ede88b7ab652d086dc960f
tree		e8de7a132eb88521dd1c19e128eba2d5349bdf4f /drivers/usb/host
parent		6fd03301d76bc439382710e449f58efbb233df1b
parent		1b6ed69f974f6f32c8be0d9a7fc952822eb83b6f
Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (143 commits)
  USB: xhci depends on PCI.
  USB: xhci: Add Makefile, MAINTAINERS, and Kconfig entries.
  USB: xhci: Respect critical sections.
  USB: xHCI: Fix interrupt moderation.
  USB: xhci: Remove packed attribute from structures.
  usb; xhci: Fix TRB offset calculations.
  USB: xhci: replace if-elseif-else with switch-case
  USB: xhci: Make xhci-mem.c include linux/dmapool.h
  USB: xhci: drop spinlock in xhci_urb_enqueue() error path.
  USB: Change names of SuperSpeed ep companion descriptor structs.
  USB: xhci: Avoid compiler reordering in Link TRB giveback.
  USB: xhci: Clean up xhci_irq() function.
  USB: xhci: Avoid global namespace pollution.
  USB: xhci: Fix Link TRB handoff bit twiddling.
  USB: xhci: Fix register write order.
  USB: xhci: fix some compiler warnings in xhci.h
  USB: xhci: fix lots of compiler warnings.
  USB: xhci: use xhci_handle_event instead of handle_event
  USB: xhci: URB cancellation support.
  USB: xhci: Scatter gather list support for bulk transfers.
  ...
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig          |   20
-rw-r--r--  drivers/usb/host/Makefile         |    2
-rw-r--r--  drivers/usb/host/ehci-au1xxx.c    |    1
-rw-r--r--  drivers/usb/host/ehci-fsl.c       |    1
-rw-r--r--  drivers/usb/host/ehci-hcd.c       |   47
-rw-r--r--  drivers/usb/host/ehci-hub.c       |    4
-rw-r--r--  drivers/usb/host/ehci-ixp4xx.c    |    1
-rw-r--r--  drivers/usb/host/ehci-orion.c     |    3
-rw-r--r--  drivers/usb/host/ehci-pci.c       |   27
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c    |    1
-rw-r--r--  drivers/usb/host/ehci-ps3.c       |    1
-rw-r--r--  drivers/usb/host/ehci-q.c         |   19
-rw-r--r--  drivers/usb/host/ehci-sched.c     |    8
-rw-r--r--  drivers/usb/host/ehci.h           |    1
-rw-r--r--  drivers/usb/host/fhci-dbg.c       |    2
-rw-r--r--  drivers/usb/host/hwa-hc.c         |   21
-rw-r--r--  drivers/usb/host/ohci-dbg.c       |   31
-rw-r--r--  drivers/usb/host/ohci-hcd.c       |   38
-rw-r--r--  drivers/usb/host/ohci-pci.c       |   24
-rw-r--r--  drivers/usb/host/pci-quirks.c     |  123
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c   |   62
-rw-r--r--  drivers/usb/host/r8a66597.h       |   38
-rw-r--r--  drivers/usb/host/uhci-hcd.c       |   23
-rw-r--r--  drivers/usb/host/uhci-q.c         |    2
-rw-r--r--  drivers/usb/host/xhci-dbg.c       |  485
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h  |  145
-rw-r--r--  drivers/usb/host/xhci-hcd.c       | 1274
-rw-r--r--  drivers/usb/host/xhci-hub.c       |  308
-rw-r--r--  drivers/usb/host/xhci-mem.c       |  769
-rw-r--r--  drivers/usb/host/xhci-pci.c       |  166
-rw-r--r--  drivers/usb/host/xhci-ring.c      | 1648
-rw-r--r--  drivers/usb/host/xhci.h           | 1157
32 files changed, 6275 insertions(+), 177 deletions(-)
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 845479f7c707..1576a0520adf 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -17,6 +17,26 @@ config USB_C67X00_HCD
 	  To compile this driver as a module, choose M here: the
 	  module will be called c67x00.
 
+config USB_XHCI_HCD
+	tristate "xHCI HCD (USB 3.0) support (EXPERIMENTAL)"
+	depends on USB && PCI && EXPERIMENTAL
+	---help---
+	  The eXtensible Host Controller Interface (xHCI) is standard for USB 3.0
+	  "SuperSpeed" host controller hardware.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called xhci-hcd.
+
+config USB_XHCI_HCD_DEBUGGING
+	bool "Debugging for the xHCI host controller"
+	depends on USB_XHCI_HCD
+	---help---
+	  Say 'Y' to turn on debugging for the xHCI host controller driver.
+	  This will spew debugging output, even in interrupt context.
+	  This should only be used for debugging xHCI driver bugs.
+
+	  If unsure, say N.
+
 config USB_EHCI_HCD
 	tristate "EHCI HCD (USB 2.0) support"
 	depends on USB && USB_ARCH_HAS_EHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f163571e33d8..289d748bb414 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -12,6 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
 ifeq ($(CONFIG_FHCI_DEBUG),y)
 fhci-objs += fhci-dbg.o
 endif
+xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
 
 obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
 
@@ -23,6 +24,7 @@ obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
 obj-$(CONFIG_USB_OHCI_HCD)	+= ohci-hcd.o
 obj-$(CONFIG_USB_UHCI_HCD)	+= uhci-hcd.o
 obj-$(CONFIG_USB_FHCI_HCD)	+= fhci.o
+obj-$(CONFIG_USB_XHCI_HCD)	+= xhci.o
 obj-$(CONFIG_USB_SL811_HCD)	+= sl811-hcd.o
 obj-$(CONFIG_USB_SL811_CS)	+= sl811_cs.o
 obj-$(CONFIG_USB_U132_HCD)	+= u132-hcd.o
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index bf69f4739107..c3a778bd359c 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -97,6 +97,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 01c3da34f678..bf86809c5120 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -309,6 +309,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index c637207a1c80..2b72473544d3 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1024,6 +1024,51 @@ done:
 	return;
 }
 
+static void
+ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct ehci_qh		*qh;
+	int			eptype = usb_endpoint_type(&ep->desc);
+
+	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+		return;
+
+ rescan:
+	spin_lock_irq(&ehci->lock);
+	qh = ep->hcpriv;
+
+	/* For Bulk and Interrupt endpoints we maintain the toggle state
+	 * in the hardware; the toggle bits in udev aren't used at all.
+	 * When an endpoint is reset by usb_clear_halt() we must reset
+	 * the toggle bit in the QH.
+	 */
+	if (qh) {
+		if (!list_empty(&qh->qtd_list)) {
+			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+		} else if (qh->qh_state == QH_STATE_IDLE) {
+			qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+		} else {
+			/* It's not safe to write into the overlay area
+			 * while the QH is active.  Unlink it first and
+			 * wait for the unlink to complete.
+			 */
+			if (qh->qh_state == QH_STATE_LINKED) {
+				if (eptype == USB_ENDPOINT_XFER_BULK) {
+					unlink_async(ehci, qh);
+				} else {
+					intr_deschedule(ehci, qh);
+					(void) qh_schedule(ehci, qh);
+				}
+			}
+			spin_unlock_irq(&ehci->lock);
+			schedule_timeout_uninterruptible(1);
+			goto rescan;
+		}
+	}
+	spin_unlock_irq(&ehci->lock);
+}
+
 static int ehci_get_frame (struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
@@ -1097,7 +1142,7 @@ static int __init ehci_hcd_init(void)
 		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
 
 #ifdef DEBUG
-	ehci_debug_root = debugfs_create_dir("ehci", NULL);
+	ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
 	if (!ehci_debug_root) {
 		retval = -ENOENT;
 		goto err_debug;
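
A note on the ehci_endpoint_reset() hook added above: for bulk and interrupt endpoints EHCI now keeps the data toggle only in the QH, so when a class driver clears a STALL the HCD itself must clear the QH's toggle bit. A hedged sketch of the calling side (the wrapper name is invented; usb_clear_halt() and usb_rcvbulkpipe() are the real core APIs):

	/* usb_clear_halt() sends CLEAR_FEATURE(ENDPOINT_HALT) to the device,
	 * then invokes the HCD's ->endpoint_reset() method -- for EHCI, the
	 * new ehci_endpoint_reset() -- so host and device toggles agree. */
	static int example_recover_bulk_in(struct usb_device *udev,
					   struct usb_host_endpoint *ep)
	{
		unsigned int pipe = usb_rcvbulkpipe(udev,
					ep->desc.bEndpointAddress);

		/* process context only: usb_clear_halt() may sleep */
		return usb_clear_halt(udev, pipe);
	}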
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 97a53a48a3d8..f46ad27c9a90 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -391,7 +391,7 @@ static inline void create_companion_file(struct ehci_hcd *ehci)
 
 	/* with integrated TT there is no companion! */
 	if (!ehci_is_TDI(ehci))
-		i = device_create_file(ehci_to_hcd(ehci)->self.dev,
+		i = device_create_file(ehci_to_hcd(ehci)->self.controller,
 				       &dev_attr_companion);
 }
 
@@ -399,7 +399,7 @@ static inline void remove_companion_file(struct ehci_hcd *ehci)
 {
 	/* with integrated TT there is no companion! */
 	if (!ehci_is_TDI(ehci))
-		device_remove_file(ehci_to_hcd(ehci)->self.dev,
+		device_remove_file(ehci_to_hcd(ehci)->self.controller,
 				   &dev_attr_companion);
 }
 
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index 9c32063a0c2f..a44bb4a94954 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -51,6 +51,7 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 	.get_frame_number	= ehci_get_frame,
 	.hub_status_data	= ehci_hub_status_data,
 	.hub_control		= ehci_hub_control,
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 9d487908012e..770dd9aba62a 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -149,6 +149,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
@@ -187,7 +188,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
 	}
 }
 
-static int __init ehci_orion_drv_probe(struct platform_device *pdev)
+static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
 {
 	struct orion_ehci_data *pd = pdev->dev.platform_data;
 	struct resource *res;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 5aa8bce90e1f..f3683e1da161 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -268,7 +268,7 @@ done:
  * Also they depend on separate root hub suspend/resume.
  */
 
-static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int ehci_pci_suspend(struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
 	unsigned long		flags;
@@ -293,12 +293,6 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
-	/* make sure snapshot being resumed re-enumerates everything */
-	if (message.event == PM_EVENT_PRETHAW) {
-		ehci_halt(ehci);
-		ehci_reset(ehci);
-	}
-
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
  bail:
 	spin_unlock_irqrestore (&ehci->lock, flags);
@@ -309,7 +303,7 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
 	return rc;
 }
 
-static int ehci_pci_resume(struct usb_hcd *hcd)
+static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
@@ -322,10 +316,12 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
 	/* Mark hardware accessible again as we are out of D3 state by now */
 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 
-	/* If CF is still set, we maintained PCI Vaux power.
+	/* If CF is still set and we aren't resuming from hibernation
+	 * then we maintained PCI Vaux power.
 	 * Just undo the effect of ehci_pci_suspend().
 	 */
-	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
+	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
+				!hibernated) {
 		int	mask = INTR_MASK;
 
 		if (!hcd->self.root_hub->do_remote_wakeup)
@@ -335,7 +331,6 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
 		return 0;
 	}
 
-	ehci_dbg(ehci, "lost power, restarting\n");
 	usb_root_hub_lost_power(hcd->self.root_hub);
 
 	/* Else reset, to cope with power loss or flush-to-storage
@@ -393,6 +388,7 @@ static const struct hc_driver ehci_pci_hc_driver = {
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
@@ -429,10 +425,11 @@ static struct pci_driver ehci_pci_driver = {
 
 	.probe =	usb_hcd_pci_probe,
 	.remove =	usb_hcd_pci_remove,
+	.shutdown =	usb_hcd_pci_shutdown,
 
-#ifdef	CONFIG_PM
-	.suspend =	usb_hcd_pci_suspend,
-	.resume =	usb_hcd_pci_resume,
+#ifdef CONFIG_PM_SLEEP
+	.driver =	{
+		.pm =	&usb_hcd_pci_pm_ops
+	},
 #endif
-	.shutdown =	usb_hcd_pci_shutdown,
 };
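
The pci_driver hunk above is part of a series-wide move from the legacy .suspend/.resume members to a struct dev_pm_ops shared by all PCI HCDs (usb_hcd_pci_pm_ops, defined in the USB core). A minimal sketch of the pattern, with invented names; the distinct hibernation callbacks are what let the HCD resume methods grow their new "hibernated" argument:

	static int example_hcd_suspend(struct device *dev)
	{
		return 0;	/* quiesce the controller */
	}

	static int example_hcd_resume(struct device *dev)
	{
		return 0;	/* hardware state preserved across sleep */
	}

	static int example_hcd_restore(struct device *dev)
	{
		return 0;	/* resume from disk: assume state was lost */
	}

	static const struct dev_pm_ops example_hcd_pm_ops = {
		.suspend	= example_hcd_suspend,
		.resume		= example_hcd_resume,
		.freeze		= example_hcd_suspend,
		.thaw		= example_hcd_resume,
		.poweroff	= example_hcd_suspend,
		.restore	= example_hcd_restore,	/* hibernation */
	};

	/* wired up as in the hunk above:
	 *	.driver = { .pm = &example_hcd_pm_ops },
	 */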
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ef732b704f53..fbd272288fc2 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -61,6 +61,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index bb870b8f81bc..eecd2a0680a2 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -65,6 +65,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 	.get_frame_number	= ehci_get_frame,
 	.hub_status_data	= ehci_hub_status_data,
 	.hub_control		= ehci_hub_control,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 1976b1b3778c..3192f683f807 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -93,22 +93,6 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
 	qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
 	qh->hw_alt_next = EHCI_LIST_END(ehci);
 
-	/* Except for control endpoints, we make hardware maintain data
-	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
-	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
-	 * ever clear it.
-	 */
-	if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
-		unsigned	is_out, epnum;
-
-		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
-		epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
-		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
-			qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
-			usb_settoggle (qh->dev, epnum, is_out, 1);
-		}
-	}
-
 	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
 	wmb ();
 	qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
@@ -850,7 +834,6 @@ done:
 	qh->qh_state = QH_STATE_IDLE;
 	qh->hw_info1 = cpu_to_hc32(ehci, info1);
 	qh->hw_info2 = cpu_to_hc32(ehci, info2);
-	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
 	qh_refresh (ehci, qh);
 	return qh;
 }
@@ -881,7 +864,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 		}
 	}
 
-	/* clear halt and/or toggle; and maybe recover from silicon quirk */
+	/* clear halt and maybe recover from silicon quirk */
 	if (qh->qh_state == QH_STATE_IDLE)
 		qh_refresh (ehci, qh);
 
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 556d0ec0c1f8..9d1babc7ff65 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -760,8 +760,10 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
 	if (status) {
 		/* "normal" case, uframing flexible except with splits */
 		if (qh->period) {
-			frame = qh->period - 1;
-			do {
+			int		i;
+
+			for (i = qh->period; status && i > 0; --i) {
+				frame = ++ehci->random_frame % qh->period;
 				for (uframe = 0; uframe < 8; uframe++) {
 					status = check_intr_schedule (ehci,
 							frame, uframe, qh,
@@ -769,7 +771,7 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
 					if (status == 0)
 						break;
 				}
-			} while (status && frame--);
+			}
 
 		/* qh->period == 0 means every uframe */
 		} else {
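
Why the scheduler hunks above drop the descending frame scan: always starting at qh->period - 1 lands every interrupt QH in the same frames, clustering periodic bandwidth. Bumping the new ehci->random_frame counter modulo the period probes frames in a rotating order instead. A stand-alone model of the search (plain C, invented names, not the kernel code):

	/* Return the first frame in [0, period) with room, probing in a
	 * rotating order driven by a persistent counter; -1 if none fits. */
	static int pick_periodic_frame(unsigned *rotor, unsigned period,
				       int (*frame_has_room)(unsigned frame))
	{
		unsigned tries;

		for (tries = 0; tries < period; tries++) {
			unsigned frame = ++*rotor % period;

			if (frame_has_room(frame))
				return (int) frame;
		}
		return -1;
	}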
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6cff195e1a36..90ad3395bb21 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -116,6 +116,7 @@ struct ehci_hcd { /* one per controller */
 	struct timer_list	watchdog;
 	unsigned long		actions;
 	unsigned		stamp;
+	unsigned		random_frame;
 	unsigned long		next_statechange;
 	u32			command;
 
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index ea8a4255c5da..e799f86dab11 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -108,7 +108,7 @@ void fhci_dfs_create(struct fhci_hcd *fhci)
 {
 	struct device *dev = fhci_to_hcd(fhci)->self.controller;
 
-	fhci->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+	fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
 	if (!fhci->dfs_root) {
 		WARN_ON(1);
 		return;
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index cbf30e515f29..88b03214622b 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -172,25 +172,6 @@ error_cluster_id_get:
 
 }
 
-static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
-{
-	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-	dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
-		usb_hcd, hwahc, *(unsigned long *) &msg);
-	return -ENOSYS;
-}
-
-static int hwahc_op_resume(struct usb_hcd *usb_hcd)
-{
-	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-	struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
-	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
-		usb_hcd, hwahc);
-	return -ENOSYS;
-}
-
 /*
  * No need to abort pipes, as when this is called, all the children
  * has been disconnected and that has done it [through
@@ -598,8 +579,6 @@ static struct hc_driver hwahc_hc_driver = {
 	.flags = HCD_USB2,		/* FIXME */
 	.reset = hwahc_op_reset,
 	.start = hwahc_op_start,
-	.pci_suspend = hwahc_op_suspend,
-	.pci_resume = hwahc_op_resume,
 	.stop = hwahc_op_stop,
 	.get_frame_number = hwahc_op_get_frame_number,
 	.urb_enqueue = hwahc_op_urb_enqueue,
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d3269656aa4d..811f5dfdc582 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -431,7 +431,7 @@ static struct dentry *ohci_debug_root;
 
 struct debug_buffer {
 	ssize_t (*fill_func)(struct debug_buffer *);	/* fill method */
-	struct device *dev;
+	struct ohci_hcd *ohci;
 	struct mutex mutex;	/* protect filling of buffer */
 	size_t count;		/* number of characters filled into buffer */
 	char *page;
@@ -505,15 +505,11 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
 
 static ssize_t fill_async_buffer(struct debug_buffer *buf)
 {
-	struct usb_bus *bus;
-	struct usb_hcd *hcd;
 	struct ohci_hcd *ohci;
 	size_t temp;
 	unsigned long flags;
 
-	bus = dev_get_drvdata(buf->dev);
-	hcd = bus_to_hcd(bus);
-	ohci = hcd_to_ohci(hcd);
+	ohci = buf->ohci;
 
 	/* display control and bulk lists together, for simplicity */
 	spin_lock_irqsave (&ohci->lock, flags);
@@ -529,8 +525,6 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
 
 static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
 {
-	struct usb_bus *bus;
-	struct usb_hcd *hcd;
 	struct ohci_hcd *ohci;
 	struct ed **seen, *ed;
 	unsigned long flags;
@@ -542,9 +536,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
 		return 0;
 	seen_count = 0;
 
-	bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
-	hcd = bus_to_hcd(bus);
-	ohci = hcd_to_ohci(hcd);
+	ohci = buf->ohci;
 	next = buf->page;
 	size = PAGE_SIZE;
 
@@ -626,7 +618,6 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
 
 static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 {
-	struct usb_bus *bus;
 	struct usb_hcd *hcd;
 	struct ohci_hcd *ohci;
 	struct ohci_regs __iomem *regs;
@@ -635,9 +626,8 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 	char *next;
 	u32 rdata;
 
-	bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
-	hcd = bus_to_hcd(bus);
-	ohci = hcd_to_ohci(hcd);
+	ohci = buf->ohci;
+	hcd = ohci_to_hcd(ohci);
 	regs = ohci->regs;
 	next = buf->page;
 	size = PAGE_SIZE;
@@ -710,7 +700,7 @@ done:
 	return PAGE_SIZE - size;
 }
 
-static struct debug_buffer *alloc_buffer(struct device *dev,
+static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
 				ssize_t (*fill_func)(struct debug_buffer *))
 {
 	struct debug_buffer *buf;
@@ -718,7 +708,7 @@ static struct debug_buffer *alloc_buffer(struct device *dev,
 	buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
 
 	if (buf) {
-		buf->dev = dev;
+		buf->ohci = ohci;
 		buf->fill_func = fill_func;
 		mutex_init(&buf->mutex);
 	}
@@ -810,26 +800,25 @@ static int debug_registers_open(struct inode *inode, struct file *file)
 static inline void create_debug_files (struct ohci_hcd *ohci)
 {
 	struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
-	struct device *dev = bus->dev;
 
 	ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root);
 	if (!ohci->debug_dir)
 		goto dir_error;
 
 	ohci->debug_async = debugfs_create_file("async", S_IRUGO,
-						ohci->debug_dir, dev,
+						ohci->debug_dir, ohci,
 						&debug_async_fops);
 	if (!ohci->debug_async)
 		goto async_error;
 
 	ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
-						ohci->debug_dir, dev,
+						ohci->debug_dir, ohci,
 						&debug_periodic_fops);
 	if (!ohci->debug_periodic)
 		goto periodic_error;
 
 	ohci->debug_registers = debugfs_create_file("registers", S_IRUGO,
-						ohci->debug_dir, dev,
+						ohci->debug_dir, ohci,
 						&debug_registers_fops);
 	if (!ohci->debug_registers)
 		goto registers_error;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 25db704f3a2a..58151687d351 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -571,7 +571,7 @@ static int ohci_init (struct ohci_hcd *ohci)
  */
 static int ohci_run (struct ohci_hcd *ohci)
 {
-	u32			mask, temp;
+	u32			mask, val;
 	int			first = ohci->fminterval == 0;
 	struct usb_hcd		*hcd = ohci_to_hcd(ohci);
 
@@ -580,8 +580,8 @@ static int ohci_run (struct ohci_hcd *ohci)
 	/* boot firmware should have set this up (5.1.1.3.1) */
 	if (first) {
 
-		temp = ohci_readl (ohci, &ohci->regs->fminterval);
-		ohci->fminterval = temp & 0x3fff;
+		val = ohci_readl (ohci, &ohci->regs->fminterval);
+		ohci->fminterval = val & 0x3fff;
 		if (ohci->fminterval != FI)
 			ohci_dbg (ohci, "fminterval delta %d\n",
 				ohci->fminterval - FI);
@@ -600,25 +600,25 @@ static int ohci_run (struct ohci_hcd *ohci)
 
 	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
 	case OHCI_USB_OPER:
-		temp = 0;
+		val = 0;
 		break;
 	case OHCI_USB_SUSPEND:
 	case OHCI_USB_RESUME:
 		ohci->hc_control &= OHCI_CTRL_RWC;
 		ohci->hc_control |= OHCI_USB_RESUME;
-		temp = 10 /* msec wait */;
+		val = 10 /* msec wait */;
 		break;
 	// case OHCI_USB_RESET:
 	default:
 		ohci->hc_control &= OHCI_CTRL_RWC;
 		ohci->hc_control |= OHCI_USB_RESET;
-		temp = 50 /* msec wait */;
+		val = 50 /* msec wait */;
 		break;
 	}
 	ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
 	// flush the writes
 	(void) ohci_readl (ohci, &ohci->regs->control);
-	msleep(temp);
+	msleep(val);
 
 	memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
 
@@ -628,9 +628,9 @@ static int ohci_run (struct ohci_hcd *ohci)
 retry:
 	/* HC Reset requires max 10 us delay */
 	ohci_writel (ohci, OHCI_HCR,  &ohci->regs->cmdstatus);
-	temp = 30;	/* ... allow extra time */
+	val = 30;	/* ... allow extra time */
 	while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
-		if (--temp == 0) {
+		if (--val == 0) {
 			spin_unlock_irq (&ohci->lock);
 			ohci_err (ohci, "USB HC reset timed out!\n");
 			return -1;
@@ -699,23 +699,23 @@ retry:
 	ohci_writel (ohci, mask, &ohci->regs->intrenable);
 
 	/* handle root hub init quirks ... */
-	temp = roothub_a (ohci);
-	temp &= ~(RH_A_PSM | RH_A_OCPM);
+	val = roothub_a (ohci);
+	val &= ~(RH_A_PSM | RH_A_OCPM);
 	if (ohci->flags & OHCI_QUIRK_SUPERIO) {
 		/* NSC 87560 and maybe others */
-		temp |= RH_A_NOCP;
-		temp &= ~(RH_A_POTPGT | RH_A_NPS);
-		ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+		val |= RH_A_NOCP;
+		val &= ~(RH_A_POTPGT | RH_A_NPS);
+		ohci_writel (ohci, val, &ohci->regs->roothub.a);
 	} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
 			(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
 		/* hub power always on; required for AMD-756 and some
 		 * Mac platforms.  ganged overcurrent reporting, if any.
 		 */
-		temp |= RH_A_NPS;
-		ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+		val |= RH_A_NPS;
+		ohci_writel (ohci, val, &ohci->regs->roothub.a);
 	}
 	ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
-	ohci_writel (ohci, (temp & RH_A_NPS) ? 0 : RH_B_PPCM,
+	ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
 		&ohci->regs->roothub.b);
 	// flush those writes
 	(void) ohci_readl (ohci, &ohci->regs->control);
@@ -724,7 +724,7 @@ retry:
 	spin_unlock_irq (&ohci->lock);
 
 	// POTPGT delay is bits 24-31, in 2 ms units.
-	mdelay ((temp >> 23) & 0x1fe);
+	mdelay ((val >> 23) & 0x1fe);
 	hcd->state = HC_STATE_RUNNING;
 
 	if (quirk_zfmicro(ohci)) {
@@ -1105,7 +1105,7 @@ static int __init ohci_hcd_mod_init(void)
 	set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
 
 #ifdef DEBUG
-	ohci_debug_root = debugfs_create_dir("ohci", NULL);
+	ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
 	if (!ohci_debug_root) {
 		retval = -ENOENT;
 		goto error_debug;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index f9961b4c0da3..d2ba04dd785e 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -372,7 +372,7 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
 
 #ifdef	CONFIG_PM
 
-static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
+static int ohci_pci_suspend(struct usb_hcd *hcd)
 {
 	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
 	unsigned long	flags;
@@ -394,10 +394,6 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
 	ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
 	(void)ohci_readl(ohci, &ohci->regs->intrdisable);
 
-	/* make sure snapshot being resumed re-enumerates everything */
-	if (message.event == PM_EVENT_PRETHAW)
-		ohci_usb_reset(ohci);
-
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
  bail:
 	spin_unlock_irqrestore (&ohci->lock, flags);
@@ -406,9 +402,14 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
 }
 
 
-static int ohci_pci_resume (struct usb_hcd *hcd)
+static int ohci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+	/* Make sure resume from hibernation re-enumerates everything */
+	if (hibernated)
+		ohci_usb_reset(hcd_to_ohci(hcd));
+
 	ohci_finish_controller_resume(hcd);
 	return 0;
 }
@@ -484,12 +485,11 @@ static struct pci_driver ohci_pci_driver = {
 
 	.probe =	usb_hcd_pci_probe,
 	.remove =	usb_hcd_pci_remove,
+	.shutdown =	usb_hcd_pci_shutdown,
 
-#ifdef	CONFIG_PM
-	.suspend =	usb_hcd_pci_suspend,
-	.resume =	usb_hcd_pci_resume,
+#ifdef CONFIG_PM_SLEEP
+	.driver =	{
+		.pm =	&usb_hcd_pci_pm_ops
+	},
 #endif
-
-	.shutdown =	usb_hcd_pci_shutdown,
 };
-
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 033c2846ce59..83b5f9cea85a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/acpi.h>
 #include "pci-quirks.h"
+#include "xhci-ext-caps.h"
 
 
 #define UHCI_USBLEGSUP		0xc0		/* legacy support */
@@ -341,7 +342,127 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
 	return;
 }
 
+/*
+ * handshake - spin reading a register until handshake completes
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @wait_usec: timeout in microseconds
+ * @delay_usec: delay in microseconds to wait between polling
+ *
+ * Polls a register every delay_usec microseconds.
+ * Returns 0 when the mask bits have the value done.
+ * Returns -ETIMEDOUT if this condition is not true after
+ * wait_usec microseconds have passed.
+ */
+static int handshake(void __iomem *ptr, u32 mask, u32 done,
+		int wait_usec, int delay_usec)
+{
+	u32 result;
+
+	do {
+		result = readl(ptr);
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(delay_usec);
+		wait_usec -= delay_usec;
+	} while (wait_usec > 0);
+	return -ETIMEDOUT;
+}
+
+/**
+ * PCI Quirks for xHCI.
+ *
+ * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
+ * It signals to the BIOS that the OS wants control of the host controller,
+ * and then waits 5 seconds for the BIOS to hand over control.
+ * If we timeout, assume the BIOS is broken and take control anyway.
+ */
+static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+{
+	void __iomem *base;
+	int ext_cap_offset;
+	void __iomem *op_reg_base;
+	u32 val;
+	int timeout;
+
+	if (!mmio_resource_enabled(pdev, 0))
+		return;
+
+	base = ioremap_nocache(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+	if (base == NULL)
+		return;
 
+	/*
+	 * Find the Legacy Support Capability register -
+	 * this is optional for xHCI host controllers.
+	 */
+	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
+	do {
+		if (!ext_cap_offset)
+			/* We've reached the end of the extended capabilities */
+			goto hc_init;
+		val = readl(base + ext_cap_offset);
+		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
+			break;
+		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
+	} while (1);
+
+	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
+	if (val & XHCI_HC_BIOS_OWNED) {
+		writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset);
+
+		/* Wait for 5 seconds with 10 microsecond polling interval */
+		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+				0, 5000, 10);
+
+		/* Assume a buggy BIOS and take HC ownership anyway */
+		if (timeout) {
+			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
+					" (BIOS bug ?) %08x\n", val);
+			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
+		}
+	}
+
+	/* Disable any BIOS SMIs */
+	writel(XHCI_LEGACY_DISABLE_SMI,
+			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
+hc_init:
+	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+	/* Wait for the host controller to be ready before writing any
+	 * operational or runtime registers.  Wait 5 seconds and no more.
+	 */
+	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
+			5000, 10);
+	/* Assume a buggy HC and start HC initialization anyway */
+	if (timeout) {
+		val = readl(op_reg_base + XHCI_STS_OFFSET);
+		dev_warn(&pdev->dev,
+				"xHCI HW not ready after 5 sec (HC bug?) "
+				"status = 0x%x\n", val);
+	}
+
+	/* Send the halt and disable interrupts command */
+	val = readl(op_reg_base + XHCI_CMD_OFFSET);
+	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
+	writel(val, op_reg_base + XHCI_CMD_OFFSET);
+
+	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
+	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
+			XHCI_MAX_HALT_USEC, 125);
+	if (timeout) {
+		val = readl(op_reg_base + XHCI_STS_OFFSET);
+		dev_warn(&pdev->dev,
+				"xHCI HW did not halt within %d usec "
+				"status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
+	}
+
+	iounmap(base);
+}
 
 static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 {
@@ -351,5 +472,7 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 		quirk_usb_handoff_ohci(pdev);
 	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
 		quirk_usb_disable_ehci(pdev);
+	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
+		quirk_usb_handoff_xhci(pdev);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
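
The handshake() helper introduced above is the stock poll-an-MMIO-register idiom: read, mask, compare, delay, decrement the time budget. A hedged, self-contained model (no kernel headers, invented name) that makes the timeout arithmetic explicit:

	/* Model of handshake(): 0 once (*reg & mask) == done, else -1 after
	 * roughly wait_usec microseconds of polling every delay_usec. */
	static int poll_mmio_bits(volatile unsigned int *reg, unsigned int mask,
				  unsigned int done, int wait_usec, int delay_usec)
	{
		do {
			if ((*reg & mask) == done)
				return 0;
			/* the kernel version udelay()s here */
			wait_usec -= delay_usec;
		} while (wait_usec > 0);
		return -1;	/* the kernel version returns -ETIMEDOUT */
	}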
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index f1626e58c141..56976cc0352a 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -46,31 +46,10 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Yoshihiro Shimoda");
 MODULE_ALIAS("platform:r8a66597_hcd");
 
-#define DRIVER_VERSION	"10 Apr 2008"
+#define DRIVER_VERSION	"2009-05-26"
 
 static const char hcd_name[] = "r8a66597_hcd";
 
-/* module parameters */
-#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597)
-static unsigned short clock = XTAL12;
-module_param(clock, ushort, 0644);
-MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
-		"(default=0)");
-#endif
-
-static unsigned short vif = LDRV;
-module_param(vif, ushort, 0644);
-MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)");
-
-static unsigned short endian;
-module_param(endian, ushort, 0644);
-MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
-
-static unsigned short irq_sense = 0xff;
-module_param(irq_sense, ushort, 0644);
-MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 "
-		"(default=32)");
-
 static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
 static int r8a66597_get_frame(struct usb_hcd *hcd);
 
@@ -136,7 +115,8 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
 		}
 	} while ((tmp & USBE) != USBE);
 	r8a66597_bclr(r8a66597, USBE, SYSCFG0);
-	r8a66597_mdfy(r8a66597, clock, XTAL, SYSCFG0);
+	r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL,
+		      SYSCFG0);
 
 	i = 0;
 	r8a66597_bset(r8a66597, XCKE, SYSCFG0);
@@ -203,6 +183,9 @@ static void r8a66597_disable_port(struct r8a66597 *r8a66597, int port)
 static int enable_controller(struct r8a66597 *r8a66597)
 {
 	int ret, port;
+	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
+	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
+	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
 
 	ret = r8a66597_clock_enable(r8a66597);
 	if (ret < 0)
@@ -2373,7 +2356,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int __init r8a66597_probe(struct platform_device *pdev)
+static int __devinit r8a66597_probe(struct platform_device *pdev)
 {
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
 	char clk_name[8];
@@ -2418,6 +2401,12 @@ static int __init r8a66597_probe(struct platform_device *pdev)
 		goto clean_up;
 	}
 
+	if (pdev->dev.platform_data == NULL) {
+		dev_err(&pdev->dev, "no platform data\n");
+		ret = -ENODEV;
+		goto clean_up;
+	}
+
 	/* initialize hcd */
 	hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
 	if (!hcd) {
@@ -2428,6 +2417,8 @@ static int __init r8a66597_probe(struct platform_device *pdev)
 	r8a66597 = hcd_to_r8a66597(hcd);
 	memset(r8a66597, 0, sizeof(struct r8a66597));
 	dev_set_drvdata(&pdev->dev, r8a66597);
+	r8a66597->pdata = pdev->dev.platform_data;
+	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
 
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
 	snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
@@ -2458,29 +2449,6 @@ static int __init r8a66597_probe(struct platform_device *pdev)
 
 	hcd->rsrc_start = res->start;
 
-	/* irq_sense setting on cmdline takes precedence over resource
-	 * settings, so the introduction of irqflags in IRQ resourse
-	 * won't disturb existing setups */
-	switch (irq_sense) {
-	case INTL:
-		irq_trigger = IRQF_TRIGGER_LOW;
-		break;
-	case 0:
-		irq_trigger = IRQF_TRIGGER_FALLING;
-		break;
-	case 0xff:
-		if (irq_trigger)
-			irq_sense = (irq_trigger & IRQF_TRIGGER_LOW) ?
-				    INTL : 0;
-		else {
-			irq_sense = INTL;
-			irq_trigger = IRQF_TRIGGER_LOW;
-		}
-		break;
-	default:
-		dev_err(&pdev->dev, "Unknown irq_sense value.\n");
-	}
-
 	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to add hcd\n");
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index f49208f1bb74..d72680b433f9 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -30,6 +30,8 @@
 #include <linux/clk.h>
 #endif
 
+#include <linux/usb/r8a66597.h>
+
 #define SYSCFG0		0x00
 #define SYSCFG1		0x02
 #define SYSSTS0		0x04
@@ -488,6 +490,7 @@ struct r8a66597 {
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
 	struct clk *clk;
 #endif
+	struct r8a66597_platdata	*pdata;
 	struct r8a66597_device	device0;
 	struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB];
 	struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE];
@@ -506,6 +509,7 @@ struct r8a66597 {
 	unsigned long child_connect_map[4];
 
 	unsigned bus_suspended:1;
+	unsigned irq_sense_low:1;
 };
 
 static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd)
@@ -660,10 +664,36 @@ static inline void r8a66597_port_power(struct r8a66597 *r8a66597, int port,
 {
 	unsigned long dvstctr_reg = get_dvstctr_reg(port);
 
-	if (power)
-		r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
-	else
-		r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+	if (r8a66597->pdata->port_power) {
+		r8a66597->pdata->port_power(port, power);
+	} else {
+		if (power)
+			r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
+		else
+			r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+	}
+}
+
+static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
+{
+	u16 clock = 0;
+
+	switch (pdata->xtal) {
+	case R8A66597_PLATDATA_XTAL_12MHZ:
+		clock = XTAL12;
+		break;
+	case R8A66597_PLATDATA_XTAL_24MHZ:
+		clock = XTAL24;
+		break;
+	case R8A66597_PLATDATA_XTAL_48MHZ:
+		clock = XTAL48;
+		break;
+	default:
+		printk(KERN_ERR "r8a66597: platdata clock is wrong.\n");
+		break;
+	}
+
+	return clock;
 }
 
 #define get_pipectr_addr(pipenum)	(PIPE1CTR + (pipenum - 1) * 2)
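
Board-side counterpart to the r8a66597 conversion: clock, VIF, endian, and VBUS control move from module parameters into struct r8a66597_platdata (the new <linux/usb/r8a66597.h>), while IRQ polarity now comes from the IRQ resource's trigger flags (see the irq_sense_low assignment in the probe hunk). A hedged sketch of a board file using only the fields and constants visible in this diff; verify against the real header before copying:

	static struct r8a66597_platdata example_usb_pdata = {
		.xtal	= R8A66597_PLATDATA_XTAL_12MHZ,	/* 12 MHz crystal */
		.vif	= 1,				/* 3.3 V VIF */
		.endian	= 0,				/* little-endian bus */
		/* .port_power = example_port_power,	optional VBUS hook */
	};

	static struct platform_device example_usb_device = {
		.name	= "r8a66597_hcd",	/* matches MODULE_ALIAS above */
		.id	= 0,
		.dev	= {
			.platform_data = &example_usb_pdata,
		},
	};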
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cf5e4cf7ea42..274751b4409c 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -769,7 +769,7 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
 	return rc;
 }
 
-static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int uhci_pci_suspend(struct usb_hcd *hcd)
 {
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 	int rc = 0;
@@ -795,10 +795,6 @@ static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
 
 	/* FIXME: Enable non-PME# remote wakeup? */
 
-	/* make sure snapshot being resumed re-enumerates everything */
-	if (message.event == PM_EVENT_PRETHAW)
-		uhci_hc_died(uhci);
-
 done_okay:
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 done:
@@ -806,7 +802,7 @@ done:
 	return rc;
 }
 
-static int uhci_pci_resume(struct usb_hcd *hcd)
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 
@@ -820,6 +816,10 @@ static int uhci_pci_resume(struct usb_hcd *hcd)
 
 	spin_lock_irq(&uhci->lock);
 
+	/* Make sure resume from hibernation re-enumerates everything */
+	if (hibernated)
+		uhci_hc_died(uhci);
+
 	/* FIXME: Disable non-PME# remote wakeup? */
 
 	/* The firmware or a boot kernel may have changed the controller
@@ -940,10 +940,11 @@ static struct pci_driver uhci_pci_driver = {
 	.remove =	usb_hcd_pci_remove,
 	.shutdown =	uhci_shutdown,
 
-#ifdef	CONFIG_PM
-	.suspend =	usb_hcd_pci_suspend,
-	.resume =	usb_hcd_pci_resume,
-#endif	/* PM */
+#ifdef CONFIG_PM_SLEEP
+	.driver = {
+		.pm =	&usb_hcd_pci_pm_ops
+	},
+#endif
 };
 
949static int __init uhci_hcd_init(void) 950static int __init uhci_hcd_init(void)
@@ -961,7 +962,7 @@ static int __init uhci_hcd_init(void)
961 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL); 962 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
962 if (!errbuf) 963 if (!errbuf)
963 goto errbuf_failed; 964 goto errbuf_failed;
964 uhci_debugfs_root = debugfs_create_dir("uhci", NULL); 965 uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
965 if (!uhci_debugfs_root) 966 if (!uhci_debugfs_root)
966 goto debug_failed; 967 goto debug_failed;
967 } 968 }
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 3e5807d14ffb..64e57bfe236b 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -260,7 +260,7 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 	INIT_LIST_HEAD(&qh->node);
 
 	if (udev) {		/* Normal QH */
-		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+		qh->type = usb_endpoint_type(&hep->desc);
 		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
 			qh->dummy_td = uhci_alloc_td(uhci);
 			if (!qh->dummy_td) {
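
For reference, usb_endpoint_type() from <linux/usb.h> is exactly the open-coded mask this hunk removes:

	static inline int usb_endpoint_type(
			const struct usb_endpoint_descriptor *epd)
	{
		return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	}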
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
new file mode 100644
index 000000000000..2501c571f855
--- /dev/null
+++ b/drivers/usb/host/xhci-dbg.c
@@ -0,0 +1,485 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include "xhci.h"
24
25#define XHCI_INIT_VALUE 0x0
26
27/* Add verbose debugging later, just print everything for now */
28
29void xhci_dbg_regs(struct xhci_hcd *xhci)
30{
31 u32 temp;
32
33 xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
34 xhci->cap_regs);
35 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
36 xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
37 &xhci->cap_regs->hc_capbase, temp);
38 xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
39 (unsigned int) HC_LENGTH(temp));
40#if 0
41 xhci_dbg(xhci, "// HCIVERSION: 0x%x\n",
42 (unsigned int) HC_VERSION(temp));
43#endif
44
45 xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
46
47 temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
48 xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
49 &xhci->cap_regs->run_regs_off,
50 (unsigned int) temp & RTSOFF_MASK);
51 xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
52
53 temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
54 xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
55 xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
56}
57
58static void xhci_print_cap_regs(struct xhci_hcd *xhci)
59{
60 u32 temp;
61
62 xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
63
64 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
65 xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
66 (unsigned int) temp);
67 xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
68 (unsigned int) HC_LENGTH(temp));
69 xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
70 (unsigned int) HC_VERSION(temp));
71
72 temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
73 xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
74 (unsigned int) temp);
75 xhci_dbg(xhci, " Max device slots: %u\n",
76 (unsigned int) HCS_MAX_SLOTS(temp));
77 xhci_dbg(xhci, " Max interrupters: %u\n",
78 (unsigned int) HCS_MAX_INTRS(temp));
79 xhci_dbg(xhci, " Max ports: %u\n",
80 (unsigned int) HCS_MAX_PORTS(temp));
81
82 temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
83 xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
84 (unsigned int) temp);
85 xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
86 (unsigned int) HCS_IST(temp));
87 xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
88 (unsigned int) HCS_ERST_MAX(temp));
89
90 temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
91 xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
92 (unsigned int) temp);
93 xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
94 (unsigned int) HCS_U1_LATENCY(temp));
95 xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
96 (unsigned int) HCS_U2_LATENCY(temp));
97
98 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
99 xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
100 xhci_dbg(xhci, " HC generates %s bit addresses\n",
101 HCC_64BIT_ADDR(temp) ? "64" : "32");
102 /* FIXME */
103 xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
104
105 temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
106 xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
107}
108
109static void xhci_print_command_reg(struct xhci_hcd *xhci)
110{
111 u32 temp;
112
113 temp = xhci_readl(xhci, &xhci->op_regs->command);
114 xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
115 xhci_dbg(xhci, " HC is %s\n",
116 (temp & CMD_RUN) ? "running" : "being stopped");
117 xhci_dbg(xhci, " HC has %sfinished hard reset\n",
118 (temp & CMD_RESET) ? "not " : "");
119 xhci_dbg(xhci, " Event Interrupts %s\n",
120 (temp & CMD_EIE) ? "enabled " : "disabled");
121 xhci_dbg(xhci, " Host System Error Interrupts %s\n",
122 (temp & CMD_HSEIE) ? "enabled " : "disabled");
123 xhci_dbg(xhci, " HC has %sfinished light reset\n",
124 (temp & CMD_LRESET) ? "not " : "");
125}
126
127static void xhci_print_status(struct xhci_hcd *xhci)
128{
129 u32 temp;
130
131 temp = xhci_readl(xhci, &xhci->op_regs->status);
132 xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
133 xhci_dbg(xhci, " Event ring is %sempty\n",
134 (temp & STS_EINT) ? "not " : "");
135 xhci_dbg(xhci, " %sHost System Error\n",
136 (temp & STS_FATAL) ? "WARNING: " : "No ");
137 xhci_dbg(xhci, " HC is %s\n",
138 (temp & STS_HALT) ? "halted" : "running");
139}
140
141static void xhci_print_op_regs(struct xhci_hcd *xhci)
142{
143 xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
144 xhci_print_command_reg(xhci);
145 xhci_print_status(xhci);
146}
147
148static void xhci_print_ports(struct xhci_hcd *xhci)
149{
150 u32 __iomem *addr;
151 int i, j;
152 int ports;
153 char *names[NUM_PORT_REGS] = {
154 "status",
155 "power",
156 "link",
157 "reserved",
158 };
159
160 ports = HCS_MAX_PORTS(xhci->hcs_params1);
161 addr = &xhci->op_regs->port_status_base;
162 for (i = 0; i < ports; i++) {
163 for (j = 0; j < NUM_PORT_REGS; ++j) {
164 xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
165 addr, names[j],
166 (unsigned int) xhci_readl(xhci, addr));
167 addr++;
168 }
169 }
170}
171
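/*
 * Illustrative sketch (not part of this patch): the loop above walks
 * NUM_PORT_REGS consecutive 32-bit registers per port, so port N's
 * first ("status") register could be read directly as:
 *
 *	u32 __iomem *portsc = &xhci->op_regs->port_status_base
 *				+ n * NUM_PORT_REGS;
 *	u32 status = xhci_readl(xhci, portsc);
 */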
172void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
173{
174 void *addr;
175 u32 temp;
176
177 addr = &ir_set->irq_pending;
178 temp = xhci_readl(xhci, addr);
179 if (temp == XHCI_INIT_VALUE)
180 return;
181
182 xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
183
184 xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
185 (unsigned int)temp);
186
187 addr = &ir_set->irq_control;
188 temp = xhci_readl(xhci, addr);
189 xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
190 (unsigned int)temp);
191
192 addr = &ir_set->erst_size;
193 temp = xhci_readl(xhci, addr);
194 xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
195 (unsigned int)temp);
196
197 addr = &ir_set->rsvd;
198 temp = xhci_readl(xhci, addr);
199 if (temp != XHCI_INIT_VALUE)
200 xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
201 addr, (unsigned int)temp);
202
203 addr = &ir_set->erst_base[0];
204 temp = xhci_readl(xhci, addr);
205 xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n",
206 addr, (unsigned int) temp);
207
208 addr = &ir_set->erst_base[1];
209 temp = xhci_readl(xhci, addr);
210 xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
211 addr, (unsigned int) temp);
212
213 addr = &ir_set->erst_dequeue[0];
214 temp = xhci_readl(xhci, addr);
215 xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
216 addr, (unsigned int) temp);
217
218 addr = &ir_set->erst_dequeue[1];
219 temp = xhci_readl(xhci, addr);
220 xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
221 addr, (unsigned int) temp);
222}
223
224void xhci_print_run_regs(struct xhci_hcd *xhci)
225{
226 u32 temp;
227 int i;
228
229 xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
230 temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
231 xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
232 &xhci->run_regs->microframe_index,
233 (unsigned int) temp);
234 for (i = 0; i < 7; ++i) {
235 temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
236 if (temp != XHCI_INIT_VALUE)
237 xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
238 &xhci->run_regs->rsvd[i],
239 i, (unsigned int) temp);
240 }
241}
242
243void xhci_print_registers(struct xhci_hcd *xhci)
244{
245 xhci_print_cap_regs(xhci);
246 xhci_print_op_regs(xhci);
247 xhci_print_ports(xhci);
248}
249
250void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
251{
252 int i;
253 for (i = 0; i < 4; ++i)
254 xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
255 i*4, trb->generic.field[i]);
256}
257
258/**
259 * Debug a transfer request block (TRB).
260 */
261void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
262{
263 u64 address;
264 u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
265
266 switch (type) {
267 case TRB_TYPE(TRB_LINK):
268 xhci_dbg(xhci, "Link TRB:\n");
269 xhci_print_trb_offsets(xhci, trb);
270
271 address = trb->link.segment_ptr[0] +
272 (((u64) trb->link.segment_ptr[1]) << 32);
273 xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
274
275 xhci_dbg(xhci, "Interrupter target = 0x%x\n",
276 GET_INTR_TARGET(trb->link.intr_target));
277 xhci_dbg(xhci, "Cycle bit = %u\n",
278 (unsigned int) (trb->link.control & TRB_CYCLE));
279 xhci_dbg(xhci, "Toggle cycle bit = %u\n",
280 (unsigned int) (trb->link.control & LINK_TOGGLE));
281 xhci_dbg(xhci, "No Snoop bit = %u\n",
282 (unsigned int) (trb->link.control & TRB_NO_SNOOP));
283 break;
284 case TRB_TYPE(TRB_TRANSFER):
285 address = trb->trans_event.buffer[0] +
286 (((u64) trb->trans_event.buffer[1]) << 32);
287 /*
288 * FIXME: look at flags to figure out if it's an address or if
289 * the data is directly in the buffer field.
290 */
291 xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
292 break;
293 case TRB_TYPE(TRB_COMPLETION):
294 address = trb->event_cmd.cmd_trb[0] +
295 (((u64) trb->event_cmd.cmd_trb[1]) << 32);
296 xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
297 xhci_dbg(xhci, "Completion status = %u\n",
298 (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
299 xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
300 break;
301 default:
302 xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
303 (unsigned int) type>>10);
304 xhci_print_trb_offsets(xhci, trb);
305 break;
306 }
307}
308
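/*
 * Illustrative note (not part of this patch): the switch above works
 * because the TRB type ID lives in bits 15:10 of the control word, so
 * TRB_TYPE(id) is the ID shifted left by 10 and the default case
 * recovers the raw ID by shifting back down:
 *
 *	u32 control = xhci_readl(xhci, &trb->link.control);
 *	u32 id = (control & TRB_TYPE_BITMASK) >> 10;
 */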
309/**
310 * Debug a segment within an xHCI ring.
311 *
312 * Returns nothing. Every segment must end in a Link TRB; a segment
313 * without one indicates a bug in the ring setup.
314 *
315 * Prints out all TRBs in the segment, even those after the Link TRB.
316 *
317 * XXX: should we print out TRBs that the HC owns? As long as we don't
318 * write, that should be fine... We shouldn't expect that the memory pointed to
319 * by the TRB is valid at all. Do we care about ones the HC owns? Probably,
320 * for HC debugging.
321 */
322void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
323{
324 int i;
325 u32 addr = (u32) seg->dma;
326 union xhci_trb *trb = seg->trbs;
327
328 for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
329 trb = &seg->trbs[i];
330 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
331 (unsigned int) trb->link.segment_ptr[0],
332 (unsigned int) trb->link.segment_ptr[1],
333 (unsigned int) trb->link.intr_target,
334 (unsigned int) trb->link.control);
335 addr += sizeof(*trb);
336 }
337}
338
339void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
340{
341 xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
342 ring->dequeue,
343 (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
344 ring->dequeue));
345 xhci_dbg(xhci, "Ring deq updated %u times\n",
346 ring->deq_updates);
347 xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
348 ring->enqueue,
349 (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
350 ring->enqueue));
351 xhci_dbg(xhci, "Ring enq updated %u times\n",
352 ring->enq_updates);
353}
354
355/**
356 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
357 *
358 * Print out each segment in the ring. Check that the DMA address in
359 * each link segment actually matches the segment's stored DMA address.
360 * Check that the link end bit is only set at the end of the ring.
361 * Check that the dequeue and enqueue pointers point to real data in this ring
362 * (not some other ring).
363 */
364void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
365{
366 /* FIXME: Throw an error if any segment doesn't have a Link TRB */
367 struct xhci_segment *seg;
368 struct xhci_segment *first_seg = ring->first_seg;
369 xhci_debug_segment(xhci, first_seg);
370
371 if (!ring->enq_updates && !ring->deq_updates) {
372 xhci_dbg(xhci, " Ring has not been updated\n");
373 return;
374 }
375 for (seg = first_seg->next; seg != first_seg; seg = seg->next)
376 xhci_debug_segment(xhci, seg);
377}
378
379void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
380{
381 u32 addr = (u32) erst->erst_dma_addr;
382 int i;
383 struct xhci_erst_entry *entry;
384
385 for (i = 0; i < erst->num_entries; ++i) {
386 entry = &erst->entries[i];
387 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
388 (unsigned int) addr,
389 (unsigned int) entry->seg_addr[0],
390 (unsigned int) entry->seg_addr[1],
391 (unsigned int) entry->seg_size,
392 (unsigned int) entry->rsvd);
393 addr += sizeof(*entry);
394 }
395}
396
397void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
398{
399 u32 val;
400
401 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
402 xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
403 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
404 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
405}
406
407void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
408{
409 int i, j;
410 int last_ep_ctx = 31;
411 /* Fields are 32 bits wide, DMA addresses are in bytes */
412 int field_size = 32 / 8;
413
414 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
415 &ctx->drop_flags, (unsigned long long)dma,
416 ctx->drop_flags);
417 dma += field_size;
418 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
419 &ctx->add_flags, (unsigned long long)dma,
420 ctx->add_flags);
421 dma += field_size;
422 for (i = 0; i < 6; ++i) {
423 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
424 &ctx->rsvd[i], (unsigned long long)dma,
425 ctx->rsvd[i], i);
426 dma += field_size;
427 }
428
429 xhci_dbg(xhci, "Slot Context:\n");
430 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
431 &ctx->slot.dev_info,
432 (unsigned long long)dma, ctx->slot.dev_info);
433 dma += field_size;
434 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
435 &ctx->slot.dev_info2,
436 (unsigned long long)dma, ctx->slot.dev_info2);
437 dma += field_size;
438 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
439 &ctx->slot.tt_info,
440 (unsigned long long)dma, ctx->slot.tt_info);
441 dma += field_size;
442 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
443 &ctx->slot.dev_state,
444 (unsigned long long)dma, ctx->slot.dev_state);
445 dma += field_size;
446 for (i = 0; i < 4; ++i) {
447 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
448 &ctx->slot.reserved[i], (unsigned long long)dma,
449 ctx->slot.reserved[i], i);
450 dma += field_size;
451 }
452
453 if (last_ep < 31)
454 last_ep_ctx = last_ep + 1;
455 for (i = 0; i < last_ep_ctx; ++i) {
456 xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
457 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
458 &ctx->ep[i].ep_info,
459 (unsigned long long)dma, ctx->ep[i].ep_info);
460 dma += field_size;
461 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
462 &ctx->ep[i].ep_info2,
463 (unsigned long long)dma, ctx->ep[i].ep_info2);
464 dma += field_size;
465 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
466 &ctx->ep[i].deq[0],
467 (unsigned long long)dma, ctx->ep[i].deq[0]);
468 dma += field_size;
469 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
470 &ctx->ep[i].deq[1],
471 (unsigned long long)dma, ctx->ep[i].deq[1]);
472 dma += field_size;
473 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
474 &ctx->ep[i].tx_info,
475 (unsigned long long)dma, ctx->ep[i].tx_info);
476 dma += field_size;
477 for (j = 0; j < 3; ++j) {
478 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
479 &ctx->ep[i].reserved[j],
480 (unsigned long long)dma,
481 ctx->ep[i].reserved[j], j);
482 dma += field_size;
483 }
484 }
485}
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
new file mode 100644
index 000000000000..ecc131c3fe33
--- /dev/null
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -0,0 +1,145 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22/* Up to 16 microframes to halt an HC - one microframe is 125 microseconds */
23#define XHCI_MAX_HALT_USEC (16*125)
24/* HC not running - set to 1 when run/stop bit is cleared. */
25#define XHCI_STS_HALT (1<<0)
26
27/* HCCPARAMS offset from PCI base address */
28#define XHCI_HCC_PARAMS_OFFSET 0x10
29/* HCCPARAMS contains the first extended capability pointer */
30#define XHCI_HCC_EXT_CAPS(p) (((p)>>16)&0xffff)
31
32/* Command and Status registers offset from the Operational Registers address */
33#define XHCI_CMD_OFFSET 0x00
34#define XHCI_STS_OFFSET 0x04
35
36#define XHCI_MAX_EXT_CAPS 50
37
38/* Capability Register */
39/* bits 7:0 - how long is the Capabilities register */
40#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff)
41
42/* Extended capability register fields */
43#define XHCI_EXT_CAPS_ID(p) (((p)>>0)&0xff)
44#define XHCI_EXT_CAPS_NEXT(p) (((p)>>8)&0xff)
45#define XHCI_EXT_CAPS_VAL(p) ((p)>>16)
46/* Extended capability IDs - ID 0 reserved */
47#define XHCI_EXT_CAPS_LEGACY 1
48#define XHCI_EXT_CAPS_PROTOCOL 2
49#define XHCI_EXT_CAPS_PM 3
50#define XHCI_EXT_CAPS_VIRT 4
51#define XHCI_EXT_CAPS_ROUTE 5
52/* IDs 6-9 reserved */
53#define XHCI_EXT_CAPS_DEBUG 10
54/* USB Legacy Support Capability - section 7.1.1 */
55#define XHCI_HC_BIOS_OWNED (1 << 16)
56#define XHCI_HC_OS_OWNED (1 << 24)
57
58/* USB Legacy Support Capability - section 7.1.1 */
59/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
60#define XHCI_LEGACY_SUPPORT_OFFSET (0x00)
61
62/* USB Legacy Support Control and Status Register - section 7.1.2 */
63/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
64#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
65/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
66#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
67
68/* command register values to disable interrupts and halt the HC */
69/* start/stop HC execution - do not write unless HC is halted*/
70#define XHCI_CMD_RUN (1 << 0)
71/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
72#define XHCI_CMD_EIE (1 << 2)
73/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
74#define XHCI_CMD_HSEIE (1 << 3)
75/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
76#define XHCI_CMD_EWE (1 << 10)
77
78#define XHCI_IRQS (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
79
80/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
81#define XHCI_STS_CNR (1 << 11)
82
83#include <linux/io.h>
84
85/**
86 * Return the next extended capability pointer register.
87 *
88 * @base PCI register base address.
89 *
90 * @ext_offset Offset of the 32-bit register that contains the extended
91 * capabilities pointer. If searching for the first extended capability, pass
92 * in XHCI_HCC_PARAMS_OFFSET. If searching for the next extended capability,
93 * pass in the offset of the current extended capability register.
94 *
95 * Returns 0 if there is no next extended capability register, or the offset
96 * of that register from the PCI registers base address.
97 */
98static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
99{
100 u32 next;
101
102 next = readl(base + ext_offset);
103
104 if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
105 /* Find the first extended capability */
106 next = XHCI_HCC_EXT_CAPS(next);
107 else
108 /* Find the next extended capability */
109 next = XHCI_EXT_CAPS_NEXT(next);
110 if (!next)
111 return 0;
112 /*
113 * Address calculation from offset of extended capabilities
114 * (or HCCPARAMS) register - see section 5.3.6 and section 7.
115 */
116 return ext_offset + (next << 2);
117}
118
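/*
 * Worked example (illustrative, not part of this patch): the Next pointer
 * is counted in 32-bit words from the current capability, so if the
 * capability at offset 0x500 reads back with a Next field of 0x04, the
 * next capability sits at 0x500 + (0x04 << 2) = 0x510; a Next field of
 * zero terminates the list.
 */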
119/**
120 * Find the offset of the extended capabilities with capability ID id.
121 *
122 * @base PCI MMIO registers base address.
123 * @ext_offset Offset from base of the first extended capability to look at,
124 * or the address of HCCPARAMS.
125 * @id Extended capability ID to search for.
126 *
127 * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities
128 * to make sure that the list doesn't contain a loop.
129 */
130static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id)
131{
132 u32 val;
133 int limit = XHCI_MAX_EXT_CAPS;
134
135 while (ext_offset && limit > 0) {
136 val = readl(base + ext_offset);
137 if (XHCI_EXT_CAPS_ID(val) == id)
138 break;
139 ext_offset = xhci_find_next_cap_offset(base, ext_offset);
140 limit--;
141 }
142 if (limit > 0)
143 return ext_offset;
144 return 0;
145}
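
/*
 * Illustrative sketch (not part of this patch; the helper name below is
 * made up): chaining the two functions above to locate the USB Legacy
 * Support capability and ask the BIOS to hand the controller to the OS.
 * A real caller would also poll for XHCI_HC_BIOS_OWNED to clear, with a
 * timeout, before touching the controller.
 */
static inline void xhci_request_os_ownership_example(void __iomem *base)
{
	int offset;
	u32 val;

	offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
	offset = xhci_find_ext_cap_by_id(base, offset, XHCI_EXT_CAPS_LEGACY);
	if (!offset)
		return;	/* no legacy support capability implemented */

	val = readl(base + offset + XHCI_LEGACY_SUPPORT_OFFSET);
	if (val & XHCI_HC_BIOS_OWNED)
		writel(val | XHCI_HC_OS_OWNED,
				base + offset + XHCI_LEGACY_SUPPORT_OFFSET);
}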
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
new file mode 100644
index 000000000000..dba3e07ccd09
--- /dev/null
+++ b/drivers/usb/host/xhci-hcd.c
@@ -0,0 +1,1274 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/irq.h>
24#include <linux/module.h>
25
26#include "xhci.h"
27
28#define DRIVER_AUTHOR "Sarah Sharp"
29#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
30
31/* TODO: copied from ehci-hcd.c - can this be refactored? */
32/*
33 * handshake - spin reading hc until handshake completes or fails
34 * @ptr: address of hc register to be read
35 * @mask: bits to look at in result of read
36 * @done: value of those bits when handshake succeeds
37 * @usec: timeout in microseconds
38 *
39 * Returns negative errno, or zero on success
40 *
41 * Success happens when the "mask" bits have the specified value (hardware
42 * handshake done). There are two failure modes: "usec" have passed (major
43 * hardware flakeout), or the register reads as all-ones (hardware removed).
44 */
45static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
46 u32 mask, u32 done, int usec)
47{
48 u32 result;
49
50 do {
51 result = xhci_readl(xhci, ptr);
52 if (result == ~(u32)0) /* card removed */
53 return -ENODEV;
54 result &= mask;
55 if (result == done)
56 return 0;
57 udelay(1);
58 usec--;
59 } while (usec > 0);
60 return -ETIMEDOUT;
61}
62
63/*
64 * Force HC into halt state.
65 *
66 * Disable any IRQs and clear the run/stop bit.
67 * HC will complete any current and actively pipelined transactions, and
68 * should halt within 16 microframes of the run/stop bit being cleared.
69 * Read HC Halted bit in the status register to see when the HC is finished.
70 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
71 */
72int xhci_halt(struct xhci_hcd *xhci)
73{
74 u32 halted;
75 u32 cmd;
76 u32 mask;
77
78 xhci_dbg(xhci, "// Halt the HC\n");
79 /* Disable all interrupts from the host controller */
80 mask = ~(XHCI_IRQS);
81 halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
82 if (!halted)
83 mask &= ~CMD_RUN;
84
85 cmd = xhci_readl(xhci, &xhci->op_regs->command);
86 cmd &= mask;
87 xhci_writel(xhci, cmd, &xhci->op_regs->command);
88
89 return handshake(xhci, &xhci->op_regs->status,
90 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
91}
92
93/*
94 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
95 *
96 * This resets pipelines, timers, counters, state machines, etc.
97 * Transactions will be terminated immediately, and operational registers
98 * will be set to their defaults.
99 */
100int xhci_reset(struct xhci_hcd *xhci)
101{
102 u32 command;
103 u32 state;
104
105 state = xhci_readl(xhci, &xhci->op_regs->status);
106 BUG_ON((state & STS_HALT) == 0);
107
108 xhci_dbg(xhci, "// Reset the HC\n");
109 command = xhci_readl(xhci, &xhci->op_regs->command);
110 command |= CMD_RESET;
111 xhci_writel(xhci, command, &xhci->op_regs->command);
112 /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
113 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
114
115 return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
116}
117
118/*
119 * Stop the HC from processing the endpoint queues.
120 */
121static void xhci_quiesce(struct xhci_hcd *xhci)
122{
123 /*
124 * Queues are per endpoint, so we need to disable an endpoint or slot.
125 *
126 * To disable a slot, we need to insert a disable slot command on the
127 * command ring and ring the doorbell. This will also free any internal
128 * resources associated with the slot (which might not be what we want).
129 *
130 * A Release Endpoint command sounds better - doesn't free internal HC
131 * memory, but removes the endpoints from the schedule and releases the
132 * bandwidth, disables the doorbells, and clears the endpoint enable
133 * flag. Usually used prior to a set interface command.
134 *
135 * TODO: Implement after command ring code is done.
136 */
137 BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
138 xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
139}
140
141#if 0
142/* Set up MSI-X table for entry 0 (may claim other entries later) */
143static int xhci_setup_msix(struct xhci_hcd *xhci)
144{
145 int ret;
146 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
147
148 xhci->msix_count = 0;
149 /* XXX: did I do this right? ixgbe does kcalloc for more than one */
150 xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
151 if (!xhci->msix_entries) {
152 xhci_err(xhci, "Failed to allocate MSI-X entries\n");
153 return -ENOMEM;
154 }
155 xhci->msix_entries[0].entry = 0;
156
157 ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
158 if (ret) {
159 xhci_err(xhci, "Failed to enable MSI-X\n");
160 goto free_entries;
161 }
162
163 /*
164 * Pass the xhci pointer value as the request_irq "cookie".
165 * If more irqs are added, this will need to be unique for each one.
166 */
167 ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
168 "xHCI", xhci_to_hcd(xhci));
169 if (ret) {
170 xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
171 goto disable_msix;
172 }
173 xhci_dbg(xhci, "Finished setting up MSI-X\n");
174 return 0;
175
176disable_msix:
177 pci_disable_msix(pdev);
178free_entries:
179 kfree(xhci->msix_entries);
180 xhci->msix_entries = NULL;
181 return ret;
182}
183
184/* XXX: code duplication; can xhci_setup_msix call this? */
185/* Free any IRQs and disable MSI-X */
186static void xhci_cleanup_msix(struct xhci_hcd *xhci)
187{
188 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
189 if (!xhci->msix_entries)
190 return;
191
192 free_irq(xhci->msix_entries[0].vector, xhci);
193 pci_disable_msix(pdev);
194 kfree(xhci->msix_entries);
195 xhci->msix_entries = NULL;
196 xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
197}
198#endif
199
200/*
201 * Initialize memory for HCD and xHC (one-time init).
202 *
203 * Program the PAGESIZE register, initialize the device context array, create
204 * device contexts (?), set up a command ring segment (or two?), create event
205 * ring (one for now).
206 */
207int xhci_init(struct usb_hcd *hcd)
208{
209 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
210 int retval = 0;
211
212 xhci_dbg(xhci, "xhci_init\n");
213 spin_lock_init(&xhci->lock);
214 retval = xhci_mem_init(xhci, GFP_KERNEL);
215 xhci_dbg(xhci, "Finished xhci_init\n");
216
217 return retval;
218}
219
220/*
221 * Called in interrupt context when there might be work
222 * queued on the event ring
223 *
224 * xhci->lock must be held by caller.
225 */
226static void xhci_work(struct xhci_hcd *xhci)
227{
228 u32 temp;
229
230 /*
231 * Clear the op reg interrupt status first,
232 * so we can receive interrupts from other MSI-X interrupters.
233 * Write 1 to clear the interrupt status.
234 */
235 temp = xhci_readl(xhci, &xhci->op_regs->status);
236 temp |= STS_EINT;
237 xhci_writel(xhci, temp, &xhci->op_regs->status);
238 /* FIXME when MSI-X is supported and there are multiple vectors */
239 /* Clear the MSI-X event interrupt status */
240
241 /* Acknowledge the interrupt */
242 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
243 temp |= 0x3;	/* IP is write-1-to-clear; keep IE set */
244 xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
245 /* Flush posted writes */
246 xhci_readl(xhci, &xhci->ir_set->irq_pending);
247
248 /* FIXME this should be a delayed service routine that clears the EHB */
249 xhci_handle_event(xhci);
250
251 /* Clear the event handler busy flag; the event ring should be empty. */
252 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
253 xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
254 /* Flush posted writes -- FIXME is this necessary? */
255 xhci_readl(xhci, &xhci->ir_set->irq_pending);
256}
257
258/*-------------------------------------------------------------------------*/
259
260/*
261 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
262 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
263 * indicators of an event TRB error, but we check the status *first* to be safe.
264 */
265irqreturn_t xhci_irq(struct usb_hcd *hcd)
266{
267 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
268 u32 temp, temp2;
269
270 spin_lock(&xhci->lock);
271 /* Check if the xHC generated the interrupt, or the irq is shared */
272 temp = xhci_readl(xhci, &xhci->op_regs->status);
273 temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
274 if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
275 spin_unlock(&xhci->lock);
276 return IRQ_NONE;
277 }
278
279 if (temp & STS_FATAL) {
280 xhci_warn(xhci, "WARNING: Host System Error\n");
281 xhci_halt(xhci);
282 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
283 spin_unlock(&xhci->lock);
284 return -ESHUTDOWN;
285 }
286
287 xhci_work(xhci);
288 spin_unlock(&xhci->lock);
289
290 return IRQ_HANDLED;
291}
292
293#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
294void xhci_event_ring_work(unsigned long arg)
295{
296 unsigned long flags;
297 int temp;
298 struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
299 int i, j;
300
301 xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
302
303 spin_lock_irqsave(&xhci->lock, flags);
304 temp = xhci_readl(xhci, &xhci->op_regs->status);
305 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
306 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
307 xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
308 xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
309 xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
310 xhci->error_bitmask = 0;
311 xhci_dbg(xhci, "Event ring:\n");
312 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
313 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
314 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
315 temp &= ERST_PTR_MASK;
316 xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
317 xhci_dbg(xhci, "Command ring:\n");
318 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
319 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
320 xhci_dbg_cmd_ptrs(xhci);
321 for (i = 0; i < MAX_HC_SLOTS; ++i) {
322 if (xhci->devs[i]) {
323 for (j = 0; j < 31; ++j) {
324 if (xhci->devs[i]->ep_rings[j]) {
325 xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
326 xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
327 }
328 }
329 }
330 }
331
332 if (xhci->noops_submitted != NUM_TEST_NOOPS)
333 if (xhci_setup_one_noop(xhci))
334 xhci_ring_cmd_db(xhci);
335 spin_unlock_irqrestore(&xhci->lock, flags);
336
337 if (!xhci->zombie)
338 mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
339 else
340 xhci_dbg(xhci, "Quit polling the event ring.\n");
341}
342#endif
343
344/*
345 * Start the HC after it was halted.
346 *
347 * This function is called by the USB core when the HC driver is added.
348 * Its opposite is xhci_stop().
349 *
350 * xhci_init() must be called once before this function can be called.
351 * Reset the HC, enable device slot contexts, program DCBAAP, and
352 * set command ring pointer and event ring pointer.
353 *
354 * Setup MSI-X vectors and enable interrupts.
355 */
356int xhci_run(struct usb_hcd *hcd)
357{
358 u32 temp;
359 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
360 void (*doorbell)(struct xhci_hcd *) = NULL;
361
362 hcd->uses_new_polling = 1;
363 hcd->poll_rh = 0;
364
365 xhci_dbg(xhci, "xhci_run\n");
366#if 0 /* FIXME: MSI not setup yet */
367 /* Do this at the very last minute */
368 ret = xhci_setup_msix(xhci);
369 if (!ret)
370 return ret;
371
372 return -ENOSYS;
373#endif
374#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
375 init_timer(&xhci->event_ring_timer);
376 xhci->event_ring_timer.data = (unsigned long) xhci;
377 xhci->event_ring_timer.function = xhci_event_ring_work;
378 /* Poll the event ring */
379 xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
380 xhci->zombie = 0;
381 xhci_dbg(xhci, "Setting event ring polling timer\n");
382 add_timer(&xhci->event_ring_timer);
383#endif
384
385 xhci_dbg(xhci, "// Set the interrupt modulation register\n");
386 temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
387 temp &= ~ER_IRQ_INTERVAL_MASK;
388 temp |= (u32) 160;	/* 250 ns units per xHCI spec: ~40 us between interrupts */
389 xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
390
391 /* Set the HCD state before we enable the irqs */
392 hcd->state = HC_STATE_RUNNING;
393 temp = xhci_readl(xhci, &xhci->op_regs->command);
394 temp |= (CMD_EIE);
395 xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
396 temp);
397 xhci_writel(xhci, temp, &xhci->op_regs->command);
398
399 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
400 xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
401 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
402 xhci_writel(xhci, ER_IRQ_ENABLE(temp),
403 &xhci->ir_set->irq_pending);
404 xhci_print_ir_set(xhci, xhci->ir_set, 0);
405
406 if (NUM_TEST_NOOPS > 0)
407 doorbell = xhci_setup_one_noop(xhci);
408
409 xhci_dbg(xhci, "Command ring memory map follows:\n");
410 xhci_debug_ring(xhci, xhci->cmd_ring);
411 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
412 xhci_dbg_cmd_ptrs(xhci);
413
414 xhci_dbg(xhci, "ERST memory map follows:\n");
415 xhci_dbg_erst(xhci, &xhci->erst);
416 xhci_dbg(xhci, "Event ring:\n");
417 xhci_debug_ring(xhci, xhci->event_ring);
418 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
419 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
420 temp &= ERST_PTR_MASK;
421 xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
422 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
423 xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
424
425 temp = xhci_readl(xhci, &xhci->op_regs->command);
426 temp |= (CMD_RUN);
427 xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
428 temp);
429 xhci_writel(xhci, temp, &xhci->op_regs->command);
430 /* Flush PCI posted writes */
431 temp = xhci_readl(xhci, &xhci->op_regs->command);
432 xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
433 if (doorbell)
434 (*doorbell)(xhci);
435
436 xhci_dbg(xhci, "Finished xhci_run\n");
437 return 0;
438}
439
440/*
441 * Stop xHCI driver.
442 *
443 * This function is called by the USB core when the HC driver is removed.
444 * Its opposite is xhci_run().
445 *
446 * Disable device contexts, disable IRQs, and quiesce the HC.
447 * Reset the HC, finish any completed transactions, and cleanup memory.
448 */
449void xhci_stop(struct usb_hcd *hcd)
450{
451 u32 temp;
452 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
453
454 spin_lock_irq(&xhci->lock);
455 if (HC_IS_RUNNING(hcd->state))
456 xhci_quiesce(xhci);
457 xhci_halt(xhci);
458 xhci_reset(xhci);
459 spin_unlock_irq(&xhci->lock);
460
461#if 0 /* No MSI yet */
462 xhci_cleanup_msix(xhci);
463#endif
464#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
465 /* Tell the event ring poll function not to reschedule */
466 xhci->zombie = 1;
467 del_timer_sync(&xhci->event_ring_timer);
468#endif
469
470 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
471 temp = xhci_readl(xhci, &xhci->op_regs->status);
472 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
473 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
474 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
475 &xhci->ir_set->irq_pending);
476 xhci_print_ir_set(xhci, xhci->ir_set, 0);
477
478 xhci_dbg(xhci, "cleaning up memory\n");
479 xhci_mem_cleanup(xhci);
480 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
481 xhci_readl(xhci, &xhci->op_regs->status));
482}
483
484/*
485 * Shutdown HC (not bus-specific)
486 *
487 * This is called when the machine is rebooting or halting. We assume that the
488 * machine will be powered off, and the HC's internal state will be reset.
489 * Don't bother to free memory.
490 */
491void xhci_shutdown(struct usb_hcd *hcd)
492{
493 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
494
495 spin_lock_irq(&xhci->lock);
496 xhci_halt(xhci);
497 spin_unlock_irq(&xhci->lock);
498
499#if 0
500 xhci_cleanup_msix(xhci);
501#endif
502
503 xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
504 xhci_readl(xhci, &xhci->op_regs->status));
505}
506
507/*-------------------------------------------------------------------------*/
508
509/**
510 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
511 * HCDs. Find the index for an endpoint given its descriptor. Use the return
512 * value as the amount to left shift a 1 to build the bitmask.
513 *
514 * Index = (epnum * 2) + direction - 1,
515 * where direction = 0 for OUT, 1 for IN.
516 * For control endpoints, the IN index is used (OUT index is unused), so
517 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
518 */
519unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
520{
521 unsigned int index;
522 if (usb_endpoint_xfer_control(desc))
523 index = (unsigned int) (usb_endpoint_num(desc)*2);
524 else
525 index = (unsigned int) (usb_endpoint_num(desc)*2) +
526 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
527 return index;
528}
529
530/* Find the flag for this endpoint (for use in the control context). Use the
531 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
532 * bit 1, etc.
533 */
534unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
535{
536 return 1 << (xhci_get_endpoint_index(desc) + 1);
537}
538
539/* Compute the last valid endpoint context index. Basically, this is the
540 * endpoint index plus one. For slot contexts with more than one valid endpoint,
541 * we find the most significant bit set in the added contexts flags.
542 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
543 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
544 */
545static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
546{
547 return fls(added_ctxs) - 1;
548}
549
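/*
 * Worked example (illustrative, not part of this patch) for bulk IN
 * endpoint 0x82 (epnum 2, direction IN):
 *
 *	xhci_get_endpoint_index()      = (2 * 2) + 1 - 1  = 4
 *	xhci_get_endpoint_flag()       = 1 << (4 + 1)     = 0x20
 *	xhci_last_valid_endpoint(0x20) = fls(0x20) - 1    = 5
 *
 * so LAST_CTX(5) is what ends up in the slot context's dev_info when
 * this endpoint is the highest one added.
 */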
550/* Returns 1 if the arguments are OK;
551 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
552 */
553int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
554 struct usb_host_endpoint *ep, int check_ep, const char *func) {
555 if (!hcd || (check_ep && !ep) || !udev) {
556 printk(KERN_DEBUG "xHCI %s called with invalid args\n",
557 func);
558 return -EINVAL;
559 }
560 if (!udev->parent) {
561 printk(KERN_DEBUG "xHCI %s called for root hub\n",
562 func);
563 return 0;
564 }
565 if (!udev->slot_id) {
566 printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
567 func);
568 return -EINVAL;
569 }
570 return 1;
571}
572
573/*
574 * non-error returns are a promise to giveback() the urb later
575 * we drop ownership so next owner (or urb unlink) can get it
576 */
577int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
578{
579 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
580 unsigned long flags;
581 int ret = 0;
582 unsigned int slot_id, ep_index;
583
584 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
585 return -EINVAL;
586
587 slot_id = urb->dev->slot_id;
588 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
589
590 spin_lock_irqsave(&xhci->lock, flags);
591 if (!xhci->devs || !xhci->devs[slot_id]) {
592 if (!in_interrupt())
593 dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
594 ret = -EINVAL;
595 goto exit;
596 }
597 if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
598 if (!in_interrupt())
599 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
600 ret = -ESHUTDOWN;
601 goto exit;
602 }
603 if (usb_endpoint_xfer_control(&urb->ep->desc))
604 ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
605 slot_id, ep_index);
606 else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
607 ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
608 slot_id, ep_index);
609 else
610 ret = -EINVAL;
611exit:
612 spin_unlock_irqrestore(&xhci->lock, flags);
613 return ret;
614}
615
616/*
617 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
618 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
619 * should pick up where it left off in the TD, unless a Set Transfer Ring
620 * Dequeue Pointer is issued.
621 *
622 * The TRBs that make up the buffers for the canceled URB will be "removed" from
623 * the ring. Since the ring is a contiguous structure, they can't be physically
624 * removed. Instead, there are three cases to handle:
625 *
626 * 1) If the HC is in the middle of processing the URB to be canceled, we
627 * simply move the ring's dequeue pointer past those TRBs using the Set
628 * Transfer Ring Dequeue Pointer command. This will be the common case,
629 * when drivers timeout on the last submitted URB and attempt to cancel.
630 *
631 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
632 * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
633 * HC will need to invalidate any TRBs it has cached after the stop
634 * endpoint command, as noted in the xHCI 0.95 errata.
635 *
636 * 3) The TD may have completed by the time the Stop Endpoint Command
637 * completes, so software needs to handle that case too.
638 *
639 * This function should protect against the TD enqueueing code ringing the
640 * doorbell while this code is waiting for a Stop Endpoint command to complete.
641 * It also needs to account for multiple cancellations happening at the same
642 * time for the same endpoint.
643 *
644 * Note that this function can be called in any context, or so says
645 * usb_hcd_unlink_urb()
646 */
647int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
648{
649 unsigned long flags;
650 int ret;
651 struct xhci_hcd *xhci;
652 struct xhci_td *td;
653 unsigned int ep_index;
654 struct xhci_ring *ep_ring;
655
656 xhci = hcd_to_xhci(hcd);
657 spin_lock_irqsave(&xhci->lock, flags);
658 /* Make sure the URB hasn't completed or been unlinked already */
659 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
660 if (ret || !urb->hcpriv)
661 goto done;
662
663 xhci_dbg(xhci, "Cancel URB %p\n", urb);
664 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
665 ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
666 td = (struct xhci_td *) urb->hcpriv;
667
668 ep_ring->cancels_pending++;
669 list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
670 /* Queue a stop endpoint command, but only if this is
671 * the first cancellation to be handled.
672 */
673 if (ep_ring->cancels_pending == 1) {
674 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
675 xhci_ring_cmd_db(xhci);
676 }
677done:
678 spin_unlock_irqrestore(&xhci->lock, flags);
679 return ret;
680}
681
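/*
 * Illustrative sketch (not part of this patch): a class driver reaches
 * the dequeue path above via the USB core, e.g. on a transfer timeout:
 *
 *	ret = usb_unlink_urb(urb);	(async: returns -EINPROGRESS
 *					 once the unlink is queued)
 *
 * or usb_kill_urb(urb) to block until the URB is given back. Either
 * call goes through usb_hcd_unlink_urb(), which ends up invoking this
 * driver's urb_dequeue method.
 */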
682/* Drop an endpoint from a new bandwidth configuration for this device.
683 * Only one call to this function is allowed per endpoint before
684 * check_bandwidth() or reset_bandwidth() must be called.
685 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
686 * add the endpoint to the schedule with possibly new parameters denoted by a
687 * different endpoint descriptor in usb_host_endpoint.
688 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
689 * not allowed.
690 *
691 * The USB core will not allow URBs to be queued to an endpoint that is being
692 * disabled, so there's no need for mutual exclusion to protect
693 * the xhci->devs[slot_id] structure.
694 */
695int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
696 struct usb_host_endpoint *ep)
697{
698 struct xhci_hcd *xhci;
699 struct xhci_device_control *in_ctx;
700 unsigned int last_ctx;
701 unsigned int ep_index;
702 struct xhci_ep_ctx *ep_ctx;
703 u32 drop_flag;
704 u32 new_add_flags, new_drop_flags, new_slot_info;
705 int ret;
706
707 ret = xhci_check_args(hcd, udev, ep, 1, __func__);
708 if (ret <= 0)
709 return ret;
710 xhci = hcd_to_xhci(hcd);
711 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
712
713 drop_flag = xhci_get_endpoint_flag(&ep->desc);
714 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
715 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
716 __func__, drop_flag);
717 return 0;
718 }
719
720 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
721 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
722 __func__);
723 return -EINVAL;
724 }
725
726 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
727 ep_index = xhci_get_endpoint_index(&ep->desc);
728 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
729 /* If the HC already knows the endpoint is disabled,
730 * or the HCD has noted it is disabled, ignore this request
731 */
732 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
733 in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
734 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
735 __func__, ep);
736 return 0;
737 }
738
739 in_ctx->drop_flags |= drop_flag;
740 new_drop_flags = in_ctx->drop_flags;
741
742 in_ctx->add_flags = ~drop_flag;
743 new_add_flags = in_ctx->add_flags;
744
745 last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
746 /* Update the last valid endpoint context, if we deleted the last one */
747 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
748 in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
749 in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
750 }
751 new_slot_info = in_ctx->slot.dev_info;
752
753 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
754
755 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
756 (unsigned int) ep->desc.bEndpointAddress,
757 udev->slot_id,
758 (unsigned int) new_drop_flags,
759 (unsigned int) new_add_flags,
760 (unsigned int) new_slot_info);
761 return 0;
762}
763
764/* Add an endpoint to a new possible bandwidth configuration for this device.
765 * Only one call to this function is allowed per endpoint before
766 * check_bandwidth() or reset_bandwidth() must be called.
767 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
768 * add the endpoint to the schedule with possibly new parameters denoted by a
769 * different endpoint descriptor in usb_host_endpoint.
770 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
771 * not allowed.
772 *
773 * The USB core will not allow URBs to be queued to an endpoint until the
774 * configuration or alt setting is installed in the device, so there's no need
775 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
776 */
777int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
778 struct usb_host_endpoint *ep)
779{
780 struct xhci_hcd *xhci;
781 struct xhci_device_control *in_ctx;
782 unsigned int ep_index;
783 struct xhci_ep_ctx *ep_ctx;
784 u32 added_ctxs;
785 unsigned int last_ctx;
786 u32 new_add_flags, new_drop_flags, new_slot_info;
787 int ret = 0;
788
789 ret = xhci_check_args(hcd, udev, ep, 1, __func__);
790 if (ret <= 0)
791 return ret;
792 xhci = hcd_to_xhci(hcd);
793
794 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
795 last_ctx = xhci_last_valid_endpoint(added_ctxs);
796 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
797 /* FIXME when we have to issue an evaluate endpoint command to
798 * deal with ep0 max packet size changing once we get the
799 * descriptors
800 */
801 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
802 __func__, added_ctxs);
803 return 0;
804 }
805
806 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
807 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
808 __func__);
809 return -EINVAL;
810 }
811
812 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
813 ep_index = xhci_get_endpoint_index(&ep->desc);
814 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
815 /* If the HCD has already noted the endpoint is enabled,
816 * ignore this request.
817 */
818 if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
819 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
820 __func__, ep);
821 return 0;
822 }
823
824 /*
825 * Configuration and alternate setting changes must be done in
826 * process context, not interrupt context (or so the documentation
827 * for usb_set_interface() and usb_set_configuration() claims).
828 */
829 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
830 udev, ep, GFP_KERNEL) < 0) {
831 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
832 __func__, ep->desc.bEndpointAddress);
833 return -ENOMEM;
834 }
835
836 in_ctx->add_flags |= added_ctxs;
837 new_add_flags = in_ctx->add_flags;
838
839 /* If xhci_endpoint_disable() was called for this endpoint, but the
840 * xHC hasn't been notified yet through the check_bandwidth() call,
841 * this re-adds a new state for the endpoint from the new endpoint
842 * descriptors. We must drop and re-add this endpoint, so we leave the
843 * drop flags alone.
844 */
845 new_drop_flags = in_ctx->drop_flags;
846
847 /* Update the last valid endpoint context, if we just added one past it */
848 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
849 in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
850 in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
851 }
852 new_slot_info = in_ctx->slot.dev_info;
853
854 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
855 (unsigned int) ep->desc.bEndpointAddress,
856 udev->slot_id,
857 (unsigned int) new_drop_flags,
858 (unsigned int) new_add_flags,
859 (unsigned int) new_slot_info);
860 return 0;
861}
862
863static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
864{
865 struct xhci_ep_ctx *ep_ctx;
866 int i;
867
868 /* When a device's add flag and drop flag are zero, any subsequent
869 * configure endpoint command will leave that endpoint's state
870 * untouched. Make sure we don't leave any old state in the input
871 * endpoint contexts.
872 */
873 virt_dev->in_ctx->drop_flags = 0;
874 virt_dev->in_ctx->add_flags = 0;
875 virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
876 /* Endpoint 0 is always valid */
877 virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
878 for (i = 1; i < 31; ++i) {
879 ep_ctx = &virt_dev->in_ctx->ep[i];
880 ep_ctx->ep_info = 0;
881 ep_ctx->ep_info2 = 0;
882 ep_ctx->deq[0] = 0;
883 ep_ctx->deq[1] = 0;
884 ep_ctx->tx_info = 0;
885 }
886}
887
888/* Called after one or more calls to xhci_add_endpoint() or
889 * xhci_drop_endpoint(). If this call fails, the USB core is expected
890 * to call xhci_reset_bandwidth().
891 *
892 * Since we are in the middle of changing either configuration or
893 * installing a new alt setting, the USB core won't allow URBs to be
894 * enqueued for any endpoint on the old config or interface. Nothing
895 * else should be touching the xhci->devs[slot_id] structure, so we
896 * don't need to take the xhci->lock for manipulating that.
897 */
898int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
899{
900 int i;
901 int ret = 0;
902 int timeleft;
903 unsigned long flags;
904 struct xhci_hcd *xhci;
905 struct xhci_virt_device *virt_dev;
906
907 ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
908 if (ret <= 0)
909 return ret;
910 xhci = hcd_to_xhci(hcd);
911
912 if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
913 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
914 __func__);
915 return -EINVAL;
916 }
917 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
918 virt_dev = xhci->devs[udev->slot_id];
919
920 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
921 virt_dev->in_ctx->add_flags |= SLOT_FLAG;
922 virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
923 virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
924 virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
925 xhci_dbg(xhci, "New Input Control Context:\n");
926 xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
927 LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
928
929 spin_lock_irqsave(&xhci->lock, flags);
930 ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
931 udev->slot_id);
932 if (ret < 0) {
933 spin_unlock_irqrestore(&xhci->lock, flags);
934 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
935 return -ENOMEM;
936 }
937 xhci_ring_cmd_db(xhci);
938 spin_unlock_irqrestore(&xhci->lock, flags);
939
940 /* Wait for the configure endpoint command to complete */
941 timeleft = wait_for_completion_interruptible_timeout(
942 &virt_dev->cmd_completion,
943 USB_CTRL_SET_TIMEOUT);
944 if (timeleft <= 0) {
945 xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
946 timeleft == 0 ? "Timeout" : "Signal");
947 /* FIXME cancel the configure endpoint command */
948 return -ETIME;
949 }
950
951 switch (virt_dev->cmd_status) {
952 case COMP_ENOMEM:
953 dev_warn(&udev->dev, "Not enough host controller resources "
954 "for new device state.\n");
955 ret = -ENOMEM;
956 /* FIXME: can we allocate more resources for the HC? */
957 break;
958 case COMP_BW_ERR:
959 dev_warn(&udev->dev, "Not enough bandwidth "
960 "for new device state.\n");
961 ret = -ENOSPC;
962 /* FIXME: can we go back to the old state? */
963 break;
964 case COMP_TRB_ERR:
965 /* the HCD set up something wrong */
966 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
967 "and endpoint is not disabled.\n");
968 ret = -EINVAL;
969 break;
970 case COMP_SUCCESS:
971 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
972 break;
973 default:
974 xhci_err(xhci, "ERROR: unexpected command completion "
975 "code 0x%x.\n", virt_dev->cmd_status);
976 ret = -EINVAL;
977 break;
978 }
979 if (ret) {
980 /* Callee should call reset_bandwidth() */
981 return ret;
982 }
983
984 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
985 xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
986 LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
987
988 xhci_zero_in_ctx(virt_dev);
989 /* Free any old rings */
990 for (i = 1; i < 31; ++i) {
991 if (virt_dev->new_ep_rings[i]) {
992 xhci_ring_free(xhci, virt_dev->ep_rings[i]);
993 virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
994 virt_dev->new_ep_rings[i] = NULL;
995 }
996 }
997
998 return ret;
999}
1000
1001void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1002{
1003 struct xhci_hcd *xhci;
1004 struct xhci_virt_device *virt_dev;
1005 int i, ret;
1006
1007 ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1008 if (ret <= 0)
1009 return;
1010 xhci = hcd_to_xhci(hcd);
1011
1012 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1013 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1014 __func__);
1015 return;
1016 }
1017 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1018 virt_dev = xhci->devs[udev->slot_id];
1019 /* Free any rings allocated for added endpoints */
1020 for (i = 0; i < 31; ++i) {
1021 if (virt_dev->new_ep_rings[i]) {
1022 xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
1023 virt_dev->new_ep_rings[i] = NULL;
1024 }
1025 }
1026 xhci_zero_in_ctx(virt_dev);
1027}
1028
1029/*
1030 * At this point, the struct usb_device is about to go away, the device has
1031 * disconnected, and all traffic has been stopped and the endpoints have been
1032 * disabled. Free any HC data structures associated with that device.
1033 */
1034void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
1035{
1036 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1037 unsigned long flags;
1038
1039 if (udev->slot_id == 0)
1040 return;
1041
1042 spin_lock_irqsave(&xhci->lock, flags);
1043 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
1044 spin_unlock_irqrestore(&xhci->lock, flags);
1045 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1046 return;
1047 }
1048 xhci_ring_cmd_db(xhci);
1049 spin_unlock_irqrestore(&xhci->lock, flags);
1050 /*
1051 * Event command completion handler will free any data structures
1052 * associated with the slot. XXX Can free sleep?
1053 */
1054}
1055
1056/*
1057 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
1058 * timed out, or allocating memory failed. Returns 1 on success.
1059 */
1060int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
1061{
1062 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1063 unsigned long flags;
1064 int timeleft;
1065 int ret;
1066
1067 spin_lock_irqsave(&xhci->lock, flags);
1068 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
1069 if (ret) {
1070 spin_unlock_irqrestore(&xhci->lock, flags);
1071 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1072 return 0;
1073 }
1074 xhci_ring_cmd_db(xhci);
1075 spin_unlock_irqrestore(&xhci->lock, flags);
1076
1077 /* XXX: how much time for xHC slot assignment? */
1078 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
1079 USB_CTRL_SET_TIMEOUT);
1080 if (timeleft <= 0) {
1081 xhci_warn(xhci, "%s while waiting for a slot\n",
1082 timeleft == 0 ? "Timeout" : "Signal");
1083 /* FIXME cancel the enable slot request */
1084 return 0;
1085 }
1086
1087 if (!xhci->slot_id) {
1088 xhci_err(xhci, "Error while assigning device slot ID\n");
1089 return 0;
1090 }
1091 /* xhci_alloc_virt_device() does not touch rings; no need to lock */
1092 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
1093 /* Disable slot, if we can do it without mem alloc */
1094 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
1095 spin_lock_irqsave(&xhci->lock, flags);
1096 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
1097 xhci_ring_cmd_db(xhci);
1098 spin_unlock_irqrestore(&xhci->lock, flags);
1099 return 0;
1100 }
1101 udev->slot_id = xhci->slot_id;
1102 /* Is this a LS or FS device under a HS hub? */
1103 	/* Hub or peripheral? */
1104 return 1;
1105}
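/*
 * The handshake above leans on the command completion path: when the
 * Enable Slot command completes, the event handler in xhci-ring.c is
 * expected to record the new slot ID in xhci->slot_id and complete
 * xhci->addr_dev, which is what the wait above blocks on.
 */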
1106
1107/*
1108 * Issue an Address Device command (which will issue a SetAddress request to
1109 * the device).
1110 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
1111 * we should only issue and wait on one address command at a time.
1112 *
1113 * We add one to the device address issued by the hardware because the USB core
1114 * uses address 1 for the root hubs (even though they're not really devices).
1115 */
1116int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1117{
1118 unsigned long flags;
1119 int timeleft;
1120 struct xhci_virt_device *virt_dev;
1121 int ret = 0;
1122 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1123 u32 temp;
1124
1125 if (!udev->slot_id) {
1126 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
1127 return -EINVAL;
1128 }
1129
1130 virt_dev = xhci->devs[udev->slot_id];
1131
1132 /* If this is a Set Address to an unconfigured device, setup ep 0 */
1133 if (!udev->config)
1134 xhci_setup_addressable_virt_dev(xhci, udev);
1135 /* Otherwise, assume the core has the device configured how it wants */
1136
1137 spin_lock_irqsave(&xhci->lock, flags);
1138 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
1139 udev->slot_id);
1140 if (ret) {
1141 spin_unlock_irqrestore(&xhci->lock, flags);
1142 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1143 return ret;
1144 }
1145 xhci_ring_cmd_db(xhci);
1146 spin_unlock_irqrestore(&xhci->lock, flags);
1147
1148 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
1149 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
1150 USB_CTRL_SET_TIMEOUT);
1151 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
1152 * the SetAddress() "recovery interval" required by USB and aborting the
1153 	 * command on a timeout."
1154 */
1155 if (timeleft <= 0) {
1156 		xhci_warn(xhci, "%s while waiting for address device command\n",
1157 timeleft == 0 ? "Timeout" : "Signal");
1158 /* FIXME cancel the address device command */
1159 return -ETIME;
1160 }
1161
1162 switch (virt_dev->cmd_status) {
1163 case COMP_CTX_STATE:
1164 case COMP_EBADSLT:
1165 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
1166 udev->slot_id);
1167 ret = -EINVAL;
1168 break;
1169 case COMP_TX_ERR:
1170 dev_warn(&udev->dev, "Device not responding to set address.\n");
1171 ret = -EPROTO;
1172 break;
1173 case COMP_SUCCESS:
1174 xhci_dbg(xhci, "Successful Address Device command\n");
1175 break;
1176 default:
1177 xhci_err(xhci, "ERROR: unexpected command completion "
1178 "code 0x%x.\n", virt_dev->cmd_status);
1179 ret = -EINVAL;
1180 break;
1181 }
1182 if (ret) {
1183 return ret;
1184 }
1185 temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
1186 xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
1187 temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
1188 xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
1189 xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
1190 udev->slot_id,
1191 &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
1192 xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
1193 xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
1194 udev->slot_id,
1195 &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
1196 xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
1197 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
1198 (unsigned long long)virt_dev->out_ctx_dma);
1199 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
1200 xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
1201 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
1202 xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
1203 /*
1204 * USB core uses address 1 for the roothubs, so we add one to the
1205 * address given back to us by the HC.
1206 */
1207 udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
1208 /* Zero the input context control for later use */
1209 virt_dev->in_ctx->add_flags = 0;
1210 virt_dev->in_ctx->drop_flags = 0;
1211 /* Mirror flags in the output context for future ep enable/disable */
1212 virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
1213 virt_dev->out_ctx->drop_flags = 0;
1214
1215 xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
1216 /* XXX Meh, not sure if anyone else but choose_address uses this. */
1217 set_bit(udev->devnum, udev->bus->devmap.devicemap);
1218
1219 return 0;
1220}
1221
1222int xhci_get_frame(struct usb_hcd *hcd)
1223{
1224 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1225 /* EHCI mods by the periodic size. Why? */
1226 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
1227}
1228
1229MODULE_DESCRIPTION(DRIVER_DESC);
1230MODULE_AUTHOR(DRIVER_AUTHOR);
1231MODULE_LICENSE("GPL");
1232
1233static int __init xhci_hcd_init(void)
1234{
1235#ifdef CONFIG_PCI
1236 int retval = 0;
1237
1238 retval = xhci_register_pci();
1239
1240 if (retval < 0) {
1241 		printk(KERN_DEBUG "Problem registering PCI driver.\n");
1242 return retval;
1243 }
1244#endif
1245 /*
1246 * Check the compiler generated sizes of structures that must be laid
1247 * out in specific ways for hardware access.
1248 */
1249 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
1250 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
1251 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
1252 /* xhci_device_control has eight fields, and also
1253 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
1254 */
1255 BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
1256 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
1257 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
1258 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
1259 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
1260 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
1261 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
1262 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
1264 return 0;
1265}
1266module_init(xhci_hcd_init);
1267
1268static void __exit xhci_hcd_cleanup(void)
1269{
1270#ifdef CONFIG_PCI
1271 xhci_unregister_pci();
1272#endif
1273}
1274module_exit(xhci_hcd_cleanup);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
new file mode 100644
index 000000000000..eac5b53aa9e7
--- /dev/null
+++ b/drivers/usb/host/xhci-hub.c
@@ -0,0 +1,308 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <asm/unaligned.h>
24
25#include "xhci.h"
26
27static void xhci_hub_descriptor(struct xhci_hcd *xhci,
28 struct usb_hub_descriptor *desc)
29{
30 int ports;
31 u16 temp;
32
33 ports = HCS_MAX_PORTS(xhci->hcs_params1);
34
35 /* USB 3.0 hubs have a different descriptor, but we fake this for now */
36 desc->bDescriptorType = 0x29;
37 desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.9 says 20ms max */
38 desc->bHubContrCurrent = 0;
39
40 desc->bNbrPorts = ports;
41 temp = 1 + (ports / 8);
42 desc->bDescLength = 7 + 2 * temp;
43
44 /* Why does core/hcd.h define bitmap? It's just confusing. */
45 memset(&desc->DeviceRemovable[0], 0, temp);
46 memset(&desc->DeviceRemovable[temp], 0xff, temp);
47
48 /* Ugh, these should be #defines, FIXME */
49 /* Using table 11-13 in USB 2.0 spec. */
50 temp = 0;
51 /* Bits 1:0 - support port power switching, or power always on */
52 if (HCC_PPC(xhci->hcc_params))
53 temp |= 0x0001;
54 else
55 temp |= 0x0002;
56 /* Bit 2 - root hubs are not part of a compound device */
57 /* Bits 4:3 - individual port over current protection */
58 temp |= 0x0008;
59 /* Bits 6:5 - no TTs in root ports */
60 /* Bit 7 - no port indicators */
61 desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
62}
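/*
 * For example, with ports = 4 the math above gives temp = 1 and
 * bDescLength = 9: a 7-byte header, one byte of DeviceRemovable bits
 * (all zero, so every port is removable), and one 0xff byte for the
 * legacy PortPwrCtrlMask.
 */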
63
64static unsigned int xhci_port_speed(unsigned int port_status)
65{
66 if (DEV_LOWSPEED(port_status))
67 return 1 << USB_PORT_FEAT_LOWSPEED;
68 if (DEV_HIGHSPEED(port_status))
69 return 1 << USB_PORT_FEAT_HIGHSPEED;
70 if (DEV_SUPERSPEED(port_status))
71 return 1 << USB_PORT_FEAT_SUPERSPEED;
72 /*
73 * FIXME: Yes, we should check for full speed, but the core uses that as
74 * a default in portspeed() in usb/core/hub.c (which is the only place
75 * USB_PORT_FEAT_*SPEED is used).
76 */
77 return 0;
78}
79
80/*
81 * These bits are Read Only (RO) and should be saved and written to the
82 * registers: 0, 3, 10:13, 30
83 * connect status, over-current status, port speed, and device removable.
84 * connect status and port speed are also sticky - meaning they're in
85 * the AUX well and they aren't changed by a hot, warm, or cold reset.
86 */
87#define XHCI_PORT_RO ((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
88/*
89 * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
90 * bits 5:8, 9, 14:15, 25:27
91 * link state, port power, port indicator state, "wake on" enable state
92 */
93#define XHCI_PORT_RWS ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
94/*
95 * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
96 * bit 4 (port reset)
97 */
98#define XHCI_PORT_RW1S ((1<<4))
99/*
100 * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
101 * bits 1, 17, 18, 19, 20, 21, 22, 23
102 * port enable/disable, and
103 * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
104 * over-current, reset, link state, and L1 change
105 */
106#define XHCI_PORT_RW1CS ((1<<1) | (0x7f<<17))
107/*
108 * Bit 16 is RW, and writing a '1' to it causes the link state control to be
109 * latched in
110 */
111#define XHCI_PORT_RW ((1<<16))
112/*
113 * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
114 * bits 2, 24, 28:31
115 */
116#define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28))
117
118/*
119 * Given a port state, this function returns a value that would result in the
120 * port being in the same state, if the value was written to the port status
121 * control register.
122 * Save Read Only (RO) bits and save read/write bits where
123 * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
124 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
125 */
126static u32 xhci_port_state_to_neutral(u32 state)
127{
128 /* Save read-only status and port state */
129 return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
130}
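/*
 * Without this masking, writing a just-read PORTSC value straight back
 * would set RW1S bit 4 (triggering a port reset) and ack the RW1CS bits
 * (1 and 17:23, wiping pending change bits), so the read-modify-write
 * sequences in xhci_hub_control() below always neutralize the state
 * first.
 */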
131
132int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
133 u16 wIndex, char *buf, u16 wLength)
134{
135 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
136 int ports;
137 unsigned long flags;
138 u32 temp, status;
139 int retval = 0;
140 u32 __iomem *addr;
141 char *port_change_bit;
142
143 ports = HCS_MAX_PORTS(xhci->hcs_params1);
144
145 spin_lock_irqsave(&xhci->lock, flags);
146 switch (typeReq) {
147 case GetHubStatus:
148 /* No power source, over-current reported per port */
149 memset(buf, 0, 4);
150 break;
151 case GetHubDescriptor:
152 xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
153 break;
154 case GetPortStatus:
155 if (!wIndex || wIndex > ports)
156 goto error;
157 wIndex--;
158 status = 0;
159 addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
160 temp = xhci_readl(xhci, addr);
161 xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);
162
163 /* wPortChange bits */
164 if (temp & PORT_CSC)
165 status |= 1 << USB_PORT_FEAT_C_CONNECTION;
166 if (temp & PORT_PEC)
167 status |= 1 << USB_PORT_FEAT_C_ENABLE;
168 if ((temp & PORT_OCC))
169 status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
170 /*
171 * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
172 * changes
173 */
174 if (temp & PORT_CONNECT) {
175 status |= 1 << USB_PORT_FEAT_CONNECTION;
176 status |= xhci_port_speed(temp);
177 }
178 if (temp & PORT_PE)
179 status |= 1 << USB_PORT_FEAT_ENABLE;
180 if (temp & PORT_OC)
181 status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
182 if (temp & PORT_RESET)
183 status |= 1 << USB_PORT_FEAT_RESET;
184 if (temp & PORT_POWER)
185 status |= 1 << USB_PORT_FEAT_POWER;
186 xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
187 put_unaligned(cpu_to_le32(status), (__le32 *) buf);
188 break;
189 case SetPortFeature:
190 wIndex &= 0xff;
191 if (!wIndex || wIndex > ports)
192 goto error;
193 wIndex--;
194 addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
195 temp = xhci_readl(xhci, addr);
196 temp = xhci_port_state_to_neutral(temp);
197 switch (wValue) {
198 case USB_PORT_FEAT_POWER:
199 /*
200 * Turn on ports, even if there isn't per-port switching.
201 * HC will report connect events even before this is set.
202 * However, khubd will ignore the roothub events until
203 * the roothub is registered.
204 */
205 xhci_writel(xhci, temp | PORT_POWER, addr);
206
207 temp = xhci_readl(xhci, addr);
208 xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
209 break;
210 case USB_PORT_FEAT_RESET:
211 temp = (temp | PORT_RESET);
212 xhci_writel(xhci, temp, addr);
213
214 temp = xhci_readl(xhci, addr);
215 xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
216 break;
217 default:
218 goto error;
219 }
220 temp = xhci_readl(xhci, addr); /* unblock any posted writes */
221 break;
222 case ClearPortFeature:
223 if (!wIndex || wIndex > ports)
224 goto error;
225 wIndex--;
226 addr = &xhci->op_regs->port_status_base +
227 NUM_PORT_REGS*(wIndex & 0xff);
228 temp = xhci_readl(xhci, addr);
229 temp = xhci_port_state_to_neutral(temp);
230 switch (wValue) {
231 case USB_PORT_FEAT_C_RESET:
232 status = PORT_RC;
233 port_change_bit = "reset";
234 break;
235 case USB_PORT_FEAT_C_CONNECTION:
236 status = PORT_CSC;
237 port_change_bit = "connect";
238 break;
239 case USB_PORT_FEAT_C_OVER_CURRENT:
240 status = PORT_OCC;
241 port_change_bit = "over-current";
242 break;
243 default:
244 goto error;
245 }
246 /* Change bits are all write 1 to clear */
247 xhci_writel(xhci, temp | status, addr);
248 temp = xhci_readl(xhci, addr);
249 xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
250 port_change_bit, wIndex, temp);
251 temp = xhci_readl(xhci, addr); /* unblock any posted writes */
252 break;
253 default:
254error:
255 /* "stall" on error */
256 retval = -EPIPE;
257 }
258 spin_unlock_irqrestore(&xhci->lock, flags);
259 return retval;
260}
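/*
 * Returning -EPIPE makes the root hub emulate a real hub that STALLs an
 * unsupported control request, which is how the USB core expects hub
 * errors to be signalled.
 */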
261
262/*
263 * Returns 0 if the status hasn't changed, or the number of bytes in buf.
264 * Ports are 0-indexed from the HCD point of view,
 265 * and 1-indexed from the USB core point of view.
266 * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
267 *
268 * Note that the status change bits will be cleared as soon as a port status
269 * change event is generated, so we use the saved status from that event.
270 */
271int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
272{
273 unsigned long flags;
274 u32 temp, status;
275 int i, retval;
276 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
277 int ports;
278 u32 __iomem *addr;
279
280 ports = HCS_MAX_PORTS(xhci->hcs_params1);
281
282 /* Initial status is no changes */
283 buf[0] = 0;
284 status = 0;
285 if (ports > 7) {
286 buf[1] = 0;
287 retval = 2;
288 } else {
289 retval = 1;
290 }
291
292 spin_lock_irqsave(&xhci->lock, flags);
293 /* For each port, did anything change? If so, set that bit in buf. */
294 for (i = 0; i < ports; i++) {
295 addr = &xhci->op_regs->port_status_base +
296 NUM_PORT_REGS*i;
297 temp = xhci_readl(xhci, addr);
298 if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
299 if (i < 7)
300 buf[0] |= 1 << (i + 1);
301 else
302 buf[1] |= 1 << (i - 7);
303 status = 1;
304 }
305 }
306 spin_unlock_irqrestore(&xhci->lock, flags);
307 return status ? retval : 0;
308}
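/*
 * The bitmap above follows the hub status change convention (USB 2.0
 * section 11.12.4): bit 0 of buf[0] is reserved for the hub itself and
 * port N (1-indexed) reports in bit N, which is why 0-indexed port i
 * sets bit i + 1.
 */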
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
new file mode 100644
index 000000000000..c8a72de1c508
--- /dev/null
+++ b/drivers/usb/host/xhci-mem.c
@@ -0,0 +1,769 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/usb.h>
24#include <linux/pci.h>
25#include <linux/dmapool.h>
26
27#include "xhci.h"
28
29/*
30 * Allocates a generic ring segment from the ring pool, sets the dma address,
31 * initializes the segment to zero, and sets the private next pointer to NULL.
32 *
33 * Section 4.11.1.1:
34 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
35 */
36static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
37{
38 struct xhci_segment *seg;
39 dma_addr_t dma;
40
41 seg = kzalloc(sizeof *seg, flags);
42 if (!seg)
43 return 0;
44 xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
45
46 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
47 if (!seg->trbs) {
48 kfree(seg);
49 return 0;
50 }
51 xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
52 seg->trbs, (unsigned long long)dma);
53
54 memset(seg->trbs, 0, SEGMENT_SIZE);
55 seg->dma = dma;
56 seg->next = NULL;
57
58 return seg;
59}
60
61static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
62{
63 if (!seg)
64 return;
65 if (seg->trbs) {
66 xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
67 seg->trbs, (unsigned long long)seg->dma);
68 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
69 seg->trbs = NULL;
70 }
71 xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
72 kfree(seg);
73}
74
75/*
76 * Make the prev segment point to the next segment.
77 *
78 * Change the last TRB in the prev segment to be a Link TRB which points to the
79 * DMA address of the next segment. The caller needs to set any Link TRB
80 * related flags, such as End TRB, Toggle Cycle, and no snoop.
81 */
82static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
83 struct xhci_segment *next, bool link_trbs)
84{
85 u32 val;
86
87 if (!prev || !next)
88 return;
89 prev->next = next;
90 if (link_trbs) {
91 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
92
93 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
94 val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
95 val &= ~TRB_TYPE_BITMASK;
96 val |= TRB_TYPE(TRB_LINK);
97 prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
98 }
99 xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
100 (unsigned long long)prev->dma,
101 (unsigned long long)next->dma);
102}
103
104/* XXX: Do we need the hcd structure in all these functions? */
105void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
106{
107 struct xhci_segment *seg;
108 struct xhci_segment *first_seg;
109
110 if (!ring || !ring->first_seg)
111 return;
112 first_seg = ring->first_seg;
113 seg = first_seg->next;
114 xhci_dbg(xhci, "Freeing ring at %p\n", ring);
115 while (seg != first_seg) {
116 struct xhci_segment *next = seg->next;
117 xhci_segment_free(xhci, seg);
118 seg = next;
119 }
120 xhci_segment_free(xhci, first_seg);
121 ring->first_seg = NULL;
122 kfree(ring);
123}
124
 125/*
126 * Create a new ring with zero or more segments.
127 *
128 * Link each segment together into a ring.
129 * Set the end flag and the cycle toggle bit on the last segment.
130 * See section 4.9.1 and figures 15 and 16.
131 */
132static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
133 unsigned int num_segs, bool link_trbs, gfp_t flags)
134{
135 struct xhci_ring *ring;
136 struct xhci_segment *prev;
137
138 ring = kzalloc(sizeof *(ring), flags);
139 xhci_dbg(xhci, "Allocating ring at %p\n", ring);
140 if (!ring)
141 return 0;
142
143 INIT_LIST_HEAD(&ring->td_list);
144 INIT_LIST_HEAD(&ring->cancelled_td_list);
145 if (num_segs == 0)
146 return ring;
147
148 ring->first_seg = xhci_segment_alloc(xhci, flags);
149 if (!ring->first_seg)
150 goto fail;
151 num_segs--;
152
153 prev = ring->first_seg;
154 while (num_segs > 0) {
155 struct xhci_segment *next;
156
157 next = xhci_segment_alloc(xhci, flags);
158 if (!next)
159 goto fail;
160 xhci_link_segments(xhci, prev, next, link_trbs);
161
162 prev = next;
163 num_segs--;
164 }
165 xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
166
167 if (link_trbs) {
168 /* See section 4.9.2.1 and 6.4.4.1 */
169 prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
170 xhci_dbg(xhci, "Wrote link toggle flag to"
171 " segment %p (virtual), 0x%llx (DMA)\n",
172 prev, (unsigned long long)prev->dma);
173 }
174 /* The ring is empty, so the enqueue pointer == dequeue pointer */
175 ring->enqueue = ring->first_seg->trbs;
176 ring->enq_seg = ring->first_seg;
177 ring->dequeue = ring->enqueue;
178 ring->deq_seg = ring->first_seg;
179 /* The ring is initialized to 0. The producer must write 1 to the cycle
180 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
181 * compare CCS to the cycle bit to check ownership, so CCS = 1.
182 */
183 ring->cycle_state = 1;
184
185 return ring;
186
187fail:
188 xhci_ring_free(xhci, ring);
189 return 0;
190}
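/*
 * Note that in a linked ring the last TRB of each segment is consumed by
 * the link TRB, so a one-segment ring holds at most TRBS_PER_SEGMENT - 1
 * usable TRBs, and the enqueue/dequeue rules in xhci-ring.c keep one
 * more TRB free so that enqueue == dequeue always means "empty".
 */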
191
192/* All the xhci_tds in the ring's TD list should be freed at this point */
193void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
194{
195 struct xhci_virt_device *dev;
196 int i;
197
198 /* Slot ID 0 is reserved */
199 if (slot_id == 0 || !xhci->devs[slot_id])
200 return;
201
202 dev = xhci->devs[slot_id];
203 xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
204 xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
205 if (!dev)
206 return;
207
208 for (i = 0; i < 31; ++i)
209 if (dev->ep_rings[i])
210 xhci_ring_free(xhci, dev->ep_rings[i]);
211
212 if (dev->in_ctx)
213 dma_pool_free(xhci->device_pool,
214 dev->in_ctx, dev->in_ctx_dma);
215 if (dev->out_ctx)
216 dma_pool_free(xhci->device_pool,
217 dev->out_ctx, dev->out_ctx_dma);
218 kfree(xhci->devs[slot_id]);
219 xhci->devs[slot_id] = 0;
220}
221
222int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
223 struct usb_device *udev, gfp_t flags)
224{
225 dma_addr_t dma;
226 struct xhci_virt_device *dev;
227
228 /* Slot ID 0 is reserved */
229 if (slot_id == 0 || xhci->devs[slot_id]) {
230 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
231 return 0;
232 }
233
234 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
235 if (!xhci->devs[slot_id])
236 return 0;
237 dev = xhci->devs[slot_id];
238
239 /* Allocate the (output) device context that will be used in the HC */
240 dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
241 if (!dev->out_ctx)
242 goto fail;
243 dev->out_ctx_dma = dma;
244 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
245 (unsigned long long)dma);
246 memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
247
248 /* Allocate the (input) device context for address device command */
249 dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
250 if (!dev->in_ctx)
251 goto fail;
252 dev->in_ctx_dma = dma;
253 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
254 (unsigned long long)dma);
255 memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
256
257 /* Allocate endpoint 0 ring */
258 dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
259 if (!dev->ep_rings[0])
260 goto fail;
261
262 init_completion(&dev->cmd_completion);
263
264 /*
265 * Point to output device context in dcbaa; skip the output control
266 * context, which is eight 32 bit fields (or 32 bytes long)
267 */
268 xhci->dcbaa->dev_context_ptrs[2*slot_id] =
269 (u32) dev->out_ctx_dma + (32);
270 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
271 slot_id,
272 &xhci->dcbaa->dev_context_ptrs[2*slot_id],
273 (unsigned long long)dev->out_ctx_dma);
274 xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
275
276 return 1;
277fail:
278 xhci_free_virt_device(xhci, slot_id);
279 return 0;
280}
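/*
 * dev_context_ptrs[] stores each 64-bit DCBAA entry as two 32-bit
 * halves: index 2*slot_id holds the low dword and 2*slot_id + 1 the
 * high dword, which is written as zero above since the (u32) cast
 * assumes the context lives in 32-bit DMA space.
 */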
281
282/* Setup an xHCI virtual device for a Set Address command */
283int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
284{
285 struct xhci_virt_device *dev;
286 struct xhci_ep_ctx *ep0_ctx;
287 struct usb_device *top_dev;
288
289 dev = xhci->devs[udev->slot_id];
290 /* Slot ID 0 is reserved */
291 if (udev->slot_id == 0 || !dev) {
292 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
293 udev->slot_id);
294 return -EINVAL;
295 }
296 ep0_ctx = &dev->in_ctx->ep[0];
297
298 /* 2) New slot context and endpoint 0 context are valid*/
299 dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
300
301 /* 3) Only the control endpoint is valid - one endpoint context */
302 dev->in_ctx->slot.dev_info |= LAST_CTX(1);
303
304 switch (udev->speed) {
305 case USB_SPEED_SUPER:
306 dev->in_ctx->slot.dev_info |= (u32) udev->route;
307 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
308 break;
309 case USB_SPEED_HIGH:
310 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
311 break;
312 case USB_SPEED_FULL:
313 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
314 break;
315 case USB_SPEED_LOW:
316 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
317 break;
318 case USB_SPEED_VARIABLE:
319 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
320 return -EINVAL;
321 break;
322 default:
323 /* Speed was set earlier, this shouldn't happen. */
324 BUG();
325 }
326 /* Find the root hub port this device is under */
327 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
328 top_dev = top_dev->parent)
329 /* Found device below root hub */;
330 dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
331 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
332
333 /* Is this a LS/FS device under a HS hub? */
334 /*
335 * FIXME: I don't think this is right, where does the TT info for the
336 * roothub or parent hub come from?
337 */
338 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
339 udev->tt) {
340 dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
341 dev->in_ctx->slot.tt_info |= udev->ttport << 8;
342 }
343 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
344 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
345
346 /* Step 4 - ring already allocated */
347 /* Step 5 */
348 ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
349 /*
350 * See section 4.3 bullet 6:
351 * The default Max Packet size for ep0 is "8 bytes for a USB2
352 * LS/FS/HS device or 512 bytes for a USB3 SS device"
353 * XXX: Not sure about wireless USB devices.
354 */
355 if (udev->speed == USB_SPEED_SUPER)
356 ep0_ctx->ep_info2 |= MAX_PACKET(512);
357 else
358 ep0_ctx->ep_info2 |= MAX_PACKET(8);
359 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
360 ep0_ctx->ep_info2 |= MAX_BURST(0);
361 ep0_ctx->ep_info2 |= ERROR_COUNT(3);
362
363 ep0_ctx->deq[0] =
364 dev->ep_rings[0]->first_seg->dma;
365 ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
366 ep0_ctx->deq[1] = 0;
367
368 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
369
370 return 0;
371}
372
373/* Return the polling or NAK interval.
374 *
375 * The polling interval is expressed in "microframes". If xHCI's Interval field
376 * is set to N, it will service the endpoint every 2^(Interval)*125us.
377 *
378 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
379 * is set to 0.
380 */
381static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
382 struct usb_host_endpoint *ep)
383{
384 unsigned int interval = 0;
385
386 switch (udev->speed) {
387 case USB_SPEED_HIGH:
388 /* Max NAK rate */
389 if (usb_endpoint_xfer_control(&ep->desc) ||
390 usb_endpoint_xfer_bulk(&ep->desc))
391 interval = ep->desc.bInterval;
392 /* Fall through - SS and HS isoc/int have same decoding */
393 case USB_SPEED_SUPER:
394 if (usb_endpoint_xfer_int(&ep->desc) ||
395 usb_endpoint_xfer_isoc(&ep->desc)) {
396 if (ep->desc.bInterval == 0)
397 interval = 0;
398 else
399 interval = ep->desc.bInterval - 1;
400 if (interval > 15)
401 interval = 15;
 402 			if (interval != ep->desc.bInterval - 1)
403 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
404 ep->desc.bEndpointAddress, 1 << interval);
405 }
406 break;
407 /* Convert bInterval (in 1-255 frames) to microframes and round down to
408 * nearest power of 2.
409 */
410 case USB_SPEED_FULL:
411 case USB_SPEED_LOW:
412 if (usb_endpoint_xfer_int(&ep->desc) ||
413 usb_endpoint_xfer_isoc(&ep->desc)) {
414 interval = fls(8*ep->desc.bInterval) - 1;
415 if (interval > 10)
416 interval = 10;
417 if (interval < 3)
418 interval = 3;
419 if ((1 << interval) != 8*ep->desc.bInterval)
420 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
421 ep->desc.bEndpointAddress, 1 << interval);
422 }
423 break;
424 default:
425 BUG();
426 }
427 return EP_INTERVAL(interval);
428}
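/*
 * Worked example of the full-speed rounding above: an interrupt endpoint
 * with bInterval = 10 frames is 80 microframes, and fls(80) - 1 = 6, so
 * the Interval field becomes 6 and the xHC services the endpoint every
 * 2^6 * 125us = 8ms. Since 1 << 6 = 64 != 80, the rounding warning
 * fires.
 */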
429
430static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
431 struct usb_host_endpoint *ep)
432{
433 int in;
434 u32 type;
435
436 in = usb_endpoint_dir_in(&ep->desc);
437 if (usb_endpoint_xfer_control(&ep->desc)) {
438 type = EP_TYPE(CTRL_EP);
439 } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
440 if (in)
441 type = EP_TYPE(BULK_IN_EP);
442 else
443 type = EP_TYPE(BULK_OUT_EP);
444 } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
445 if (in)
446 type = EP_TYPE(ISOC_IN_EP);
447 else
448 type = EP_TYPE(ISOC_OUT_EP);
449 } else if (usb_endpoint_xfer_int(&ep->desc)) {
450 if (in)
451 type = EP_TYPE(INT_IN_EP);
452 else
453 type = EP_TYPE(INT_OUT_EP);
454 } else {
455 BUG();
456 }
457 return type;
458}
459
460int xhci_endpoint_init(struct xhci_hcd *xhci,
461 struct xhci_virt_device *virt_dev,
462 struct usb_device *udev,
463 struct usb_host_endpoint *ep,
464 gfp_t mem_flags)
465{
466 unsigned int ep_index;
467 struct xhci_ep_ctx *ep_ctx;
468 struct xhci_ring *ep_ring;
469 unsigned int max_packet;
470 unsigned int max_burst;
471
472 ep_index = xhci_get_endpoint_index(&ep->desc);
473 ep_ctx = &virt_dev->in_ctx->ep[ep_index];
474
475 /* Set up the endpoint ring */
476 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
477 if (!virt_dev->new_ep_rings[ep_index])
478 return -ENOMEM;
479 ep_ring = virt_dev->new_ep_rings[ep_index];
480 ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
481 ep_ctx->deq[1] = 0;
482
483 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
484
485 /* FIXME dig Mult and streams info out of ep companion desc */
486
487 /* Allow 3 retries for everything but isoc */
488 if (!usb_endpoint_xfer_isoc(&ep->desc))
489 ep_ctx->ep_info2 = ERROR_COUNT(3);
490 else
491 ep_ctx->ep_info2 = ERROR_COUNT(0);
492
493 ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
494
495 /* Set the max packet size and max burst */
496 switch (udev->speed) {
497 case USB_SPEED_SUPER:
498 max_packet = ep->desc.wMaxPacketSize;
499 ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
500 /* dig out max burst from ep companion desc */
501 max_packet = ep->ss_ep_comp->desc.bMaxBurst;
502 ep_ctx->ep_info2 |= MAX_BURST(max_packet);
503 break;
504 case USB_SPEED_HIGH:
505 /* bits 11:12 specify the number of additional transaction
506 * opportunities per microframe (USB 2.0, section 9.6.6)
507 */
508 if (usb_endpoint_xfer_isoc(&ep->desc) ||
509 usb_endpoint_xfer_int(&ep->desc)) {
510 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
511 ep_ctx->ep_info2 |= MAX_BURST(max_burst);
512 }
513 /* Fall through */
514 case USB_SPEED_FULL:
515 case USB_SPEED_LOW:
516 max_packet = ep->desc.wMaxPacketSize & 0x3ff;
517 ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
518 break;
519 default:
520 BUG();
521 }
522 /* FIXME Debug endpoint context */
523 return 0;
524}
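/*
 * Example for the high-speed periodic case above: wMaxPacketSize =
 * 0x1300 gives a 768-byte max packet after the 0x3ff mask, and
 * (0x1300 & 0x1800) >> 11 = 2 additional transactions per microframe,
 * so the endpoint context ends up with MAX_PACKET(768) | MAX_BURST(2).
 */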
525
526void xhci_endpoint_zero(struct xhci_hcd *xhci,
527 struct xhci_virt_device *virt_dev,
528 struct usb_host_endpoint *ep)
529{
530 unsigned int ep_index;
531 struct xhci_ep_ctx *ep_ctx;
532
533 ep_index = xhci_get_endpoint_index(&ep->desc);
534 ep_ctx = &virt_dev->in_ctx->ep[ep_index];
535
536 ep_ctx->ep_info = 0;
537 ep_ctx->ep_info2 = 0;
538 ep_ctx->deq[0] = 0;
539 ep_ctx->deq[1] = 0;
540 ep_ctx->tx_info = 0;
541 /* Don't free the endpoint ring until the set interface or configuration
542 * request succeeds.
543 */
544}
545
546void xhci_mem_cleanup(struct xhci_hcd *xhci)
547{
548 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
549 int size;
550 int i;
551
552 /* Free the Event Ring Segment Table and the actual Event Ring */
553 xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
554 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
555 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
556 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
557 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
558 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
559 if (xhci->erst.entries)
560 pci_free_consistent(pdev, size,
561 xhci->erst.entries, xhci->erst.erst_dma_addr);
562 xhci->erst.entries = NULL;
563 xhci_dbg(xhci, "Freed ERST\n");
564 if (xhci->event_ring)
565 xhci_ring_free(xhci, xhci->event_ring);
566 xhci->event_ring = NULL;
567 xhci_dbg(xhci, "Freed event ring\n");
568
569 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
570 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
571 if (xhci->cmd_ring)
572 xhci_ring_free(xhci, xhci->cmd_ring);
573 xhci->cmd_ring = NULL;
574 xhci_dbg(xhci, "Freed command ring\n");
575
576 for (i = 1; i < MAX_HC_SLOTS; ++i)
577 xhci_free_virt_device(xhci, i);
578
579 if (xhci->segment_pool)
580 dma_pool_destroy(xhci->segment_pool);
581 xhci->segment_pool = NULL;
582 xhci_dbg(xhci, "Freed segment pool\n");
583
584 if (xhci->device_pool)
585 dma_pool_destroy(xhci->device_pool);
586 xhci->device_pool = NULL;
587 xhci_dbg(xhci, "Freed device context pool\n");
588
589 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
590 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
591 if (xhci->dcbaa)
592 pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
593 xhci->dcbaa, xhci->dcbaa->dma);
594 xhci->dcbaa = NULL;
595
596 xhci->page_size = 0;
597 xhci->page_shift = 0;
598}
599
600int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
601{
602 dma_addr_t dma;
603 struct device *dev = xhci_to_hcd(xhci)->self.controller;
604 unsigned int val, val2;
605 struct xhci_segment *seg;
606 u32 page_size;
607 int i;
608
609 page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
610 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
611 for (i = 0; i < 16; i++) {
612 if ((0x1 & page_size) != 0)
613 break;
614 page_size = page_size >> 1;
615 }
616 if (i < 16)
617 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
618 else
619 xhci_warn(xhci, "WARN: no supported page size\n");
620 /* Use 4K pages, since that's common and the minimum the HC supports */
621 xhci->page_shift = 12;
622 xhci->page_size = 1 << xhci->page_shift;
623 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
624
625 /*
626 * Program the Number of Device Slots Enabled field in the CONFIG
627 * register with the max value of slots the HC can handle.
628 */
629 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
630 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
631 (unsigned int) val);
632 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
633 val |= (val2 & ~HCS_SLOTS_MASK);
634 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
635 (unsigned int) val);
636 xhci_writel(xhci, val, &xhci->op_regs->config_reg);
637
638 /*
639 * Section 5.4.8 - doorbell array must be
640 * "physically contiguous and 64-byte (cache line) aligned".
641 */
642 xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
643 sizeof(*xhci->dcbaa), &dma);
644 if (!xhci->dcbaa)
645 goto fail;
646 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
647 xhci->dcbaa->dma = dma;
648 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
649 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
650 xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
651 xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
652
653 /*
654 * Initialize the ring segment pool. The ring must be a contiguous
655 * structure comprised of TRBs. The TRBs must be 16 byte aligned,
656 * however, the command ring segment needs 64-byte aligned segments,
657 * so we pick the greater alignment need.
658 */
659 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
660 SEGMENT_SIZE, 64, xhci->page_size);
661 /* See Table 46 and Note on Figure 55 */
662 /* FIXME support 64-byte contexts */
663 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
664 sizeof(struct xhci_device_control),
665 64, xhci->page_size);
666 if (!xhci->segment_pool || !xhci->device_pool)
667 goto fail;
668
 669 	/* Set up the command ring to have one segment for now. */
670 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
671 if (!xhci->cmd_ring)
672 goto fail;
673 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
674 xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
675 (unsigned long long)xhci->cmd_ring->first_seg->dma);
676
677 /* Set the address in the Command Ring Control register */
678 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
679 val = (val & ~CMD_RING_ADDR_MASK) |
680 (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
681 xhci->cmd_ring->cycle_state;
682 xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
683 xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
684 xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
685 xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
686 xhci_dbg_cmd_ptrs(xhci);
687
688 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
689 val &= DBOFF_MASK;
690 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
691 " from cap regs base addr\n", val);
692 xhci->dba = (void *) xhci->cap_regs + val;
693 xhci_dbg_regs(xhci);
694 xhci_print_run_regs(xhci);
695 /* Set ir_set to interrupt register set 0 */
696 xhci->ir_set = (void *) xhci->run_regs->ir_set;
697
698 /*
699 * Event ring setup: Allocate a normal ring, but also setup
700 * the event ring segment table (ERST). Section 4.9.3.
701 */
702 xhci_dbg(xhci, "// Allocating event ring\n");
703 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
704 if (!xhci->event_ring)
705 goto fail;
706
707 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
708 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
709 if (!xhci->erst.entries)
710 goto fail;
711 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
712 (unsigned long long)dma);
713
714 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
715 xhci->erst.num_entries = ERST_NUM_SEGS;
716 xhci->erst.erst_dma_addr = dma;
717 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
718 xhci->erst.num_entries,
719 xhci->erst.entries,
720 (unsigned long long)xhci->erst.erst_dma_addr);
721
722 /* set ring base address and size for each segment table entry */
723 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
724 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
725 entry->seg_addr[0] = seg->dma;
726 entry->seg_addr[1] = 0;
727 entry->seg_size = TRBS_PER_SEGMENT;
728 entry->rsvd = 0;
729 seg = seg->next;
730 }
731
732 /* set ERST count with the number of entries in the segment table */
733 val = xhci_readl(xhci, &xhci->ir_set->erst_size);
734 val &= ERST_SIZE_MASK;
735 val |= ERST_NUM_SEGS;
736 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
737 val);
738 xhci_writel(xhci, val, &xhci->ir_set->erst_size);
739
740 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
741 /* set the segment table base address */
742 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
743 (unsigned long long)xhci->erst.erst_dma_addr);
744 val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
745 val &= ERST_PTR_MASK;
746 val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
747 xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
748 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
749
750 /* Set the event ring dequeue address */
751 xhci_set_hc_event_deq(xhci);
752 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
753 xhci_print_ir_set(xhci, xhci->ir_set, 0);
754
755 /*
756 * XXX: Might need to set the Interrupter Moderation Register to
757 * something other than the default (~1ms minimum between interrupts).
758 * See section 5.5.1.2.
759 */
760 init_completion(&xhci->addr_dev);
761 for (i = 0; i < MAX_HC_SLOTS; ++i)
762 xhci->devs[i] = 0;
763
764 return 0;
765fail:
766 xhci_warn(xhci, "Couldn't initialize memory\n");
767 xhci_mem_cleanup(xhci);
768 return -ENOMEM;
769}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
new file mode 100644
index 000000000000..1462709e26c0
--- /dev/null
+++ b/drivers/usb/host/xhci-pci.c
@@ -0,0 +1,166 @@
1/*
2 * xHCI host controller driver PCI Bus Glue.
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/pci.h>
24
25#include "xhci.h"
26
27static const char hcd_name[] = "xhci_hcd";
28
29/* called after powerup, by probe or system-pm "wakeup" */
30static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
31{
32 /*
33 * TODO: Implement finding debug ports later.
34 * TODO: see if there are any quirks that need to be added to handle
35 * new extended capabilities.
36 */
37
38 /* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
39 if (!pci_set_mwi(pdev))
40 xhci_dbg(xhci, "MWI active\n");
41
42 xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
43 return 0;
44}
45
46/* called during probe() after chip reset completes */
47static int xhci_pci_setup(struct usb_hcd *hcd)
48{
49 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
50 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
51 int retval;
52
53 xhci->cap_regs = hcd->regs;
54 xhci->op_regs = hcd->regs +
55 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
56 xhci->run_regs = hcd->regs +
57 (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
58 /* Cache read-only capability registers */
59 xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
60 xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
61 xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
62 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
63 xhci_print_registers(xhci);
64
65 /* Make sure the HC is halted. */
66 retval = xhci_halt(xhci);
67 if (retval)
68 return retval;
69
70 xhci_dbg(xhci, "Resetting HCD\n");
71 /* Reset the internal HC memory state and registers. */
72 retval = xhci_reset(xhci);
73 if (retval)
74 return retval;
75 xhci_dbg(xhci, "Reset complete\n");
76
77 xhci_dbg(xhci, "Calling HCD init\n");
78 /* Initialize HCD and host controller data structures. */
79 retval = xhci_init(hcd);
80 if (retval)
81 return retval;
82 xhci_dbg(xhci, "Called HCD init\n");
83
84 pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
85 xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
86
87 /* Find any debug ports */
88 return xhci_pci_reinit(xhci, pdev);
89}
90
91static const struct hc_driver xhci_pci_hc_driver = {
92 .description = hcd_name,
93 .product_desc = "xHCI Host Controller",
94 .hcd_priv_size = sizeof(struct xhci_hcd),
95
96 /*
97 * generic hardware linkage
98 */
99 .irq = xhci_irq,
100 .flags = HCD_MEMORY | HCD_USB3,
101
102 /*
103 * basic lifecycle operations
104 */
105 .reset = xhci_pci_setup,
106 .start = xhci_run,
107 /* suspend and resume implemented later */
108 .stop = xhci_stop,
109 .shutdown = xhci_shutdown,
110
111 /*
112 * managing i/o requests and associated device resources
113 */
114 .urb_enqueue = xhci_urb_enqueue,
115 .urb_dequeue = xhci_urb_dequeue,
116 .alloc_dev = xhci_alloc_dev,
117 .free_dev = xhci_free_dev,
118 .add_endpoint = xhci_add_endpoint,
119 .drop_endpoint = xhci_drop_endpoint,
120 .check_bandwidth = xhci_check_bandwidth,
121 .reset_bandwidth = xhci_reset_bandwidth,
122 .address_device = xhci_address_device,
123
124 /*
125 * scheduling support
126 */
127 .get_frame_number = xhci_get_frame,
128
129 /* Root hub support */
130 .hub_control = xhci_hub_control,
131 .hub_status_data = xhci_hub_status_data,
132};
133
134/*-------------------------------------------------------------------------*/
135
136/* PCI driver selection metadata; PCI hotplugging uses this */
137static const struct pci_device_id pci_ids[] = { {
138 /* handle any USB 3.0 xHCI controller */
139 PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
140 .driver_data = (unsigned long) &xhci_pci_hc_driver,
141 },
142 { /* end: all zeroes */ }
143};
144MODULE_DEVICE_TABLE(pci, pci_ids);
145
146/* pci driver glue; this is a "new style" PCI driver module */
147static struct pci_driver xhci_pci_driver = {
148 .name = (char *) hcd_name,
149 .id_table = pci_ids,
150
151 .probe = usb_hcd_pci_probe,
152 .remove = usb_hcd_pci_remove,
153 /* suspend and resume implemented later */
154
155 .shutdown = usb_hcd_pci_shutdown,
156};
157
 158int xhci_register_pci(void)
159{
160 return pci_register_driver(&xhci_pci_driver);
161}
162
 163void xhci_unregister_pci(void)
164{
165 pci_unregister_driver(&xhci_pci_driver);
166}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 000000000000..02d81985c454
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,1648 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23/*
24 * Ring initialization rules:
25 * 1. Each segment is initialized to zero, except for link TRBs.
26 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
27 * Consumer Cycle State (CCS), depending on ring function.
28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
29 *
30 * Ring behavior rules:
31 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
32 * least one free TRB in the ring. This is useful if you want to turn that
33 * into a link TRB and expand the ring.
34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
35 * link TRB, then load the pointer with the address in the link TRB. If the
36 * link TRB had its toggle bit set, you may need to update the ring cycle
37 * state (see cycle bit rules). You may have to do this multiple times
38 * until you reach a non-link TRB.
39 * 3. A ring is full if enqueue++ (for the definition of increment above)
40 * equals the dequeue pointer.
41 *
42 * Cycle bit rules:
43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
44 * in a link TRB, it must toggle the ring cycle state.
45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
46 * in a link TRB, it must toggle the ring cycle state.
47 *
48 * Producer rules:
49 * 1. Check if ring is full before you enqueue.
50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
51 * Update enqueue pointer between each write (which may update the ring
52 * cycle state).
53 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 54 *    and endpoint rings. If HC is the producer for the event ring, it
 55 *    generates an interrupt according to interrupt moderation rules.
56 *
57 * Consumer rules:
58 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
59 * the TRB is owned by the consumer.
60 * 2. Update dequeue pointer (which may update the ring cycle state) and
61 * continue processing TRBs until you reach a TRB which is not owned by you.
62 * 3. Notify the producer. SW is the consumer for the event ring, and it
63 * updates event ring dequeue pointer. HC is the consumer for the command and
64 * endpoint rings; it generates events on the event ring for these.
65 */
66
67#include <linux/scatterlist.h>
68#include "xhci.h"
69
70/*
71 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
72 * address of the TRB.
73 */
74dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
75 union xhci_trb *trb)
76{
77 unsigned long segment_offset;
78
79 if (!seg || !trb || trb < seg->trbs)
80 return 0;
81 /* offset in TRBs */
82 segment_offset = trb - seg->trbs;
 83 	if (segment_offset >= TRBS_PER_SEGMENT)
84 return 0;
85 return seg->dma + (segment_offset * sizeof(*trb));
86}
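/*
 * For example, a TRB at seg->trbs[5] maps to seg->dma + 5 * 16, since a
 * union xhci_trb is four 32-bit fields (16 bytes; see the BUILD_BUG_ON
 * checks in xhci-hcd.c).
 */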
87
88/* Does this link TRB point to the first segment in a ring,
89 * or was the previous TRB the last TRB on the last segment in the ERST?
90 */
91static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
92 struct xhci_segment *seg, union xhci_trb *trb)
93{
94 if (ring == xhci->event_ring)
95 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
96 (seg->next == xhci->event_ring->first_seg);
97 else
98 return trb->link.control & LINK_TOGGLE;
99}
100
101/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
102 * segment? I.e. would the updated event TRB pointer step off the end of the
103 * event seg?
104 */
105static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
106 struct xhci_segment *seg, union xhci_trb *trb)
107{
108 if (ring == xhci->event_ring)
109 return trb == &seg->trbs[TRBS_PER_SEGMENT];
110 else
111 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
112}
113
114/* Updates trb to point to the next TRB in the ring, and updates seg if the next
115 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 116 * affect the ring dequeue or enqueue pointers.
117 */
118static void next_trb(struct xhci_hcd *xhci,
119 struct xhci_ring *ring,
120 struct xhci_segment **seg,
121 union xhci_trb **trb)
122{
123 if (last_trb(xhci, ring, *seg, *trb)) {
124 *seg = (*seg)->next;
125 *trb = ((*seg)->trbs);
126 } else {
 127 		(*trb)++;
128 }
129}
130
131/*
132 * See Cycle bit rules. SW is the consumer for the event ring only.
133 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
134 */
135static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
136{
137 union xhci_trb *next = ++(ring->dequeue);
138
139 ring->deq_updates++;
140 /* Update the dequeue pointer further if that was a link TRB or we're at
 141 	 * the end of an event ring segment (which doesn't have link TRBs)
142 */
143 while (last_trb(xhci, ring, ring->deq_seg, next)) {
144 if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
145 ring->cycle_state = (ring->cycle_state ? 0 : 1);
146 if (!in_interrupt())
147 xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
148 ring,
149 (unsigned int) ring->cycle_state);
150 }
151 ring->deq_seg = ring->deq_seg->next;
152 ring->dequeue = ring->deq_seg->trbs;
153 next = ring->dequeue;
154 }
155}
156
157/*
158 * See Cycle bit rules. SW is the consumer for the event ring only.
159 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
160 *
161 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
162 * chain bit is set), then set the chain bit in all the following link TRBs.
163 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
164 * have their chain bit cleared (so that each Link TRB is a separate TD).
165 *
166 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
167 * set, but other sections talk about dealing with the chain bit set.
168 * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
169 */
170static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
171{
172 u32 chain;
173 union xhci_trb *next;
174
175 chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
176 next = ++(ring->enqueue);
177
178 ring->enq_updates++;
 179 	/* Update the enqueue pointer further if that was a link TRB or we're at
 180 	 * the end of an event ring segment (which doesn't have link TRBs)
181 */
182 while (last_trb(xhci, ring, ring->enq_seg, next)) {
183 if (!consumer) {
184 if (ring != xhci->event_ring) {
185 next->link.control &= ~TRB_CHAIN;
186 next->link.control |= chain;
187 /* Give this link TRB to the hardware */
188 wmb();
189 if (next->link.control & TRB_CYCLE)
190 next->link.control &= (u32) ~TRB_CYCLE;
191 else
192 next->link.control |= (u32) TRB_CYCLE;
193 }
194 /* Toggle the cycle bit after the last ring segment. */
195 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
196 ring->cycle_state = (ring->cycle_state ? 0 : 1);
197 if (!in_interrupt())
198 xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
199 ring,
200 (unsigned int) ring->cycle_state);
201 }
202 }
203 ring->enq_seg = ring->enq_seg->next;
204 ring->enqueue = ring->enq_seg->trbs;
205 next = ring->enqueue;
206 }
207}
208
209/*
210 * Check to see if there's room to enqueue num_trbs on the ring. See rules
211 * above.
212 * FIXME: this would be simpler and faster if we just kept track of the number
213 * of free TRBs in a ring.
214 */
215static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
216 unsigned int num_trbs)
217{
218 int i;
219 union xhci_trb *enq = ring->enqueue;
220 struct xhci_segment *enq_seg = ring->enq_seg;
221
222 /* Check if ring is empty */
223 if (enq == ring->dequeue)
224 return 1;
225 /* Make sure there's an extra empty TRB available */
226 for (i = 0; i <= num_trbs; ++i) {
227 if (enq == ring->dequeue)
228 return 0;
229 enq++;
230 while (last_trb(xhci, ring, enq_seg, enq)) {
231 enq_seg = enq_seg->next;
232 enq = enq_seg->trbs;
233 }
234 }
235 return 1;
236}
237
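The FIXME above suggests counting free TRBs instead of walking the ring; a sketch of that bookkeeping, with a hypothetical counter field:

	/* Hypothetical: enqueue would decrement num_trbs_free, dequeue would
	 * increment it. The "+ 1" keeps one TRB unused so a full ring stays
	 * distinguishable from an empty one. */
	struct demo_counted_ring {
		unsigned int num_trbs_free;
	};

	static int demo_room_on_ring(struct demo_counted_ring *ring,
			unsigned int num_trbs)
	{
		return ring->num_trbs_free >= num_trbs + 1;
	}
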
238void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
239{
240 u32 temp;
241 dma_addr_t deq;
242
243 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
244 xhci->event_ring->dequeue);
245 if (deq == 0 && !in_interrupt())
246 xhci_warn(xhci, "WARN something wrong with SW event ring "
247 "dequeue ptr.\n");
248 /* Update HC event ring dequeue pointer */
249 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
250 temp &= ERST_PTR_MASK;
251 if (!in_interrupt())
252 xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
253 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
254 xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
255 &xhci->ir_set->erst_dequeue[0]);
256}
257
258/* Ring the host controller doorbell after placing a command on the ring */
259void xhci_ring_cmd_db(struct xhci_hcd *xhci)
260{
261 u32 temp;
262
263 xhci_dbg(xhci, "// Ding dong!\n");
264 temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
265 xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
266 /* Flush PCI posted writes */
267 xhci_readl(xhci, &xhci->dba->doorbell[0]);
268}
269
270static void ring_ep_doorbell(struct xhci_hcd *xhci,
271 unsigned int slot_id,
272 unsigned int ep_index)
273{
274 struct xhci_ring *ep_ring;
275 u32 field;
276 __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
277
278 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
279 /* Don't ring the doorbell for this endpoint if there are pending
280 	 * cancellations, because we don't want to interrupt processing.
281 */
282 if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
283 field = xhci_readl(xhci, db_addr) & DB_MASK;
284 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
285 /* Flush PCI posted writes - FIXME Matthew Wilcox says this
286 * isn't time-critical and we shouldn't make the CPU wait for
287 * the flush.
288 */
289 xhci_readl(xhci, db_addr);
290 }
291}
292
293/*
294 * Find the segment that trb is in. Start searching in start_seg.
295 * If we must move past a segment that has a link TRB with a toggle cycle state
296 * bit set, then we will toggle the value pointed at by cycle_state.
297 */
298static struct xhci_segment *find_trb_seg(
299 struct xhci_segment *start_seg,
300 union xhci_trb *trb, int *cycle_state)
301{
302 struct xhci_segment *cur_seg = start_seg;
303 struct xhci_generic_trb *generic_trb;
304
305 while (cur_seg->trbs > trb ||
306 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
307 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
308 if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
309 (generic_trb->field[3] & LINK_TOGGLE))
310 *cycle_state = ~(*cycle_state) & 0x1;
311 cur_seg = cur_seg->next;
312 if (cur_seg == start_seg)
313 /* Looped over the entire list. Oops! */
314 			return NULL;
315 }
316 return cur_seg;
317}
318
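The loop condition above is a pointer-range test. Isolated, with illustrative types:

	#include <stdbool.h>

	#define DEMO_TRBS_PER_SEG 64

	struct demo_trb { unsigned int field[4]; };

	/* A TRB belongs to a segment iff it points inside the segment's
	 * TRB array. */
	static bool demo_trb_in_seg(const struct demo_trb *seg_trbs,
			const struct demo_trb *trb)
	{
		return trb >= seg_trbs && trb <= &seg_trbs[DEMO_TRBS_PER_SEG - 1];
	}
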
319struct dequeue_state {
320 struct xhci_segment *new_deq_seg;
321 union xhci_trb *new_deq_ptr;
322 int new_cycle_state;
323};
324
325/*
326 * Move the xHC's endpoint ring dequeue pointer past cur_td.
327 * Record the new state of the xHC's endpoint ring dequeue segment,
328 * dequeue pointer, and new consumer cycle state in state.
329 * Update our internal representation of the ring's dequeue pointer.
330 *
331 * We do this in three jumps:
332 * - First we update our new ring state to be the same as when the xHC stopped.
333 * - Then we traverse the ring to find the segment that contains
334 * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
335 * any link TRBs with the toggle cycle bit set.
336 * - Finally we move the dequeue state one TRB further, toggling the cycle bit
337 * if we've moved it past a link TRB with the toggle cycle bit set.
338 */
339static void find_new_dequeue_state(struct xhci_hcd *xhci,
340 unsigned int slot_id, unsigned int ep_index,
341 struct xhci_td *cur_td, struct dequeue_state *state)
342{
343 struct xhci_virt_device *dev = xhci->devs[slot_id];
344 struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
345 struct xhci_generic_trb *trb;
346
347 state->new_cycle_state = 0;
348 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
349 ep_ring->stopped_trb,
350 &state->new_cycle_state);
351 if (!state->new_deq_seg)
352 BUG();
353 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
354 state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
355
356 state->new_deq_ptr = cur_td->last_trb;
357 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
358 state->new_deq_ptr,
359 &state->new_cycle_state);
360 if (!state->new_deq_seg)
361 BUG();
362
363 trb = &state->new_deq_ptr->generic;
364 if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
365 (trb->field[3] & LINK_TOGGLE))
366 state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
367 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
368
369 /* Don't update the ring cycle state for the producer (us). */
370 ep_ring->dequeue = state->new_deq_ptr;
371 ep_ring->deq_seg = state->new_deq_seg;
372}
373
374static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
375 struct xhci_td *cur_td)
376{
377 struct xhci_segment *cur_seg;
378 union xhci_trb *cur_trb;
379
380 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
381 true;
382 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
383 if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
384 TRB_TYPE(TRB_LINK)) {
385 /* Unchain any chained Link TRBs, but
386 * leave the pointers intact.
387 */
388 cur_trb->generic.field[3] &= ~TRB_CHAIN;
389 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
390 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
391 "in seg %p (0x%llx dma)\n",
392 cur_trb,
393 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
394 cur_seg,
395 (unsigned long long)cur_seg->dma);
396 } else {
397 cur_trb->generic.field[0] = 0;
398 cur_trb->generic.field[1] = 0;
399 cur_trb->generic.field[2] = 0;
400 /* Preserve only the cycle bit of this TRB */
401 cur_trb->generic.field[3] &= TRB_CYCLE;
402 cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
403 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
404 "in seg %p (0x%llx dma)\n",
405 cur_trb,
406 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
407 cur_seg,
408 (unsigned long long)cur_seg->dma);
409 }
410 if (cur_trb == cur_td->last_trb)
411 break;
412 }
413}
414
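Reduced to one generic TRB, the no-op conversion amounts to the following sketch (the type field sits in bits 15:10, as in this driver's TRB_TYPE macro; names are illustrative):

	#include <stdint.h>

	#define DEMO_TRB_CYCLE   (1u << 0)
	#define DEMO_TRB_TR_NOOP 8u
	#define DEMO_TRB_TYPE(t) ((t) << 10)

	/* Zero the buffer pointer and length, keep only the cycle bit (so
	 * hardware ownership is unchanged), and stamp in the transfer
	 * no-op type. */
	static void demo_trb_to_noop(uint32_t field[4])
	{
		field[0] = 0;
		field[1] = 0;
		field[2] = 0;
		field[3] = (field[3] & DEMO_TRB_CYCLE)
				| DEMO_TRB_TYPE(DEMO_TRB_TR_NOOP);
	}
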
415static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
416 unsigned int ep_index, struct xhci_segment *deq_seg,
417 union xhci_trb *deq_ptr, u32 cycle_state);
418
419/*
420 * When we get a command completion for a Stop Endpoint Command, we need to
421 * unlink any cancelled TDs from the ring. There are two ways to do that:
422 *
423 * 1. If the HW was in the middle of processing the TD that needs to be
424 * cancelled, then we must move the ring's dequeue pointer past the last TRB
425 * in the TD with a Set Dequeue Pointer Command.
426 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
427 * bit cleared) so that the HW will skip over them.
428 */
429static void handle_stopped_endpoint(struct xhci_hcd *xhci,
430 union xhci_trb *trb)
431{
432 unsigned int slot_id;
433 unsigned int ep_index;
434 struct xhci_ring *ep_ring;
435 struct list_head *entry;
436 	struct xhci_td *cur_td = NULL;
437 struct xhci_td *last_unlinked_td;
438
439 struct dequeue_state deq_state;
440#ifdef CONFIG_USB_HCD_STAT
441 ktime_t stop_time = ktime_get();
442#endif
443
444 memset(&deq_state, 0, sizeof(deq_state));
445 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
446 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
447 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
448
449 if (list_empty(&ep_ring->cancelled_td_list))
450 return;
451
452 /* Fix up the ep ring first, so HW stops executing cancelled TDs.
453 * We have the xHCI lock, so nothing can modify this list until we drop
454 * it. We're also in the event handler, so we can't get re-interrupted
455 	 * if another Stop Endpoint command completes.
456 */
457 list_for_each(entry, &ep_ring->cancelled_td_list) {
458 cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
459 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
460 cur_td->first_trb,
461 (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
462 /*
463 * If we stopped on the TD we need to cancel, then we have to
464 * move the xHC endpoint ring dequeue pointer past this TD.
465 */
466 if (cur_td == ep_ring->stopped_td)
467 find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
468 &deq_state);
469 else
470 td_to_noop(xhci, ep_ring, cur_td);
471 /*
472 * The event handler won't see a completion for this TD anymore,
473 * so remove it from the endpoint ring's TD list. Keep it in
474 * the cancelled TD list for URB completion later.
475 */
476 list_del(&cur_td->td_list);
477 ep_ring->cancels_pending--;
478 }
479 last_unlinked_td = cur_td;
480
481 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
482 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
483 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
484 "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
485 deq_state.new_deq_seg,
486 (unsigned long long)deq_state.new_deq_seg->dma,
487 deq_state.new_deq_ptr,
488 (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
489 deq_state.new_cycle_state);
490 queue_set_tr_deq(xhci, slot_id, ep_index,
491 deq_state.new_deq_seg,
492 deq_state.new_deq_ptr,
493 (u32) deq_state.new_cycle_state);
494 /* Stop the TD queueing code from ringing the doorbell until
495 * this command completes. The HC won't set the dequeue pointer
496 * if the ring is running, and ringing the doorbell starts the
497 * ring running.
498 */
499 ep_ring->state |= SET_DEQ_PENDING;
500 xhci_ring_cmd_db(xhci);
501 } else {
502 /* Otherwise just ring the doorbell to restart the ring */
503 ring_ep_doorbell(xhci, slot_id, ep_index);
504 }
505
506 /*
507 * Drop the lock and complete the URBs in the cancelled TD list.
508 * New TDs to be cancelled might be added to the end of the list before
509 * we can complete all the URBs for the TDs we already unlinked.
510 * So stop when we've completed the URB for the last TD we unlinked.
511 */
512 do {
513 cur_td = list_entry(ep_ring->cancelled_td_list.next,
514 struct xhci_td, cancelled_td_list);
515 list_del(&cur_td->cancelled_td_list);
516
517 /* Clean up the cancelled URB */
518#ifdef CONFIG_USB_HCD_STAT
519 hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
520 ktime_sub(stop_time, cur_td->start_time));
521#endif
522 cur_td->urb->hcpriv = NULL;
523 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
524
525 xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
526 spin_unlock(&xhci->lock);
527 /* Doesn't matter what we pass for status, since the core will
528 * just overwrite it (because the URB has been unlinked).
529 */
530 usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
531 kfree(cur_td);
532
533 spin_lock(&xhci->lock);
534 } while (cur_td != last_unlinked_td);
535
536 /* Return to the event handler with xhci->lock re-acquired */
537}
538
539/*
540 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
541 * we need to clear the set deq pending flag in the endpoint ring state, so that
542 * the TD queueing code can ring the doorbell again. We also need to ring the
543 * endpoint doorbell to restart the ring, but only if there aren't more
544 * cancellations pending.
545 */
546static void handle_set_deq_completion(struct xhci_hcd *xhci,
547 struct xhci_event_cmd *event,
548 union xhci_trb *trb)
549{
550 unsigned int slot_id;
551 unsigned int ep_index;
552 struct xhci_ring *ep_ring;
553 struct xhci_virt_device *dev;
554
555 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
556 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
557 dev = xhci->devs[slot_id];
558 ep_ring = dev->ep_rings[ep_index];
559
560 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
561 unsigned int ep_state;
562 unsigned int slot_state;
563
564 switch (GET_COMP_CODE(event->status)) {
565 case COMP_TRB_ERR:
566 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
567 "of stream ID configuration\n");
568 break;
569 case COMP_CTX_STATE:
570 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
571 "to incorrect slot or ep state.\n");
572 ep_state = dev->out_ctx->ep[ep_index].ep_info;
573 ep_state &= EP_STATE_MASK;
574 slot_state = dev->out_ctx->slot.dev_state;
575 slot_state = GET_SLOT_STATE(slot_state);
576 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
577 slot_state, ep_state);
578 break;
579 case COMP_EBADSLT:
580 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
581 "slot %u was not enabled.\n", slot_id);
582 break;
583 default:
584 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
585 "completion code of %u.\n",
586 GET_COMP_CODE(event->status));
587 break;
588 }
589 /* OK what do we do now? The endpoint state is hosed, and we
590 * should never get to this point if the synchronization between
591 	 * queueing and endpoint state changes is correct. This might happen
592 * if the device gets disconnected after we've finished
593 * cancelling URBs, which might not be an error...
594 */
595 } else {
596 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
597 "deq[1] = 0x%x.\n",
598 dev->out_ctx->ep[ep_index].deq[0],
599 dev->out_ctx->ep[ep_index].deq[1]);
600 }
601
602 ep_ring->state &= ~SET_DEQ_PENDING;
603 ring_ep_doorbell(xhci, slot_id, ep_index);
604}
605
606
607static void handle_cmd_completion(struct xhci_hcd *xhci,
608 struct xhci_event_cmd *event)
609{
610 int slot_id = TRB_TO_SLOT_ID(event->flags);
611 u64 cmd_dma;
612 dma_addr_t cmd_dequeue_dma;
613
614 cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
615 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
616 xhci->cmd_ring->dequeue);
617 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
618 if (cmd_dequeue_dma == 0) {
619 xhci->error_bitmask |= 1 << 4;
620 return;
621 }
622 /* Does the DMA address match our internal dequeue pointer address? */
623 if (cmd_dma != (u64) cmd_dequeue_dma) {
624 xhci->error_bitmask |= 1 << 5;
625 return;
626 }
627 switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
628 case TRB_TYPE(TRB_ENABLE_SLOT):
629 if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
630 xhci->slot_id = slot_id;
631 else
632 xhci->slot_id = 0;
633 complete(&xhci->addr_dev);
634 break;
635 case TRB_TYPE(TRB_DISABLE_SLOT):
636 if (xhci->devs[slot_id])
637 xhci_free_virt_device(xhci, slot_id);
638 break;
639 case TRB_TYPE(TRB_CONFIG_EP):
640 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
641 complete(&xhci->devs[slot_id]->cmd_completion);
642 break;
643 case TRB_TYPE(TRB_ADDR_DEV):
644 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
645 complete(&xhci->addr_dev);
646 break;
647 case TRB_TYPE(TRB_STOP_RING):
648 handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
649 break;
650 case TRB_TYPE(TRB_SET_DEQ):
651 handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
652 break;
653 case TRB_TYPE(TRB_CMD_NOOP):
654 ++xhci->noops_handled;
655 break;
656 default:
657 /* Skip over unknown commands on the event ring */
658 xhci->error_bitmask |= 1 << 6;
659 break;
660 }
661 inc_deq(xhci, xhci->cmd_ring, false);
662}
663
664static void handle_port_status(struct xhci_hcd *xhci,
665 union xhci_trb *event)
666{
667 u32 port_id;
668
669 /* Port status change events always have a successful completion code */
670 if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
671 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
672 xhci->error_bitmask |= 1 << 8;
673 }
674 /* FIXME: core doesn't care about all port link state changes yet */
675 port_id = GET_PORT_ID(event->generic.field[0]);
676 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
677
678 /* Update event ring dequeue pointer before dropping the lock */
679 inc_deq(xhci, xhci->event_ring, true);
680 xhci_set_hc_event_deq(xhci);
681
682 spin_unlock(&xhci->lock);
683 /* Pass this up to the core */
684 usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
685 spin_lock(&xhci->lock);
686}
687
688/*
689 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
690 * at end_trb, which may be in another segment. If the suspect DMA address is a
691 * TRB in this TD, this function returns that TRB's segment. Otherwise it
692 * returns NULL.
693 */
694static struct xhci_segment *trb_in_td(
695 struct xhci_segment *start_seg,
696 union xhci_trb *start_trb,
697 union xhci_trb *end_trb,
698 dma_addr_t suspect_dma)
699{
700 dma_addr_t start_dma;
701 dma_addr_t end_seg_dma;
702 dma_addr_t end_trb_dma;
703 struct xhci_segment *cur_seg;
704
705 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
706 cur_seg = start_seg;
707
708 do {
709 /* We may get an event for a Link TRB in the middle of a TD */
710 		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
711 				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
712 /* If the end TRB isn't in this segment, this is set to 0 */
713 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
714
715 if (end_trb_dma > 0) {
716 /* The end TRB is in this segment, so suspect should be here */
717 if (start_dma <= end_trb_dma) {
718 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
719 return cur_seg;
720 } else {
721 /* Case for one segment with
722 * a TD wrapped around to the top
723 */
724 if ((suspect_dma >= start_dma &&
725 suspect_dma <= end_seg_dma) ||
726 (suspect_dma >= cur_seg->dma &&
727 suspect_dma <= end_trb_dma))
728 return cur_seg;
729 }
730 			return NULL;
731 } else {
732 /* Might still be somewhere in this segment */
733 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
734 return cur_seg;
735 }
736 cur_seg = cur_seg->next;
737 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
738 } while (1);
739
740}
741
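The two range checks above, extracted into a standalone predicate over DMA addresses (hypothetical helper; start/end bound the TD, seg_start/seg_end bound the segment):

	#include <stdbool.h>
	#include <stdint.h>

	/* Is suspect inside the TD's TRBs, given that the TD may wrap from
	 * the bottom of the segment back to the top? */
	static bool demo_dma_in_td(uint64_t seg_start, uint64_t seg_end,
			uint64_t start, uint64_t end, uint64_t suspect)
	{
		if (start <= end)
			return suspect >= start && suspect <= end;
		/* wrapped: [start, seg_end] plus [seg_start, end] */
		return (suspect >= start && suspect <= seg_end) ||
		       (suspect >= seg_start && suspect <= end);
	}
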
742/*
743 * If this function returns an error condition, it means it got a Transfer
744 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
745 * At this point, the host controller is probably hosed and should be reset.
746 */
747static int handle_tx_event(struct xhci_hcd *xhci,
748 struct xhci_transfer_event *event)
749{
750 struct xhci_virt_device *xdev;
751 struct xhci_ring *ep_ring;
752 int ep_index;
753 	struct xhci_td *td = NULL;
754 dma_addr_t event_dma;
755 struct xhci_segment *event_seg;
756 union xhci_trb *event_trb;
757 	struct urb *urb = NULL;
758 int status = -EINPROGRESS;
759
760 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
761 if (!xdev) {
762 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
763 return -ENODEV;
764 }
765
766 /* Endpoint ID is 1 based, our index is zero based */
767 ep_index = TRB_TO_EP_ID(event->flags) - 1;
768 ep_ring = xdev->ep_rings[ep_index];
769 if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
770 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
771 return -ENODEV;
772 }
773
774 event_dma = event->buffer[0];
775 if (event->buffer[1] != 0)
776 xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
777
778 /* This TRB should be in the TD at the head of this ring's TD list */
779 if (list_empty(&ep_ring->td_list)) {
780 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
781 TRB_TO_SLOT_ID(event->flags), ep_index);
782 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
783 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
784 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
785 urb = NULL;
786 goto cleanup;
787 }
788 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
789
790 /* Is this a TRB in the currently executing TD? */
791 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
792 td->last_trb, event_dma);
793 if (!event_seg) {
794 /* HC is busted, give up! */
795 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
796 return -ESHUTDOWN;
797 }
798 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
799 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
800 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
801 xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
802 (unsigned int) event->buffer[0]);
803 	xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
804 (unsigned int) event->buffer[1]);
805 xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
806 (unsigned int) event->transfer_len);
807 xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
808 (unsigned int) event->flags);
809
810 /* Look for common error cases */
811 switch (GET_COMP_CODE(event->transfer_len)) {
812 /* Skip codes that require special handling depending on
813 * transfer type
814 */
815 case COMP_SUCCESS:
816 case COMP_SHORT_TX:
817 break;
818 case COMP_STOP:
819 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
820 break;
821 case COMP_STOP_INVAL:
822 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
823 break;
824 case COMP_STALL:
825 xhci_warn(xhci, "WARN: Stalled endpoint\n");
826 status = -EPIPE;
827 break;
828 case COMP_TRB_ERR:
829 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
830 status = -EILSEQ;
831 break;
832 case COMP_TX_ERR:
833 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
834 status = -EPROTO;
835 break;
836 case COMP_DB_ERR:
837 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
838 status = -ENOSR;
839 break;
840 default:
841 xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
842 urb = NULL;
843 goto cleanup;
844 }
845 /* Now update the urb's actual_length and give back to the core */
846 /* Was this a control transfer? */
847 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
848 xhci_debug_trb(xhci, xhci->event_ring->dequeue);
849 switch (GET_COMP_CODE(event->transfer_len)) {
850 case COMP_SUCCESS:
851 if (event_trb == ep_ring->dequeue) {
852 xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
853 status = -ESHUTDOWN;
854 } else if (event_trb != td->last_trb) {
855 xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
856 status = -ESHUTDOWN;
857 } else {
858 xhci_dbg(xhci, "Successful control transfer!\n");
859 status = 0;
860 }
861 break;
862 case COMP_SHORT_TX:
863 xhci_warn(xhci, "WARN: short transfer on control ep\n");
864 status = -EREMOTEIO;
865 break;
866 default:
867 /* Others already handled above */
868 break;
869 }
870 /*
871 * Did we transfer any data, despite the errors that might have
872 * happened? I.e. did we get past the setup stage?
873 */
874 if (event_trb != ep_ring->dequeue) {
875 /* The event was for the status stage */
876 if (event_trb == td->last_trb) {
877 td->urb->actual_length =
878 td->urb->transfer_buffer_length;
879 } else {
880 /* Maybe the event was for the data stage? */
881 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
882 /* We didn't stop on a link TRB in the middle */
883 td->urb->actual_length =
884 td->urb->transfer_buffer_length -
885 TRB_LEN(event->transfer_len);
886 }
887 }
888 } else {
889 switch (GET_COMP_CODE(event->transfer_len)) {
890 case COMP_SUCCESS:
891 /* Double check that the HW transferred everything. */
892 if (event_trb != td->last_trb) {
893 xhci_warn(xhci, "WARN Successful completion "
894 "on short TX\n");
895 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
896 status = -EREMOTEIO;
897 else
898 status = 0;
899 } else {
900 xhci_dbg(xhci, "Successful bulk transfer!\n");
901 status = 0;
902 }
903 break;
904 case COMP_SHORT_TX:
905 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
906 status = -EREMOTEIO;
907 else
908 status = 0;
909 break;
910 default:
911 /* Others already handled above */
912 break;
913 }
914 dev_dbg(&td->urb->dev->dev,
915 "ep %#x - asked for %d bytes, "
916 "%d bytes untransferred\n",
917 td->urb->ep->desc.bEndpointAddress,
918 td->urb->transfer_buffer_length,
919 TRB_LEN(event->transfer_len));
920 /* Fast path - was this the last TRB in the TD for this URB? */
921 if (event_trb == td->last_trb) {
922 if (TRB_LEN(event->transfer_len) != 0) {
923 td->urb->actual_length =
924 td->urb->transfer_buffer_length -
925 TRB_LEN(event->transfer_len);
926 				if (TRB_LEN(event->transfer_len) > td->urb->transfer_buffer_length) {
927 xhci_warn(xhci, "HC gave bad length "
928 "of %d bytes left\n",
929 TRB_LEN(event->transfer_len));
930 td->urb->actual_length = 0;
931 }
932 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
933 status = -EREMOTEIO;
934 else
935 status = 0;
936 } else {
937 td->urb->actual_length = td->urb->transfer_buffer_length;
938 /* Ignore a short packet completion if the
939 * untransferred length was zero.
940 */
941 status = 0;
942 }
943 } else {
944 /* Slow path - walk the list, starting from the dequeue
945 * pointer, to get the actual length transferred.
946 */
947 union xhci_trb *cur_trb;
948 struct xhci_segment *cur_seg;
949
950 td->urb->actual_length = 0;
951 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
952 cur_trb != event_trb;
953 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
954 if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
955 TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
956 td->urb->actual_length +=
957 TRB_LEN(cur_trb->generic.field[2]);
958 }
959 /* If the ring didn't stop on a Link or No-op TRB, add
960 * in the actual bytes transferred from the Normal TRB
961 */
962 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
963 td->urb->actual_length +=
964 TRB_LEN(cur_trb->generic.field[2]) -
965 TRB_LEN(event->transfer_len);
966 }
967 }
968 /* The Endpoint Stop Command completion will take care of
969 * any stopped TDs. A stopped TD may be restarted, so don't update the
970 * ring dequeue pointer or take this TD off any lists yet.
971 */
972 if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
973 GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
974 ep_ring->stopped_td = td;
975 ep_ring->stopped_trb = event_trb;
976 } else {
977 /* Update ring dequeue pointer */
978 while (ep_ring->dequeue != td->last_trb)
979 inc_deq(xhci, ep_ring, false);
980 inc_deq(xhci, ep_ring, false);
981
982 /* Clean up the endpoint's TD list */
983 urb = td->urb;
984 list_del(&td->td_list);
985 /* Was this TD slated to be cancelled but completed anyway? */
986 if (!list_empty(&td->cancelled_td_list)) {
987 list_del(&td->cancelled_td_list);
988 ep_ring->cancels_pending--;
989 }
990 kfree(td);
991 urb->hcpriv = NULL;
992 }
993cleanup:
994 inc_deq(xhci, xhci->event_ring, true);
995 xhci_set_hc_event_deq(xhci);
996
997 /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
998 if (urb) {
999 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
1000 spin_unlock(&xhci->lock);
1001 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
1002 spin_lock(&xhci->lock);
1003 }
1004 return 0;
1005}
1006
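Throughout the handler, the event's transfer length field holds the bytes left untransferred, so a 512-byte request with a residue of 112 completed 400 bytes. As a standalone, hedged sketch:

	#include <stdint.h>

	/* actual = requested - residue, clamped so a bogus residue from
	 * the HC cannot wrap the unsigned result. */
	static uint32_t demo_actual_length(uint32_t requested, uint32_t residue)
	{
		return residue <= requested ? requested - residue : 0;
	}
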
1007/*
1008 * This function handles all OS-owned events on the event ring. It may drop
1009 * xhci->lock between event processing (e.g. to pass up port status changes).
1010 */
1011void xhci_handle_event(struct xhci_hcd *xhci)
1012{
1013 union xhci_trb *event;
1014 int update_ptrs = 1;
1015 int ret;
1016
1017 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
1018 xhci->error_bitmask |= 1 << 1;
1019 return;
1020 }
1021
1022 event = xhci->event_ring->dequeue;
1023 /* Does the HC or OS own the TRB? */
1024 if ((event->event_cmd.flags & TRB_CYCLE) !=
1025 xhci->event_ring->cycle_state) {
1026 xhci->error_bitmask |= 1 << 2;
1027 return;
1028 }
1029
1030 /* FIXME: Handle more event types. */
1031 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
1032 case TRB_TYPE(TRB_COMPLETION):
1033 handle_cmd_completion(xhci, &event->event_cmd);
1034 break;
1035 case TRB_TYPE(TRB_PORT_STATUS):
1036 handle_port_status(xhci, event);
1037 update_ptrs = 0;
1038 break;
1039 case TRB_TYPE(TRB_TRANSFER):
1040 ret = handle_tx_event(xhci, &event->trans_event);
1041 if (ret < 0)
1042 xhci->error_bitmask |= 1 << 9;
1043 else
1044 update_ptrs = 0;
1045 break;
1046 default:
1047 xhci->error_bitmask |= 1 << 3;
1048 }
1049
1050 if (update_ptrs) {
1051 /* Update SW and HC event ring dequeue pointer */
1052 inc_deq(xhci, xhci->event_ring, true);
1053 xhci_set_hc_event_deq(xhci);
1054 }
1055 /* Are there more items on the event ring? */
1056 xhci_handle_event(xhci);
1057}
1058
1059/**** Endpoint Ring Operations ****/
1060
1061/*
1062 * Generic function for queueing a TRB on a ring.
1063 * The caller must have checked to make sure there's room on the ring.
1064 */
1065static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
1066 bool consumer,
1067 u32 field1, u32 field2, u32 field3, u32 field4)
1068{
1069 struct xhci_generic_trb *trb;
1070
1071 trb = &ring->enqueue->generic;
1072 trb->field[0] = field1;
1073 trb->field[1] = field2;
1074 trb->field[2] = field3;
1075 trb->field[3] = field4;
1076 inc_enq(xhci, ring, consumer);
1077}
1078
1079/*
1080 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
1081 * FIXME allocate segments if the ring is full.
1082 */
1083static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
1084 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
1085{
1086 /* Make sure the endpoint has been added to xHC schedule */
1087 xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
1088 switch (ep_state) {
1089 case EP_STATE_DISABLED:
1090 /*
1091 * USB core changed config/interfaces without notifying us,
1092 * or hardware is reporting the wrong state.
1093 */
1094 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
1095 return -ENOENT;
1096 case EP_STATE_HALTED:
1097 case EP_STATE_ERROR:
1098 xhci_warn(xhci, "WARN waiting for halt or error on ep "
1099 "to be cleared\n");
1100 /* FIXME event handling code for error needs to clear it */
1101 /* XXX not sure if this should be -ENOENT or not */
1102 return -EINVAL;
1103 case EP_STATE_STOPPED:
1104 case EP_STATE_RUNNING:
1105 break;
1106 default:
1107 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
1108 /*
1109 * FIXME issue Configure Endpoint command to try to get the HC
1110 * back into a known state.
1111 */
1112 return -EINVAL;
1113 }
1114 if (!room_on_ring(xhci, ep_ring, num_trbs)) {
1115 /* FIXME allocate more room */
1116 xhci_err(xhci, "ERROR no room on ep ring\n");
1117 return -ENOMEM;
1118 }
1119 return 0;
1120}
1121
1122static int prepare_transfer(struct xhci_hcd *xhci,
1123 struct xhci_virt_device *xdev,
1124 unsigned int ep_index,
1125 unsigned int num_trbs,
1126 struct urb *urb,
1127 struct xhci_td **td,
1128 gfp_t mem_flags)
1129{
1130 int ret;
1131
1132 ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
1133 xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
1134 num_trbs, mem_flags);
1135 if (ret)
1136 return ret;
1137 *td = kzalloc(sizeof(struct xhci_td), mem_flags);
1138 if (!*td)
1139 return -ENOMEM;
1140 INIT_LIST_HEAD(&(*td)->td_list);
1141 INIT_LIST_HEAD(&(*td)->cancelled_td_list);
1142
1143 ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
1144 if (unlikely(ret)) {
1145 kfree(*td);
1146 return ret;
1147 }
1148
1149 (*td)->urb = urb;
1150 urb->hcpriv = (void *) (*td);
1151 /* Add this TD to the tail of the endpoint ring's TD list */
1152 list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
1153 (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
1154 (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
1155
1156 return 0;
1157}
1158
1159static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
1160{
1161 int num_sgs, num_trbs, running_total, temp, i;
1162 struct scatterlist *sg;
1163
1164 sg = NULL;
1165 num_sgs = urb->num_sgs;
1166 temp = urb->transfer_buffer_length;
1167
1168 	xhci_dbg(xhci, "count sg list trbs:\n");
1169 num_trbs = 0;
1170 for_each_sg(urb->sg->sg, sg, num_sgs, i) {
1171 unsigned int previous_total_trbs = num_trbs;
1172 unsigned int len = sg_dma_len(sg);
1173
1174 /* Scatter gather list entries may cross 64KB boundaries */
1175 running_total = TRB_MAX_BUFF_SIZE -
1176 (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1177 if (running_total != 0)
1178 num_trbs++;
1179
1180 /* How many more 64KB chunks to transfer, how many more TRBs? */
1181 while (running_total < sg_dma_len(sg)) {
1182 num_trbs++;
1183 running_total += TRB_MAX_BUFF_SIZE;
1184 }
1185 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
1186 i, (unsigned long long)sg_dma_address(sg),
1187 len, len, num_trbs - previous_total_trbs);
1188
1189 len = min_t(int, len, temp);
1190 temp -= len;
1191 if (temp == 0)
1192 break;
1193 }
1194 xhci_dbg(xhci, "\n");
1195 if (!in_interrupt())
1196 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
1197 urb->ep->desc.bEndpointAddress,
1198 urb->transfer_buffer_length,
1199 num_trbs);
1200 return num_trbs;
1201}
1202
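Restating the boundary math above: a TRB buffer may not cross a 64KB boundary, so a buffer needs one TRB for the run up to the first boundary plus one per 64KB chunk after it. A sketch with hypothetical names:

	#include <stdint.h>

	#define DEMO_TRB_MAX_BUFF (1u << 16)	/* 64KB */

	static unsigned int demo_trbs_for_buffer(uint64_t dma, uint32_t len)
	{
		/* bytes from dma up to the next 64KB boundary */
		uint32_t first = DEMO_TRB_MAX_BUFF -
				(uint32_t)(dma & (DEMO_TRB_MAX_BUFF - 1));
		unsigned int num_trbs = 1;	/* zero length still needs one TRB */

		if (len <= first)
			return num_trbs;
		len -= first;
		/* one more TRB per (possibly partial) 64KB chunk */
		num_trbs += (len + DEMO_TRB_MAX_BUFF - 1) / DEMO_TRB_MAX_BUFF;
		return num_trbs;
	}
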
1203static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
1204{
1205 if (num_trbs != 0)
1206 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
1207 "TRBs, %d left\n", __func__,
1208 urb->ep->desc.bEndpointAddress, num_trbs);
1209 if (running_total != urb->transfer_buffer_length)
1210 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
1211 "queued %#x (%d), asked for %#x (%d)\n",
1212 __func__,
1213 urb->ep->desc.bEndpointAddress,
1214 running_total, running_total,
1215 urb->transfer_buffer_length,
1216 urb->transfer_buffer_length);
1217}
1218
1219static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
1220 unsigned int ep_index, int start_cycle,
1221 struct xhci_generic_trb *start_trb, struct xhci_td *td)
1222{
1223 /*
1224 * Pass all the TRBs to the hardware at once and make sure this write
1225 * isn't reordered.
1226 */
1227 wmb();
1228 start_trb->field[3] |= start_cycle;
1229 ring_ep_doorbell(xhci, slot_id, ep_index);
1230}
1231
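A userspace-flavored sketch of this handoff, with a C11 release fence standing in for wmb() (illustrative only; kernel code must use the kernel barrier primitives):

	#include <stdatomic.h>
	#include <stdint.h>

	#define DEMO_TRB_CYCLE (1u << 0)

	/* Publish a fully written TRB: all other fields are assumed to be
	 * in place; the fence orders them before the ownership flip that
	 * lets the hardware consume the TRB. */
	static void demo_publish_trb(uint32_t *control, unsigned int cycle)
	{
		atomic_thread_fence(memory_order_release);
		*control = (*control & ~DEMO_TRB_CYCLE) | (cycle & DEMO_TRB_CYCLE);
	}
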
1232static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1233 struct urb *urb, int slot_id, unsigned int ep_index)
1234{
1235 struct xhci_ring *ep_ring;
1236 unsigned int num_trbs;
1237 struct xhci_td *td;
1238 struct scatterlist *sg;
1239 int num_sgs;
1240 int trb_buff_len, this_sg_len, running_total;
1241 bool first_trb;
1242 u64 addr;
1243
1244 struct xhci_generic_trb *start_trb;
1245 int start_cycle;
1246
1247 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
1248 num_trbs = count_sg_trbs_needed(xhci, urb);
1249 num_sgs = urb->num_sgs;
1250
1251 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
1252 ep_index, num_trbs, urb, &td, mem_flags);
1253 if (trb_buff_len < 0)
1254 return trb_buff_len;
1255 /*
1256 * Don't give the first TRB to the hardware (by toggling the cycle bit)
1257 * until we've finished creating all the other TRBs. The ring's cycle
1258 * state may change as we enqueue the other TRBs, so save it too.
1259 */
1260 start_trb = &ep_ring->enqueue->generic;
1261 start_cycle = ep_ring->cycle_state;
1262
1263 running_total = 0;
1264 /*
1265 * How much data is in the first TRB?
1266 *
1267 * There are three forces at work for TRB buffer pointers and lengths:
1268 * 1. We don't want to walk off the end of this sg-list entry buffer.
1269 * 2. The transfer length that the driver requested may be smaller than
1270 * the amount of memory allocated for this scatter-gather list.
1271 * 3. TRBs buffers can't cross 64KB boundaries.
1272 */
1273 sg = urb->sg->sg;
1274 addr = (u64) sg_dma_address(sg);
1275 this_sg_len = sg_dma_len(sg);
1276 trb_buff_len = TRB_MAX_BUFF_SIZE -
1277 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1278 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
1279 if (trb_buff_len > urb->transfer_buffer_length)
1280 trb_buff_len = urb->transfer_buffer_length;
1281 xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
1282 trb_buff_len);
1283
1284 first_trb = true;
1285 /* Queue the first TRB, even if it's zero-length */
1286 do {
1287 u32 field = 0;
1288
1289 /* Don't change the cycle bit of the first TRB until later */
1290 if (first_trb)
1291 first_trb = false;
1292 else
1293 field |= ep_ring->cycle_state;
1294
1295 /* Chain all the TRBs together; clear the chain bit in the last
1296 * TRB to indicate it's the last TRB in the chain.
1297 */
1298 if (num_trbs > 1) {
1299 field |= TRB_CHAIN;
1300 } else {
1301 /* FIXME - add check for ZERO_PACKET flag before this */
1302 td->last_trb = ep_ring->enqueue;
1303 field |= TRB_IOC;
1304 }
1305 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
1306 "64KB boundary at %#x, end dma = %#x\n",
1307 (unsigned int) addr, trb_buff_len, trb_buff_len,
1308 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1309 (unsigned int) addr + trb_buff_len);
1310 if (TRB_MAX_BUFF_SIZE -
1311 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
1312 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
1313 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
1314 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1315 (unsigned int) addr + trb_buff_len);
1316 }
1317 queue_trb(xhci, ep_ring, false,
1318 (u32) addr,
1319 (u32) ((u64) addr >> 32),
1320 TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
1321 /* We always want to know if the TRB was short,
1322 * or we won't get an event when it completes.
1323 * (Unless we use event data TRBs, which are a
1324 * waste of space and HC resources.)
1325 */
1326 field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
1327 --num_trbs;
1328 running_total += trb_buff_len;
1329
1330 /* Calculate length for next transfer --
1331 * Are we done queueing all the TRBs for this sg entry?
1332 */
1333 this_sg_len -= trb_buff_len;
1334 if (this_sg_len == 0) {
1335 --num_sgs;
1336 if (num_sgs == 0)
1337 break;
1338 sg = sg_next(sg);
1339 addr = (u64) sg_dma_address(sg);
1340 this_sg_len = sg_dma_len(sg);
1341 } else {
1342 addr += trb_buff_len;
1343 }
1344
1345 trb_buff_len = TRB_MAX_BUFF_SIZE -
1346 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1347 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
1348 if (running_total + trb_buff_len > urb->transfer_buffer_length)
1349 trb_buff_len =
1350 urb->transfer_buffer_length - running_total;
1351 } while (running_total < urb->transfer_buffer_length);
1352
1353 check_trb_math(urb, num_trbs, running_total);
1354 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
1355 return 0;
1356}
1357
1358/* This is very similar to what ehci-q.c qtd_fill() does */
1359int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1360 struct urb *urb, int slot_id, unsigned int ep_index)
1361{
1362 struct xhci_ring *ep_ring;
1363 struct xhci_td *td;
1364 int num_trbs;
1365 struct xhci_generic_trb *start_trb;
1366 bool first_trb;
1367 int start_cycle;
1368 u32 field;
1369
1370 int running_total, trb_buff_len, ret;
1371 u64 addr;
1372
1373 if (urb->sg)
1374 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
1375
1376 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
1377
1378 num_trbs = 0;
1379 /* How much data is (potentially) left before the 64KB boundary? */
1380 running_total = TRB_MAX_BUFF_SIZE -
1381 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1382
1383 /* If there's some data on this 64KB chunk, or we have to send a
1384 * zero-length transfer, we need at least one TRB
1385 */
1386 if (running_total != 0 || urb->transfer_buffer_length == 0)
1387 num_trbs++;
1388 /* How many more 64KB chunks to transfer, how many more TRBs? */
1389 while (running_total < urb->transfer_buffer_length) {
1390 num_trbs++;
1391 running_total += TRB_MAX_BUFF_SIZE;
1392 }
1393 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
1394
1395 if (!in_interrupt())
1396 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
1397 urb->ep->desc.bEndpointAddress,
1398 urb->transfer_buffer_length,
1399 urb->transfer_buffer_length,
1400 (unsigned long long)urb->transfer_dma,
1401 num_trbs);
1402
1403 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
1404 num_trbs, urb, &td, mem_flags);
1405 if (ret < 0)
1406 return ret;
1407
1408 /*
1409 * Don't give the first TRB to the hardware (by toggling the cycle bit)
1410 * until we've finished creating all the other TRBs. The ring's cycle
1411 * state may change as we enqueue the other TRBs, so save it too.
1412 */
1413 start_trb = &ep_ring->enqueue->generic;
1414 start_cycle = ep_ring->cycle_state;
1415
1416 running_total = 0;
1417 /* How much data is in the first TRB? */
1418 addr = (u64) urb->transfer_dma;
1419 trb_buff_len = TRB_MAX_BUFF_SIZE -
1420 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1421 if (urb->transfer_buffer_length < trb_buff_len)
1422 trb_buff_len = urb->transfer_buffer_length;
1423
1424 first_trb = true;
1425
1426 /* Queue the first TRB, even if it's zero-length */
1427 do {
1428 field = 0;
1429
1430 /* Don't change the cycle bit of the first TRB until later */
1431 if (first_trb)
1432 first_trb = false;
1433 else
1434 field |= ep_ring->cycle_state;
1435
1436 /* Chain all the TRBs together; clear the chain bit in the last
1437 * TRB to indicate it's the last TRB in the chain.
1438 */
1439 if (num_trbs > 1) {
1440 field |= TRB_CHAIN;
1441 } else {
1442 /* FIXME - add check for ZERO_PACKET flag before this */
1443 td->last_trb = ep_ring->enqueue;
1444 field |= TRB_IOC;
1445 }
1446 queue_trb(xhci, ep_ring, false,
1447 (u32) addr,
1448 (u32) ((u64) addr >> 32),
1449 TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
1450 /* We always want to know if the TRB was short,
1451 * or we won't get an event when it completes.
1452 * (Unless we use event data TRBs, which are a
1453 * waste of space and HC resources.)
1454 */
1455 field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
1456 --num_trbs;
1457 running_total += trb_buff_len;
1458
1459 /* Calculate length for next transfer */
1460 addr += trb_buff_len;
1461 trb_buff_len = urb->transfer_buffer_length - running_total;
1462 if (trb_buff_len > TRB_MAX_BUFF_SIZE)
1463 trb_buff_len = TRB_MAX_BUFF_SIZE;
1464 } while (running_total < urb->transfer_buffer_length);
1465
1466 check_trb_math(urb, num_trbs, running_total);
1467 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
1468 return 0;
1469}
1470
1471/* Caller must have locked xhci->lock */
1472int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1473 struct urb *urb, int slot_id, unsigned int ep_index)
1474{
1475 struct xhci_ring *ep_ring;
1476 int num_trbs;
1477 int ret;
1478 struct usb_ctrlrequest *setup;
1479 struct xhci_generic_trb *start_trb;
1480 int start_cycle;
1481 u32 field;
1482 struct xhci_td *td;
1483
1484 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
1485
1486 /*
1487 * Need to copy setup packet into setup TRB, so we can't use the setup
1488 * DMA address.
1489 */
1490 if (!urb->setup_packet)
1491 return -EINVAL;
1492
1493 if (!in_interrupt())
1494 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
1495 slot_id, ep_index);
1496 /* 1 TRB for setup, 1 for status */
1497 num_trbs = 2;
1498 /*
1499 * Don't need to check if we need additional event data and normal TRBs,
1500 * since data in control transfers will never get bigger than 16MB
1501 * XXX: can we get a buffer that crosses 64KB boundaries?
1502 */
1503 if (urb->transfer_buffer_length > 0)
1504 num_trbs++;
1505 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
1506 urb, &td, mem_flags);
1507 if (ret < 0)
1508 return ret;
1509
1510 /*
1511 * Don't give the first TRB to the hardware (by toggling the cycle bit)
1512 * until we've finished creating all the other TRBs. The ring's cycle
1513 * state may change as we enqueue the other TRBs, so save it too.
1514 */
1515 start_trb = &ep_ring->enqueue->generic;
1516 start_cycle = ep_ring->cycle_state;
1517
1518 /* Queue setup TRB - see section 6.4.1.2.1 */
1519 /* FIXME better way to translate setup_packet into two u32 fields? */
1520 setup = (struct usb_ctrlrequest *) urb->setup_packet;
1521 queue_trb(xhci, ep_ring, false,
1522 /* FIXME endianness is probably going to bite my ass here. */
1523 setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
1524 setup->wIndex | setup->wLength << 16,
1525 TRB_LEN(8) | TRB_INTR_TARGET(0),
1526 /* Immediate data in pointer */
1527 TRB_IDT | TRB_TYPE(TRB_SETUP));
1528
1529 /* If there's data, queue data TRBs */
1530 field = 0;
1531 if (urb->transfer_buffer_length > 0) {
1532 if (setup->bRequestType & USB_DIR_IN)
1533 field |= TRB_DIR_IN;
1534 queue_trb(xhci, ep_ring, false,
1535 lower_32_bits(urb->transfer_dma),
1536 upper_32_bits(urb->transfer_dma),
1537 TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
1538 /* Event on short tx */
1539 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
1540 }
1541
1542 /* Save the DMA address of the last TRB in the TD */
1543 td->last_trb = ep_ring->enqueue;
1544
1545 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
1546 /* If the device sent data, the status stage is an OUT transfer */
1547 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
1548 field = 0;
1549 else
1550 field = TRB_DIR_IN;
1551 queue_trb(xhci, ep_ring, false,
1552 0,
1553 0,
1554 TRB_INTR_TARGET(0),
1555 /* Event on completion */
1556 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
1557
1558 giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
1559 return 0;
1560}
1561
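The two dwords built for the setup TRB above, factored into a sketch (hypothetical struct mirroring the 8-byte SETUP packet; as the FIXME notes, a big-endian host would also need cpu_to_le32() conversions):

	#include <stdint.h>

	struct demo_ctrlrequest {
		uint8_t  bRequestType;
		uint8_t  bRequest;
		uint16_t wValue;
		uint16_t wIndex;
		uint16_t wLength;
	};

	/* Pack the SETUP packet into the TRB's first two parameter dwords
	 * in little-endian order: bytes 0-3, then bytes 4-7. */
	static void demo_pack_setup(const struct demo_ctrlrequest *s,
			uint32_t out[2])
	{
		out[0] = s->bRequestType
				| (uint32_t)s->bRequest << 8
				| (uint32_t)s->wValue << 16;
		out[1] = s->wIndex | (uint32_t)s->wLength << 16;
	}
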
1562/**** Command Ring Operations ****/
1563
1564/* Generic function for queueing a command TRB on the command ring */
1565static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
1566{
1567 if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
1568 if (!in_interrupt())
1569 xhci_err(xhci, "ERR: No room for command on command ring\n");
1570 return -ENOMEM;
1571 }
1572 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
1573 field4 | xhci->cmd_ring->cycle_state);
1574 return 0;
1575}
1576
1577/* Queue a no-op command on the command ring */
1578static int queue_cmd_noop(struct xhci_hcd *xhci)
1579{
1580 return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
1581}
1582
1583/*
1584 * Place a no-op command on the command ring to test the command and
1585 * event ring.
1586 */
1587void *xhci_setup_one_noop(struct xhci_hcd *xhci)
1588{
1589 if (queue_cmd_noop(xhci) < 0)
1590 return NULL;
1591 xhci->noops_submitted++;
1592 return xhci_ring_cmd_db;
1593}
1594
1595/* Queue a slot enable or disable request on the command ring */
1596int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
1597{
1598 return queue_command(xhci, 0, 0, 0,
1599 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
1600}
1601
1602/* Queue an address device command TRB */
1603int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1604 u32 slot_id)
1605{
1606 return queue_command(xhci, in_ctx_ptr, 0, 0,
1607 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
1608}
1609
1610/* Queue a configure endpoint command TRB */
1611int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1612 u32 slot_id)
1613{
1614 return queue_command(xhci, in_ctx_ptr, 0, 0,
1615 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
1616}
1617
1618int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1619 unsigned int ep_index)
1620{
1621 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1622 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1623 u32 type = TRB_TYPE(TRB_STOP_RING);
1624
1625 return queue_command(xhci, 0, 0, 0,
1626 trb_slot_id | trb_ep_index | type);
1627}
1628
1629/* Set Transfer Ring Dequeue Pointer command.
1630 * This should not be used for endpoints that have streams enabled.
1631 */
1632static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
1633 unsigned int ep_index, struct xhci_segment *deq_seg,
1634 union xhci_trb *deq_ptr, u32 cycle_state)
1635{
1636 dma_addr_t addr;
1637 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1638 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1639 u32 type = TRB_TYPE(TRB_SET_DEQ);
1640
1641 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
1642 	if (addr == 0) {
1643 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
1644 		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
1645 				deq_seg, deq_ptr);
1646 		return 0;
1647 	}
1648 	return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
1649 			trb_slot_id | trb_ep_index | type);
1650 }
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
new file mode 100644
index 000000000000..8936eeb5588b
--- /dev/null
+++ b/drivers/usb/host/xhci.h
@@ -0,0 +1,1157 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#ifndef __LINUX_XHCI_HCD_H
24#define __LINUX_XHCI_HCD_H
25
26#include <linux/usb.h>
27#include <linux/timer.h>
28
29#include "../core/hcd.h"
30/* Code sharing between pci-quirks and xhci hcd */
31#include "xhci-ext-caps.h"
32
33/* xHCI PCI Configuration Registers */
34#define XHCI_SBRN_OFFSET (0x60)
35
36/* Max number of USB devices for any host controller - limit in section 6.1 */
37#define MAX_HC_SLOTS 256
38/* Section 5.3.3 - MaxPorts */
39#define MAX_HC_PORTS 127
40
41/*
42 * xHCI register interface.
43 * This corresponds to the eXtensible Host Controller Interface (xHCI)
44 * Revision 0.95 specification
45 *
46 * Registers should always be accessed with double word or quad word accesses.
47 *
48 * Some xHCI implementations may support 64-bit address pointers. Registers
49 * with 64-bit address pointers should be written to with dword accesses by
50 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
51 * xHCI implementations that do not support 64-bit address pointers will ignore
52 * the high dword, and write order is irrelevant.
53 */
54
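As a sketch, the low-dword-first rule looks like this (hypothetical helper; regs stands for one 64-bit register mapped as two 32-bit halves):

	#include <stdint.h>

	/* Write a 64-bit value as two dwords, low half first, per the rule
	 * above; controllers without 64-bit support simply ignore regs[1]. */
	static void demo_write64(volatile uint32_t regs[2], uint64_t val)
	{
		regs[0] = (uint32_t)val;
		regs[1] = (uint32_t)(val >> 32);
	}
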
55/**
56 * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
57 * @hc_capbase: length of the capabilities register and HC version number
58 * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
59 * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
60 * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
61 * @hcc_params: HCCPARAMS - Capability Parameters
62 * @db_off: DBOFF - Doorbell array offset
63 * @run_regs_off: RTSOFF - Runtime register space offset
64 */
65struct xhci_cap_regs {
66 u32 hc_capbase;
67 u32 hcs_params1;
68 u32 hcs_params2;
69 u32 hcs_params3;
70 u32 hcc_params;
71 u32 db_off;
72 u32 run_regs_off;
73 /* Reserved up to (CAPLENGTH - 0x1C) */
74};
75
76/* hc_capbase bitmasks */
77/* bits 7:0 - how long is the Capabilities register */
78#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
79/* bits 31:16 */
80#define HC_VERSION(p) (((p) >> 16) & 0xffff)
81
82/* HCSPARAMS1 - hcs_params1 - bitmasks */
83/* bits 0:7, Max Device Slots */
84#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
85#define HCS_SLOTS_MASK 0xff
86/* bits 8:18, Max Interrupters */
87#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
88/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
89#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
90
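For example, decoding a hypothetical HCSPARAMS1 value with these masks (illustrative numbers, not from real hardware):

	#include <stdint.h>
	#include <stdio.h>

	static void demo_decode_hcsparams1(uint32_t p)
	{
		/* 0x04000820 -> 32 device slots, 8 interrupters, 4 ports */
		printf("slots=%u intrs=%u ports=%u\n",
				p & 0xff,          /* HCS_MAX_SLOTS */
				(p >> 8) & 0x7ff,  /* HCS_MAX_INTRS */
				(p >> 24) & 0x7f); /* HCS_MAX_PORTS */
	}
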
91/* HCSPARAMS2 - hcs_params2 - bitmasks */
92/* bits 0:3, frames or uframes that SW needs to queue transactions
93 * ahead of the HW to meet periodic deadlines */
94#define HCS_IST(p) (((p) >> 0) & 0xf)
95/* bits 4:7, max number of Event Ring segments */
96#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
97/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
98/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
99
100/* HCSPARAMS3 - hcs_params3 - bitmasks */
101/* bits 0:7, Max U1 to U0 latency for the roothub ports */
102#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
103/* bits 16:31, Max U2 to U0 latency for the roothub ports */
104#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
105
106/* HCCPARAMS - hcc_params - bitmasks */
107/* true: HC can use 64-bit address pointers */
108#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
109/* true: HC can do bandwidth negotiation */
110#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
111/* true: HC uses 64-byte Device Context structures
112 * FIXME 64-byte context structures aren't supported yet.
113 */
114#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
115/* true: HC has port power switches */
116#define HCC_PPC(p) ((p) & (1 << 3))
117/* true: HC has port indicators */
118#define HCS_INDICATOR(p) ((p) & (1 << 4))
119/* true: HC has Light HC Reset Capability */
120#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
121/* true: HC supports latency tolerance messaging */
122#define HCC_LTC(p) ((p) & (1 << 6))
123/* true: no secondary Stream ID Support */
124#define HCC_NSS(p) ((p) & (1 << 7))
125/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
126#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
127/* Extended Capabilities pointer from PCI base - section 5.3.6 */
128#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
129
130/* db_off bitmask - bits 0:1 reserved */
131#define DBOFF_MASK (~0x3)
132
133/* run_regs_off bitmask - bits 0:4 reserved */
134#define RTSOFF_MASK (~0x1f)
135
136
137/* Number of registers per port */
138#define NUM_PORT_REGS 4
139
140/**
141 * struct xhci_op_regs - xHCI Host Controller Operational Registers.
142 * @command: USBCMD - xHC command register
143 * @status: USBSTS - xHC status register
144 * @page_size: This indicates the page size that the host controller
145 * supports. If bit n is set, the HC supports a page size
146 * of 2^(n+12), up to a 128MB page size.
147 * 4K is the minimum page size.
148 * @cmd_ring: CRP - 64-bit Command Ring Pointer
149 * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer
150 * @config_reg: CONFIG - Configure Register
151 * @port_status_base: PORTSCn - base address for Port Status and Control
152 * Each port has a Port Status and Control register,
153 * followed by a Port Power Management Status and Control
154 * register, a Port Link Info register, and a reserved
155 * register.
156 * @port_power_base: PORTPMSCn - base address for
157 * Port Power Management Status and Control
158 * @port_link_base: PORTLIn - base address for Port Link Info (current
159 * Link PM state and control) for USB 2.1 and USB 3.0
160 * devices.
161 */
162struct xhci_op_regs {
163 u32 command;
164 u32 status;
165 u32 page_size;
166 u32 reserved1;
167 u32 reserved2;
168 u32 dev_notification;
169 u32 cmd_ring[2];
170 /* rsvd: offset 0x20-2F */
171 u32 reserved3[4];
172 u32 dcbaa_ptr[2];
173 u32 config_reg;
174 /* rsvd: offset 0x3C-3FF */
175 u32 reserved4[241];
176 /* port 1 registers, which serve as a base address for other ports */
177 u32 port_status_base;
178 u32 port_power_base;
179 u32 port_link_base;
180 u32 reserved5;
181 /* registers for ports 2-255 */
182 u32 reserved6[NUM_PORT_REGS*254];
183};
184
185/* USBCMD - USB command - command bitmasks */
186/* start/stop HC execution - do not write unless HC is halted */
187#define CMD_RUN XHCI_CMD_RUN
188/* Reset HC - resets internal HC state machine and all registers (except
189 * PCI config regs). HC does NOT drive a USB reset on the downstream ports.
190 * The xHCI driver must reinitialize the xHC after setting this bit.
191 */
192#define CMD_RESET (1 << 1)
193/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
194#define CMD_EIE XHCI_CMD_EIE
195/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
196#define CMD_HSEIE XHCI_CMD_HSEIE
197/* bits 4:6 are reserved (and should be preserved on writes). */
198/* light reset (port status stays unchanged) - reset completed when this is 0 */
199#define CMD_LRESET (1 << 7)
200/* FIXME: ignoring host controller save/restore state for now. */
201#define CMD_CSS (1 << 8)
202#define CMD_CRS (1 << 9)
203/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
204#define CMD_EWE XHCI_CMD_EWE
205/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
206 * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
207 * '0' means the xHC can power it off if all ports are in the disconnect,
208 * disabled, or powered-off state.
209 */
210#define CMD_PM_INDEX (1 << 11)
211/* bits 12:31 are reserved (and should be preserved on writes). */
212
213/* USBSTS - USB status - status bitmasks */
214/* HC not running - set to 1 when run/stop bit is cleared. */
215#define STS_HALT XHCI_STS_HALT
216/* serious error, e.g. PCI parity error. The HC will clear the run/stop bit. */
217#define STS_FATAL (1 << 2)
218/* event interrupt - clear this prior to clearing any IP flags in IR set */
219#define STS_EINT (1 << 3)
220/* port change detect */
221#define STS_PORT (1 << 4)
222/* bits 5:7 reserved and zeroed */
223/* save state status - '1' means xHC is saving state */
224#define STS_SAVE (1 << 8)
225/* restore state status - '1' means xHC is restoring state */
226#define STS_RESTORE (1 << 9)
227/* true: save or restore error */
228#define STS_SRE (1 << 10)
229/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
230#define STS_CNR XHCI_STS_CNR
231/* true: internal Host Controller Error - SW needs to reset and reinitialize */
232#define STS_HCE (1 << 12)
233/* bits 13:31 reserved and should be preserved */
234
235/*
236 * DNCTRL - Device Notification Control Register - dev_notification bitmasks
237 * Generate a device notification event when the HC sees a transaction with a
238 * notification type that matches a bit set in this bit field.
239 */
240#define DEV_NOTE_MASK (0xffff)
241#define ENABLE_DEV_NOTE(x)	(1 << (x))
242/* Most of the device notification types should only be used for debug.
243 * SW does need to pay attention to function wake notifications.
244 */
245#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
246
247/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
248/* bit 0 is the command ring cycle state */
249/* stop ring operation after completion of the currently executing command */
250#define CMD_RING_PAUSE (1 << 1)
251/* stop ring immediately - abort the currently executing command */
252#define CMD_RING_ABORT (1 << 2)
253/* true: command ring is running */
254#define CMD_RING_RUNNING (1 << 3)
255/* bits 4:5 reserved and should be preserved */
256/* Command Ring pointer - bit mask for the lower 32 bits. */
257#define CMD_RING_ADDR_MASK (0xffffffc0)
258
259/* CONFIG - Configure Register - config_reg bitmasks */
260/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
261#define MAX_DEVS(p) ((p) & 0xff)
262/* bits 8:31 - reserved and should be preserved */
263
264/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
265/* true: device connected */
266#define PORT_CONNECT (1 << 0)
267/* true: port enabled */
268#define PORT_PE (1 << 1)
269/* bit 2 reserved and zeroed */
270/* true: port has an over-current condition */
271#define PORT_OC (1 << 3)
272/* true: port reset signaling asserted */
273#define PORT_RESET (1 << 4)
274/* Port Link State - bits 5:8
275 * A read gives the current link PM state of the port,
276 * a write with Link State Write Strobe set sets the link state.
277 */
278/* true: port has power (see HCC_PPC) */
279#define PORT_POWER (1 << 9)
280/* bits 10:13 indicate device speed:
281 * 0 - undefined speed - port hasn't been initialized by a reset yet
282 * 1 - full speed
283 * 2 - low speed
284 * 3 - high speed
285 * 4 - super speed
286 * 5-15 reserved
287 */
288#define DEV_SPEED_MASK (0xf << 10)
289#define XDEV_FS (0x1 << 10)
290#define XDEV_LS (0x2 << 10)
291#define XDEV_HS (0x3 << 10)
292#define XDEV_SS (0x4 << 10)
293#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
294#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
295#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
296#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
297#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
298/* Bits 20:23 in the Slot Context are the speed for the device */
299#define SLOT_SPEED_FS (XDEV_FS << 10)
300#define SLOT_SPEED_LS (XDEV_LS << 10)
301#define SLOT_SPEED_HS (XDEV_HS << 10)
302#define SLOT_SPEED_SS (XDEV_SS << 10)
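
/*
 * Sketch (illustrative, hypothetical helper): map the PORTSC speed field,
 * decoded with the DEV_*SPEED macros above, onto a name for debug output.
 */
static inline const char *xhci_port_speed_string(u32 portsc)
{
	if (DEV_FULLSPEED(portsc))
		return "full-speed";
	if (DEV_LOWSPEED(portsc))
		return "low-speed";
	if (DEV_HIGHSPEED(portsc))
		return "high-speed";
	if (DEV_SUPERSPEED(portsc))
		return "super-speed";
	return "undefined";
}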
303/* Port Indicator Control */
304#define PORT_LED_OFF (0 << 14)
305#define PORT_LED_AMBER (1 << 14)
306#define PORT_LED_GREEN (2 << 14)
307#define PORT_LED_MASK (3 << 14)
308/* Port Link State Write Strobe - set this when changing link state */
309#define PORT_LINK_STROBE (1 << 16)
310/* true: connect status change */
311#define PORT_CSC (1 << 17)
312/* true: port enable change */
313#define PORT_PEC (1 << 18)
314/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
315 * into an enabled state, and the device into the default state. A "warm" reset
316 * also resets the link, forcing the device through the link training sequence.
317 * SW can also look at the Port Reset register to see when warm reset is done.
318 */
319#define PORT_WRC (1 << 19)
320/* true: over-current change */
321#define PORT_OCC (1 << 20)
322/* true: reset change - 1 to 0 transition of PORT_RESET */
323#define PORT_RC (1 << 21)
324/* port link status change - set on some port link state transitions:
325 *  Transition                           Reason
326 *  -----------------------------------------------------------------------
327 *  - U3 to Resume                       Wakeup signaling from a device
328 *  - Resume to Recovery to U0           USB 3.0 device resume
329 *  - Resume to U0                       USB 2.0 device resume
330 *  - U3 to Recovery to U0               Software resume of USB 3.0 device complete
331 *  - U3 to U0                           Software resume of USB 2.0 device complete
332 *  - U2 to U0                           L1 resume of USB 2.1 device complete
333 *  - U0 to U0 (???)                     L1 entry rejection by USB 2.1 device
334 *  - U0 to disabled                     L1 entry error with USB 2.1 device
335 *  - Any state to inactive              Error on USB 3.0 port
336 */
337#define PORT_PLC (1 << 22)
338/* port configure error change - port failed to configure its link partner */
339#define PORT_CEC (1 << 23)
340/* bit 24 reserved */
341/* wake on connect (enable) */
342#define PORT_WKCONN_E (1 << 25)
343/* wake on disconnect (enable) */
344#define PORT_WKDISC_E (1 << 26)
345/* wake on over-current (enable) */
346#define PORT_WKOC_E (1 << 27)
347/* bits 28:29 reserved */
348/* true: device is removable - for USB 3.0 roothub emulation */
349#define PORT_DEV_REMOVE (1 << 30)
350/* Initiate a warm port reset - complete when PORT_WRC is '1' */
351#define PORT_WR (1 << 31)
352
353/* Port Power Management Status and Control - port_power_base bitmasks */
354/* Inactivity timer value for transitions into U1, in microseconds.
355 * Timeout can be up to 127us. 0xFF means an infinite timeout.
356 */
357#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
358/* Inactivity timer value for transitions into U2 */
359#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
360/* Bits 24:31 for port testing */
361
362
363/**
364 * struct xhci_intr_reg - Interrupt Register Set
365 * @irq_pending: IMAN - Interrupt Management Register. Used to enable
366 * interrupts and check for pending interrupts.
367 * @irq_control: IMOD - Interrupt Moderation Register.
368 * Used to throttle interrupts.
369 * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
370 * @erst_base: ERST base address.
371 * @erst_dequeue: Event ring dequeue pointer.
372 *
373 * Each interrupter (defined by an MSI-X vector) has an event ring and an Event
374 * Ring Segment Table (ERST) associated with it.  The event ring consists of
375 * multiple segments of the same size.  The HC places events on the ring and
376 * "updates the Cycle bit in the TRBs to indicate to software the current
377 * position of the Enqueue Pointer." The HCD (Linux) processes those events and
378 * updates the dequeue pointer.
379 */
380struct xhci_intr_reg {
381 u32 irq_pending;
382 u32 irq_control;
383 u32 erst_size;
384 u32 rsvd;
385 u32 erst_base[2];
386 u32 erst_dequeue[2];
387};
388
389/* irq_pending bitmasks */
390#define ER_IRQ_PENDING(p) ((p) & 0x1)
391/* bits 2:31 need to be preserved */
392/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
393#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe)
394#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2)
395#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2))
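
/*
 * Sketch (illustrative only, modulo the FIXME above about the IP bit):
 * enable event ring interrupts on one interrupter set, using the masks
 * above to preserve reserved bits.  Assumes <linux/io.h>; the helper
 * name is hypothetical.
 */
static inline void xhci_enable_interrupter_sketch(struct xhci_intr_reg __iomem *ir_set)
{
	u32 iman = readl(&ir_set->irq_pending);

	writel(ER_IRQ_ENABLE(iman), &ir_set->irq_pending);
}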
396
397/* irq_control bitmasks */
398/* Minimum interval between interrupts (in 250ns intervals). The interval
399 * between interrupts will be longer if there are no events on the event ring.
400 * Default is 4000 (1 ms).
401 */
402#define ER_IRQ_INTERVAL_MASK (0xffff)
403/* Counter used to count down the time to the next interrupt - HW use only */
404#define ER_IRQ_COUNTER_MASK (0xffff << 16)
405
406/* erst_size bitmasks */
407/* Preserve bits 16:31 of erst_size */
408#define ERST_SIZE_MASK (0xffff << 16)
409
410/* erst_dequeue bitmasks */
411/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
412 * where the current dequeue pointer lies. This is an optional HW hint.
413 */
414#define ERST_DESI_MASK (0x7)
415/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
416 * a work queue (or delayed service routine)?
417 */
418#define ERST_EHB (1 << 3)
419#define ERST_PTR_MASK (0xf)
420
421/**
422 * struct xhci_run_regs
423 * @microframe_index:
424 * MFINDEX - current microframe number
425 *
426 * Section 5.5 Host Controller Runtime Registers:
427 * "Software should read and write these registers using only Dword (32 bit)
428 * or larger accesses"
429 */
430struct xhci_run_regs {
431 u32 microframe_index;
432 u32 rsvd[7];
433 struct xhci_intr_reg ir_set[128];
434};
435
436/**
437 * struct xhci_doorbell_array
438 *
439 * Section 5.6
440 */
441struct xhci_doorbell_array {
442 u32 doorbell[256];
443};
444
445#define DB_TARGET_MASK 0xFFFFFF00
446#define DB_STREAM_ID_MASK 0x0000FFFF
447#define DB_TARGET_HOST 0x0
448#define DB_STREAM_ID_HOST 0x0
449#define DB_MASK (0xff << 8)
450
451/* Endpoint Target - bits 0:7 */
452#define EPI_TO_DB(p) (((p) + 1) & 0xff)
453
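/*
 * Sketch (hypothetical helper, loosely following section 5.6): ring the
 * doorbell for one endpoint of a device slot.  EPI_TO_DB() converts the
 * driver's 0-based endpoint index into the 1-based doorbell target; the
 * stream ID bits are left at DB_STREAM_ID_HOST (0) here.
 */
static inline void xhci_ring_ep_doorbell_sketch(struct xhci_doorbell_array __iomem *dba,
		unsigned int slot_id, unsigned int ep_index)
{
	writel(EPI_TO_DB(ep_index), &dba->doorbell[slot_id]);
}
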
454
455/**
456 * struct xhci_slot_ctx
457 * @dev_info: Route string, device speed, hub info, and last valid endpoint
458 * @dev_info2: Max exit latency for device number, root hub port number
459 * @tt_info: tt_info is used to construct split transaction tokens
460 * @dev_state: slot state and device address
461 *
462 * Slot Context - section 6.2.1.1. This assumes the HC uses 32-byte context
463 * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
464 * reserved at the end of the slot context for HC internal use.
465 */
466struct xhci_slot_ctx {
467 u32 dev_info;
468 u32 dev_info2;
469 u32 tt_info;
470 u32 dev_state;
471 /* offset 0x10 to 0x1f reserved for HC internal use */
472 u32 reserved[4];
473};
474
475/* dev_info bitmasks */
476/* Route String - 0:19 */
477#define ROUTE_STRING_MASK (0xfffff)
478/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
479#define DEV_SPEED (0xf << 20)
480/* bit 24 reserved */
481/* Is this LS/FS device connected through a HS hub? - bit 25 */
482#define DEV_MTT (0x1 << 25)
483/* Set if the device is a hub - bit 26 */
484#define DEV_HUB (0x1 << 26)
485/* Index of the last valid endpoint context in this device context - 27:31 */
486#define LAST_CTX_MASK (0x1f << 27)
487#define LAST_CTX(p) ((p) << 27)
488#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
489#define SLOT_FLAG (1 << 0)
490#define EP0_FLAG (1 << 1)
491
492/* dev_info2 bitmasks */
493/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
494#define MAX_EXIT (0xffff)
495/* Root hub port number that is needed to access the USB device */
496#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
497
498/* tt_info bitmasks */
499/*
500 * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
501 * The Slot ID of the hub that isolates the high speed signaling from
502 * this low or full-speed device. '0' if attached to root hub port.
503 */
504#define TT_SLOT (0xff)
505/*
506 * The number of the downstream-facing port of the high-speed hub.
507 * '0' if the device is not low or full speed.
508 */
509#define TT_PORT (0xff << 8)
510
511/* dev_state bitmasks */
512/* USB device address - assigned by the HC */
513#define DEV_ADDR_MASK (0xff)
514/* bits 8:26 reserved */
515/* Slot state */
516#define SLOT_STATE (0x1f << 27)
517#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
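
/*
 * Sketch (hypothetical helper): pull the HC-assigned USB device address
 * out of a slot context's dev_state field using the mask above.
 */
static inline u8 xhci_slot_dev_addr_sketch(struct xhci_slot_ctx *slot)
{
	return slot->dev_state & DEV_ADDR_MASK;
}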
518
519
520/**
521 * struct xhci_ep_ctx
522 * @ep_info: endpoint state, streams, mult, and interval information.
523 * @ep_info2: information on endpoint type, max packet size, max burst size,
524 * error count, and whether the HC will force an event for all
525 * transactions.
526 * @deq: 64-bit ring dequeue pointer address. If the endpoint only
527 * defines one stream, this points to the endpoint transfer ring.
528 * Otherwise, it points to a stream context array, which has a
529 * ring pointer for each stream.
530 * @tx_info:
531 * Average TRB lengths for the endpoint ring and
532 * max payload within an Endpoint Service Interval Time (ESIT).
533 *
534 * Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context
535 * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
536 * reserved at the end of the endpoint context for HC internal use.
537 */
538struct xhci_ep_ctx {
539 u32 ep_info;
540 u32 ep_info2;
541 u32 deq[2];
542 u32 tx_info;
543 /* offset 0x14 - 0x1f reserved for HC internal use */
544 u32 reserved[3];
545};
546
547/* ep_info bitmasks */
548/*
549 * Endpoint State - bits 0:2
550 * 0 - disabled
551 * 1 - running
552 * 2 - halted due to halt condition - ok to manipulate endpoint ring
553 * 3 - stopped
554 * 4 - TRB error
555 * 5-7 - reserved
556 */
557#define EP_STATE_MASK (0xf)
558#define EP_STATE_DISABLED 0
559#define EP_STATE_RUNNING 1
560#define EP_STATE_HALTED 2
561#define EP_STATE_STOPPED 3
562#define EP_STATE_ERROR 4
563/* Mult - max number of bursts within an interval, from the EP companion desc. */
564#define EP_MULT(p)	(((p) & 0x3) << 8)
565/* bits 10:14 are Max Primary Streams */
566/* bit 15 is Linear Stream Array */
567/* Interval - period between requests to an endpoint - in 125us increments. */
568#define EP_INTERVAL(p)	(((p) & 0xff) << 16)
569
570/* ep_info2 bitmasks */
571/*
572 * Force Event - generate transfer events for all TRBs for this endpoint
573 * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
574 */
575#define FORCE_EVENT (0x1)
576#define ERROR_COUNT(p) (((p) & 0x3) << 1)
577#define EP_TYPE(p) ((p) << 3)
578#define ISOC_OUT_EP 1
579#define BULK_OUT_EP 2
580#define INT_OUT_EP 3
581#define CTRL_EP 4
582#define ISOC_IN_EP 5
583#define BULK_IN_EP 6
584#define INT_IN_EP 7
585/* bit 6 reserved */
586/* bit 7 is Host Initiate Disable - for disabling stream selection */
587#define MAX_BURST(p) (((p)&0xff) << 8)
588#define MAX_PACKET(p) (((p)&0xffff) << 16)
589
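/*
 * Sketch (illustrative values, hypothetical helper): encode ep_info2 for
 * a high-speed bulk IN endpoint - 512-byte max packet, no bursting, and
 * the usual 3-error retry budget - using the masks above.
 */
static inline u32 xhci_bulk_in_ep_info2_sketch(void)
{
	return EP_TYPE(BULK_IN_EP) | MAX_PACKET(512) | MAX_BURST(0) |
		ERROR_COUNT(3);
}
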
590
591/**
592 * struct xhci_device_control
593 * Input/Output context; see section 6.2.5.
594 *
595 * @drop_flags: set the bit of the endpoint context you want to disable
596 * @add_flags: set the bit of the endpoint context you want to enable
597 */
598struct xhci_device_control {
599 u32 drop_flags;
600 u32 add_flags;
601 u32 rsvd[6];
602 struct xhci_slot_ctx slot;
603 struct xhci_ep_ctx ep[31];
604};
605
606/* drop context bitmasks */
607#define	DROP_EP(x)	(0x1 << (x))
608/* add context bitmasks */
609#define	ADD_EP(x)	(0x1 << (x))
610
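/*
 * Sketch (not the driver's actual flow; hypothetical helper): flag
 * endpoint context N as added in an input context before a Configure
 * Endpoint command.  SLOT_FLAG is set as well since the slot context is
 * evaluated along with the change.
 */
static inline void xhci_input_ctx_add_ep_sketch(struct xhci_device_control *in_ctx,
		unsigned int ep_ctx_index)
{
	in_ctx->add_flags |= ADD_EP(ep_ctx_index) | SLOT_FLAG;
	in_ctx->drop_flags &= ~DROP_EP(ep_ctx_index);
}
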
611
612struct xhci_virt_device {
613 /*
614 * Commands to the hardware are passed an "input context" that
615 * tells the hardware what to change in its data structures.
616 * The hardware will return changes in an "output context" that
617 * software must allocate for the hardware. We need to keep
618 * track of input and output contexts separately because
619 * these commands might fail and we don't trust the hardware.
620 */
621 struct xhci_device_control *out_ctx;
622 dma_addr_t out_ctx_dma;
623 /* Used for addressing devices and configuration changes */
624 struct xhci_device_control *in_ctx;
625 dma_addr_t in_ctx_dma;
626 /* FIXME when stream support is added */
627 struct xhci_ring *ep_rings[31];
628 /* Temporary storage in case the configure endpoint command fails and we
629 * have to restore the device state to the previous state
630 */
631 struct xhci_ring *new_ep_rings[31];
632 struct completion cmd_completion;
633 /* Status of the last command issued for this device */
634 u32 cmd_status;
635};
636
637
638/**
639 * struct xhci_device_context_array
640 * @dev_context_ptrs: array of 64-bit DMA addresses for device contexts
641 */
642struct xhci_device_context_array {
643 /* 64-bit device addresses; we only write 32-bit addresses */
644 u32 dev_context_ptrs[2*MAX_HC_SLOTS];
645 /* private xHCD pointers */
646 dma_addr_t dma;
647};
648/* TODO: write function to set the 64-bit device DMA address */
649/*
650 * TODO: change this to be dynamically sized at HC mem init time since the HC
651 * might not be able to handle the maximum number of devices possible.
652 */
653
654
655struct xhci_stream_ctx {
656 /* 64-bit stream ring address, cycle state, and stream type */
657 u32 stream_ring[2];
658	/* offset 0x8 - 0xf reserved for HC internal use */
659 u32 reserved[2];
660};
661
662
663struct xhci_transfer_event {
664 /* 64-bit buffer address, or immediate data */
665 u32 buffer[2];
666 u32 transfer_len;
667 /* This field is interpreted differently based on the type of TRB */
668 u32 flags;
669};
670
671/* Transfer Event bit fields */
672#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
673
674/* Completion Code - only applicable for some types of TRBs */
675#define COMP_CODE_MASK (0xff << 24)
676#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
677#define COMP_SUCCESS 1
678/* Data Buffer Error */
679#define COMP_DB_ERR 2
680/* Babble Detected Error */
681#define COMP_BABBLE 3
682/* USB Transaction Error */
683#define COMP_TX_ERR 4
684/* TRB Error - some TRB field is invalid */
685#define COMP_TRB_ERR 5
686/* Stall Error - USB device is stalled */
687#define COMP_STALL 6
688/* Resource Error - HC doesn't have memory for that device configuration */
689#define COMP_ENOMEM 7
690/* Bandwidth Error - not enough room in schedule for this dev config */
691#define COMP_BW_ERR 8
692/* No Slots Available Error - HC ran out of device slots */
693#define COMP_ENOSLOTS 9
694/* Invalid Stream Type Error */
695#define COMP_STREAM_ERR 10
696/* Slot Not Enabled Error - doorbell rung for disabled device slot */
697#define COMP_EBADSLT 11
698/* Endpoint Not Enabled Error */
699#define COMP_EBADEP 12
700/* Short Packet */
701#define COMP_SHORT_TX 13
702/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
703#define COMP_UNDERRUN 14
704/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
705#define COMP_OVERRUN 15
706/* Virtual Function Event Ring Full Error */
707#define COMP_VF_FULL 16
708/* Parameter Error - Context parameter is invalid */
709#define COMP_EINVAL 17
710/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
711#define COMP_BW_OVER 18
712/* Context State Error - illegal context state transition requested */
713#define COMP_CTX_STATE 19
714/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
715#define COMP_PING_ERR 20
716/* Event Ring is full */
717#define COMP_ER_FULL 21
718/* Missed Service Error - HC couldn't service an isoc ep within interval */
719#define COMP_MISSED_INT 23
720/* Successfully stopped command ring */
721#define COMP_CMD_STOP 24
722/* Successfully aborted current command and stopped command ring */
723#define COMP_CMD_ABORT 25
724/* Stopped - transfer was terminated by a stop endpoint command */
725#define COMP_STOP 26
726/* Same as COMP_STOP, but the transferred length in the event is invalid */
727#define COMP_STOP_INVAL 27
728/* Control Abort Error - Debug Capability - control pipe aborted */
729#define COMP_DBG_ABORT 28
730/* Completion codes 29 and 30 reserved */
731/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
732#define COMP_BUFF_OVER 31
733/* Event Lost Error - xHC has an "internal event overrun condition" */
734#define COMP_ISSUES 32
735/* Undefined Error - reported when other error codes don't apply */
736#define COMP_UNKNOWN 33
737/* Invalid Stream ID Error */
738#define COMP_STRID_ERR 34
739/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
740/* FIXME - check for this */
741#define COMP_2ND_BW_ERR 35
742/* Split Transaction Error */
743#define COMP_SPLIT_ERR 36
744
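/*
 * Sketch (hypothetical policy, not the driver's): map a few of the
 * completion codes above onto errno values when giving back an URB.
 * The argument is the status dword of an event TRB.  Assumes
 * <linux/errno.h>.
 */
static inline int xhci_comp_code_to_errno_sketch(u32 status)
{
	switch (GET_COMP_CODE(status)) {
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		return 0;
	case COMP_STALL:
		return -EPIPE;
	case COMP_BABBLE:
		return -EOVERFLOW;
	case COMP_TX_ERR:
		return -EPROTO;
	default:
		return -EIO;
	}
}
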
745struct xhci_link_trb {
746	/* 64-bit segment pointer */
747 u32 segment_ptr[2];
748 u32 intr_target;
749 u32 control;
750};
751
752/* control bitfields */
753#define LINK_TOGGLE (0x1<<1)
754
755/* Command completion event TRB */
756struct xhci_event_cmd {
757 /* Pointer to command TRB, or the value passed by the event data trb */
758 u32 cmd_trb[2];
759 u32 status;
760 u32 flags;
761};
762
763/* flags bitmasks */
764/* bits 16:23 are the virtual function ID */
765/* bits 24:31 are the slot ID */
766#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
767#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24)
768
769/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
770#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
771#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
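
/*
 * Sketch (hypothetical helper): recover the slot ID from a command
 * completion event's flags word with the mask above.
 */
static inline unsigned int xhci_cmd_event_slot_id_sketch(struct xhci_event_cmd *event)
{
	return TRB_TO_SLOT_ID(event->flags);
}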
772
773
774/* Port Status Change Event TRB fields */
775/* Port ID - bits 31:24 */
776#define GET_PORT_ID(p) (((p) & (0xff << 24)) >> 24)
777
778/* Normal TRB fields */
779/* transfer_len bitmasks - bits 0:16 */
780#define TRB_LEN(p) ((p) & 0x1ffff)
781/* TD size - number of bytes remaining in the TD (including this TRB):
782 * bits 17 - 21. Shift the number of bytes by 10. */
783#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17)
784/* Interrupter Target - which MSI-X vector to target the completion event at */
785#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
786#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
787
788/* Cycle bit - indicates TRB ownership by HC or HCD */
789#define TRB_CYCLE (1<<0)
790/*
791 * Force next event data TRB to be evaluated before task switch.
792 * Used to pass OS data back after a TD completes.
793 */
794#define TRB_ENT (1<<1)
795/* Interrupt on short packet */
796#define TRB_ISP (1<<2)
797/* Set PCIe no snoop attribute */
798#define TRB_NO_SNOOP (1<<3)
799/* Chain multiple TRBs into a TD */
800#define TRB_CHAIN (1<<4)
801/* Interrupt on completion */
802#define TRB_IOC (1<<5)
803/* The buffer pointer contains immediate data */
804#define TRB_IDT (1<<6)
805
806
807/* Control transfer TRB specific fields */
808#define TRB_DIR_IN (1<<16)
809
810struct xhci_generic_trb {
811 u32 field[4];
812};
813
814union xhci_trb {
815 struct xhci_link_trb link;
816 struct xhci_transfer_event trans_event;
817 struct xhci_event_cmd event_cmd;
818 struct xhci_generic_trb generic;
819};
820
821/* TRB bit mask */
822#define TRB_TYPE_BITMASK (0xfc00)
823#define TRB_TYPE(p) ((p) << 10)
824/* TRB type IDs */
825/* bulk, interrupt, isoc scatter/gather, and control data stage */
826#define TRB_NORMAL 1
827/* setup stage for control transfers */
828#define TRB_SETUP 2
829/* data stage for control transfers */
830#define TRB_DATA 3
831/* status stage for control transfers */
832#define TRB_STATUS 4
833/* isoc transfers */
834#define TRB_ISOC 5
835/* TRB for linking ring segments */
836#define TRB_LINK 6
837#define TRB_EVENT_DATA 7
838/* Transfer Ring No-op (not for the command ring) */
839#define TRB_TR_NOOP 8
840/* Command TRBs */
841/* Enable Slot Command */
842#define TRB_ENABLE_SLOT 9
843/* Disable Slot Command */
844#define TRB_DISABLE_SLOT 10
845/* Address Device Command */
846#define TRB_ADDR_DEV 11
847/* Configure Endpoint Command */
848#define TRB_CONFIG_EP 12
849/* Evaluate Context Command */
850#define TRB_EVAL_CONTEXT 13
851/* Reset Endpoint Command (resets the endpoint's transfer ring) */
852#define TRB_RESET_RING		14
853/* Stop Endpoint Command (stops the endpoint's transfer ring) */
854#define TRB_STOP_RING 15
855/* Set Transfer Ring Dequeue Pointer Command */
856#define TRB_SET_DEQ 16
857/* Reset Device Command */
858#define TRB_RESET_DEV 17
859/* Force Event Command (opt) */
860#define TRB_FORCE_EVENT 18
861/* Negotiate Bandwidth Command (opt) */
862#define TRB_NEG_BANDWIDTH 19
863/* Set Latency Tolerance Value Command (opt) */
864#define TRB_SET_LT 20
865/* Get port bandwidth Command */
866#define TRB_GET_BW 21
867/* Force Header Command - generate a transaction or link management packet */
868#define TRB_FORCE_HEADER 22
869/* No-op Command - not for transfer rings */
870#define TRB_CMD_NOOP 23
871/* TRB IDs 24-31 reserved */
872/* Event TRBS */
873/* Transfer Event */
874#define TRB_TRANSFER 32
875/* Command Completion Event */
876#define TRB_COMPLETION 33
877/* Port Status Change Event */
878#define TRB_PORT_STATUS 34
879/* Bandwidth Request Event (opt) */
880#define TRB_BANDWIDTH_EVENT 35
881/* Doorbell Event (opt) */
882#define TRB_DOORBELL 36
883/* Host Controller Event */
884#define TRB_HC_EVENT 37
885/* Device Notification Event - device sent function wake notification */
886#define TRB_DEV_NOTE 38
887/* MFINDEX Wrap Event - microframe counter wrapped */
888#define TRB_MFINDEX_WRAP 39
889/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
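
/*
 * Sketch (illustrative only): compose the control word for the final
 * normal TRB of a TD - type, interrupt-on-completion, and the ring's
 * current cycle state - from the masks above.
 */
static inline u32 xhci_last_normal_trb_ctrl_sketch(u32 cycle_state)
{
	return TRB_TYPE(TRB_NORMAL) | TRB_IOC | (cycle_state & TRB_CYCLE);
}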
890
891/*
892 * TRBS_PER_SEGMENT must be a multiple of 4,
893 * since the command ring is 64-byte aligned.
894 * It must also be greater than 16.
895 */
896#define TRBS_PER_SEGMENT 64
897#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
898/* TRB buffer pointers can't cross 64KB boundaries */
899#define TRB_MAX_BUFF_SHIFT 16
900#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
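
/*
 * Sketch (mirrors the 64KB constraint above, not the driver's exact math):
 * count the normal TRBs needed for a contiguous buffer, since one TRB's
 * buffer may not cross a 64KB boundary.  Hypothetical helper.
 */
static inline unsigned int xhci_count_trbs_sketch(u64 addr, u64 len)
{
	unsigned int num_trbs = 0;

	while (len) {
		/* bytes left before the next 64KB boundary */
		u64 chunk = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));

		if (chunk > len)
			chunk = len;
		addr += chunk;
		len -= chunk;
		num_trbs++;
	}
	return num_trbs;
}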
901
902struct xhci_segment {
903 union xhci_trb *trbs;
904 /* private to HCD */
905 struct xhci_segment *next;
906 dma_addr_t dma;
907};
908
909struct xhci_td {
910 struct list_head td_list;
911 struct list_head cancelled_td_list;
912 struct urb *urb;
913 struct xhci_segment *start_seg;
914 union xhci_trb *first_trb;
915 union xhci_trb *last_trb;
916};
917
918struct xhci_ring {
919 struct xhci_segment *first_seg;
920 union xhci_trb *enqueue;
921 struct xhci_segment *enq_seg;
922 unsigned int enq_updates;
923 union xhci_trb *dequeue;
924 struct xhci_segment *deq_seg;
925 unsigned int deq_updates;
926 struct list_head td_list;
927 /* ---- Related to URB cancellation ---- */
928 struct list_head cancelled_td_list;
929 unsigned int cancels_pending;
930 unsigned int state;
931#define SET_DEQ_PENDING (1 << 0)
932 /* The TRB that was last reported in a stopped endpoint ring */
933 union xhci_trb *stopped_trb;
934 struct xhci_td *stopped_td;
935 /*
936 * Write the cycle state into the TRB cycle field to give ownership of
937 * the TRB to the host controller (if we are the producer), or to check
938 * if we own the TRB (if we are the consumer). See section 4.9.1.
939 */
940 u32 cycle_state;
941};
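
/*
 * Sketch of the consumer-side ownership test described above (section
 * 4.9.1): a TRB belongs to software only while its cycle bit matches the
 * ring's cycle state.  Hypothetical helper; field[3] is the control word.
 */
static inline bool xhci_sw_owns_trb_sketch(struct xhci_ring *ring)
{
	return (ring->dequeue->generic.field[3] & TRB_CYCLE) ==
		ring->cycle_state;
}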
942
943struct xhci_erst_entry {
944 /* 64-bit event ring segment address */
945 u32 seg_addr[2];
946 u32 seg_size;
947 /* Set to zero */
948 u32 rsvd;
949};
950
951struct xhci_erst {
952 struct xhci_erst_entry *entries;
953 unsigned int num_entries;
954 /* xhci->event_ring keeps track of segment dma addresses */
955 dma_addr_t erst_dma_addr;
956 /* Num entries the ERST can contain */
957 unsigned int erst_size;
958};
959
960/*
961 * Each segment table entry is 4*32 bits long.  1K seems like an ok size:
962 * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
963 * meaning 64 ring segments. */
964/* Initial number of event ring segments allocated */
965#define	ERST_NUM_SEGS	1
966/* Number of entries a 1K segment table can contain */
967#define	ERST_SIZE	64
968/* Initial allocated size of the ERST, in number of entries */
969#define	ERST_ENTRIES	1
970/* Poll every 60 seconds */
971#define POLL_TIMEOUT 60
972/* XXX: Make these module parameters */
973
974
975/* There is one xhci_hcd structure per controller */
976struct xhci_hcd {
977 /* glue to PCI and HCD framework */
978 struct xhci_cap_regs __iomem *cap_regs;
979 struct xhci_op_regs __iomem *op_regs;
980 struct xhci_run_regs __iomem *run_regs;
981 struct xhci_doorbell_array __iomem *dba;
982 /* Our HCD's current interrupter register set */
983 struct xhci_intr_reg __iomem *ir_set;
984
985 /* Cached register copies of read-only HC data */
986 __u32 hcs_params1;
987 __u32 hcs_params2;
988 __u32 hcs_params3;
989 __u32 hcc_params;
990
991 spinlock_t lock;
992
993 /* packed release number */
994 u8 sbrn;
995 u16 hci_version;
996 u8 max_slots;
997 u8 max_interrupters;
998 u8 max_ports;
999 u8 isoc_threshold;
1000 int event_ring_max;
1001 int addr_64;
1002 /* 4KB min, 128MB max */
1003 int page_size;
1004 /* Valid values are 12 to 20, inclusive */
1005 int page_shift;
1006 /* only one MSI vector for now, but might need more later */
1007 int msix_count;
1008 struct msix_entry *msix_entries;
1009 /* data structures */
1010 struct xhci_device_context_array *dcbaa;
1011 struct xhci_ring *cmd_ring;
1012 struct xhci_ring *event_ring;
1013 struct xhci_erst erst;
1014 /* slot enabling and address device helpers */
1015 struct completion addr_dev;
1016 int slot_id;
1017 /* Internal mirror of the HW's dcbaa */
1018 struct xhci_virt_device *devs[MAX_HC_SLOTS];
1019
1020 /* DMA pools */
1021 struct dma_pool *device_pool;
1022 struct dma_pool *segment_pool;
1023
1024#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1025 /* Poll the rings - for debugging */
1026 struct timer_list event_ring_timer;
1027 int zombie;
1028#endif
1029 /* Statistics */
1030 int noops_submitted;
1031 int noops_handled;
1032 int error_bitmask;
1033};
1034
1035/* For testing purposes */
1036#define NUM_TEST_NOOPS 0
1037
1038/* convert between an HCD pointer and the corresponding xhci_hcd */
1039static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
1040{
1041 return (struct xhci_hcd *) (hcd->hcd_priv);
1042}
1043
1044static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
1045{
1046 return container_of((void *) xhci, struct usb_hcd, hcd_priv);
1047}
1048
1049#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1050#define XHCI_DEBUG 1
1051#else
1052#define XHCI_DEBUG 0
1053#endif
1054
1055#define xhci_dbg(xhci, fmt, args...) \
1056 do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
1057#define xhci_info(xhci, fmt, args...) \
1058 do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
1059#define xhci_err(xhci, fmt, args...) \
1060 dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1061#define xhci_warn(xhci, fmt, args...) \
1062 dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1063
1064/* TODO: copied from ehci.h - can be refactored? */
1065/* xHCI spec says all registers are little endian */
1066static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
1067 __u32 __iomem *regs)
1068{
1069 return readl(regs);
1070}
1071static inline void xhci_writel(struct xhci_hcd *xhci,
1072 const unsigned int val, __u32 __iomem *regs)
1073{
1074 if (!in_interrupt())
1075 xhci_dbg(xhci,
1076 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
1077 regs, val);
1078 writel(val, regs);
1079}
1080
1081/* xHCI debugging */
1082void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
1083void xhci_print_registers(struct xhci_hcd *xhci);
1084void xhci_dbg_regs(struct xhci_hcd *xhci);
1085void xhci_print_run_regs(struct xhci_hcd *xhci);
1086void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
1087void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
1088void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
1089void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
1090void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
1091void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
1092void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
1093void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
1094
1095/* xHCI memory management */
1096void xhci_mem_cleanup(struct xhci_hcd *xhci);
1097int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
1098void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
1099int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
1100int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
1101unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
1102unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
1103void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
1104int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
1105 struct usb_device *udev, struct usb_host_endpoint *ep,
1106 gfp_t mem_flags);
1107void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
1108
1109#ifdef CONFIG_PCI
1110/* xHCI PCI glue */
1111int xhci_register_pci(void);
1112void xhci_unregister_pci(void);
1113#endif
1114
1115/* xHCI host controller glue */
1116int xhci_halt(struct xhci_hcd *xhci);
1117int xhci_reset(struct xhci_hcd *xhci);
1118int xhci_init(struct usb_hcd *hcd);
1119int xhci_run(struct usb_hcd *hcd);
1120void xhci_stop(struct usb_hcd *hcd);
1121void xhci_shutdown(struct usb_hcd *hcd);
1122int xhci_get_frame(struct usb_hcd *hcd);
1123irqreturn_t xhci_irq(struct usb_hcd *hcd);
1124int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
1125void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
1126int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
1127int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
1128int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
1129int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1130int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1131int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1132void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1133
1134/* xHCI ring, segment, TRB, and TD functions */
1135dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
1136void xhci_ring_cmd_db(struct xhci_hcd *xhci);
1137void *xhci_setup_one_noop(struct xhci_hcd *xhci);
1138void xhci_handle_event(struct xhci_hcd *xhci);
1139void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
1140int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
1141int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1142 u32 slot_id);
1143int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1144 unsigned int ep_index);
1145int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1146 int slot_id, unsigned int ep_index);
1147int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1148 int slot_id, unsigned int ep_index);
1149int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1150 u32 slot_id);
1151
1152/* xHCI roothub code */
1153int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1154 char *buf, u16 wLength);
1155int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1156
1157#endif /* __LINUX_XHCI_HCD_H */