author    Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:36:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-05-28 15:36:15 -0400
commit    87367a0b71a5188e34a913c05673b5078f71a64d (patch)
tree      da14cc575bfe692f07b42c2c8a13d9aa403fc471
parent    4cb865deec59ef31d966622d1ec87411ae32dfab (diff)
parent    2cf95c18d5069e13c02a8667d91e064df8e17e09 (diff)
Merge branch 'for-usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci
* 'for-usb-next' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci:
  Intel xhci: Limit number of active endpoints to 64.
  Intel xhci: Ignore spurious successful event.
  Intel xhci: Support EHCI/xHCI port switching.
  Intel xhci: Add PCI id for Panther Point xHCI host.
  xhci: STFU: Be quieter during URB submission and completion.
  xhci: STFU: Don't print event ring dequeue pointer.
  xhci: STFU: Remove function tracing.
  xhci: Don't submit commands when the host is dead.
  xhci: Clear stopped_td when Stop Endpoint command completes.
-rw-r--r--  drivers/usb/host/ehci-pci.c   |  39
-rw-r--r--  drivers/usb/host/pci-quirks.c |  63
-rw-r--r--  drivers/usb/host/pci-quirks.h |   2
-rw-r--r--  drivers/usb/host/xhci-pci.c   |  26
-rw-r--r--  drivers/usb/host/xhci-ring.c  |  89
-rw-r--r--  drivers/usb/host/xhci.c       | 240
-rw-r--r--  drivers/usb/host/xhci.h       |  22
-rw-r--r--  include/linux/pci_ids.h       |   1
8 files changed, 413 insertions(+), 69 deletions(-)
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 660b80a75cac..1102ce65a3a9 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -348,11 +348,50 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 	return rc;
 }
 
+static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
+		pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		pdev->device == 0x1E26;
+}
+
+static void ehci_enable_xhci_companion(void)
+{
+	struct pci_dev		*companion = NULL;
+
+	/* The xHCI and EHCI controllers are not on the same PCI slot */
+	for_each_pci_dev(companion) {
+		if (!usb_is_intel_switchable_xhci(companion))
+			continue;
+		usb_enable_xhci_ports(companion);
+		return;
+	}
+}
+
 static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
 
+	/* The BIOS on systems with the Intel Panther Point chipset may or may
+	 * not support xHCI natively.  That means that during system resume, it
+	 * may switch the ports back to EHCI so that users can use their
+	 * keyboard to select a kernel from GRUB after resume from hibernate.
+	 *
+	 * The BIOS is supposed to remember whether the OS had xHCI ports
+	 * enabled before resume, and switch the ports back to xHCI when the
+	 * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+	 * writers.
+	 *
+	 * Unconditionally switch the ports back to xHCI after a system resume.
+	 * We can't tell whether the EHCI or xHCI controller will be resumed
+	 * first, so we have to do the port switchover in both drivers.  Writing
+	 * a '1' to the port switchover registers should have no effect if the
+	 * port was already switched over.
+	 */
+	if (usb_is_intel_switchable_ehci(pdev))
+		ehci_enable_xhci_companion();
+
 	// maybe restore FLADJ
 
 	if (time_before(jiffies, ehci->next_statechange))
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f16c59d5f487..fd930618c28f 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -69,6 +69,9 @@
 #define NB_PIF0_PWRDOWN_0	0x01100012
 #define NB_PIF0_PWRDOWN_1	0x01100013
 
+#define USB_INTEL_XUSB2PR	0xD0
+#define USB_INTEL_USB3_PSSEN	0xD8
+
 static struct amd_chipset_info {
 	struct pci_dev	*nb_dev;
 	struct pci_dev	*smbus_dev;
@@ -673,6 +676,64 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
 	return -ETIMEDOUT;
 }
 
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+		pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
+}
+EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
+
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports.  These ports can be switched between either
+ * controller.  Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start.  This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over.  The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+	u32		ports_available;
+
+	ports_available = 0xffffffff;
+	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+	 * Register, to turn on SuperSpeed terminations for all
+	 * available ports.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+			cpu_to_le32(ports_available));
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+			"under xHCI: 0x%x\n", ports_available);
+
+	ports_available = 0xffffffff;
+	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+	 * switch the USB 2.0 power and data lines over to the xHCI
+	 * host.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			cpu_to_le32(ports_available));
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+			"to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
 /**
  * PCI Quirks for xHCI.
  *
@@ -732,6 +793,8 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
 		writel(XHCI_LEGACY_DISABLE_SMI,
 				base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
 
+	if (usb_is_intel_switchable_xhci(pdev))
+		usb_enable_xhci_ports(pdev);
+
 hc_init:
 	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
 
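The write-ones-then-read-back sequence in usb_enable_xhci_ports() above is the whole switchover protocol: only bits that correspond to actually switchable ports latch, so the read-back reports which ports moved. A minimal standalone sketch of that pattern follows; the register model and the four-port mask are hypothetical, purely for illustration, and not Intel-documented behavior.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical config-space register: only bits that map to real,
 * switchable ports are writable; all other bits read back as zero.
 */
#define SWITCHABLE_PORT_MASK	0x0000000fu

static uint32_t xusb2pr;	/* stands in for the dword at offset 0xD0 */

static void cfg_write(uint32_t val)
{
	xusb2pr = val & SWITCHABLE_PORT_MASK;	/* unswitchable bits dropped */
}

static uint32_t cfg_read(void)
{
	return xusb2pr;
}

int main(void)
{
	/* Request routing for every possible port... */
	cfg_write(0xffffffff);
	/* ...then read back to learn which ports actually switched. */
	printf("USB 2.0 ports now routed to xHCI: 0x%x\n", cfg_read());
	return 0;	/* prints 0xf under this hypothetical mask */
}

The same write-then-verify idea applies to USB3_PSSEN at offset 0xD8; the driver never needs to know the switchable-port mask in advance.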
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 6ae9f78e9938..b1002a8ef96f 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -8,6 +8,8 @@ int usb_amd_find_chipset_info(void);
 void usb_amd_dev_put(void);
 void usb_amd_quirk_pll_disable(void);
 void usb_amd_quirk_pll_enable(void);
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
 #else
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cbc4d491e626..c408e9f6a707 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -118,6 +118,12 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 	/* AMD PLL quirk */
 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
 		xhci->quirks |= XHCI_AMD_PLL_FIX;
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+			pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+		xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+		xhci->limit_active_eps = 64;
+	}
 
 	/* Make sure the HC is halted. */
 	retval = xhci_halt(xhci);
@@ -242,8 +248,28 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
 	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
 	int			retval = 0;
 
+	/* The BIOS on systems with the Intel Panther Point chipset may or may
+	 * not support xHCI natively.  That means that during system resume, it
+	 * may switch the ports back to EHCI so that users can use their
+	 * keyboard to select a kernel from GRUB after resume from hibernate.
+	 *
+	 * The BIOS is supposed to remember whether the OS had xHCI ports
+	 * enabled before resume, and switch the ports back to xHCI when the
+	 * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+	 * writers.
+	 *
+	 * Unconditionally switch the ports back to xHCI after a system resume.
+	 * We can't tell whether the EHCI or xHCI controller will be resumed
+	 * first, so we have to do the port switchover in both drivers.  Writing
+	 * a '1' to the port switchover registers should have no effect if the
+	 * port was already switched over.
+	 */
+	if (usb_is_intel_switchable_xhci(pdev))
+		usb_enable_xhci_ports(pdev);
+
 	retval = xhci_resume(xhci, hibernated);
 	return retval;
 }
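Note that this resume hook and the EHCI one above carry the same comment: resume ordering between the two controllers is undefined, so correctness relies on the switchover write being idempotent. A toy illustration of why calling the switchover from both resume paths is safe (names and values are illustrative only):

#include <stdio.h>

static unsigned int ports_on_xhci;	/* toy switchover register */

static void switch_ports_to_xhci(void)
{
	/* Writing '1' to an already-switched port is a no-op, so this
	 * can run from either resume path, in either order.
	 */
	ports_on_xhci = 0xf;
}

int main(void)
{
	switch_ports_to_xhci();	/* e.g. ehci_pci_resume() runs first */
	switch_ports_to_xhci();	/* ...then xhci_pci_resume(), or vice versa */
	printf("ports on xHCI: 0x%x\n", ports_on_xhci);	/* still 0xf */
	return 0;
}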
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 237a765f8d18..cc1485bfed38 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -167,12 +167,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 		next = ring->dequeue;
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
-	if (ring == xhci->event_ring)
-		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
-	else if (ring == xhci->cmd_ring)
-		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
-	else
-		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -248,12 +242,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		next = ring->enqueue;
 	}
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
-	if (ring == xhci->event_ring)
-		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
-	else if (ring == xhci->cmd_ring)
-		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
-	else
-		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
 }
 
 /*
@@ -636,13 +624,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 		}
 	}
 	usb_hcd_unlink_urb_from_ep(hcd, urb);
-	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
 
 	spin_unlock(&xhci->lock);
 	usb_hcd_giveback_urb(hcd, urb, status);
 	xhci_urb_free_priv(xhci, urb_priv);
 	spin_lock(&xhci->lock);
-	xhci_dbg(xhci, "%s URB given back\n", adjective);
 	}
 }
 
@@ -692,6 +678,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
+		ep->stopped_td = NULL;
+		ep->stopped_trb = NULL;
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
@@ -1093,8 +1081,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		complete(&xhci->addr_dev);
 		break;
 	case TRB_TYPE(TRB_DISABLE_SLOT):
-		if (xhci->devs[slot_id])
+		if (xhci->devs[slot_id]) {
+			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+				/* Delete default control endpoint resources */
+				xhci_free_device_endpoint_resources(xhci,
+						xhci->devs[slot_id], true);
 			xhci_free_virt_device(xhci, slot_id);
+		}
 		break;
 	case TRB_TYPE(TRB_CONFIG_EP):
 		virt_dev = xhci->devs[slot_id];
@@ -1630,7 +1623,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1630 "without IOC set??\n"); 1623 "without IOC set??\n");
1631 *status = -ESHUTDOWN; 1624 *status = -ESHUTDOWN;
1632 } else { 1625 } else {
1633 xhci_dbg(xhci, "Successful control transfer!\n");
1634 *status = 0; 1626 *status = 0;
1635 } 1627 }
1636 break; 1628 break;
@@ -1727,7 +1719,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
 		frame->status = 0;
-		xhci_dbg(xhci, "Successful isoc transfer!\n");
 		break;
 	case COMP_SHORT_TX:
 		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
@@ -1837,12 +1828,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			else
 				*status = 0;
 		} else {
-			if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
-				xhci_dbg(xhci, "Successful bulk "
-						"transfer!\n");
-			else
-				xhci_dbg(xhci, "Successful interrupt "
-						"transfer!\n");
 			*status = 0;
 		}
 		break;
@@ -1856,11 +1841,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		/* Others already handled above */
 		break;
 	}
-	xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
-			"%d bytes untransferred\n",
-			td->urb->ep->desc.bEndpointAddress,
-			td->urb->transfer_buffer_length,
-			TRB_LEN(le32_to_cpu(event->transfer_len)));
+	if (trb_comp_code == COMP_SHORT_TX)
+		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
+				"%d bytes untransferred\n",
+				td->urb->ep->desc.bEndpointAddress,
+				td->urb->transfer_buffer_length,
+				TRB_LEN(le32_to_cpu(event->transfer_len)));
 	/* Fast path - was this the last TRB in the TD for this URB? */
 	if (event_trb == td->last_trb) {
 		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
@@ -1954,7 +1940,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 
 	/* Endpoint ID is 1 based, our index is zero based */
 	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
-	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
 	ep = &xdev->eps[ep_index];
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -2081,6 +2066,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (!event_seg) {
 			if (!ep->skip ||
 					!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+				/* Some host controllers give a spurious
+				 * successful event after a short transfer.
+				 * Ignore it.
+				 */
+				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+						ep_ring->last_td_was_short) {
+					ep_ring->last_td_was_short = false;
+					ret = 0;
+					goto cleanup;
+				}
 				/* HC is busted, give up! */
 				xhci_err(xhci,
 					"ERROR Transfer event TRB DMA ptr not "
@@ -2091,6 +2086,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			ret = skip_isoc_td(xhci, td, event, ep, &status);
 			goto cleanup;
 		}
+		if (trb_comp_code == COMP_SHORT_TX)
+			ep_ring->last_td_was_short = true;
+		else
+			ep_ring->last_td_was_short = false;
 
 		if (ep->skip) {
 			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
@@ -2149,9 +2148,15 @@ cleanup:
 		xhci_urb_free_priv(xhci, urb_priv);
 
 		usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
-		xhci_dbg(xhci, "Giveback URB %p, len = %d, "
-				"status = %d\n",
-				urb, urb->actual_length, status);
+		if ((urb->actual_length != urb->transfer_buffer_length &&
+					(urb->transfer_flags &
+					 URB_SHORT_NOT_OK)) ||
+				status != 0)
+			xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+					"expected = %x, status = %d\n",
+					urb, urb->actual_length,
+					urb->transfer_buffer_length,
+					status);
 		spin_unlock(&xhci->lock);
 		usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
 		spin_lock(&xhci->lock);
@@ -2180,7 +2185,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	int update_ptrs = 1;
 	int ret;
 
-	xhci_dbg(xhci, "In %s\n", __func__);
 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
 		xhci->error_bitmask |= 1 << 1;
 		return 0;
@@ -2193,7 +2197,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 		xhci->error_bitmask |= 1 << 2;
 		return 0;
 	}
-	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
 
 	/*
 	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2203,20 +2206,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	/* FIXME: Handle more event types. */
 	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
 	case TRB_TYPE(TRB_COMPLETION):
-		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
 		handle_cmd_completion(xhci, &event->event_cmd);
-		xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
 		break;
 	case TRB_TYPE(TRB_PORT_STATUS):
-		xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
 		handle_port_status(xhci, event);
-		xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
 		update_ptrs = 0;
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
 		ret = handle_tx_event(xhci, &event->trans_event);
-		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
 		if (ret < 0)
 			xhci->error_bitmask |= 1 << 9;
 		else
@@ -2273,16 +2270,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 		spin_unlock(&xhci->lock);
 		return IRQ_NONE;
 	}
-	xhci_dbg(xhci, "op reg status = %08x\n", status);
-	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
-	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
-			(unsigned long long)
-			xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
-			lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
-			upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
-			(unsigned int) le32_to_cpu(trb->link.intr_target),
-			(unsigned int) le32_to_cpu(trb->link.control));
-
 	if (status & STS_FATAL) {
 		xhci_warn(xhci, "WARNING: Host System Error\n");
 		xhci_halt(xhci);
@@ -2397,7 +2384,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
-	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
 	switch (ep_state) {
 	case EP_STATE_DISABLED:
 		/*
@@ -2434,7 +2420,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		struct xhci_ring *ring = ep_ring;
 		union xhci_trb *next;
 
-		xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
 		next = ring->enqueue;
 
 		while (last_trb(xhci, ring, ring->enq_seg, next)) {
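The spurious-success quirk added above reduces to one bit of state per ring: remember whether the last TD completed short, and if so, silently swallow exactly one follow-up event whose TRB pointer matches no queued TD. A self-contained sketch of that state machine, with illustrative names rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

enum comp_code { COMP_OK, COMP_SHORT };

struct ring_state {
	bool quirk_spurious_success;	/* XHCI_SPURIOUS_SUCCESS set? */
	bool last_td_was_short;
};

/* Returns true if the event should be dropped as a spurious success. */
static bool filter_event(struct ring_state *r, bool matches_a_td,
		enum comp_code code)
{
	if (!matches_a_td) {
		if (r->quirk_spurious_success && r->last_td_was_short) {
			r->last_td_was_short = false;	/* swallow one event */
			return true;
		}
		return false;	/* genuinely unexpected: host is busted */
	}
	r->last_td_was_short = (code == COMP_SHORT);
	return false;
}

int main(void)
{
	struct ring_state r = { .quirk_spurious_success = true };

	filter_event(&r, true, COMP_SHORT);	/* a short TD completes */
	printf("%d\n", filter_event(&r, false, COMP_OK));	/* 1: dropped */
	printf("%d\n", filter_event(&r, false, COMP_OK));	/* 0: real error */
	return 0;
}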
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8f2a56ece44f..d9660eb97eb9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1314,8 +1314,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
-	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
 		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1401,6 +1403,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return ret;
 	}
 	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
 	last_ctx = xhci_last_valid_endpoint(added_ctxs);
@@ -1578,6 +1582,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
 	return ret;
 }
 
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	/* Ignore the slot flag (bit 0), and the default control endpoint flag
+	 * (bit 1).  The default control endpoint is added during the Address
+	 * Device command and is never removed until the slot is disabled.
+	 */
+	valid_add_flags = ctrl_ctx->add_flags >> 2;
+	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+	/* Use hweight32 to count the number of ones in the add flags, or
+	 * the number of endpoints added.  Don't count endpoints that are
+	 * changed (both added and dropped).
+	 */
+	return hweight32(valid_add_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u32 valid_add_flags;
+	u32 valid_drop_flags;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+	valid_add_flags = ctrl_ctx->add_flags >> 2;
+	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+	return hweight32(valid_drop_flags) -
+		hweight32(valid_add_flags & valid_drop_flags);
+}
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes.  We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes, because otherwise we could
+ * oversubscribe the host in this case:
+ *
+ *  - the first configure endpoint command drops more endpoints than it adds
+ *  - a second configure endpoint command that adds more endpoints is queued
+ *  - the first configure endpoint command fails, so the config is unchanged
+ *  - the second command may succeed, even though there aren't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 added_eps;
+
+	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+		xhci_dbg(xhci, "Not enough ep ctxs: "
+				"%u active, need to add %u, limit is %u.\n",
+				xhci->num_active_eps, added_eps,
+				xhci->limit_active_eps);
+		return -ENOMEM;
+	}
+	xhci->num_active_eps += added_eps;
+	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+			xhci->num_active_eps);
+	return 0;
+}
+
+/*
+ * The xHC failed the configure endpoint command for some reason other than
+ * running out of resources, so we need to revert the resources that the failed
+ * configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 num_failed_eps;
+
+	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+	xhci->num_active_eps -= num_failed_eps;
+	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+			num_failed_eps,
+			xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx)
+{
+	u32 num_dropped_eps;
+
+	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+	xhci->num_active_eps -= num_dropped_eps;
+	if (num_dropped_eps)
+		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+				num_dropped_eps,
+				xhci->num_active_eps);
+}
+
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
  */
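The arithmetic in the two counting helpers deserves a worked example: an endpoint that is both added and dropped in the same input context is merely changed, so it must not count against the endpoint-context budget. A standalone check of the counting, with hweight32() replaced by the GCC builtin and made-up flag values:

#include <stdio.h>

/* Userspace stand-in for the kernel's hweight32(). */
static unsigned int hweight32(unsigned int w)
{
	return __builtin_popcount(w);
}

int main(void)
{
	/* Input control context flags, one bit per context.  Bits 0-1
	 * (slot and default control endpoint) are stripped first, as in
	 * the helpers above.
	 */
	unsigned int add_flags  = 0x0000001c;	/* contexts 2, 3, 4 added */
	unsigned int drop_flags = 0x0000000c;	/* contexts 2, 3 dropped */
	unsigned int valid_add  = add_flags >> 2;
	unsigned int valid_drop = drop_flags >> 2;

	/* Contexts 2 and 3 are changed (added AND dropped), so only
	 * context 4 consumes a new endpoint context on the host.
	 */
	printf("new: %u\n", hweight32(valid_add) -
			hweight32(valid_add & valid_drop));	/* new: 1 */
	printf("dropped: %u\n", hweight32(valid_drop) -
			hweight32(valid_add & valid_drop));	/* dropped: 0 */
	return 0;
}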
@@ -1598,6 +1709,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	virt_dev = xhci->devs[udev->slot_id];
 	if (command) {
 		in_ctx = command->in_ctx;
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+				xhci_reserve_host_resources(xhci, in_ctx)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			return -ENOMEM;
+		}
+
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
 		command->command_trb = xhci->cmd_ring->enqueue;
@@ -1613,6 +1733,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
 		in_ctx = virt_dev->in_ctx;
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+				xhci_reserve_host_resources(xhci, in_ctx)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			return -ENOMEM;
+		}
 		cmd_completion = &virt_dev->cmd_completion;
 		cmd_status = &virt_dev->cmd_status;
 	}
@@ -1627,6 +1755,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	if (ret < 0) {
 		if (command)
 			list_del(&command->cmd_list);
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+			xhci_free_host_resources(xhci, in_ctx);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
 		return -ENOMEM;
@@ -1649,8 +1779,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	}
 
 	if (!ctx_change)
-		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
-	return xhci_evaluate_context_result(xhci, udev, cmd_status);
+		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+	else
+		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* If the command failed, remove the reserved resources.
+		 * Otherwise, clean up the estimate to include dropped eps.
+		 */
+		if (ret)
+			xhci_free_host_resources(xhci, in_ctx);
+		else
+			xhci_finish_resource_reservation(xhci, in_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	return ret;
 }
 
 /* Called after one or more calls to xhci_add_endpoint() or
@@ -1676,6 +1820,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ENODEV;
 
 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
@@ -2266,6 +2412,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
 }
 
 /*
+ * Deletes endpoint resources for endpoints that were active before a Reset
+ * Device command, or a Disable Slot command.  The Reset Device command leaves
+ * the control endpoint intact, whereas the Disable Slot command deletes it.
+ *
+ * Must be called with xhci->lock held.
+ */
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+	struct xhci_virt_device *virt_dev, bool drop_control_ep)
+{
+	int i;
+	unsigned int num_dropped_eps = 0;
+	unsigned int drop_flags = 0;
+
+	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
+		if (virt_dev->eps[i].ring) {
+			drop_flags |= 1 << i;
+			num_dropped_eps++;
+		}
+	}
+	xhci->num_active_eps -= num_dropped_eps;
+	if (num_dropped_eps)
+		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
+				"%u now active.\n",
+				num_dropped_eps, drop_flags,
+				xhci->num_active_eps);
+}
+
+/*
  * This submits a Reset Device Command, which will set the device state to 0,
  * set the device address to 0, and disable all the endpoints except the default
  * control endpoint.  The USB core should come back and call
@@ -2406,6 +2580,14 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 		goto command_cleanup;
 	}
 
+	/* Free up host controller endpoint resources */
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		/* Don't delete the default control endpoint resources */
+		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+
 	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
 	last_freed_endpoint = 1;
 	for (i = 1; i < 31; ++i) {
@@ -2479,6 +2661,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 }
 
 /*
+ * Checks if we have enough host controller resources for the default control
+ * endpoint.
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+{
+	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
+		xhci_dbg(xhci, "Not enough ep ctxs: "
+				"%u active, need to add 1, limit is %u.\n",
+				xhci->num_active_eps, xhci->limit_active_eps);
+		return -ENOMEM;
+	}
+	xhci->num_active_eps += 1;
+	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
+			xhci->num_active_eps);
+	return 0;
+}
+
+
+/*
  * Returns 0 if the xHC ran out of device slots, the Enable Slot command
  * timed out, or allocating memory failed. Returns 1 on success.
  */
@@ -2513,24 +2716,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		xhci_err(xhci, "Error while assigning device slot ID\n");
 		return 0;
 	}
-	/* xhci_alloc_virt_device() does not touch rings; no need to lock.
-	 * Use GFP_NOIO, since this function can be called from
+
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+		spin_lock_irqsave(&xhci->lock, flags);
+		ret = xhci_reserve_host_control_ep_resources(xhci);
+		if (ret) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			xhci_warn(xhci, "Not enough host resources, "
+					"active endpoint contexts = %u\n",
+					xhci->num_active_eps);
+			goto disable_slot;
+		}
+		spin_unlock_irqrestore(&xhci->lock, flags);
+	}
+	/* Use GFP_NOIO, since this function can be called from
 	 * xhci_discover_or_reset_device(), which may be called as part of
 	 * mass storage driver error handling.
 	 */
 	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
-		/* Disable slot, if we can do it without mem alloc */
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
-		spin_lock_irqsave(&xhci->lock, flags);
-		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
-			xhci_ring_cmd_db(xhci);
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		return 0;
+		goto disable_slot;
 	}
 	udev->slot_id = xhci->slot_id;
 	/* Is this a LS or FS device under a HS hub? */
 	/* Hub or peripheral? */
 	return 1;
+
+disable_slot:
+	/* Disable slot, if we can do it without mem alloc */
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+		xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return 0;
 }
 
 /*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e12db7cfb9bb..ac0196e7fcf1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1123,6 +1123,7 @@ struct xhci_ring {
 	 */
 	u32			cycle_state;
 	unsigned int		stream_id;
+	bool			last_td_was_short;
 };
 
 struct xhci_erst_entry {
@@ -1290,6 +1291,19 @@ struct xhci_hcd {
 #define XHCI_RESET_EP_QUIRK	(1 << 1)
 #define XHCI_NEC_HOST		(1 << 2)
 #define XHCI_AMD_PLL_FIX	(1 << 3)
+#define XHCI_SPURIOUS_SUCCESS	(1 << 4)
+/*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle.  Ideally, they would signal that they can't handle
+ * any more endpoint contexts by returning a Resource Error for the Configure
+ * Endpoint command, but they don't.  Instead they expect software to keep
+ * track of the number of active endpoints for them, across configure endpoint
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+#define XHCI_EP_LIMIT_QUIRK	(1 << 5)
+	unsigned int		num_active_eps;
+	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
 	struct xhci_bus_state	bus_state[2];
 	/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
@@ -1338,9 +1352,6 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
 static inline void xhci_writel(struct xhci_hcd *xhci,
 		const unsigned int val, __le32 __iomem *regs)
 {
-	xhci_dbg(xhci,
-			"`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
-			regs, val);
 	writel(val, regs);
 }
 
@@ -1368,9 +1379,6 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
 	u32 val_lo = lower_32_bits(val);
 	u32 val_hi = upper_32_bits(val);
 
-	xhci_dbg(xhci,
-			"`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
-			regs, (long unsigned int) val);
 	writel(val_lo, ptr);
 	writel(val_hi, ptr + 1);
 }
@@ -1439,6 +1447,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
 		struct xhci_ep_ctx *ep_ctx,
 		struct xhci_virt_ep *ep);
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+	struct xhci_virt_device *virt_dev, bool drop_control_ep);
 struct xhci_ring *xhci_dma_to_transfer_ring(
 		struct xhci_virt_ep *ep,
 		u64 address);
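Since the quirks field is a plain bitmask, turning on the Panther Point behavior costs two OR operations plus a limit assignment in xhci_pci_setup(), and each hot-path check is a single AND. A compile-and-run sketch of the pattern, using the flag values from this header on a trimmed-down stand-in struct:

#include <stdio.h>

#define XHCI_SPURIOUS_SUCCESS	(1 << 4)
#define XHCI_EP_LIMIT_QUIRK	(1 << 5)

/* Trimmed-down stand-in for struct xhci_hcd. */
struct xhci_hcd_lite {
	unsigned int quirks;
	unsigned int num_active_eps;
	unsigned int limit_active_eps;
};

int main(void)
{
	struct xhci_hcd_lite xhci = { 0 };

	/* What xhci_pci_setup() does when it finds the Panther Point xHC. */
	xhci.quirks |= XHCI_SPURIOUS_SUCCESS;
	xhci.quirks |= XHCI_EP_LIMIT_QUIRK;
	xhci.limit_active_eps = 64;

	/* What the reservation paths test before queueing each command. */
	if ((xhci.quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci.num_active_eps + 1 > xhci.limit_active_eps)
		printf("would reject: out of endpoint contexts\n");
	else
		printf("ok: %u of %u contexts in use\n",
				xhci.num_active_eps, xhci.limit_active_eps);
	return 0;
}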
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 24787b751286..a311008af5e1 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2483,6 +2483,7 @@
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX	0x1c5f
 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0	0x1d40
 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1	0x1d41
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI	0x1e31
 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN	0x1e40
 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX	0x1e5f
 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN	0x2310