diff options
author | Sarah Sharp <sarah.a.sharp@linux.intel.com> | 2011-05-11 19:14:58 -0400 |
---|---|---|
committer | Sarah Sharp <sarah.a.sharp@linux.intel.com> | 2011-05-27 15:08:14 -0400 |
commit | 2cf95c18d5069e13c02a8667d91e064df8e17e09 (patch) | |
tree | 05ae0e90ceab6790ccd1e624b695b984c6081f87 /drivers/usb | |
parent | ad808333d8201d53075a11bc8dd83b81f3d68f0b (diff) |
Intel xhci: Limit number of active endpoints to 64.
The Panther Point chipset has an xHCI host controller that has a limit to
the number of active endpoints it can handle. Ideally, it would signal
that it can't handle any more endpoints by returning a Resource Error for
the Configure Endpoint command, but they don't. Instead it needs software
to keep track of the number of active endpoints, across configure endpoint
commands, reset device commands, disable slot commands, and address device
commands.
Add a new endpoint context counter, xhci_hcd->num_active_eps, and use it
to track the number of endpoints the xHC has active. This gets a little
tricky, because commands to change the number of active endpoints can
fail. This patch adds a new xHCI quirk for these Intel hosts, and the new
code should not have any effect on other xHCI host controllers.
Fail a new device allocation if we don't have room for the new default
control endpoint. Use the endpoint ring pointers to determine what
endpoints were active before a Reset Device command or a Disable Slot
command, and drop those once the command completes.
Fail a configure endpoint command if it would add too many new endpoints.
We have to be a bit overzealous here, and only count the number of new
endpoints to be added, without subtracting the number of dropped
endpoints. That's because a second configure endpoint command for a
different device could sneak in before we know if the first command is
completed. If the first command dropped resources, the host controller
fails the command for some reason, and we're nearing the limit of
endpoints, we could end up oversubscribing the host.
To fix this race condition, when evaluating whether a configure endpoint
command will fit in our bandwidth budget, only add the new endpoints to
xhci->num_active_eps, and don't subtract the dropped endpoints. Ignore
changed endpoints (ones that are dropped and then re-added), as that
shouldn't effect the host's endpoint resources. When the configure
endpoint command completes, subtract off the dropped endpoints.
This means some configuration changes may temporarily fail, but it's
always better to under-subscribe than over-subscribe resources.
(Originally my plan had been to push the resource allocation down into the
ring allocation functions. However, that would cause us to allocate
unnecessary resources when endpoints were changed, because the xHCI driver
allocates a new ring for the changed endpoint, and only deletes the old
ring once the Configure Endpoint command succeeds. A further complication
would have been dealing with the per-device endpoint ring cache.)
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Diffstat (limited to 'drivers/usb')
-rw-r--r-- | drivers/usb/host/xhci-pci.c | 2 | ||||
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 7 | ||||
-rw-r--r-- | drivers/usb/host/xhci.c | 232 | ||||
-rw-r--r-- | drivers/usb/host/xhci.h | 14 |
4 files changed, 244 insertions, 11 deletions
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index eafd17fae949..c408e9f6a707 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -121,6 +121,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd) | |||
121 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 121 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
122 | pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { | 122 | pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { |
123 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; | 123 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; |
124 | xhci->quirks |= XHCI_EP_LIMIT_QUIRK; | ||
125 | xhci->limit_active_eps = 64; | ||
124 | } | 126 | } |
125 | 127 | ||
126 | /* Make sure the HC is halted. */ | 128 | /* Make sure the HC is halted. */ |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 56f6c584c651..cc1485bfed38 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1081,8 +1081,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1081 | complete(&xhci->addr_dev); | 1081 | complete(&xhci->addr_dev); |
1082 | break; | 1082 | break; |
1083 | case TRB_TYPE(TRB_DISABLE_SLOT): | 1083 | case TRB_TYPE(TRB_DISABLE_SLOT): |
1084 | if (xhci->devs[slot_id]) | 1084 | if (xhci->devs[slot_id]) { |
1085 | if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) | ||
1086 | /* Delete default control endpoint resources */ | ||
1087 | xhci_free_device_endpoint_resources(xhci, | ||
1088 | xhci->devs[slot_id], true); | ||
1085 | xhci_free_virt_device(xhci, slot_id); | 1089 | xhci_free_virt_device(xhci, slot_id); |
1090 | } | ||
1086 | break; | 1091 | break; |
1087 | case TRB_TYPE(TRB_CONFIG_EP): | 1092 | case TRB_TYPE(TRB_CONFIG_EP): |
1088 | virt_dev = xhci->devs[slot_id]; | 1093 | virt_dev = xhci->devs[slot_id]; |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 58183d2a8089..d9660eb97eb9 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -1582,6 +1582,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | |||
1582 | return ret; | 1582 | return ret; |
1583 | } | 1583 | } |
1584 | 1584 | ||
1585 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, | ||
1586 | struct xhci_container_ctx *in_ctx) | ||
1587 | { | ||
1588 | struct xhci_input_control_ctx *ctrl_ctx; | ||
1589 | u32 valid_add_flags; | ||
1590 | u32 valid_drop_flags; | ||
1591 | |||
1592 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
1593 | /* Ignore the slot flag (bit 0), and the default control endpoint flag | ||
1594 | * (bit 1). The default control endpoint is added during the Address | ||
1595 | * Device command and is never removed until the slot is disabled. | ||
1596 | */ | ||
1597 | valid_add_flags = ctrl_ctx->add_flags >> 2; | ||
1598 | valid_drop_flags = ctrl_ctx->drop_flags >> 2; | ||
1599 | |||
1600 | /* Use hweight32 to count the number of ones in the add flags, or | ||
1601 | * number of endpoints added. Don't count endpoints that are changed | ||
1602 | * (both added and dropped). | ||
1603 | */ | ||
1604 | return hweight32(valid_add_flags) - | ||
1605 | hweight32(valid_add_flags & valid_drop_flags); | ||
1606 | } | ||
1607 | |||
1608 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, | ||
1609 | struct xhci_container_ctx *in_ctx) | ||
1610 | { | ||
1611 | struct xhci_input_control_ctx *ctrl_ctx; | ||
1612 | u32 valid_add_flags; | ||
1613 | u32 valid_drop_flags; | ||
1614 | |||
1615 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
1616 | valid_add_flags = ctrl_ctx->add_flags >> 2; | ||
1617 | valid_drop_flags = ctrl_ctx->drop_flags >> 2; | ||
1618 | |||
1619 | return hweight32(valid_drop_flags) - | ||
1620 | hweight32(valid_add_flags & valid_drop_flags); | ||
1621 | } | ||
1622 | |||
1623 | /* | ||
1624 | * We need to reserve the new number of endpoints before the configure endpoint | ||
1625 | * command completes. We can't subtract the dropped endpoints from the number | ||
1626 | * of active endpoints until the command completes because we can oversubscribe | ||
1627 | * the host in this case: | ||
1628 | * | ||
1629 | * - the first configure endpoint command drops more endpoints than it adds | ||
1630 | * - a second configure endpoint command that adds more endpoints is queued | ||
1631 | * - the first configure endpoint command fails, so the config is unchanged | ||
1632 | * - the second command may succeed, even though there isn't enough resources | ||
1633 | * | ||
1634 | * Must be called with xhci->lock held. | ||
1635 | */ | ||
1636 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, | ||
1637 | struct xhci_container_ctx *in_ctx) | ||
1638 | { | ||
1639 | u32 added_eps; | ||
1640 | |||
1641 | added_eps = xhci_count_num_new_endpoints(xhci, in_ctx); | ||
1642 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { | ||
1643 | xhci_dbg(xhci, "Not enough ep ctxs: " | ||
1644 | "%u active, need to add %u, limit is %u.\n", | ||
1645 | xhci->num_active_eps, added_eps, | ||
1646 | xhci->limit_active_eps); | ||
1647 | return -ENOMEM; | ||
1648 | } | ||
1649 | xhci->num_active_eps += added_eps; | ||
1650 | xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, | ||
1651 | xhci->num_active_eps); | ||
1652 | return 0; | ||
1653 | } | ||
1654 | |||
1655 | /* | ||
1656 | * The configure endpoint was failed by the xHC for some other reason, so we | ||
1657 | * need to revert the resources that failed configuration would have used. | ||
1658 | * | ||
1659 | * Must be called with xhci->lock held. | ||
1660 | */ | ||
1661 | static void xhci_free_host_resources(struct xhci_hcd *xhci, | ||
1662 | struct xhci_container_ctx *in_ctx) | ||
1663 | { | ||
1664 | u32 num_failed_eps; | ||
1665 | |||
1666 | num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx); | ||
1667 | xhci->num_active_eps -= num_failed_eps; | ||
1668 | xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", | ||
1669 | num_failed_eps, | ||
1670 | xhci->num_active_eps); | ||
1671 | } | ||
1672 | |||
1673 | /* | ||
1674 | * Now that the command has completed, clean up the active endpoint count by | ||
1675 | * subtracting out the endpoints that were dropped (but not changed). | ||
1676 | * | ||
1677 | * Must be called with xhci->lock held. | ||
1678 | */ | ||
1679 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, | ||
1680 | struct xhci_container_ctx *in_ctx) | ||
1681 | { | ||
1682 | u32 num_dropped_eps; | ||
1683 | |||
1684 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx); | ||
1685 | xhci->num_active_eps -= num_dropped_eps; | ||
1686 | if (num_dropped_eps) | ||
1687 | xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", | ||
1688 | num_dropped_eps, | ||
1689 | xhci->num_active_eps); | ||
1690 | } | ||
1691 | |||
1585 | /* Issue a configure endpoint command or evaluate context command | 1692 | /* Issue a configure endpoint command or evaluate context command |
1586 | * and wait for it to finish. | 1693 | * and wait for it to finish. |
1587 | */ | 1694 | */ |
@@ -1602,6 +1709,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
1602 | virt_dev = xhci->devs[udev->slot_id]; | 1709 | virt_dev = xhci->devs[udev->slot_id]; |
1603 | if (command) { | 1710 | if (command) { |
1604 | in_ctx = command->in_ctx; | 1711 | in_ctx = command->in_ctx; |
1712 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && | ||
1713 | xhci_reserve_host_resources(xhci, in_ctx)) { | ||
1714 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1715 | xhci_warn(xhci, "Not enough host resources, " | ||
1716 | "active endpoint contexts = %u\n", | ||
1717 | xhci->num_active_eps); | ||
1718 | return -ENOMEM; | ||
1719 | } | ||
1720 | |||
1605 | cmd_completion = command->completion; | 1721 | cmd_completion = command->completion; |
1606 | cmd_status = &command->status; | 1722 | cmd_status = &command->status; |
1607 | command->command_trb = xhci->cmd_ring->enqueue; | 1723 | command->command_trb = xhci->cmd_ring->enqueue; |
@@ -1617,6 +1733,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
1617 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); | 1733 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); |
1618 | } else { | 1734 | } else { |
1619 | in_ctx = virt_dev->in_ctx; | 1735 | in_ctx = virt_dev->in_ctx; |
1736 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && | ||
1737 | xhci_reserve_host_resources(xhci, in_ctx)) { | ||
1738 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1739 | xhci_warn(xhci, "Not enough host resources, " | ||
1740 | "active endpoint contexts = %u\n", | ||
1741 | xhci->num_active_eps); | ||
1742 | return -ENOMEM; | ||
1743 | } | ||
1620 | cmd_completion = &virt_dev->cmd_completion; | 1744 | cmd_completion = &virt_dev->cmd_completion; |
1621 | cmd_status = &virt_dev->cmd_status; | 1745 | cmd_status = &virt_dev->cmd_status; |
1622 | } | 1746 | } |
@@ -1631,6 +1755,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
1631 | if (ret < 0) { | 1755 | if (ret < 0) { |
1632 | if (command) | 1756 | if (command) |
1633 | list_del(&command->cmd_list); | 1757 | list_del(&command->cmd_list); |
1758 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) | ||
1759 | xhci_free_host_resources(xhci, in_ctx); | ||
1634 | spin_unlock_irqrestore(&xhci->lock, flags); | 1760 | spin_unlock_irqrestore(&xhci->lock, flags); |
1635 | xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); | 1761 | xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); |
1636 | return -ENOMEM; | 1762 | return -ENOMEM; |
@@ -1653,8 +1779,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
1653 | } | 1779 | } |
1654 | 1780 | ||
1655 | if (!ctx_change) | 1781 | if (!ctx_change) |
1656 | return xhci_configure_endpoint_result(xhci, udev, cmd_status); | 1782 | ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); |
1657 | return xhci_evaluate_context_result(xhci, udev, cmd_status); | 1783 | else |
1784 | ret = xhci_evaluate_context_result(xhci, udev, cmd_status); | ||
1785 | |||
1786 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | ||
1787 | spin_lock_irqsave(&xhci->lock, flags); | ||
1788 | /* If the command failed, remove the reserved resources. | ||
1789 | * Otherwise, clean up the estimate to include dropped eps. | ||
1790 | */ | ||
1791 | if (ret) | ||
1792 | xhci_free_host_resources(xhci, in_ctx); | ||
1793 | else | ||
1794 | xhci_finish_resource_reservation(xhci, in_ctx); | ||
1795 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1796 | } | ||
1797 | return ret; | ||
1658 | } | 1798 | } |
1659 | 1799 | ||
1660 | /* Called after one or more calls to xhci_add_endpoint() or | 1800 | /* Called after one or more calls to xhci_add_endpoint() or |
@@ -2272,6 +2412,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, | |||
2272 | } | 2412 | } |
2273 | 2413 | ||
2274 | /* | 2414 | /* |
2415 | * Deletes endpoint resources for endpoints that were active before a Reset | ||
2416 | * Device command, or a Disable Slot command. The Reset Device command leaves | ||
2417 | * the control endpoint intact, whereas the Disable Slot command deletes it. | ||
2418 | * | ||
2419 | * Must be called with xhci->lock held. | ||
2420 | */ | ||
2421 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, | ||
2422 | struct xhci_virt_device *virt_dev, bool drop_control_ep) | ||
2423 | { | ||
2424 | int i; | ||
2425 | unsigned int num_dropped_eps = 0; | ||
2426 | unsigned int drop_flags = 0; | ||
2427 | |||
2428 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { | ||
2429 | if (virt_dev->eps[i].ring) { | ||
2430 | drop_flags |= 1 << i; | ||
2431 | num_dropped_eps++; | ||
2432 | } | ||
2433 | } | ||
2434 | xhci->num_active_eps -= num_dropped_eps; | ||
2435 | if (num_dropped_eps) | ||
2436 | xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " | ||
2437 | "%u now active.\n", | ||
2438 | num_dropped_eps, drop_flags, | ||
2439 | xhci->num_active_eps); | ||
2440 | } | ||
2441 | |||
2442 | /* | ||
2275 | * This submits a Reset Device Command, which will set the device state to 0, | 2443 | * This submits a Reset Device Command, which will set the device state to 0, |
2276 | * set the device address to 0, and disable all the endpoints except the default | 2444 | * set the device address to 0, and disable all the endpoints except the default |
2277 | * control endpoint. The USB core should come back and call | 2445 | * control endpoint. The USB core should come back and call |
@@ -2412,6 +2580,14 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
2412 | goto command_cleanup; | 2580 | goto command_cleanup; |
2413 | } | 2581 | } |
2414 | 2582 | ||
2583 | /* Free up host controller endpoint resources */ | ||
2584 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | ||
2585 | spin_lock_irqsave(&xhci->lock, flags); | ||
2586 | /* Don't delete the default control endpoint resources */ | ||
2587 | xhci_free_device_endpoint_resources(xhci, virt_dev, false); | ||
2588 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2589 | } | ||
2590 | |||
2415 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ | 2591 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ |
2416 | last_freed_endpoint = 1; | 2592 | last_freed_endpoint = 1; |
2417 | for (i = 1; i < 31; ++i) { | 2593 | for (i = 1; i < 31; ++i) { |
@@ -2485,6 +2661,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
2485 | } | 2661 | } |
2486 | 2662 | ||
2487 | /* | 2663 | /* |
2664 | * Checks if we have enough host controller resources for the default control | ||
2665 | * endpoint. | ||
2666 | * | ||
2667 | * Must be called with xhci->lock held. | ||
2668 | */ | ||
2669 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) | ||
2670 | { | ||
2671 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { | ||
2672 | xhci_dbg(xhci, "Not enough ep ctxs: " | ||
2673 | "%u active, need to add 1, limit is %u.\n", | ||
2674 | xhci->num_active_eps, xhci->limit_active_eps); | ||
2675 | return -ENOMEM; | ||
2676 | } | ||
2677 | xhci->num_active_eps += 1; | ||
2678 | xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", | ||
2679 | xhci->num_active_eps); | ||
2680 | return 0; | ||
2681 | } | ||
2682 | |||
2683 | |||
2684 | /* | ||
2488 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command | 2685 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command |
2489 | * timed out, or allocating memory failed. Returns 1 on success. | 2686 | * timed out, or allocating memory failed. Returns 1 on success. |
2490 | */ | 2687 | */ |
@@ -2519,24 +2716,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
2519 | xhci_err(xhci, "Error while assigning device slot ID\n"); | 2716 | xhci_err(xhci, "Error while assigning device slot ID\n"); |
2520 | return 0; | 2717 | return 0; |
2521 | } | 2718 | } |
2522 | /* xhci_alloc_virt_device() does not touch rings; no need to lock. | 2719 | |
2523 | * Use GFP_NOIO, since this function can be called from | 2720 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
2721 | spin_lock_irqsave(&xhci->lock, flags); | ||
2722 | ret = xhci_reserve_host_control_ep_resources(xhci); | ||
2723 | if (ret) { | ||
2724 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2725 | xhci_warn(xhci, "Not enough host resources, " | ||
2726 | "active endpoint contexts = %u\n", | ||
2727 | xhci->num_active_eps); | ||
2728 | goto disable_slot; | ||
2729 | } | ||
2730 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2731 | } | ||
2732 | /* Use GFP_NOIO, since this function can be called from | ||
2524 | * xhci_discover_or_reset_device(), which may be called as part of | 2733 | * xhci_discover_or_reset_device(), which may be called as part of |
2525 | * mass storage driver error handling. | 2734 | * mass storage driver error handling. |
2526 | */ | 2735 | */ |
2527 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { | 2736 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { |
2528 | /* Disable slot, if we can do it without mem alloc */ | ||
2529 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); | 2737 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); |
2530 | spin_lock_irqsave(&xhci->lock, flags); | 2738 | goto disable_slot; |
2531 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) | ||
2532 | xhci_ring_cmd_db(xhci); | ||
2533 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2534 | return 0; | ||
2535 | } | 2739 | } |
2536 | udev->slot_id = xhci->slot_id; | 2740 | udev->slot_id = xhci->slot_id; |
2537 | /* Is this a LS or FS device under a HS hub? */ | 2741 | /* Is this a LS or FS device under a HS hub? */ |
2538 | /* Hub or peripherial? */ | 2742 | /* Hub or peripherial? */ |
2539 | return 1; | 2743 | return 1; |
2744 | |||
2745 | disable_slot: | ||
2746 | /* Disable slot, if we can do it without mem alloc */ | ||
2747 | spin_lock_irqsave(&xhci->lock, flags); | ||
2748 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) | ||
2749 | xhci_ring_cmd_db(xhci); | ||
2750 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2751 | return 0; | ||
2540 | } | 2752 | } |
2541 | 2753 | ||
2542 | /* | 2754 | /* |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 5cfeb8614b87..ac0196e7fcf1 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -1292,6 +1292,18 @@ struct xhci_hcd { | |||
1292 | #define XHCI_NEC_HOST (1 << 2) | 1292 | #define XHCI_NEC_HOST (1 << 2) |
1293 | #define XHCI_AMD_PLL_FIX (1 << 3) | 1293 | #define XHCI_AMD_PLL_FIX (1 << 3) |
1294 | #define XHCI_SPURIOUS_SUCCESS (1 << 4) | 1294 | #define XHCI_SPURIOUS_SUCCESS (1 << 4) |
1295 | /* | ||
1296 | * Certain Intel host controllers have a limit to the number of endpoint | ||
1297 | * contexts they can handle. Ideally, they would signal that they can't handle | ||
1298 | * anymore endpoint contexts by returning a Resource Error for the Configure | ||
1299 | * Endpoint command, but they don't. Instead they expect software to keep track | ||
1300 | * of the number of active endpoints for them, across configure endpoint | ||
1301 | * commands, reset device commands, disable slot commands, and address device | ||
1302 | * commands. | ||
1303 | */ | ||
1304 | #define XHCI_EP_LIMIT_QUIRK (1 << 5) | ||
1305 | unsigned int num_active_eps; | ||
1306 | unsigned int limit_active_eps; | ||
1295 | /* There are two roothubs to keep track of bus suspend info for */ | 1307 | /* There are two roothubs to keep track of bus suspend info for */ |
1296 | struct xhci_bus_state bus_state[2]; | 1308 | struct xhci_bus_state bus_state[2]; |
1297 | /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ | 1309 | /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ |
@@ -1435,6 +1447,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, | |||
1435 | void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, | 1447 | void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, |
1436 | struct xhci_ep_ctx *ep_ctx, | 1448 | struct xhci_ep_ctx *ep_ctx, |
1437 | struct xhci_virt_ep *ep); | 1449 | struct xhci_virt_ep *ep); |
1450 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, | ||
1451 | struct xhci_virt_device *virt_dev, bool drop_control_ep); | ||
1438 | struct xhci_ring *xhci_dma_to_transfer_ring( | 1452 | struct xhci_ring *xhci_dma_to_transfer_ring( |
1439 | struct xhci_virt_ep *ep, | 1453 | struct xhci_virt_ep *ep, |
1440 | u64 address); | 1454 | u64 address); |