Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--	drivers/usb/host/xhci.c	399
1 file changed, 397 insertions, 2 deletions
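
For orientation, the two entry points added below (xhci_alloc_streams() and xhci_free_streams()) are reached from class drivers through the USB core rather than called directly. The driver-side sketch that follows is illustrative only and not part of this patch; it assumes the usb_alloc_streams()/usb_free_streams() wrappers and the urb->stream_id field that accompany this stream-support work, and my_setup_streams(), bulk_eps and MY_REQUESTED_STREAMS are made-up names.

/* Hypothetical driver-side sketch; not part of this patch. */
#include <linux/usb.h>

#define MY_REQUESTED_STREAMS	16	/* made-up queue depth */

static int my_setup_streams(struct usb_interface *intf,
		struct usb_host_endpoint **bulk_eps, unsigned int num_eps)
{
	int streams;

	/* Ask for 16 stream IDs on each bulk endpoint.  The HCD may grant
	 * fewer, and the return value excludes stream 0, which is reserved
	 * for the controller.
	 */
	streams = usb_alloc_streams(intf, bulk_eps, num_eps,
			MY_REQUESTED_STREAMS, GFP_NOIO);
	if (streams < 0)
		return streams;	/* e.g. the endpoints don't support streams */

	/* Later URBs select a stream by setting urb->stream_id to a value
	 * in 1..streams before submission; usb_free_streams() undoes this
	 * setup on disconnect.
	 */
	return streams;
}
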
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 077dfcd57dc9..2e370fea9590 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
@@ -726,8 +727,21 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		spin_lock_irqsave(&xhci->lock, flags);
 		if (xhci->xhc_state & XHCI_STATE_DYING)
 			goto dying;
-		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
-				slot_id, ep_index);
+		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+				EP_GETTING_STREAMS) {
+			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+					"is transitioning to using streams.\n");
+			ret = -EINVAL;
+		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+				EP_GETTING_NO_STREAMS) {
+			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+					"is transitioning to "
+					"not having streams.\n");
+			ret = -EINVAL;
+		} else {
+			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
+					slot_id, ep_index);
+		}
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
 		spin_lock_irqsave(&xhci->lock, flags);
@@ -1446,6 +1460,387 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 	xhci_warn(xhci, "FIXME allocate a new ring segment\n");
 }
 
+static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+		struct usb_device *udev, struct usb_host_endpoint *ep,
+		unsigned int slot_id)
+{
+	int ret;
+	unsigned int ep_index;
+	unsigned int ep_state;
+
+	if (!ep)
+		return -EINVAL;
+	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
+	if (ret <= 0)
+		return -EINVAL;
+	if (!ep->ss_ep_comp) {
+		xhci_warn(xhci, "WARN: No SuperSpeed Endpoint Companion"
+				" descriptor for ep 0x%x\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+	if (ep->ss_ep_comp->desc.bmAttributes == 0) {
+		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
+				" descriptor for ep 0x%x does not support streams\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+	if (ep_state & EP_HAS_STREAMS ||
+			ep_state & EP_GETTING_STREAMS) {
+		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
+				"already has streams set up.\n",
+				ep->desc.bEndpointAddress);
+		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
+				"dynamic stream context array reallocation.\n");
+		return -EINVAL;
+	}
+	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
+		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
+				"endpoint 0x%x; URBs are pending.\n",
+				ep->desc.bEndpointAddress);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
+		unsigned int *num_streams, unsigned int *num_stream_ctxs)
+{
+	unsigned int max_streams;
+
+	/* The stream context array size must be a power of two */
+	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
+	/*
+	 * Find out how many primary stream array entries the host controller
+	 * supports. Later we may use secondary stream arrays (similar to 2nd
+	 * level page entries), but that's an optional feature for xHCI host
+	 * controllers. xHCs must support at least 4 stream IDs.
+	 */
+	max_streams = HCC_MAX_PSA(xhci->hcc_params);
+	if (*num_stream_ctxs > max_streams) {
+		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
+				max_streams);
+		*num_stream_ctxs = max_streams;
+		*num_streams = max_streams;
+	}
+}
+
+/* Returns an error code if one of the endpoints already has streams.
+ * This does not change any data structures, it only checks and gathers
+ * information.
+ */
+static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int *num_streams, u32 *changed_ep_bitmask)
+{
+	struct usb_host_ss_ep_comp *ss_ep_comp;
+	unsigned int max_streams;
+	unsigned int endpoint_flag;
+	int i;
+	int ret;
+
+	for (i = 0; i < num_eps; i++) {
+		ret = xhci_check_streams_endpoint(xhci, udev,
+				eps[i], udev->slot_id);
+		if (ret < 0)
+			return ret;
+
+		ss_ep_comp = eps[i]->ss_ep_comp;
+		max_streams = USB_SS_MAX_STREAMS(ss_ep_comp->desc.bmAttributes);
+		if (max_streams < (*num_streams - 1)) {
+			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
+					eps[i]->desc.bEndpointAddress,
+					max_streams);
+			*num_streams = max_streams+1;
+		}
+
+		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
+		if (*changed_ep_bitmask & endpoint_flag)
+			return -EINVAL;
+		*changed_ep_bitmask |= endpoint_flag;
+	}
+	return 0;
+}
+
+static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
+		struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps)
+{
+	u32 changed_ep_bitmask = 0;
+	unsigned int slot_id;
+	unsigned int ep_index;
+	unsigned int ep_state;
+	int i;
+
+	slot_id = udev->slot_id;
+	if (!xhci->devs[slot_id])
+		return 0;
+
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+		/* Are streams already being freed for the endpoint? */
+		if (ep_state & EP_GETTING_NO_STREAMS) {
+			xhci_warn(xhci, "WARN Can't disable streams for "
+					"endpoint 0x%x, "
+					"streams are being disabled already\n",
+					eps[i]->desc.bEndpointAddress);
+			return 0;
+		}
+		/* Are there actually any streams to free? */
+		if (!(ep_state & EP_HAS_STREAMS) &&
+				!(ep_state & EP_GETTING_STREAMS)) {
+			xhci_warn(xhci, "WARN Can't disable streams for "
+					"endpoint 0x%x, "
+					"streams are already disabled!\n",
+					eps[i]->desc.bEndpointAddress);
+			xhci_warn(xhci, "WARN xhci_free_streams() called "
+					"with non-streams endpoint\n");
+			return 0;
+		}
+		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
+	}
+	return changed_ep_bitmask;
+}
+
+/*
+ * The USB device drivers use this function (through the HCD interface in USB
+ * core) to prepare a set of bulk endpoints to use streams. Streams are used to
+ * coordinate mass storage command queueing across multiple endpoints (basically
+ * a stream ID == a task ID).
+ *
+ * Setting up streams involves allocating the same size stream context array
+ * for each endpoint and issuing a configure endpoint command for all endpoints.
+ *
+ * Don't allow the call to succeed if any endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ *
+ * Drivers may get fewer stream IDs than they asked for, if the host controller
+ * hardware or endpoints claim they can't support the number of requested
+ * stream IDs.
+ */
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		unsigned int num_streams, gfp_t mem_flags)
+{
+	int i, ret;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *vdev;
+	struct xhci_command *config_cmd;
+	unsigned int ep_index;
+	unsigned int num_stream_ctxs;
+	unsigned long flags;
+	u32 changed_ep_bitmask = 0;
+
+	if (!eps)
+		return -EINVAL;
+
+	/* Add one to the number of streams requested to account for
+	 * stream 0 that is reserved for xHCI usage.
+	 */
+	num_streams += 1;
+	xhci = hcd_to_xhci(hcd);
+	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
+			num_streams);
+
+	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
+	if (!config_cmd) {
+		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+		return -ENOMEM;
+	}
+
+	/* Check to make sure all endpoints are not already configured for
+	 * streams. While we're at it, find the maximum number of streams that
+	 * all the endpoints will support and check for duplicate endpoints.
+	 */
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
+			num_eps, &num_streams, &changed_ep_bitmask);
+	if (ret < 0) {
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return ret;
+	}
+	if (num_streams <= 1) {
+		xhci_warn(xhci, "WARN: endpoints can't handle "
+				"more than one stream.\n");
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+	vdev = xhci->devs[udev->slot_id];
+	/* Mark each endpoint as being in transition, so
+	 * xhci_urb_enqueue() will reject all URBs.
+	 */
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Setup internal data structures and allocate HW data structures for
+	 * streams (but don't install the HW structures in the input context
+	 * until we're sure all memory allocation succeeded).
+	 */
+	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
+	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
+			num_stream_ctxs, num_streams);
+
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
+				num_stream_ctxs,
+				num_streams, mem_flags);
+		if (!vdev->eps[ep_index].stream_info)
+			goto cleanup;
+		/* Set maxPstreams in endpoint context and update deq ptr to
+		 * point to stream context array. FIXME
+		 */
+	}
+
+	/* Set up the input context for a configure endpoint command. */
+	for (i = 0; i < num_eps; i++) {
+		struct xhci_ep_ctx *ep_ctx;
+
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
+
+		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
+				vdev->out_ctx, ep_index);
+		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
+				vdev->eps[ep_index].stream_info);
+	}
+	/* Tell the HW to drop its old copy of the endpoint context info
+	 * and add the updated copy from the input context.
+	 */
+	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
+			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+
+	/* Issue and wait for the configure endpoint command */
+	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
+			false, false);
+
+	/* xHC rejected the configure endpoint command for some reason, so we
+	 * leave the old ring intact and free our internal streams data
+	 * structure.
+	 */
+	if (ret < 0)
+		goto cleanup;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
+				udev->slot_id, ep_index);
+		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
+	}
+	xhci_free_command(xhci, config_cmd);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Subtract 1 for stream 0, which drivers can't use */
+	return num_streams - 1;
+
+cleanup:
+	/* If it didn't work, free the streams! */
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+		/* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to normal string ring.
+		 */
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+		xhci_endpoint_zero(xhci, vdev, eps[i]);
+	}
+	xhci_free_command(xhci, config_cmd);
+	return -ENOMEM;
+}
+
+/* Transition the endpoint from using streams to being a "normal" endpoint
+ * without streams.
+ *
+ * Modify the endpoint context state, submit a configure endpoint command,
+ * and free all endpoint rings for streams if that completes successfully.
+ */
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+		struct usb_host_endpoint **eps, unsigned int num_eps,
+		gfp_t mem_flags)
+{
+	int i, ret;
+	struct xhci_hcd *xhci;
+	struct xhci_virt_device *vdev;
+	struct xhci_command *command;
+	unsigned int ep_index;
+	unsigned long flags;
+	u32 changed_ep_bitmask;
+
+	xhci = hcd_to_xhci(hcd);
+	vdev = xhci->devs[udev->slot_id];
+
+	/* Set up a configure endpoint command to remove the streams rings */
+	spin_lock_irqsave(&xhci->lock, flags);
+	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
+			udev, eps, num_eps);
+	if (changed_ep_bitmask == 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -EINVAL;
+	}
+
+	/* Use the xhci_command structure from the first endpoint. We may have
+	 * allocated too many, but the driver may call xhci_free_streams() for
+	 * each endpoint it grouped into one call to xhci_alloc_streams().
+	 */
+	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
+	command = vdev->eps[ep_index].stream_info->free_streams_command;
+	for (i = 0; i < num_eps; i++) {
+		struct xhci_ep_ctx *ep_ctx;
+
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
+			EP_GETTING_NO_STREAMS;
+
+		xhci_endpoint_copy(xhci, command->in_ctx,
+				vdev->out_ctx, ep_index);
+		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
+				&vdev->eps[ep_index]);
+	}
+	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
+			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	/* Issue and wait for the configure endpoint command,
+	 * which must succeed.
+	 */
+	ret = xhci_configure_endpoint(xhci, udev, command,
+			false, true);
+
+	/* xHC rejected the configure endpoint command for some reason, so we
+	 * leave the streams rings intact.
+	 */
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	for (i = 0; i < num_eps; i++) {
+		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+		/* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to normal string ring.
+		 */
+		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
+		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return 0;
+}
+
 /*
  * This submits a Reset Device Command, which will set the device state to 0,
  * set the device address to 0, and disable all the endpoints except the default
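
Note that the new xhci_alloc_streams()/xhci_free_streams() callbacks only take effect once the host controller driver advertises them, and that wiring is not shown in this diff. A hedged sketch of what the hookup looks like in a struct hc_driver, with all other fields elided (the struct name here is illustrative):

/* Illustrative sketch only; the real hookup is not part of this file's diff. */
static const struct hc_driver xhci_example_hc_driver = {
	/* ... the usual xHCI hc_driver callbacks ... */
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
};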