author		Sarah Sharp <sarah.a.sharp@linux.intel.com>	2011-09-02 14:05:52 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-09-09 18:52:53 -0400
commit		c29eea621900f18287d50519f72cb9113746d75a (patch)
tree		ad5b4d4e43cba9f9785b23ab6167c271e2b45f42 /drivers/usb/host/xhci.c
parent		2e27980e6eb78114c4ecbaad1ba71836e3887d18 (diff)
xhci: Implement HS/FS/LS bandwidth checking.
Now that we have a bandwidth interval table per root port or TT that
describes the endpoint bandwidth information, we can finally use it to
check whether the bus bandwidth is oversubscribed for a new device
configuration/alternate interface setting.

The complication for this algorithm is that the bit of hardware logic
that creates the bus schedule is only 12-bit logic.  In order to make
sure it can represent the maximum bus bandwidth in 12 bits, it has to
convert the endpoint max packet size and max ESIT payload into
"blocks" (basically a less-precise representation).  The block size for
each speed of device is different, aside from low speed and full speed.
In order to make sure we don't allow a setup where the scheduler might
fail, we also have to do the bandwidth checking in blocks.

After checking that the endpoints fit in the schedule, we store the
bandwidth used for this root port or TT.  If this is a FS/LS device
under an external HS hub, we also update the TT bandwidth and the root
port bandwidth (if this is a newly activated or deactivated TT).

I won't go into the details of the algorithm, as it's pretty well
documented in the comments.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
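[The "blocks" conversion is easiest to see with concrete numbers. Below is a
minimal standalone C sketch of the rounding the driver does; the block sizes
are assumed from the companion xhci.h hunk (not shown in this xhci.c-only
diffstat), and the endpoint sizes are made-up inputs.]

/*
 * Standalone sketch (not kernel code): convert an endpoint's max packet
 * size into the 12-bit "blocks" the xHCI scheduler works in.  Block
 * sizes below are assumed from the companion xhci.h change.
 */
#include <stdio.h>

#define FS_BLOCK	1	/* LS and FS share a block size */
#define HS_BLOCK	4
#define SS_BLOCK	16

/* Round up, as the driver does with DIV_ROUND_UP() */
static unsigned int to_blocks(unsigned int bytes, unsigned int block_size)
{
	return (bytes + block_size - 1) / block_size;
}

int main(void)
{
	/* A 1024-byte HS max packet size costs 256 HS blocks, so it
	 * fits comfortably in 12 bits (max 4095).
	 */
	printf("1024 bytes @ HS = %u blocks\n", to_blocks(1024, HS_BLOCK));
	/* At FS/LS the block size is 1, so blocks equal bytes. */
	printf("64 bytes @ FS   = %u blocks\n", to_blocks(64, FS_BLOCK));
	return 0;
}

[Rounding up means the check can only over-count, which matches the
conservative goal stated in the patch's comments.]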
Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--	drivers/usb/host/xhci.c	268
1 file changed, 266 insertions(+), 2 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 51c4d385b779..40b82f7e4297 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1747,13 +1747,275 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
 				xhci->num_active_eps);
 }
 
-/* Run the algorithm on the bandwidth table.  If this table is part of a
- * TT, see if we need to update the number of active TTs.
+unsigned int xhci_get_block_size(struct usb_device *udev)
+{
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+		return FS_BLOCK;
+	case USB_SPEED_HIGH:
+		return HS_BLOCK;
+	case USB_SPEED_SUPER:
+		return SS_BLOCK;
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+	default:
+		/* Should never happen */
+		return 1;
+	}
+}
+
+unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
+{
+	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
+		return LS_OVERHEAD;
+	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
+		return FS_OVERHEAD;
+	return HS_OVERHEAD;
+}
+
+/* If we are changing a LS/FS device under a HS hub,
+ * make sure (if we are activating a new TT) that the HS bus has enough
+ * bandwidth for this new TT.
+ */
+static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	struct xhci_interval_bw_table *bw_table;
+	struct xhci_tt_bw_info *tt_info;
+
+	/* Find the bandwidth table for the root port this TT is attached to. */
+	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+	tt_info = virt_dev->tt_info;
+	/* If this TT already had active endpoints, the bandwidth for this TT
+	 * has already been added.  Removing all periodic endpoints (and thus
+	 * making the TT inactive) will only decrease the bandwidth used.
+	 */
+	if (old_active_eps)
+		return 0;
+	if (old_active_eps == 0 && tt_info->active_eps != 0) {
+		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
+			return -ENOMEM;
+		return 0;
+	}
+	/* Not sure why we would have no new active endpoints...
+	 *
+	 * Maybe because of an Evaluate Context change for a hub update or a
+	 * control endpoint 0 max packet size change?
+	 * FIXME: skip the bandwidth calculation in that case.
+	 */
+	return 0;
+}
+
+/*
+ * This algorithm is a very conservative estimate of the worst-case scheduling
+ * scenario for any one interval.  The hardware dynamically schedules the
+ * packets, so we can't tell which microframe could be the limiting factor in
+ * the bandwidth scheduling.  This only takes into account periodic endpoints.
+ *
+ * Obviously, we can't solve an NP-complete problem to find the minimum worst
+ * case scenario.  Instead, we come up with an estimate that is no less than
+ * the worst case bandwidth used for any one microframe, but may be an
+ * over-estimate.
+ *
+ * We walk the requirements for each endpoint by interval, starting with the
+ * smallest interval, and place packets in the schedule where there is only one
+ * possible way to schedule packets for that interval.  In order to simplify
+ * this algorithm, we record the largest max packet size for each interval, and
+ * assume all packets will be that size.
+ *
+ * For interval 0, we obviously must schedule all packets for each interval.
+ * The bandwidth for interval 0 is just the amount of data to be transmitted
+ * (the sum of all max ESIT payload sizes, plus any overhead per packet times
+ * the number of packets).
+ *
+ * For interval 1, we have two possible microframes to schedule those packets
+ * in.  For this algorithm, if we can schedule the same number of packets for
+ * each possible scheduling opportunity (each microframe), we will do so.  The
+ * remaining number of packets will be saved to be transmitted in the gaps in
+ * the next interval's scheduling sequence.
+ *
+ * As we move those remaining packets to be scheduled with interval 2 packets,
+ * we have to double the number of remaining packets to transmit.  This is
+ * because the intervals are actually powers of 2, and we would be transmitting
+ * the previous interval's packets twice in this interval.  We also have to be
+ * sure that when we look at the largest max packet size for this interval, we
+ * also look at the largest max packet size for the remaining packets and take
+ * the greater of the two.
+ *
+ * The algorithm continues to evenly distribute packets in each scheduling
+ * opportunity, and push the remaining packets out, until we get to the last
+ * interval.  Then those packets and their associated overhead are just added
+ * to the bandwidth used.
  */
 static int xhci_check_bw_table(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		int old_active_eps)
 {
+	unsigned int bw_reserved;
+	unsigned int max_bandwidth;
+	unsigned int bw_used;
+	unsigned int block_size;
+	struct xhci_interval_bw_table *bw_table;
+	unsigned int packet_size = 0;
+	unsigned int overhead = 0;
+	unsigned int packets_transmitted = 0;
+	unsigned int packets_remaining = 0;
+	unsigned int i;
+
+	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
+		max_bandwidth = HS_BW_LIMIT;
+		/* Convert percent of bus BW reserved to blocks reserved */
+		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
+	} else {
+		max_bandwidth = FS_BW_LIMIT;
+		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
+	}
+
+	bw_table = virt_dev->bw_table;
+	/* We need to translate the max packet size and max ESIT payloads into
+	 * the units the hardware uses.
+	 */
+	block_size = xhci_get_block_size(virt_dev->udev);
+
+	/* If we are manipulating a LS/FS device under a HS hub, double check
+	 * that the HS bus has enough bandwidth if we are activating a new TT.
+	 */
+	if (virt_dev->tt_info) {
+		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+				virt_dev->real_port);
+		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
+			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
+					"newly activated TT.\n");
+			return -ENOMEM;
+		}
+		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
+				virt_dev->tt_info->slot_id,
+				virt_dev->tt_info->ttport);
+	} else {
+		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+				virt_dev->real_port);
+	}
+
+	/* Add in how much bandwidth will be used for interval zero, or the
+	 * rounded max ESIT payload + number of packets * largest overhead.
+	 */
+	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
+		bw_table->interval_bw[0].num_packets *
+		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
+
+	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
+		unsigned int bw_added;
+		unsigned int largest_mps;
+		unsigned int interval_overhead;
+
+		/*
+		 * How many packets could we transmit in this interval?
+		 * If packets didn't fit in the previous interval, we will need
+		 * to transmit that many packets twice within this interval.
+		 */
+		packets_remaining = 2 * packets_remaining +
+			bw_table->interval_bw[i].num_packets;
+
+		/* Find the largest max packet size of this or the previous
+		 * interval.
+		 */
+		if (list_empty(&bw_table->interval_bw[i].endpoints))
+			largest_mps = 0;
+		else {
+			struct xhci_virt_ep *virt_ep;
+			struct list_head *ep_entry;
+
+			ep_entry = bw_table->interval_bw[i].endpoints.next;
+			virt_ep = list_entry(ep_entry,
+					struct xhci_virt_ep, bw_endpoint_list);
+			/* Convert to blocks, rounding up */
+			largest_mps = DIV_ROUND_UP(
+					virt_ep->bw_info.max_packet_size,
+					block_size);
+		}
+		if (largest_mps > packet_size)
+			packet_size = largest_mps;
+
+		/* Use the larger overhead of this or the previous interval. */
+		interval_overhead = xhci_get_largest_overhead(
+				&bw_table->interval_bw[i]);
+		if (interval_overhead > overhead)
+			overhead = interval_overhead;
+
+		/* How many packets can we evenly distribute across
+		 * (1 << (i + 1)) possible scheduling opportunities?
+		 */
+		packets_transmitted = packets_remaining >> (i + 1);
+
+		/* Add in the bandwidth used for those scheduled packets */
+		bw_added = packets_transmitted * (overhead + packet_size);
+
+		/* How many packets do we have remaining to transmit? */
+		packets_remaining = packets_remaining % (1 << (i + 1));
+
+		/* What largest max packet size should those packets have? */
+		/* If we've transmitted all packets, don't carry over the
+		 * largest packet size.
+		 */
+		if (packets_remaining == 0) {
+			packet_size = 0;
+			overhead = 0;
+		} else if (packets_transmitted > 0) {
+			/* Otherwise if we do have remaining packets, and we've
+			 * scheduled some packets in this interval, take the
+			 * largest max packet size from endpoints with this
+			 * interval.
+			 */
+			packet_size = largest_mps;
+			overhead = interval_overhead;
+		}
+		/* Otherwise carry over packet_size and overhead from the last
+		 * time we had a remainder.
+		 */
+		bw_used += bw_added;
+		if (bw_used > max_bandwidth) {
+			xhci_warn(xhci, "Not enough bandwidth. "
+					"Proposed: %u, Max: %u\n",
+					bw_used, max_bandwidth);
+			return -ENOMEM;
+		}
+	}
+	/*
+	 * Ok, we know we have some packets left over after even-handedly
+	 * scheduling interval 15.  We don't know which microframes they will
+	 * fit into, so we over-schedule and say they will be scheduled every
+	 * microframe.
+	 */
+	if (packets_remaining > 0)
+		bw_used += overhead + packet_size;
+
+	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
+		unsigned int port_index = virt_dev->real_port - 1;
+
+		/* OK, we're manipulating a HS device attached to a
+		 * root port bandwidth domain.  Include the number of active TTs
+		 * in the bandwidth used.
+		 */
+		bw_used += TT_HS_OVERHEAD *
+			xhci->rh_bw[port_index].num_active_tts;
+	}
+
+	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
+		"Available: %u percent\n",
+		bw_used, max_bandwidth, bw_reserved,
+		(max_bandwidth - bw_used - bw_reserved) * 100 /
+		max_bandwidth);
+
+	bw_used += bw_reserved;
+	if (bw_used > max_bandwidth) {
+		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
+				bw_used, max_bandwidth);
+		return -ENOMEM;
+	}
+
+	bw_table->bw_used = bw_used;
 	return 0;
 }
 
@@ -1888,9 +2150,11 @@ void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
 	if (old_active_eps == 0 &&
 			virt_dev->tt_info->active_eps != 0) {
 		rh_bw_info->num_active_tts += 1;
+		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
 	} else if (old_active_eps != 0 &&
 			virt_dev->tt_info->active_eps == 0) {
 		rh_bw_info->num_active_tts -= 1;
+		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
 	}
 }
 
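[To experiment with the scheduling estimate outside the kernel, here is a
hypothetical userspace reduction of the xhci_check_bw_table() loop: the
num_packets[] input and cost_per_packet are invented, per-endpoint packet
sizes and overheads are collapsed into a single cost, and only the
even-distribution and remainder-doubling arithmetic from the patch survives.]

/*
 * Userspace sketch of the even-distribution step in xhci_check_bw_table().
 * num_packets[] is an invented per-interval packet count; packet size and
 * per-packet overhead are folded into one "cost" so the carry-over logic
 * stays visible.  Not kernel code.
 */
#include <stdio.h>

#define MAX_INTERVAL	16	/* assumed, mirrors XHCI_MAX_INTERVAL */

int main(void)
{
	unsigned int num_packets[MAX_INTERVAL] = { 0, 3, 0, 5 };
	unsigned int cost_per_packet = 10;	/* made-up block cost */
	unsigned int packets_remaining = 0;
	unsigned int bw_used = num_packets[0] * cost_per_packet;
	unsigned int i;

	for (i = 1; i < MAX_INTERVAL; i++) {
		unsigned int transmitted;

		/* Leftovers from interval i-1 run twice as often here. */
		packets_remaining = 2 * packets_remaining + num_packets[i];

		/* Spread evenly over the 1 << (i + 1) scheduling slots... */
		transmitted = packets_remaining >> (i + 1);
		bw_used += transmitted * cost_per_packet;

		/* ...and push the remainder into the next interval. */
		packets_remaining = packets_remaining % (1 << (i + 1));
	}
	/* Anything still left is over-scheduled as "every microframe". */
	if (packets_remaining > 0)
		bw_used += cost_per_packet;

	printf("worst-case bandwidth estimate: %u blocks\n", bw_used);
	return 0;
}

[Note how a remainder left at interval i doubles at interval i+1 (the
2 * packets_remaining term), which is exactly why whatever is left after
interval 15 is charged as if it ran in every microframe.]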