about summary refs log tree commit diff stats
path: root/drivers/usb/host/xhci.c
diff options
context:
space:
mode:
authorSarah Sharp <sarah.a.sharp@linux.intel.com>2011-09-02 14:05:50 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2011-09-09 18:52:53 -0400
commit2e27980e6eb78114c4ecbaad1ba71836e3887d18 (patch)
treef87552d67d6a23090ceb97868f7857ccf2ce6f97 /drivers/usb/host/xhci.c
parent9af5d71d8e1fc404ad2ac1b568dafa1a2f9b3be2 (diff)
xhci: Track interval bandwidth tables per port/TT.
In order to update the root port or TT's bandwidth interval table, we will need to keep track of a list of endpoints, per interval. That way we can easily know the new largest max packet size when we have to remove an endpoint. Add an endpoint list for each root port or TT structure, sorted by endpoint max packet size. Insert new endpoints into the list such that the head of the list always has the endpoint with the greatest max packet size. Only insert endpoints and update the interval table with new information when those endpoints are periodic. Make sure to update the number of active TTs when we add or drop periodic endpoints. A TT is only considered active if it has one or more periodic endpoints attached (control and bulk are best effort, and counted in the 20% reserved on the high speed bus). If the number of active endpoints for a TT was zero, and it's now non-zero, increment the number of active TTs for the rootport. If the number of active endpoints was non-zero, and it's now zero, decrement the number of active TTs. We have to be careful when we're checking the bandwidth for a new configuration/alt setting. If we don't have enough bandwidth, we need to be able to "roll back" the bandwidth information stored in the endpoint and the root port/TT interval bandwidth table. We can't just create a copy of the interval bandwidth table, modify it, and check the bandwidth with the copy because we have lists of endpoints and entries can't be on more than one list. Instead, we copy the old endpoint bandwidth information, and use it to revert the interval table when the bandwidth check fails. We don't check the bandwidth after endpoints are dropped from the interval table when a device is reset or freed after a disconnect, because having endpoints use less bandwidth should not push the bandwidth usage over the limits. Besides which, we can't fail a device disconnect. 
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--drivers/usb/host/xhci.c255
1 file changed, 254 insertions, 1 deletion
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 827914643f3e..51c4d385b779 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1747,6 +1747,241 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1747 xhci->num_active_eps); 1747 xhci->num_active_eps);
1748} 1748}
1749 1749
1750/* Run the algorithm on the bandwidth table. If this table is part of a
1751 * TT, see if we need to update the number of active TTs.
1752 */
1753static int xhci_check_bw_table(struct xhci_hcd *xhci,
1754 struct xhci_virt_device *virt_dev,
1755 int old_active_eps)
1756{
	/* NOTE(review): stub — always reports "table fits" (returns 0), so
	 * xhci_reserve_bandwidth() never takes its rollback path yet.
	 * Presumably the real bandwidth-checking algorithm arrives in a
	 * follow-on patch — confirm against later commits in this series.
	 */
1757 return 0;
1758}
1759
1760static bool xhci_is_async_ep(unsigned int ep_type)
1761{
1762 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1763 ep_type != ISOC_IN_EP &&
1764 ep_type != INT_IN_EP);
1765}
1766
/* Remove a periodic endpoint's bandwidth contribution from the root port's
 * (or TT's) interval bandwidth table and unlink it from the per-interval
 * endpoint list.  Async (control/bulk) endpoints, and endpoints that were
 * never inserted (list node still empty), are ignored.
 * NOTE(review): callers appear to invoke this under xhci->lock — confirm,
 * since bw_table and tt_info counters are updated non-atomically here.
 */
1767void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
1768 struct xhci_bw_info *ep_bw,
1769 struct xhci_interval_bw_table *bw_table,
1770 struct usb_device *udev,
1771 struct xhci_virt_ep *virt_ep,
1772 struct xhci_tt_bw_info *tt_info)
1773{
1774 struct xhci_interval_bw *interval_bw;
1775 int normalized_interval;
1776
	/* Nothing to undo: async endpoints are never added to the table,
	 * and an empty bw_endpoint_list node means this endpoint was
	 * never inserted (or was already dropped).
	 */
1777 if (xhci_is_async_ep(ep_bw->type) ||
1778 list_empty(&virt_ep->bw_endpoint_list))
1779 return;
1780
1781 /* For LS/FS devices, we need to translate the interval expressed in
1782 * microframes to frames.
1783 */
1784 if (udev->speed == USB_SPEED_HIGH)
1785 normalized_interval = ep_bw->ep_interval;
1786 else
	/* ep_interval is a power-of-two exponent in microframes;
	 * subtracting 3 converts to frames (2^3 microframes = 1 frame).
	 * Assumes LS/FS intervals are stored as >= 3 so the index stays
	 * non-negative — TODO(review): confirm where bw_info is filled in.
	 */
1787 normalized_interval = ep_bw->ep_interval - 3;
1788
	/* Interval-0 endpoints additionally contribute to the dedicated
	 * ESIT payload accumulator; reverse that here.
	 */
1789 if (normalized_interval == 0)
1790 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
1791 interval_bw = &bw_table->interval_bw[normalized_interval];
1792 interval_bw->num_packets -= ep_bw->num_packets;
	/* One fewer endpoint of this speed class paying per-packet overhead. */
1793 switch (udev->speed) {
1794 case USB_SPEED_LOW:
1795 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
1796 break;
1797 case USB_SPEED_FULL:
1798 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
1799 break;
1800 case USB_SPEED_HIGH:
1801 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
1802 break;
1803 case USB_SPEED_SUPER:
1804 case USB_SPEED_UNKNOWN:
1805 case USB_SPEED_WIRELESS:
1806 /* Should never happen because only LS/FS/HS endpoints will get
1807 * added to the endpoint list.
1808 */
1809 return;
1810 }
	/* Endpoint sat behind a TT: one fewer periodic endpoint there. */
1811 if (tt_info)
1812 tt_info->active_eps -= 1;
	/* list_del_init (not list_del) so a repeat drop hits the
	 * list_empty() guard above instead of corrupting the list.
	 */
1813 list_del_init(&virt_ep->bw_endpoint_list);
1814}
1815
/* Mirror of xhci_drop_ep_from_interval_table(): account a periodic
 * endpoint's bandwidth in the root port's (or TT's) interval table and
 * insert it into the per-interval endpoint list, which is kept sorted so
 * the endpoint with the largest max packet size is always at the head.
 * Async (control/bulk) endpoints are ignored.
 */
1816static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
1817 struct xhci_bw_info *ep_bw,
1818 struct xhci_interval_bw_table *bw_table,
1819 struct usb_device *udev,
1820 struct xhci_virt_ep *virt_ep,
1821 struct xhci_tt_bw_info *tt_info)
1822{
1823 struct xhci_interval_bw *interval_bw;
1824 struct xhci_virt_ep *smaller_ep;
1825 int normalized_interval;
1826
	/* Only periodic endpoints are tracked in the interval table. */
1827 if (xhci_is_async_ep(ep_bw->type))
1828 return;
1829
1830 /* For LS/FS devices, we need to translate the interval expressed in
1831 * microframes to frames.
1832 */
1833 if (udev->speed == USB_SPEED_HIGH)
1834 normalized_interval = ep_bw->ep_interval;
1835 else
	/* Power-of-two exponent: -3 converts microframes to frames
	 * (2^3 microframes = 1 frame); same assumption as the drop path
	 * that LS/FS intervals are stored >= 3.
	 */
1836 normalized_interval = ep_bw->ep_interval - 3;
1837
1838 if (normalized_interval == 0)
1839 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
1840 interval_bw = &bw_table->interval_bw[normalized_interval];
1841 interval_bw->num_packets += ep_bw->num_packets;
	/* One more endpoint of this speed class paying per-packet overhead. */
1842 switch (udev->speed) {
1843 case USB_SPEED_LOW:
1844 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
1845 break;
1846 case USB_SPEED_FULL:
1847 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
1848 break;
1849 case USB_SPEED_HIGH:
1850 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
1851 break;
1852 case USB_SPEED_SUPER:
1853 case USB_SPEED_UNKNOWN:
1854 case USB_SPEED_WIRELESS:
1855 /* Should never happen because only LS/FS/HS endpoints will get
1856 * added to the endpoint list.
1857 */
1858 return;
1859 }
1860
	/* Endpoint sits behind a TT: one more periodic endpoint there. */
1861 if (tt_info)
1862 tt_info->active_eps += 1;
1863 /* Insert the endpoint into the list, largest max packet size first. */
1864 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
1865 bw_endpoint_list) {
1866 if (ep_bw->max_packet_size >=
1867 smaller_ep->bw_info.max_packet_size) {
1868 /* Add the new ep before the smaller endpoint */
	/* list_add_tail on a member node inserts immediately
	 * *before* that node, preserving descending order.
	 */
1869 list_add_tail(&virt_ep->bw_endpoint_list,
1870 &smaller_ep->bw_endpoint_list);
1871 return;
1872 }
1873 }
1874 /* Add the new endpoint at the end of the list. */
1875 list_add_tail(&virt_ep->bw_endpoint_list,
1876 &interval_bw->endpoints);
1877}
1878
1879void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
1880 struct xhci_virt_device *virt_dev,
1881 int old_active_eps)
1882{
1883 struct xhci_root_port_bw_info *rh_bw_info;
1884 if (!virt_dev->tt_info)
1885 return;
1886
1887 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
1888 if (old_active_eps == 0 &&
1889 virt_dev->tt_info->active_eps != 0) {
1890 rh_bw_info->num_active_tts += 1;
1891 } else if (old_active_eps != 0 &&
1892 virt_dev->tt_info->active_eps == 0) {
1893 rh_bw_info->num_active_tts -= 1;
1894 }
1895}
1896
/* Apply a configure-endpoint request's bandwidth changes to the interval
 * table and check that the result fits.  Three phases, whose order must not
 * change:
 *   1. snapshot each changed endpoint's bw_info, then drop every dropped-
 *      or-changed endpoint from the interval table (old values);
 *   2. overwrite the stored bw_info from the input context and add every
 *      added-or-changed endpoint back (new values);
 *   3. if the table no longer fits, replay the phases in reverse using the
 *      snapshots to restore the table and the stored bw_info exactly.
 * Returns 0 on success.
 * NOTE(review): failure returns -ENOMEM even though the resource is
 * bandwidth, not memory — matches xhci_configure_endpoint's convention.
 * NOTE(review): appears to run under xhci->lock — confirm at call sites.
 */
1897static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
1898 struct xhci_virt_device *virt_dev,
1899 struct xhci_container_ctx *in_ctx)
1900{
	/* One snapshot slot per possible device endpoint (EP contexts 1-31). */
1901 struct xhci_bw_info ep_bw_info[31];
1902 int i;
1903 struct xhci_input_control_ctx *ctrl_ctx;
1904 int old_active_eps = 0;
1905
	/* SuperSpeed devices never sit behind a TT or share the HS root-port
	 * interval table, so there is nothing to reserve here.
	 */
1906 if (virt_dev->udev->speed == USB_SPEED_SUPER)
1907 return 0;
1908
	/* Remember the TT's periodic-endpoint count so we can detect the
	 * 0 <-> nonzero transition in xhci_update_tt_active_eps() below.
	 */
1909 if (virt_dev->tt_info)
1910 old_active_eps = virt_dev->tt_info->active_eps;
1911
1912 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1913
	/* Phase 1: snapshot and remove the OLD bandwidth of every endpoint
	 * this request touches.
	 */
1914 for (i = 0; i < 31; i++) {
1915 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
1916 continue;
1917
1918 /* Make a copy of the BW info in case we need to revert this */
1919 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
1920 sizeof(ep_bw_info[i]));
1921 /* Drop the endpoint from the interval table if the endpoint is
1922 * being dropped or changed.
1923 */
1924 if (EP_IS_DROPPED(ctrl_ctx, i))
1925 xhci_drop_ep_from_interval_table(xhci,
1926 &virt_dev->eps[i].bw_info,
1927 virt_dev->bw_table,
1928 virt_dev->udev,
1929 &virt_dev->eps[i],
1930 virt_dev->tt_info);
1931 }
1932 /* Overwrite the information stored in the endpoints' bw_info */
1933 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	/* Phase 2: add the NEW bandwidth for every added/changed endpoint. */
1934 for (i = 0; i < 31; i++) {
1935 /* Add any changed or added endpoints to the interval table */
1936 if (EP_IS_ADDED(ctrl_ctx, i))
1937 xhci_add_ep_to_interval_table(xhci,
1938 &virt_dev->eps[i].bw_info,
1939 virt_dev->bw_table,
1940 virt_dev->udev,
1941 &virt_dev->eps[i],
1942 virt_dev->tt_info);
1943 }
1944
1945 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
1946 /* Ok, this fits in the bandwidth we have.
1947 * Update the number of active TTs.
1948 */
1949 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
1950 return 0;
1951 }
1952
	/* Phase 3 (rollback): undo phase 2, restore the snapshots taken in
	 * phase 1, then re-add the old endpoints — leaving the table and
	 * every eps[i].bw_info exactly as they were on entry.
	 */
1953 /* We don't have enough bandwidth for this, revert the stored info. */
1954 for (i = 0; i < 31; i++) {
1955 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
1956 continue;
1957
1958 /* Drop the new copies of any added or changed endpoints from
1959 * the interval table.
1960 */
1961 if (EP_IS_ADDED(ctrl_ctx, i)) {
1962 xhci_drop_ep_from_interval_table(xhci,
1963 &virt_dev->eps[i].bw_info,
1964 virt_dev->bw_table,
1965 virt_dev->udev,
1966 &virt_dev->eps[i],
1967 virt_dev->tt_info);
1968 }
1969 /* Revert the endpoint back to its old information */
1970 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
1971 sizeof(ep_bw_info[i]));
1972 /* Add any changed or dropped endpoints back into the table */
1973 if (EP_IS_DROPPED(ctrl_ctx, i))
1974 xhci_add_ep_to_interval_table(xhci,
1975 &virt_dev->eps[i].bw_info,
1976 virt_dev->bw_table,
1977 virt_dev->udev,
1978 &virt_dev->eps[i],
1979 virt_dev->tt_info);
1980 }
1981 return -ENOMEM;
1982}
1983
1984
1750/* Issue a configure endpoint command or evaluate context command 1985/* Issue a configure endpoint command or evaluate context command
1751 * and wait for it to finish. 1986 * and wait for it to finish.
1752 */ 1987 */
@@ -1779,6 +2014,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1779 xhci->num_active_eps); 2014 xhci->num_active_eps);
1780 return -ENOMEM; 2015 return -ENOMEM;
1781 } 2016 }
2017 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2018 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2019 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2020 xhci_free_host_resources(xhci, in_ctx);
2021 spin_unlock_irqrestore(&xhci->lock, flags);
2022 xhci_warn(xhci, "Not enough bandwidth\n");
2023 return -ENOMEM;
2024 }
1782 2025
1783 if (command) { 2026 if (command) {
1784 cmd_completion = command->completion; 2027 cmd_completion = command->completion;
@@ -1912,7 +2155,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1912 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) 2155 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
1913 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2156 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1914 } 2157 }
1915 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
1916 xhci_zero_in_ctx(xhci, virt_dev); 2158 xhci_zero_in_ctx(xhci, virt_dev);
1917 /* 2159 /*
1918 * Install any rings for completely new endpoints or changed endpoints, 2160 * Install any rings for completely new endpoints or changed endpoints,
@@ -2528,6 +2770,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2528 int timeleft; 2770 int timeleft;
2529 int last_freed_endpoint; 2771 int last_freed_endpoint;
2530 struct xhci_slot_ctx *slot_ctx; 2772 struct xhci_slot_ctx *slot_ctx;
2773 int old_active_eps = 0;
2531 2774
2532 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 2775 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
2533 if (ret <= 0) 2776 if (ret <= 0)
@@ -2669,8 +2912,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2669 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2912 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2670 last_freed_endpoint = i; 2913 last_freed_endpoint = i;
2671 } 2914 }
2915 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
2916 xhci_drop_ep_from_interval_table(xhci,
2917 &virt_dev->eps[i].bw_info,
2918 virt_dev->bw_table,
2919 udev,
2920 &virt_dev->eps[i],
2921 virt_dev->tt_info);
2672 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 2922 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
2673 } 2923 }
2924 /* If necessary, update the number of active TTs on this root port */
2925 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2926
2674 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 2927 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
2675 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 2928 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
2676 ret = 0; 2929 ret = 0;