aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb
diff options
context:
space:
mode:
authorSarah Sharp <sarah.a.sharp@linux.intel.com>2011-09-02 14:05:50 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2011-09-09 18:52:53 -0400
commit2e27980e6eb78114c4ecbaad1ba71836e3887d18 (patch)
treef87552d67d6a23090ceb97868f7857ccf2ce6f97 /drivers/usb
parent9af5d71d8e1fc404ad2ac1b568dafa1a2f9b3be2 (diff)
xhci: Track interval bandwidth tables per port/TT.
In order to update the root port or TT's bandwidth interval table, we will need to keep track of a list of endpoints, per interval. That way we can easily know the new largest max packet size when we have to remove an endpoint. Add an endpoint list for each root port or TT structure, sorted by endpoint max packet size. Insert new endpoints into the list such that the head of the list always has the endpoint with the greatest max packet size. Only insert endpoints and update the interval table with new information when those endpoints are periodic. Make sure to update the number of active TTs when we add or drop periodic endpoints. A TT is only considered active if it has one or more periodic endpoints attached (control and bulk are best effort, and counted in the 20% reserved on the high speed bus). If the number of active endpoints for a TT was zero, and it's now non-zero, increment the number of active TTs for the root port. If the number of active endpoints was non-zero, and it's now zero, decrement the number of active TTs. We have to be careful when we're checking the bandwidth for a new configuration/alt setting. If we don't have enough bandwidth, we need to be able to "roll back" the bandwidth information stored in the endpoint and the root port/TT interval bandwidth table. We can't just create a copy of the interval bandwidth table, modify it, and check the bandwidth with the copy because we have lists of endpoints and entries can't be on more than one list. Instead, we copy the old endpoint bandwidth information, and use it to revert the interval table when the bandwidth check fails. We don't check the bandwidth after endpoints are dropped from the interval table when a device is reset or freed after a disconnect, because having endpoints use less bandwidth should not push the bandwidth usage over the limits. Besides which, we can't fail a device disconnect.
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb')
-rw-r--r--drivers/usb/host/xhci-mem.c26
-rw-r--r--drivers/usb/host/xhci.c255
-rw-r--r--drivers/usb/host/xhci.h15
3 files changed, 293 insertions, 3 deletions
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9451d94b78d9..1c5c9ba141db 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -783,6 +783,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
783{ 783{
784 struct xhci_virt_device *dev; 784 struct xhci_virt_device *dev;
785 int i; 785 int i;
786 int old_active_eps = 0;
786 787
787 /* Slot ID 0 is reserved */ 788 /* Slot ID 0 is reserved */
788 if (slot_id == 0 || !xhci->devs[slot_id]) 789 if (slot_id == 0 || !xhci->devs[slot_id])
@@ -793,15 +794,29 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
793 if (!dev) 794 if (!dev)
794 return; 795 return;
795 796
797 if (dev->tt_info)
798 old_active_eps = dev->tt_info->active_eps;
799
796 for (i = 0; i < 31; ++i) { 800 for (i = 0; i < 31; ++i) {
797 if (dev->eps[i].ring) 801 if (dev->eps[i].ring)
798 xhci_ring_free(xhci, dev->eps[i].ring); 802 xhci_ring_free(xhci, dev->eps[i].ring);
799 if (dev->eps[i].stream_info) 803 if (dev->eps[i].stream_info)
800 xhci_free_stream_info(xhci, 804 xhci_free_stream_info(xhci,
801 dev->eps[i].stream_info); 805 dev->eps[i].stream_info);
806 /* Endpoints on the TT/root port lists should have been removed
807 * when usb_disable_device() was called for the device.
808 * We can't drop them anyway, because the udev might have gone
809 * away by this point, and we can't tell what speed it was.
810 */
811 if (!list_empty(&dev->eps[i].bw_endpoint_list))
812 xhci_warn(xhci, "Slot %u endpoint %u "
813 "not removed from BW list!\n",
814 slot_id, i);
802 } 815 }
803 /* If this is a hub, free the TT(s) from the TT list */ 816 /* If this is a hub, free the TT(s) from the TT list */
804 xhci_free_tt_info(xhci, dev, slot_id); 817 xhci_free_tt_info(xhci, dev, slot_id);
818 /* If necessary, update the number of active TTs on this root port */
819 xhci_update_tt_active_eps(xhci, dev, old_active_eps);
805 820
806 if (dev->ring_cache) { 821 if (dev->ring_cache) {
807 for (i = 0; i < dev->num_rings_cached; i++) 822 for (i = 0; i < dev->num_rings_cached; i++)
@@ -855,6 +870,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
855 for (i = 0; i < 31; i++) { 870 for (i = 0; i < 31; i++) {
856 xhci_init_endpoint_timer(xhci, &dev->eps[i]); 871 xhci_init_endpoint_timer(xhci, &dev->eps[i]);
857 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); 872 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
873 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
858 } 874 }
859 875
860 /* Allocate endpoint 0 ring */ 876 /* Allocate endpoint 0 ring */
@@ -1994,7 +2010,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
1994 __le32 __iomem *addr; 2010 __le32 __iomem *addr;
1995 u32 offset; 2011 u32 offset;
1996 unsigned int num_ports; 2012 unsigned int num_ports;
1997 int i, port_index; 2013 int i, j, port_index;
1998 2014
1999 addr = &xhci->cap_regs->hcc_params; 2015 addr = &xhci->cap_regs->hcc_params;
2000 offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr)); 2016 offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
@@ -2012,8 +2028,14 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2012 xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags); 2028 xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
2013 if (!xhci->rh_bw) 2029 if (!xhci->rh_bw)
2014 return -ENOMEM; 2030 return -ENOMEM;
2015 for (i = 0; i < num_ports; i++) 2031 for (i = 0; i < num_ports; i++) {
2032 struct xhci_interval_bw_table *bw_table;
2033
2016 INIT_LIST_HEAD(&xhci->rh_bw[i].tts); 2034 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2035 bw_table = &xhci->rh_bw[i].bw_table;
2036 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2037 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2038 }
2017 2039
2018 /* 2040 /*
2019 * For whatever reason, the first capability offset is from the 2041 * For whatever reason, the first capability offset is from the
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 827914643f3e..51c4d385b779 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1747,6 +1747,241 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1747 xhci->num_active_eps); 1747 xhci->num_active_eps);
1748} 1748}
1749 1749
1750/* Run the algorithm on the bandwidth table. If this table is part of a
1751 * TT, see if we need to update the number of active TTs.
1752 */
1753static int xhci_check_bw_table(struct xhci_hcd *xhci,
1754 struct xhci_virt_device *virt_dev,
1755 int old_active_eps)
1756{
1757 return 0;
1758}
1759
1760static bool xhci_is_async_ep(unsigned int ep_type)
1761{
1762 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1763 ep_type != ISOC_IN_EP &&
1764 ep_type != INT_IN_EP);
1765}
1766
1767void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
1768 struct xhci_bw_info *ep_bw,
1769 struct xhci_interval_bw_table *bw_table,
1770 struct usb_device *udev,
1771 struct xhci_virt_ep *virt_ep,
1772 struct xhci_tt_bw_info *tt_info)
1773{
1774 struct xhci_interval_bw *interval_bw;
1775 int normalized_interval;
1776
1777 if (xhci_is_async_ep(ep_bw->type) ||
1778 list_empty(&virt_ep->bw_endpoint_list))
1779 return;
1780
1781 /* For LS/FS devices, we need to translate the interval expressed in
1782 * microframes to frames.
1783 */
1784 if (udev->speed == USB_SPEED_HIGH)
1785 normalized_interval = ep_bw->ep_interval;
1786 else
1787 normalized_interval = ep_bw->ep_interval - 3;
1788
1789 if (normalized_interval == 0)
1790 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
1791 interval_bw = &bw_table->interval_bw[normalized_interval];
1792 interval_bw->num_packets -= ep_bw->num_packets;
1793 switch (udev->speed) {
1794 case USB_SPEED_LOW:
1795 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
1796 break;
1797 case USB_SPEED_FULL:
1798 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
1799 break;
1800 case USB_SPEED_HIGH:
1801 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
1802 break;
1803 case USB_SPEED_SUPER:
1804 case USB_SPEED_UNKNOWN:
1805 case USB_SPEED_WIRELESS:
1806 /* Should never happen because only LS/FS/HS endpoints will get
1807 * added to the endpoint list.
1808 */
1809 return;
1810 }
1811 if (tt_info)
1812 tt_info->active_eps -= 1;
1813 list_del_init(&virt_ep->bw_endpoint_list);
1814}
1815
1816static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
1817 struct xhci_bw_info *ep_bw,
1818 struct xhci_interval_bw_table *bw_table,
1819 struct usb_device *udev,
1820 struct xhci_virt_ep *virt_ep,
1821 struct xhci_tt_bw_info *tt_info)
1822{
1823 struct xhci_interval_bw *interval_bw;
1824 struct xhci_virt_ep *smaller_ep;
1825 int normalized_interval;
1826
1827 if (xhci_is_async_ep(ep_bw->type))
1828 return;
1829
1830 /* For LS/FS devices, we need to translate the interval expressed in
1831 * microframes to frames.
1832 */
1833 if (udev->speed == USB_SPEED_HIGH)
1834 normalized_interval = ep_bw->ep_interval;
1835 else
1836 normalized_interval = ep_bw->ep_interval - 3;
1837
1838 if (normalized_interval == 0)
1839 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
1840 interval_bw = &bw_table->interval_bw[normalized_interval];
1841 interval_bw->num_packets += ep_bw->num_packets;
1842 switch (udev->speed) {
1843 case USB_SPEED_LOW:
1844 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
1845 break;
1846 case USB_SPEED_FULL:
1847 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
1848 break;
1849 case USB_SPEED_HIGH:
1850 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
1851 break;
1852 case USB_SPEED_SUPER:
1853 case USB_SPEED_UNKNOWN:
1854 case USB_SPEED_WIRELESS:
1855 /* Should never happen because only LS/FS/HS endpoints will get
1856 * added to the endpoint list.
1857 */
1858 return;
1859 }
1860
1861 if (tt_info)
1862 tt_info->active_eps += 1;
1863 /* Insert the endpoint into the list, largest max packet size first. */
1864 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
1865 bw_endpoint_list) {
1866 if (ep_bw->max_packet_size >=
1867 smaller_ep->bw_info.max_packet_size) {
1868 /* Add the new ep before the smaller endpoint */
1869 list_add_tail(&virt_ep->bw_endpoint_list,
1870 &smaller_ep->bw_endpoint_list);
1871 return;
1872 }
1873 }
1874 /* Add the new endpoint at the end of the list. */
1875 list_add_tail(&virt_ep->bw_endpoint_list,
1876 &interval_bw->endpoints);
1877}
1878
1879void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
1880 struct xhci_virt_device *virt_dev,
1881 int old_active_eps)
1882{
1883 struct xhci_root_port_bw_info *rh_bw_info;
1884 if (!virt_dev->tt_info)
1885 return;
1886
1887 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
1888 if (old_active_eps == 0 &&
1889 virt_dev->tt_info->active_eps != 0) {
1890 rh_bw_info->num_active_tts += 1;
1891 } else if (old_active_eps != 0 &&
1892 virt_dev->tt_info->active_eps == 0) {
1893 rh_bw_info->num_active_tts -= 1;
1894 }
1895}
1896
1897static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
1898 struct xhci_virt_device *virt_dev,
1899 struct xhci_container_ctx *in_ctx)
1900{
1901 struct xhci_bw_info ep_bw_info[31];
1902 int i;
1903 struct xhci_input_control_ctx *ctrl_ctx;
1904 int old_active_eps = 0;
1905
1906 if (virt_dev->udev->speed == USB_SPEED_SUPER)
1907 return 0;
1908
1909 if (virt_dev->tt_info)
1910 old_active_eps = virt_dev->tt_info->active_eps;
1911
1912 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1913
1914 for (i = 0; i < 31; i++) {
1915 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
1916 continue;
1917
1918 /* Make a copy of the BW info in case we need to revert this */
1919 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
1920 sizeof(ep_bw_info[i]));
1921 /* Drop the endpoint from the interval table if the endpoint is
1922 * being dropped or changed.
1923 */
1924 if (EP_IS_DROPPED(ctrl_ctx, i))
1925 xhci_drop_ep_from_interval_table(xhci,
1926 &virt_dev->eps[i].bw_info,
1927 virt_dev->bw_table,
1928 virt_dev->udev,
1929 &virt_dev->eps[i],
1930 virt_dev->tt_info);
1931 }
1932 /* Overwrite the information stored in the endpoints' bw_info */
1933 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
1934 for (i = 0; i < 31; i++) {
1935 /* Add any changed or added endpoints to the interval table */
1936 if (EP_IS_ADDED(ctrl_ctx, i))
1937 xhci_add_ep_to_interval_table(xhci,
1938 &virt_dev->eps[i].bw_info,
1939 virt_dev->bw_table,
1940 virt_dev->udev,
1941 &virt_dev->eps[i],
1942 virt_dev->tt_info);
1943 }
1944
1945 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
1946 /* Ok, this fits in the bandwidth we have.
1947 * Update the number of active TTs.
1948 */
1949 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
1950 return 0;
1951 }
1952
1953 /* We don't have enough bandwidth for this, revert the stored info. */
1954 for (i = 0; i < 31; i++) {
1955 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
1956 continue;
1957
1958 /* Drop the new copies of any added or changed endpoints from
1959 * the interval table.
1960 */
1961 if (EP_IS_ADDED(ctrl_ctx, i)) {
1962 xhci_drop_ep_from_interval_table(xhci,
1963 &virt_dev->eps[i].bw_info,
1964 virt_dev->bw_table,
1965 virt_dev->udev,
1966 &virt_dev->eps[i],
1967 virt_dev->tt_info);
1968 }
1969 /* Revert the endpoint back to its old information */
1970 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
1971 sizeof(ep_bw_info[i]));
1972 /* Add any changed or dropped endpoints back into the table */
1973 if (EP_IS_DROPPED(ctrl_ctx, i))
1974 xhci_add_ep_to_interval_table(xhci,
1975 &virt_dev->eps[i].bw_info,
1976 virt_dev->bw_table,
1977 virt_dev->udev,
1978 &virt_dev->eps[i],
1979 virt_dev->tt_info);
1980 }
1981 return -ENOMEM;
1982}
1983
1984
1750/* Issue a configure endpoint command or evaluate context command 1985/* Issue a configure endpoint command or evaluate context command
1751 * and wait for it to finish. 1986 * and wait for it to finish.
1752 */ 1987 */
@@ -1779,6 +2014,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1779 xhci->num_active_eps); 2014 xhci->num_active_eps);
1780 return -ENOMEM; 2015 return -ENOMEM;
1781 } 2016 }
2017 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2018 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2019 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2020 xhci_free_host_resources(xhci, in_ctx);
2021 spin_unlock_irqrestore(&xhci->lock, flags);
2022 xhci_warn(xhci, "Not enough bandwidth\n");
2023 return -ENOMEM;
2024 }
1782 2025
1783 if (command) { 2026 if (command) {
1784 cmd_completion = command->completion; 2027 cmd_completion = command->completion;
@@ -1912,7 +2155,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1912 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) 2155 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
1913 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2156 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1914 } 2157 }
1915 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
1916 xhci_zero_in_ctx(xhci, virt_dev); 2158 xhci_zero_in_ctx(xhci, virt_dev);
1917 /* 2159 /*
1918 * Install any rings for completely new endpoints or changed endpoints, 2160 * Install any rings for completely new endpoints or changed endpoints,
@@ -2528,6 +2770,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2528 int timeleft; 2770 int timeleft;
2529 int last_freed_endpoint; 2771 int last_freed_endpoint;
2530 struct xhci_slot_ctx *slot_ctx; 2772 struct xhci_slot_ctx *slot_ctx;
2773 int old_active_eps = 0;
2531 2774
2532 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 2775 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
2533 if (ret <= 0) 2776 if (ret <= 0)
@@ -2669,8 +2912,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2669 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2912 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2670 last_freed_endpoint = i; 2913 last_freed_endpoint = i;
2671 } 2914 }
2915 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
2916 xhci_drop_ep_from_interval_table(xhci,
2917 &virt_dev->eps[i].bw_info,
2918 virt_dev->bw_table,
2919 udev,
2920 &virt_dev->eps[i],
2921 virt_dev->tt_info);
2672 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 2922 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
2673 } 2923 }
2924 /* If necessary, update the number of active TTs on this root port */
2925 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2926
2674 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 2927 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
2675 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 2928 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
2676 ret = 0; 2929 ret = 0;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index af15b903e061..050f07b1e790 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -797,7 +797,9 @@ struct xhci_virt_ep {
797 * process the missed tds on the endpoint ring. 797 * process the missed tds on the endpoint ring.
798 */ 798 */
799 bool skip; 799 bool skip;
800 /* Bandwidth checking storage */
800 struct xhci_bw_info bw_info; 801 struct xhci_bw_info bw_info;
802 struct list_head bw_endpoint_list;
801}; 803};
802 804
803enum xhci_overhead_type { 805enum xhci_overhead_type {
@@ -808,6 +810,10 @@ enum xhci_overhead_type {
808 810
809struct xhci_interval_bw { 811struct xhci_interval_bw {
810 unsigned int num_packets; 812 unsigned int num_packets;
813 /* Sorted by max packet size.
814 * Head of the list is the greatest max packet size.
815 */
816 struct list_head endpoints;
811 /* How many endpoints of each speed are present. */ 817 /* How many endpoints of each speed are present. */
812 unsigned int overhead[3]; 818 unsigned int overhead[3];
813}; 819};
@@ -1511,6 +1517,15 @@ unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
1511unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index); 1517unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
1512unsigned int xhci_last_valid_endpoint(u32 added_ctxs); 1518unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
1513void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep); 1519void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
1520void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
1521 struct xhci_bw_info *ep_bw,
1522 struct xhci_interval_bw_table *bw_table,
1523 struct usb_device *udev,
1524 struct xhci_virt_ep *virt_ep,
1525 struct xhci_tt_bw_info *tt_info);
1526void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
1527 struct xhci_virt_device *virt_dev,
1528 int old_active_eps);
1514void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info); 1529void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info);
1515void xhci_update_bw_info(struct xhci_hcd *xhci, 1530void xhci_update_bw_info(struct xhci_hcd *xhci,
1516 struct xhci_container_ctx *in_ctx, 1531 struct xhci_container_ctx *in_ctx,