Diffstat (limited to 'drivers/usb/host/xhci.c')
 -rw-r--r--  drivers/usb/host/xhci.c | 1151
 1 file changed, 1064 insertions(+), 87 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3a0f695138f4..1ff95a0df576 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -175,28 +175,19 @@ int xhci_reset(struct xhci_hcd *xhci)
175 | return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); | 175 | return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); |
176 | } | 176 | } |
177 | 177 | ||
178 | /* | 178 | #ifdef CONFIG_PCI |
179 | * Free IRQs | 179 | static int xhci_free_msi(struct xhci_hcd *xhci) |
180 | * free all IRQs request | ||
181 | */ | ||
182 | static void xhci_free_irq(struct xhci_hcd *xhci) | ||
183 | { | 180 | { |
184 | int i; | 181 | int i; |
185 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | ||
186 | 182 | ||
187 | /* return if using legacy interrupt */ | 183 | if (!xhci->msix_entries) |
188 | if (xhci_to_hcd(xhci)->irq >= 0) | 184 | return -EINVAL; |
189 | return; | ||
190 | |||
191 | if (xhci->msix_entries) { | ||
192 | for (i = 0; i < xhci->msix_count; i++) | ||
193 | if (xhci->msix_entries[i].vector) | ||
194 | free_irq(xhci->msix_entries[i].vector, | ||
195 | xhci_to_hcd(xhci)); | ||
196 | } else if (pdev->irq >= 0) | ||
197 | free_irq(pdev->irq, xhci_to_hcd(xhci)); | ||
198 | 185 | ||
199 | return; | 186 | for (i = 0; i < xhci->msix_count; i++) |
187 | if (xhci->msix_entries[i].vector) | ||
188 | free_irq(xhci->msix_entries[i].vector, | ||
189 | xhci_to_hcd(xhci)); | ||
190 | return 0; | ||
200 | } | 191 | } |
201 | 192 | ||
202 | /* | 193 | /* |
@@ -224,6 +215,28 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
224 | } | 215 | } |
225 | 216 | ||
226 | /* | 217 | /* |
218 | * Free IRQs | ||
219 | * free all requested IRQs | ||
220 | */ | ||
221 | static void xhci_free_irq(struct xhci_hcd *xhci) | ||
222 | { | ||
223 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | ||
224 | int ret; | ||
225 | |||
226 | /* return if using legacy interrupt */ | ||
227 | if (xhci_to_hcd(xhci)->irq >= 0) | ||
228 | return; | ||
229 | |||
230 | ret = xhci_free_msi(xhci); | ||
231 | if (!ret) | ||
232 | return; | ||
233 | if (pdev->irq >= 0) | ||
234 | free_irq(pdev->irq, xhci_to_hcd(xhci)); | ||
235 | |||
236 | return; | ||
237 | } | ||
238 | |||
239 | /* | ||
227 | * Set up MSI-X | 240 | * Set up MSI-X |
228 | */ | 241 | */ |
229 | static int xhci_setup_msix(struct xhci_hcd *xhci) | 242 | static int xhci_setup_msix(struct xhci_hcd *xhci) |
@@ -302,6 +315,72 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
302 | return; | 315 | return; |
303 | } | 316 | } |
304 | 317 | ||
318 | static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) | ||
319 | { | ||
320 | int i; | ||
321 | |||
322 | if (xhci->msix_entries) { | ||
323 | for (i = 0; i < xhci->msix_count; i++) | ||
324 | synchronize_irq(xhci->msix_entries[i].vector); | ||
325 | } | ||
326 | } | ||
327 | |||
328 | static int xhci_try_enable_msi(struct usb_hcd *hcd) | ||
329 | { | ||
330 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
331 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | ||
332 | int ret; | ||
333 | |||
334 | /* | ||
335 | * Some Fresco Logic host controllers advertise MSI, but fail to | ||
336 | * generate interrupts. Don't even try to enable MSI. | ||
337 | */ | ||
338 | if (xhci->quirks & XHCI_BROKEN_MSI) | ||
339 | return 0; | ||
340 | |||
341 | /* unregister the legacy interrupt */ | ||
342 | if (hcd->irq) | ||
343 | free_irq(hcd->irq, hcd); | ||
344 | hcd->irq = -1; | ||
345 | |||
346 | ret = xhci_setup_msix(xhci); | ||
347 | if (ret) | ||
348 | /* fall back to MSI */ | ||
349 | ret = xhci_setup_msi(xhci); | ||
350 | |||
351 | if (!ret) | ||
352 | /* hcd->irq is -1, we have MSI */ | ||
353 | return 0; | ||
354 | |||
355 | /* fall back to legacy interrupt */ | ||
356 | ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, | ||
357 | hcd->irq_descr, hcd); | ||
358 | if (ret) { | ||
359 | xhci_err(xhci, "request interrupt %d failed\n", | ||
360 | pdev->irq); | ||
361 | return ret; | ||
362 | } | ||
363 | hcd->irq = pdev->irq; | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | #else | ||
368 | |||
369 | static int xhci_try_enable_msi(struct usb_hcd *hcd) | ||
370 | { | ||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static void xhci_cleanup_msix(struct xhci_hcd *xhci) | ||
375 | { | ||
376 | } | ||
377 | |||
378 | static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) | ||
379 | { | ||
380 | } | ||
381 | |||
382 | #endif | ||
383 | |||
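The block above concentrates the interrupt setup policy in xhci_try_enable_msi(): try MSI-X first, fall back to plain MSI, and only then request the shared legacy line. A minimal standalone sketch of that fall-through shape (the setup_* helpers below are stand-ins for illustration, not the kernel functions):

    #include <stdio.h>

    /* Stand-ins for the three setup paths; each returns 0 on success. */
    static int setup_msix(void)   { return -1; } /* pretend MSI-X fails */
    static int setup_msi(void)    { return 0;  } /* pretend MSI works */
    static int setup_legacy(void) { return 0;  }

    /* Mirror of the xhci_try_enable_msi() fall-through: try the richest
     * mechanism first, fall back only when the previous one fails. */
    static int try_enable_irq(void)
    {
        if (setup_msix() == 0)
            return 0;            /* MSI-X up, done */
        if (setup_msi() == 0)
            return 0;            /* fell back to MSI */
        return setup_legacy();   /* last resort: shared legacy interrupt */
    }

    int main(void)
    {
        printf("irq setup %s\n", try_enable_irq() == 0 ? "ok" : "failed");
        return 0;
    }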
305 | /* | 384 | /* |
306 | * Initialize memory for HCD and xHC (one-time init). | 385 | * Initialize memory for HCD and xHC (one-time init). |
307 | * | 386 | * |
@@ -316,7 +395,7 @@ int xhci_init(struct usb_hcd *hcd)
316 | 395 | ||
317 | xhci_dbg(xhci, "xhci_init\n"); | 396 | xhci_dbg(xhci, "xhci_init\n"); |
318 | spin_lock_init(&xhci->lock); | 397 | spin_lock_init(&xhci->lock); |
319 | if (link_quirk) { | 398 | if (xhci->hci_version == 0x95 && link_quirk) { |
320 | xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n"); | 399 | xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n"); |
321 | xhci->quirks |= XHCI_LINK_TRB_QUIRK; | 400 | xhci->quirks |= XHCI_LINK_TRB_QUIRK; |
322 | } else { | 401 | } else { |
@@ -413,9 +492,8 @@ int xhci_run(struct usb_hcd *hcd)
413 | { | 492 | { |
414 | u32 temp; | 493 | u32 temp; |
415 | u64 temp_64; | 494 | u64 temp_64; |
416 | u32 ret; | 495 | int ret; |
417 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 496 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
418 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | ||
419 | 497 | ||
420 | /* Start the xHCI host controller running only after the USB 2.0 roothub | 498 | /* Start the xHCI host controller running only after the USB 2.0 roothub |
421 | * is setup. | 499 | * is setup. |
@@ -426,34 +504,10 @@ int xhci_run(struct usb_hcd *hcd)
426 | return xhci_run_finished(xhci); | 504 | return xhci_run_finished(xhci); |
427 | 505 | ||
428 | xhci_dbg(xhci, "xhci_run\n"); | 506 | xhci_dbg(xhci, "xhci_run\n"); |
429 | /* unregister the legacy interrupt */ | ||
430 | if (hcd->irq) | ||
431 | free_irq(hcd->irq, hcd); | ||
432 | hcd->irq = -1; | ||
433 | |||
434 | /* Some Fresco Logic host controllers advertise MSI, but fail to | ||
435 | * generate interrupts. Don't even try to enable MSI. | ||
436 | */ | ||
437 | if (xhci->quirks & XHCI_BROKEN_MSI) | ||
438 | goto legacy_irq; | ||
439 | 507 | ||
440 | ret = xhci_setup_msix(xhci); | 508 | ret = xhci_try_enable_msi(hcd); |
441 | if (ret) | 509 | if (ret) |
442 | /* fall back to msi*/ | 510 | return ret; |
443 | ret = xhci_setup_msi(xhci); | ||
444 | |||
445 | if (ret) { | ||
446 | legacy_irq: | ||
447 | /* fall back to legacy interrupt*/ | ||
448 | ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, | ||
449 | hcd->irq_descr, hcd); | ||
450 | if (ret) { | ||
451 | xhci_err(xhci, "request interrupt %d failed\n", | ||
452 | pdev->irq); | ||
453 | return ret; | ||
454 | } | ||
455 | hcd->irq = pdev->irq; | ||
456 | } | ||
457 | 511 | ||
458 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING | 512 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING |
459 | init_timer(&xhci->event_ring_timer); | 513 | init_timer(&xhci->event_ring_timer); |
@@ -694,7 +748,6 @@ int xhci_suspend(struct xhci_hcd *xhci)
694 | int rc = 0; | 748 | int rc = 0; |
695 | struct usb_hcd *hcd = xhci_to_hcd(xhci); | 749 | struct usb_hcd *hcd = xhci_to_hcd(xhci); |
696 | u32 command; | 750 | u32 command; |
697 | int i; | ||
698 | 751 | ||
699 | spin_lock_irq(&xhci->lock); | 752 | spin_lock_irq(&xhci->lock); |
700 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 753 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
@@ -730,10 +783,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
730 | 783 | ||
731 | /* step 5: remove core well power */ | 784 | /* step 5: remove core well power */ |
732 | /* synchronize irq when using MSI-X */ | 785 | /* synchronize irq when using MSI-X */ |
733 | if (xhci->msix_entries) { | 786 | xhci_msix_sync_irqs(xhci); |
734 | for (i = 0; i < xhci->msix_count; i++) | ||
735 | synchronize_irq(xhci->msix_entries[i].vector); | ||
736 | } | ||
737 | 787 | ||
738 | return rc; | 788 | return rc; |
739 | } | 789 | } |
@@ -945,8 +995,7 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
945 | return -ENODEV; | 995 | return -ENODEV; |
946 | 996 | ||
947 | if (check_virt_dev) { | 997 | if (check_virt_dev) { |
948 | if (!udev->slot_id || !xhci->devs | 998 | if (!udev->slot_id || !xhci->devs[udev->slot_id]) { |
949 | || !xhci->devs[udev->slot_id]) { | ||
950 | printk(KERN_DEBUG "xHCI %s called with unaddressed " | 999 | printk(KERN_DEBUG "xHCI %s called with unaddressed " |
951 | "device\n", func); | 1000 | "device\n", func); |
952 | return -EINVAL; | 1001 | return -EINVAL; |
@@ -987,7 +1036,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
987 | out_ctx = xhci->devs[slot_id]->out_ctx; | 1036 | out_ctx = xhci->devs[slot_id]->out_ctx; |
988 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); | 1037 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
989 | hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); | 1038 | hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); |
990 | max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize); | 1039 | max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); |
991 | if (hw_max_packet_size != max_packet_size) { | 1040 | if (hw_max_packet_size != max_packet_size) { |
992 | xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); | 1041 | xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); |
993 | xhci_dbg(xhci, "Max packet size in usb_device = %d\n", | 1042 | xhci_dbg(xhci, "Max packet size in usb_device = %d\n", |
@@ -1035,6 +1084,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1035 | int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | 1084 | int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) |
1036 | { | 1085 | { |
1037 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 1086 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1087 | struct xhci_td *buffer; | ||
1038 | unsigned long flags; | 1088 | unsigned long flags; |
1039 | int ret = 0; | 1089 | int ret = 0; |
1040 | unsigned int slot_id, ep_index; | 1090 | unsigned int slot_id, ep_index; |
@@ -1065,13 +1115,15 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1065 | if (!urb_priv) | 1115 | if (!urb_priv) |
1066 | return -ENOMEM; | 1116 | return -ENOMEM; |
1067 | 1117 | ||
1118 | buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags); | ||
1119 | if (!buffer) { | ||
1120 | kfree(urb_priv); | ||
1121 | return -ENOMEM; | ||
1122 | } | ||
1123 | |||
1068 | for (i = 0; i < size; i++) { | 1124 | for (i = 0; i < size; i++) { |
1069 | urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags); | 1125 | urb_priv->td[i] = buffer; |
1070 | if (!urb_priv->td[i]) { | 1126 | buffer++; |
1071 | urb_priv->length = i; | ||
1072 | xhci_urb_free_priv(xhci, urb_priv); | ||
1073 | return -ENOMEM; | ||
1074 | } | ||
1075 | } | 1127 | } |
1076 | 1128 | ||
1077 | urb_priv->length = size; | 1129 | urb_priv->length = size; |
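The hunk above replaces one kzalloc() per TD with a single zeroed array covering all of the URB's TDs, so allocation can fail at most once and the whole set is released with one kfree() of td[0]. A hedged user-space model of the pattern (struct names are illustrative):

    #include <stdlib.h>

    struct td { int dummy; };                        /* cf. struct xhci_td */
    struct priv { struct td *td[16]; int length; };  /* cf. urb_priv */

    /* One zeroed allocation backs every TD; td[i] are cursors into it. */
    static int alloc_tds(struct priv *p, int size)
    {
        if (size < 1 || size > 16)
            return -1;
        struct td *buffer = calloc(size, sizeof(*buffer));
        if (!buffer)
            return -1;                 /* a single failure point */
        for (int i = 0; i < size; i++)
            p->td[i] = buffer + i;
        p->length = size;
        return 0;
    }

    /* Freeing is symmetric: one free of the first cursor releases all. */
    static void free_tds(struct priv *p)
    {
        free(p->td[0]);
    }

    int main(void)
    {
        struct priv p;
        if (alloc_tds(&p, 4) == 0)
            free_tds(&p);
        return 0;
    }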
@@ -1747,6 +1799,564 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1747 | xhci->num_active_eps); | 1799 | xhci->num_active_eps); |
1748 | } | 1800 | } |
1749 | 1801 | ||
1802 | unsigned int xhci_get_block_size(struct usb_device *udev) | ||
1803 | { | ||
1804 | switch (udev->speed) { | ||
1805 | case USB_SPEED_LOW: | ||
1806 | case USB_SPEED_FULL: | ||
1807 | return FS_BLOCK; | ||
1808 | case USB_SPEED_HIGH: | ||
1809 | return HS_BLOCK; | ||
1810 | case USB_SPEED_SUPER: | ||
1811 | return SS_BLOCK; | ||
1812 | case USB_SPEED_UNKNOWN: | ||
1813 | case USB_SPEED_WIRELESS: | ||
1814 | default: | ||
1815 | /* Should never happen */ | ||
1816 | return 1; | ||
1817 | } | ||
1818 | } | ||
1819 | |||
1820 | unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) | ||
1821 | { | ||
1822 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) | ||
1823 | return LS_OVERHEAD; | ||
1824 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) | ||
1825 | return FS_OVERHEAD; | ||
1826 | return HS_OVERHEAD; | ||
1827 | } | ||
1828 | |||
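xhci_get_block_size() converts byte counts into per-speed scheduling blocks; a max packet size is then rounded up with DIV_ROUND_UP() against that block size. A small sketch of the conversion, where the block values are assumptions chosen for illustration rather than quotes from xhci.h:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Illustrative per-speed block sizes (see FS_BLOCK/HS_BLOCK/SS_BLOCK
     * in xhci.h for the real values). */
    enum { FS_BLK = 1, HS_BLK = 4, SS_BLK = 16 };

    int main(void)
    {
        unsigned int mps = 1024;  /* a SuperSpeed bulk-sized max packet */

        /* The same payload costs a different number of scheduling
         * blocks depending on the bus it is scheduled on. */
        printf("FS blocks: %u\n", DIV_ROUND_UP(mps, FS_BLK));  /* 1024 */
        printf("HS blocks: %u\n", DIV_ROUND_UP(mps, HS_BLK));  /* 256  */
        printf("SS blocks: %u\n", DIV_ROUND_UP(mps, SS_BLK));  /* 64   */
        return 0;
    }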
1829 | /* If we are changing a LS/FS device under a HS hub, | ||
1830 | * make sure (if we are activating a new TT) that the HS bus has enough | ||
1831 | * bandwidth for this new TT. | ||
1832 | */ | ||
1833 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, | ||
1834 | struct xhci_virt_device *virt_dev, | ||
1835 | int old_active_eps) | ||
1836 | { | ||
1837 | struct xhci_interval_bw_table *bw_table; | ||
1838 | struct xhci_tt_bw_info *tt_info; | ||
1839 | |||
1840 | /* Find the bandwidth table for the root port this TT is attached to. */ | ||
1841 | bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; | ||
1842 | tt_info = virt_dev->tt_info; | ||
1843 | /* If this TT already had active endpoints, the bandwidth for this TT | ||
1844 | * has already been added. Removing all periodic endpoints (and thus | ||
1845 | * making the TT inactive) will only decrease the bandwidth used. | ||
1846 | */ | ||
1847 | if (old_active_eps) | ||
1848 | return 0; | ||
1849 | if (old_active_eps == 0 && tt_info->active_eps != 0) { | ||
1850 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) | ||
1851 | return -ENOMEM; | ||
1852 | return 0; | ||
1853 | } | ||
1854 | /* Not sure why we would have no new active endpoints... | ||
1855 | * | ||
1856 | * Maybe because of an Evaluate Context change for a hub update or a | ||
1857 | * control endpoint 0 max packet size change? | ||
1858 | * FIXME: skip the bandwidth calculation in that case. | ||
1859 | */ | ||
1860 | return 0; | ||
1861 | } | ||
1862 | |||
1863 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, | ||
1864 | struct xhci_virt_device *virt_dev) | ||
1865 | { | ||
1866 | unsigned int bw_reserved; | ||
1867 | |||
1868 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); | ||
1869 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) | ||
1870 | return -ENOMEM; | ||
1871 | |||
1872 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); | ||
1873 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) | ||
1874 | return -ENOMEM; | ||
1875 | |||
1876 | return 0; | ||
1877 | } | ||
1878 | |||
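xhci_check_ss_bw() holds back a fixed percentage of each direction as reserve and fails once accumulated traffic would eat into it. A tiny worked computation under assumed values (the real limits live in xhci.h as SS_BW_LIMIT_IN/SS_BW_LIMIT_OUT and SS_BW_RESERVED):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Assumed numbers for illustration only. */
        unsigned int limit = 3906, reserved_pct = 10;
        unsigned int reserve = DIV_ROUND_UP(reserved_pct * limit, 100);

        /* ss_bw_in may grow to limit - reserve before -ENOMEM. */
        printf("usable IN bandwidth: %u of %u blocks\n",
               limit - reserve, limit);
        return 0;
    }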
1879 | /* | ||
1880 | * This algorithm is a very conservative estimate of the worst-case scheduling | ||
1881 | * scenario for any one interval. The hardware dynamically schedules the | ||
1882 | * packets, so we can't tell which microframe could be the limiting factor in | ||
1883 | * the bandwidth scheduling. This only takes into account periodic endpoints. | ||
1884 | * | ||
1885 | * Obviously, we can't solve an NP complete problem to find the minimum worst | ||
1886 | * case scenario. Instead, we come up with an estimate that is no less than | ||
1887 | * the worst case bandwidth used for any one microframe, but may be an | ||
1888 | * over-estimate. | ||
1889 | * | ||
1890 | * We walk the requirements for each endpoint by interval, starting with the | ||
1891 | * smallest interval, and place packets in the schedule where there is only one | ||
1892 | * possible way to schedule packets for that interval. In order to simplify | ||
1893 | * this algorithm, we record the largest max packet size for each interval, and | ||
1894 | * assume all packets will be that size. | ||
1895 | * | ||
1896 | * For interval 0, we obviously must schedule all packets for each interval. | ||
1897 | * The bandwidth for interval 0 is just the amount of data to be transmitted | ||
1898 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times | ||
1899 | * the number of packets). | ||
1900 | * | ||
1901 | * For interval 1, we have two possible microframes to schedule those packets | ||
1902 | * in. For this algorithm, if we can schedule the same number of packets for | ||
1903 | * each possible scheduling opportunity (each microframe), we will do so. The | ||
1904 | * remaining number of packets will be saved to be transmitted in the gaps in | ||
1905 | * the next interval's scheduling sequence. | ||
1906 | * | ||
1907 | * As we move those remaining packets to be scheduled with interval 2 packets, | ||
1908 | * we have to double the number of remaining packets to transmit. This is | ||
1909 | * because the intervals are actually powers of 2, and we would be transmitting | ||
1910 | * the previous interval's packets twice in this interval. We also have to be | ||
1911 | * sure that when we look at the largest max packet size for this interval, we | ||
1912 | * also look at the largest max packet size for the remaining packets and take | ||
1913 | * the greater of the two. | ||
1914 | * | ||
1915 | * The algorithm continues to evenly distribute packets in each scheduling | ||
1916 | * opportunity, and push the remaining packets out, until we get to the last | ||
1917 | * interval. Then those packets and their associated overhead are just added | ||
1918 | * to the bandwidth used. | ||
1919 | */ | ||
1920 | static int xhci_check_bw_table(struct xhci_hcd *xhci, | ||
1921 | struct xhci_virt_device *virt_dev, | ||
1922 | int old_active_eps) | ||
1923 | { | ||
1924 | unsigned int bw_reserved; | ||
1925 | unsigned int max_bandwidth; | ||
1926 | unsigned int bw_used; | ||
1927 | unsigned int block_size; | ||
1928 | struct xhci_interval_bw_table *bw_table; | ||
1929 | unsigned int packet_size = 0; | ||
1930 | unsigned int overhead = 0; | ||
1931 | unsigned int packets_transmitted = 0; | ||
1932 | unsigned int packets_remaining = 0; | ||
1933 | unsigned int i; | ||
1934 | |||
1935 | if (virt_dev->udev->speed == USB_SPEED_SUPER) | ||
1936 | return xhci_check_ss_bw(xhci, virt_dev); | ||
1937 | |||
1938 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { | ||
1939 | max_bandwidth = HS_BW_LIMIT; | ||
1940 | /* Convert percent of bus BW reserved to blocks reserved */ | ||
1941 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); | ||
1942 | } else { | ||
1943 | max_bandwidth = FS_BW_LIMIT; | ||
1944 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); | ||
1945 | } | ||
1946 | |||
1947 | bw_table = virt_dev->bw_table; | ||
1948 | /* We need to translate the max packet size and max ESIT payloads into | ||
1949 | * the units the hardware uses. | ||
1950 | */ | ||
1951 | block_size = xhci_get_block_size(virt_dev->udev); | ||
1952 | |||
1953 | /* If we are manipulating a LS/FS device under a HS hub, double check | ||
1954 | * that the HS bus has enough bandwidth if we are activating a new TT. | ||
1955 | */ | ||
1956 | if (virt_dev->tt_info) { | ||
1957 | xhci_dbg(xhci, "Recalculating BW for rootport %u\n", | ||
1958 | virt_dev->real_port); | ||
1959 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { | ||
1960 | xhci_warn(xhci, "Not enough bandwidth on HS bus for " | ||
1961 | "newly activated TT.\n"); | ||
1962 | return -ENOMEM; | ||
1963 | } | ||
1964 | xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n", | ||
1965 | virt_dev->tt_info->slot_id, | ||
1966 | virt_dev->tt_info->ttport); | ||
1967 | } else { | ||
1968 | xhci_dbg(xhci, "Recalculating BW for rootport %u\n", | ||
1969 | virt_dev->real_port); | ||
1970 | } | ||
1971 | |||
1972 | /* Add in how much bandwidth will be used for interval zero, or the | ||
1973 | * rounded max ESIT payload + number of packets * largest overhead. | ||
1974 | */ | ||
1975 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + | ||
1976 | bw_table->interval_bw[0].num_packets * | ||
1977 | xhci_get_largest_overhead(&bw_table->interval_bw[0]); | ||
1978 | |||
1979 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { | ||
1980 | unsigned int bw_added; | ||
1981 | unsigned int largest_mps; | ||
1982 | unsigned int interval_overhead; | ||
1983 | |||
1984 | /* | ||
1985 | * How many packets could we transmit in this interval? | ||
1986 | * If packets didn't fit in the previous interval, we will need | ||
1987 | * to transmit that many packets twice within this interval. | ||
1988 | */ | ||
1989 | packets_remaining = 2 * packets_remaining + | ||
1990 | bw_table->interval_bw[i].num_packets; | ||
1991 | |||
1992 | /* Find the largest max packet size of this or the previous | ||
1993 | * interval. | ||
1994 | */ | ||
1995 | if (list_empty(&bw_table->interval_bw[i].endpoints)) | ||
1996 | largest_mps = 0; | ||
1997 | else { | ||
1998 | struct xhci_virt_ep *virt_ep; | ||
1999 | struct list_head *ep_entry; | ||
2000 | |||
2001 | ep_entry = bw_table->interval_bw[i].endpoints.next; | ||
2002 | virt_ep = list_entry(ep_entry, | ||
2003 | struct xhci_virt_ep, bw_endpoint_list); | ||
2004 | /* Convert to blocks, rounding up */ | ||
2005 | largest_mps = DIV_ROUND_UP( | ||
2006 | virt_ep->bw_info.max_packet_size, | ||
2007 | block_size); | ||
2008 | } | ||
2009 | if (largest_mps > packet_size) | ||
2010 | packet_size = largest_mps; | ||
2011 | |||
2012 | /* Use the larger overhead of this or the previous interval. */ | ||
2013 | interval_overhead = xhci_get_largest_overhead( | ||
2014 | &bw_table->interval_bw[i]); | ||
2015 | if (interval_overhead > overhead) | ||
2016 | overhead = interval_overhead; | ||
2017 | |||
2018 | /* How many packets can we evenly distribute across | ||
2019 | * (1 << (i + 1)) possible scheduling opportunities? | ||
2020 | */ | ||
2021 | packets_transmitted = packets_remaining >> (i + 1); | ||
2022 | |||
2023 | /* Add in the bandwidth used for those scheduled packets */ | ||
2024 | bw_added = packets_transmitted * (overhead + packet_size); | ||
2025 | |||
2026 | /* How many packets do we have remaining to transmit? */ | ||
2027 | packets_remaining = packets_remaining % (1 << (i + 1)); | ||
2028 | |||
2029 | /* What largest max packet size should those packets have? */ | ||
2030 | /* If we've transmitted all packets, don't carry over the | ||
2031 | * largest packet size. | ||
2032 | */ | ||
2033 | if (packets_remaining == 0) { | ||
2034 | packet_size = 0; | ||
2035 | overhead = 0; | ||
2036 | } else if (packets_transmitted > 0) { | ||
2037 | /* Otherwise if we do have remaining packets, and we've | ||
2038 | * scheduled some packets in this interval, take the | ||
2039 | * largest max packet size from endpoints with this | ||
2040 | * interval. | ||
2041 | */ | ||
2042 | packet_size = largest_mps; | ||
2043 | overhead = interval_overhead; | ||
2044 | } | ||
2045 | /* Otherwise carry over packet_size and overhead from the last | ||
2046 | * time we had a remainder. | ||
2047 | */ | ||
2048 | bw_used += bw_added; | ||
2049 | if (bw_used > max_bandwidth) { | ||
2050 | xhci_warn(xhci, "Not enough bandwidth. " | ||
2051 | "Proposed: %u, Max: %u\n", | ||
2052 | bw_used, max_bandwidth); | ||
2053 | return -ENOMEM; | ||
2054 | } | ||
2055 | } | ||
2056 | /* | ||
2057 | * Ok, we know we have some packets left over after even-handedly | ||
2058 | * scheduling interval 15. We don't know which microframes they will | ||
2059 | * fit into, so we over-schedule and say they will be scheduled every | ||
2060 | * microframe. | ||
2061 | */ | ||
2062 | if (packets_remaining > 0) | ||
2063 | bw_used += overhead + packet_size; | ||
2064 | |||
2065 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { | ||
2066 | unsigned int port_index = virt_dev->real_port - 1; | ||
2067 | |||
2068 | /* OK, we're manipulating a HS device attached to a | ||
2069 | * root port bandwidth domain. Include the number of active TTs | ||
2070 | * in the bandwidth used. | ||
2071 | */ | ||
2072 | bw_used += TT_HS_OVERHEAD * | ||
2073 | xhci->rh_bw[port_index].num_active_tts; | ||
2074 | } | ||
2075 | |||
2076 | xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " | ||
2077 | "Available: %u " "percent\n", | ||
2078 | bw_used, max_bandwidth, bw_reserved, | ||
2079 | (max_bandwidth - bw_used - bw_reserved) * 100 / | ||
2080 | max_bandwidth); | ||
2081 | |||
2082 | bw_used += bw_reserved; | ||
2083 | if (bw_used > max_bandwidth) { | ||
2084 | xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", | ||
2085 | bw_used, max_bandwidth); | ||
2086 | return -ENOMEM; | ||
2087 | } | ||
2088 | |||
2089 | bw_table->bw_used = bw_used; | ||
2090 | return 0; | ||
2091 | } | ||
2092 | |||
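The loop in xhci_check_bw_table() is easier to see with concrete numbers. The standalone model below replays its carry logic: leftovers from interval i are doubled into interval i+1, evenly placeable packets are charged immediately, and any final remainder is over-scheduled once per microframe. All packet counts and the per-packet cost are invented for the example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_packets[4] = { 0, 5, 2, 0 }; /* invented demand */
        unsigned int remaining = 0, transmitted, bw_used = 0;
        const unsigned int cost = 10;                 /* blocks/packet */

        for (unsigned int i = 1; i < 4; i++) {
            /* leftovers recur twice as often in this interval */
            remaining = 2 * remaining + num_packets[i];
            transmitted = remaining >> (i + 1);      /* evenly placeable */
            bw_used += transmitted * cost;
            remaining = remaining % (1u << (i + 1)); /* carried forward */
            printf("interval %u: placed %u, carry %u, bw %u\n",
                   i, transmitted, remaining, bw_used);
        }
        if (remaining)  /* over-schedule stragglers every microframe */
            bw_used += cost;
        printf("total estimate: %u blocks\n", bw_used);
        return 0;
    }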
2093 | static bool xhci_is_async_ep(unsigned int ep_type) | ||
2094 | { | ||
2095 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && | ||
2096 | ep_type != ISOC_IN_EP && | ||
2097 | ep_type != INT_IN_EP); | ||
2098 | } | ||
2099 | |||
2100 | static bool xhci_is_sync_in_ep(unsigned int ep_type) | ||
2101 | { | ||
2102 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); | ||
2103 | } | ||
2104 | |||
2105 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) | ||
2106 | { | ||
2107 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); | ||
2108 | |||
2109 | if (ep_bw->ep_interval == 0) | ||
2110 | return SS_OVERHEAD_BURST + | ||
2111 | (ep_bw->mult * ep_bw->num_packets * | ||
2112 | (SS_OVERHEAD + mps)); | ||
2113 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * | ||
2114 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), | ||
2115 | 1 << ep_bw->ep_interval); | ||
2116 | |||
2117 | } | ||
2118 | |||
2119 | void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, | ||
2120 | struct xhci_bw_info *ep_bw, | ||
2121 | struct xhci_interval_bw_table *bw_table, | ||
2122 | struct usb_device *udev, | ||
2123 | struct xhci_virt_ep *virt_ep, | ||
2124 | struct xhci_tt_bw_info *tt_info) | ||
2125 | { | ||
2126 | struct xhci_interval_bw *interval_bw; | ||
2127 | int normalized_interval; | ||
2128 | |||
2129 | if (xhci_is_async_ep(ep_bw->type)) | ||
2130 | return; | ||
2131 | |||
2132 | if (udev->speed == USB_SPEED_SUPER) { | ||
2133 | if (xhci_is_sync_in_ep(ep_bw->type)) | ||
2134 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= | ||
2135 | xhci_get_ss_bw_consumed(ep_bw); | ||
2136 | else | ||
2137 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= | ||
2138 | xhci_get_ss_bw_consumed(ep_bw); | ||
2139 | return; | ||
2140 | } | ||
2141 | |||
2142 | /* SuperSpeed endpoints never get added to intervals in the table, so | ||
2143 | * this check is only valid for HS/FS/LS devices. | ||
2144 | */ | ||
2145 | if (list_empty(&virt_ep->bw_endpoint_list)) | ||
2146 | return; | ||
2147 | /* For LS/FS devices, we need to translate the interval expressed in | ||
2148 | * microframes to frames. | ||
2149 | */ | ||
2150 | if (udev->speed == USB_SPEED_HIGH) | ||
2151 | normalized_interval = ep_bw->ep_interval; | ||
2152 | else | ||
2153 | normalized_interval = ep_bw->ep_interval - 3; | ||
2154 | |||
2155 | if (normalized_interval == 0) | ||
2156 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; | ||
2157 | interval_bw = &bw_table->interval_bw[normalized_interval]; | ||
2158 | interval_bw->num_packets -= ep_bw->num_packets; | ||
2159 | switch (udev->speed) { | ||
2160 | case USB_SPEED_LOW: | ||
2161 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; | ||
2162 | break; | ||
2163 | case USB_SPEED_FULL: | ||
2164 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; | ||
2165 | break; | ||
2166 | case USB_SPEED_HIGH: | ||
2167 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; | ||
2168 | break; | ||
2169 | case USB_SPEED_SUPER: | ||
2170 | case USB_SPEED_UNKNOWN: | ||
2171 | case USB_SPEED_WIRELESS: | ||
2172 | /* Should never happen because only LS/FS/HS endpoints will get | ||
2173 | * added to the endpoint list. | ||
2174 | */ | ||
2175 | return; | ||
2176 | } | ||
2177 | if (tt_info) | ||
2178 | tt_info->active_eps -= 1; | ||
2179 | list_del_init(&virt_ep->bw_endpoint_list); | ||
2180 | } | ||
2181 | |||
2182 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, | ||
2183 | struct xhci_bw_info *ep_bw, | ||
2184 | struct xhci_interval_bw_table *bw_table, | ||
2185 | struct usb_device *udev, | ||
2186 | struct xhci_virt_ep *virt_ep, | ||
2187 | struct xhci_tt_bw_info *tt_info) | ||
2188 | { | ||
2189 | struct xhci_interval_bw *interval_bw; | ||
2190 | struct xhci_virt_ep *smaller_ep; | ||
2191 | int normalized_interval; | ||
2192 | |||
2193 | if (xhci_is_async_ep(ep_bw->type)) | ||
2194 | return; | ||
2195 | |||
2196 | if (udev->speed == USB_SPEED_SUPER) { | ||
2197 | if (xhci_is_sync_in_ep(ep_bw->type)) | ||
2198 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += | ||
2199 | xhci_get_ss_bw_consumed(ep_bw); | ||
2200 | else | ||
2201 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += | ||
2202 | xhci_get_ss_bw_consumed(ep_bw); | ||
2203 | return; | ||
2204 | } | ||
2205 | |||
2206 | /* For LS/FS devices, we need to translate the interval expressed in | ||
2207 | * microframes to frames. | ||
2208 | */ | ||
2209 | if (udev->speed == USB_SPEED_HIGH) | ||
2210 | normalized_interval = ep_bw->ep_interval; | ||
2211 | else | ||
2212 | normalized_interval = ep_bw->ep_interval - 3; | ||
2213 | |||
2214 | if (normalized_interval == 0) | ||
2215 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; | ||
2216 | interval_bw = &bw_table->interval_bw[normalized_interval]; | ||
2217 | interval_bw->num_packets += ep_bw->num_packets; | ||
2218 | switch (udev->speed) { | ||
2219 | case USB_SPEED_LOW: | ||
2220 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; | ||
2221 | break; | ||
2222 | case USB_SPEED_FULL: | ||
2223 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; | ||
2224 | break; | ||
2225 | case USB_SPEED_HIGH: | ||
2226 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; | ||
2227 | break; | ||
2228 | case USB_SPEED_SUPER: | ||
2229 | case USB_SPEED_UNKNOWN: | ||
2230 | case USB_SPEED_WIRELESS: | ||
2231 | /* Should never happen because only LS/FS/HS endpoints will get | ||
2232 | * added to the endpoint list. | ||
2233 | */ | ||
2234 | return; | ||
2235 | } | ||
2236 | |||
2237 | if (tt_info) | ||
2238 | tt_info->active_eps += 1; | ||
2239 | /* Insert the endpoint into the list, largest max packet size first. */ | ||
2240 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, | ||
2241 | bw_endpoint_list) { | ||
2242 | if (ep_bw->max_packet_size >= | ||
2243 | smaller_ep->bw_info.max_packet_size) { | ||
2244 | /* Add the new ep before the smaller endpoint */ | ||
2245 | list_add_tail(&virt_ep->bw_endpoint_list, | ||
2246 | &smaller_ep->bw_endpoint_list); | ||
2247 | return; | ||
2248 | } | ||
2249 | } | ||
2250 | /* Add the new endpoint at the end of the list. */ | ||
2251 | list_add_tail(&virt_ep->bw_endpoint_list, | ||
2252 | &interval_bw->endpoints); | ||
2253 | } | ||
2254 | |||
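Both table helpers above index intervals only after normalizing: ep_interval is stored as a power-of-two exponent of 125 us microframes, and LS/FS endpoints (which the schedule tracks per 1 ms frame) have the exponent reduced by 3, i.e. divided by the 8 microframes in a frame. A one-function sketch:

    #include <stdio.h>

    /* ep_interval is an exponent of 125us microframes.  LS/FS endpoints
     * are serviced per 1ms frame, so the table index drops the
     * 2^3 = 8 microframes-per-frame factor; HS entries index directly. */
    static int normalize_interval(int ep_interval, int is_highspeed)
    {
        return is_highspeed ? ep_interval : ep_interval - 3;
    }

    int main(void)
    {
        /* An FS interrupt ep with a 1-frame period has ep_interval 3. */
        printf("FS ep_interval 3 -> slot %d\n", normalize_interval(3, 0));
        printf("HS ep_interval 3 -> slot %d\n", normalize_interval(3, 1));
        return 0;
    }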
2255 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, | ||
2256 | struct xhci_virt_device *virt_dev, | ||
2257 | int old_active_eps) | ||
2258 | { | ||
2259 | struct xhci_root_port_bw_info *rh_bw_info; | ||
2260 | if (!virt_dev->tt_info) | ||
2261 | return; | ||
2262 | |||
2263 | rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; | ||
2264 | if (old_active_eps == 0 && | ||
2265 | virt_dev->tt_info->active_eps != 0) { | ||
2266 | rh_bw_info->num_active_tts += 1; | ||
2267 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; | ||
2268 | } else if (old_active_eps != 0 && | ||
2269 | virt_dev->tt_info->active_eps == 0) { | ||
2270 | rh_bw_info->num_active_tts -= 1; | ||
2271 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; | ||
2272 | } | ||
2273 | } | ||
2274 | |||
2275 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, | ||
2276 | struct xhci_virt_device *virt_dev, | ||
2277 | struct xhci_container_ctx *in_ctx) | ||
2278 | { | ||
2279 | struct xhci_bw_info ep_bw_info[31]; | ||
2280 | int i; | ||
2281 | struct xhci_input_control_ctx *ctrl_ctx; | ||
2282 | int old_active_eps = 0; | ||
2283 | |||
2284 | if (virt_dev->tt_info) | ||
2285 | old_active_eps = virt_dev->tt_info->active_eps; | ||
2286 | |||
2287 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
2288 | |||
2289 | for (i = 0; i < 31; i++) { | ||
2290 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) | ||
2291 | continue; | ||
2292 | |||
2293 | /* Make a copy of the BW info in case we need to revert this */ | ||
2294 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, | ||
2295 | sizeof(ep_bw_info[i])); | ||
2296 | /* Drop the endpoint from the interval table if the endpoint is | ||
2297 | * being dropped or changed. | ||
2298 | */ | ||
2299 | if (EP_IS_DROPPED(ctrl_ctx, i)) | ||
2300 | xhci_drop_ep_from_interval_table(xhci, | ||
2301 | &virt_dev->eps[i].bw_info, | ||
2302 | virt_dev->bw_table, | ||
2303 | virt_dev->udev, | ||
2304 | &virt_dev->eps[i], | ||
2305 | virt_dev->tt_info); | ||
2306 | } | ||
2307 | /* Overwrite the information stored in the endpoints' bw_info */ | ||
2308 | xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); | ||
2309 | for (i = 0; i < 31; i++) { | ||
2310 | /* Add any changed or added endpoints to the interval table */ | ||
2311 | if (EP_IS_ADDED(ctrl_ctx, i)) | ||
2312 | xhci_add_ep_to_interval_table(xhci, | ||
2313 | &virt_dev->eps[i].bw_info, | ||
2314 | virt_dev->bw_table, | ||
2315 | virt_dev->udev, | ||
2316 | &virt_dev->eps[i], | ||
2317 | virt_dev->tt_info); | ||
2318 | } | ||
2319 | |||
2320 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { | ||
2321 | /* Ok, this fits in the bandwidth we have. | ||
2322 | * Update the number of active TTs. | ||
2323 | */ | ||
2324 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); | ||
2325 | return 0; | ||
2326 | } | ||
2327 | |||
2328 | /* We don't have enough bandwidth for this, revert the stored info. */ | ||
2329 | for (i = 0; i < 31; i++) { | ||
2330 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) | ||
2331 | continue; | ||
2332 | |||
2333 | /* Drop the new copies of any added or changed endpoints from | ||
2334 | * the interval table. | ||
2335 | */ | ||
2336 | if (EP_IS_ADDED(ctrl_ctx, i)) { | ||
2337 | xhci_drop_ep_from_interval_table(xhci, | ||
2338 | &virt_dev->eps[i].bw_info, | ||
2339 | virt_dev->bw_table, | ||
2340 | virt_dev->udev, | ||
2341 | &virt_dev->eps[i], | ||
2342 | virt_dev->tt_info); | ||
2343 | } | ||
2344 | /* Revert the endpoint back to its old information */ | ||
2345 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], | ||
2346 | sizeof(ep_bw_info[i])); | ||
2347 | /* Add any changed or dropped endpoints back into the table */ | ||
2348 | if (EP_IS_DROPPED(ctrl_ctx, i)) | ||
2349 | xhci_add_ep_to_interval_table(xhci, | ||
2350 | &virt_dev->eps[i].bw_info, | ||
2351 | virt_dev->bw_table, | ||
2352 | virt_dev->udev, | ||
2353 | &virt_dev->eps[i], | ||
2354 | virt_dev->tt_info); | ||
2355 | } | ||
2356 | return -ENOMEM; | ||
2357 | } | ||
2358 | |||
2359 | |||
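xhci_reserve_bandwidth() behaves like a small transaction: snapshot each endpoint's bw_info, apply the drops and adds, run the check, and on failure replay the inverse operations and restore the snapshots. A compressed model of that shape (types and names hypothetical):

    #include <string.h>

    struct bw_info { unsigned int packets; };

    static int fits(const struct bw_info *b)
    {
        return b->packets <= 8 ? 0 : -1;  /* stand-in bandwidth check */
    }

    /* Tentatively commit a change, validate, roll back on failure. */
    static int reserve(struct bw_info *cur, struct bw_info next)
    {
        struct bw_info saved;

        memcpy(&saved, cur, sizeof(saved));  /* copy in case we revert */
        *cur = next;                         /* apply drops/adds */
        if (fits(cur) == 0)
            return 0;                        /* it fits: keep new state */
        memcpy(cur, &saved, sizeof(saved));  /* revert the stored info */
        return -1;
    }

    int main(void)
    {
        struct bw_info cur = { 4 }, want = { 12 };
        return reserve(&cur, want);  /* fails; cur stays at 4 packets */
    }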
1750 | /* Issue a configure endpoint command or evaluate context command | 2360 | /* Issue a configure endpoint command or evaluate context command |
1751 | * and wait for it to finish. | 2361 | * and wait for it to finish. |
1752 | */ | 2362 | */ |
@@ -1765,17 +2375,30 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1765 | 2375 | ||
1766 | spin_lock_irqsave(&xhci->lock, flags); | 2376 | spin_lock_irqsave(&xhci->lock, flags); |
1767 | virt_dev = xhci->devs[udev->slot_id]; | 2377 | virt_dev = xhci->devs[udev->slot_id]; |
1768 | if (command) { | 2378 | |
2379 | if (command) | ||
1769 | in_ctx = command->in_ctx; | 2380 | in_ctx = command->in_ctx; |
1770 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && | 2381 | else |
1771 | xhci_reserve_host_resources(xhci, in_ctx)) { | 2382 | in_ctx = virt_dev->in_ctx; |
1772 | spin_unlock_irqrestore(&xhci->lock, flags); | 2383 | |
1773 | xhci_warn(xhci, "Not enough host resources, " | 2384 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
1774 | "active endpoint contexts = %u\n", | 2385 | xhci_reserve_host_resources(xhci, in_ctx)) { |
1775 | xhci->num_active_eps); | 2386 | spin_unlock_irqrestore(&xhci->lock, flags); |
1776 | return -ENOMEM; | 2387 | xhci_warn(xhci, "Not enough host resources, " |
1777 | } | 2388 | "active endpoint contexts = %u\n", |
2389 | xhci->num_active_eps); | ||
2390 | return -ENOMEM; | ||
2391 | } | ||
2392 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && | ||
2393 | xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) { | ||
2394 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) | ||
2395 | xhci_free_host_resources(xhci, in_ctx); | ||
2396 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
2397 | xhci_warn(xhci, "Not enough bandwidth\n"); | ||
2398 | return -ENOMEM; | ||
2399 | } | ||
1778 | 2400 | ||
2401 | if (command) { | ||
1779 | cmd_completion = command->completion; | 2402 | cmd_completion = command->completion; |
1780 | cmd_status = &command->status; | 2403 | cmd_status = &command->status; |
1781 | command->command_trb = xhci->cmd_ring->enqueue; | 2404 | command->command_trb = xhci->cmd_ring->enqueue; |
@@ -1789,15 +2412,6 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1789 | 2412 | ||
1790 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); | 2413 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); |
1791 | } else { | 2414 | } else { |
1792 | in_ctx = virt_dev->in_ctx; | ||
1793 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && | ||
1794 | xhci_reserve_host_resources(xhci, in_ctx)) { | ||
1795 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1796 | xhci_warn(xhci, "Not enough host resources, " | ||
1797 | "active endpoint contexts = %u\n", | ||
1798 | xhci->num_active_eps); | ||
1799 | return -ENOMEM; | ||
1800 | } | ||
1801 | cmd_completion = &virt_dev->cmd_completion; | 2415 | cmd_completion = &virt_dev->cmd_completion; |
1802 | cmd_status = &virt_dev->cmd_status; | 2416 | cmd_status = &virt_dev->cmd_status; |
1803 | } | 2417 | } |
@@ -1888,6 +2502,12 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1888 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); | 2502 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
1889 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); | 2503 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
1890 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); | 2504 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
2505 | |||
2506 | /* Don't issue the command if there are no endpoints to update. */ | ||
2507 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && | ||
2508 | ctrl_ctx->drop_flags == 0) | ||
2509 | return 0; | ||
2510 | |||
1891 | xhci_dbg(xhci, "New Input Control Context:\n"); | 2511 | xhci_dbg(xhci, "New Input Control Context:\n"); |
1892 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | 2512 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
1893 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, | 2513 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
@@ -2525,6 +3145,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2525 | int timeleft; | 3145 | int timeleft; |
2526 | int last_freed_endpoint; | 3146 | int last_freed_endpoint; |
2527 | struct xhci_slot_ctx *slot_ctx; | 3147 | struct xhci_slot_ctx *slot_ctx; |
3148 | int old_active_eps = 0; | ||
2528 | 3149 | ||
2529 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); | 3150 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); |
2530 | if (ret <= 0) | 3151 | if (ret <= 0) |
@@ -2666,7 +3287,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2666 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | 3287 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
2667 | last_freed_endpoint = i; | 3288 | last_freed_endpoint = i; |
2668 | } | 3289 | } |
2669 | } | 3290 | if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) |
3291 | xhci_drop_ep_from_interval_table(xhci, | ||
3292 | &virt_dev->eps[i].bw_info, | ||
3293 | virt_dev->bw_table, | ||
3294 | udev, | ||
3295 | &virt_dev->eps[i], | ||
3296 | virt_dev->tt_info); | ||
3297 | xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); | ||
3298 | } | ||
3299 | /* If necessary, update the number of active TTs on this root port */ | ||
3300 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); | ||
3301 | |||
2670 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); | 3302 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); |
2671 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); | 3303 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); |
2672 | ret = 0; | 3304 | ret = 0; |
@@ -2704,6 +3336,11 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2704 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | 3336 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); |
2705 | } | 3337 | } |
2706 | 3338 | ||
3339 | if (udev->usb2_hw_lpm_enabled) { | ||
3340 | xhci_set_usb2_hardware_lpm(hcd, udev, 0); | ||
3341 | udev->usb2_hw_lpm_enabled = 0; | ||
3342 | } | ||
3343 | |||
2707 | spin_lock_irqsave(&xhci->lock, flags); | 3344 | spin_lock_irqsave(&xhci->lock, flags); |
2708 | /* Don't disable the slot if the host controller is dead. */ | 3345 | /* Don't disable the slot if the host controller is dead. */ |
2709 | state = xhci_readl(xhci, &xhci->op_regs->status); | 3346 | state = xhci_readl(xhci, &xhci->op_regs->status); |
@@ -2889,7 +3526,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2889 | * command on a timeout. | 3526 | * command on a timeout. |
2890 | */ | 3527 | */ |
2891 | if (timeleft <= 0) { | 3528 | if (timeleft <= 0) { |
2892 | xhci_warn(xhci, "%s while waiting for a slot\n", | 3529 | xhci_warn(xhci, "%s while waiting for address device command\n", |
2893 | timeleft == 0 ? "Timeout" : "Signal"); | 3530 | timeleft == 0 ? "Timeout" : "Signal"); |
2894 | /* FIXME cancel the address device command */ | 3531 | /* FIXME cancel the address device command */ |
2895 | return -ETIME; | 3532 | return -ETIME; |
@@ -2957,6 +3594,254 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2957 | return 0; | 3594 | return 0; |
2958 | } | 3595 | } |
2959 | 3596 | ||
3597 | #ifdef CONFIG_USB_SUSPEND | ||
3598 | |||
3599 | /* BESL to HIRD Encoding array for USB2 LPM */ | ||
3600 | static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, | ||
3601 | 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; | ||
3602 | |||
3603 | /* Calculate HIRD/BESL for USB2 PORTPMSC*/ | ||
3604 | static int xhci_calculate_hird_besl(int u2del, bool use_besl) | ||
3605 | { | ||
3606 | int hird; | ||
3607 | |||
3608 | if (use_besl) { | ||
3609 | for (hird = 0; hird < 16; hird++) { | ||
3610 | if (xhci_besl_encoding[hird] >= u2del) | ||
3611 | break; | ||
3612 | } | ||
3613 | } else { | ||
3614 | if (u2del <= 50) | ||
3615 | hird = 0; | ||
3616 | else | ||
3617 | hird = (u2del - 51) / 75 + 1; | ||
3618 | |||
3619 | if (hird > 15) | ||
3620 | hird = 15; | ||
3621 | } | ||
3622 | |||
3623 | return hird; | ||
3624 | } | ||
3625 | |||
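The two encodings above map the host's U2 exit latency (u2del, in microseconds) to a 4-bit HIRD field: BESL picks the first table entry at or above u2del, while the legacy encoding uses a 50 us threshold plus 75 us steps. A worked check, reusing the table values from the code above:

    #include <stdio.h>

    static const int besl[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
                                 3000, 4000, 5000, 6000, 7000, 8000, 9000,
                                 10000};

    static int hird_from_u2del(int u2del, int use_besl)
    {
        int hird;

        if (use_besl) {
            for (hird = 0; hird < 16; hird++)
                if (besl[hird] >= u2del)
                    break;
        } else {
            hird = (u2del <= 50) ? 0 : (u2del - 51) / 75 + 1;
            if (hird > 15)
                hird = 15;
        }
        return hird;
    }

    int main(void)
    {
        /* u2del = 256us: BESL rounds up to 300 (index 3); the legacy
         * encoding yields (256 - 51) / 75 + 1 = 3 as well. */
        printf("BESL:   %d\n", hird_from_u2del(256, 1));
        printf("legacy: %d\n", hird_from_u2del(256, 0));
        return 0;
    }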
3626 | static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd, | ||
3627 | struct usb_device *udev) | ||
3628 | { | ||
3629 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
3630 | struct dev_info *dev_info; | ||
3631 | __le32 __iomem **port_array; | ||
3632 | __le32 __iomem *addr, *pm_addr; | ||
3633 | u32 temp, dev_id; | ||
3634 | unsigned int port_num; | ||
3635 | unsigned long flags; | ||
3636 | int u2del, hird; | ||
3637 | int ret; | ||
3638 | |||
3639 | if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support || | ||
3640 | !udev->lpm_capable) | ||
3641 | return -EINVAL; | ||
3642 | |||
3643 | /* so far we only support LPM for non-hub devices connected to the root hub */ | ||
3644 | if (!udev->parent || udev->parent->parent || | ||
3645 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) | ||
3646 | return -EINVAL; | ||
3647 | |||
3648 | spin_lock_irqsave(&xhci->lock, flags); | ||
3649 | |||
3650 | /* Look for devices in lpm_failed_devs list */ | ||
3651 | dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 | | ||
3652 | le16_to_cpu(udev->descriptor.idProduct); | ||
3653 | list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) { | ||
3654 | if (dev_info->dev_id == dev_id) { | ||
3655 | ret = -EINVAL; | ||
3656 | goto finish; | ||
3657 | } | ||
3658 | } | ||
3659 | |||
3660 | port_array = xhci->usb2_ports; | ||
3661 | port_num = udev->portnum - 1; | ||
3662 | |||
3663 | if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) { | ||
3664 | xhci_dbg(xhci, "invalid port number %d\n", udev->portnum); | ||
3665 | ret = -EINVAL; | ||
3666 | goto finish; | ||
3667 | } | ||
3668 | |||
3669 | /* | ||
3670 | * Test USB 2.0 software LPM. | ||
3671 | * FIXME: some xHCI 1.0 hosts may implement a new register to set up | ||
3672 | * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1 | ||
3673 | * in the June 2011 errata release. | ||
3674 | */ | ||
3675 | xhci_dbg(xhci, "test port %d software LPM\n", port_num); | ||
3676 | /* | ||
3677 | * Set L1 Device Slot and HIRD/BESL. | ||
3678 | * Check device's USB 2.0 extension descriptor to determine whether | ||
3679 | * HIRD or BESL should be used. See USB2.0 LPM errata. | ||
3680 | */ | ||
3681 | pm_addr = port_array[port_num] + 1; | ||
3682 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); | ||
3683 | if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2)) | ||
3684 | hird = xhci_calculate_hird_besl(u2del, 1); | ||
3685 | else | ||
3686 | hird = xhci_calculate_hird_besl(u2del, 0); | ||
3687 | |||
3688 | temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird); | ||
3689 | xhci_writel(xhci, temp, pm_addr); | ||
3690 | |||
3691 | /* Set port link state to U2(L1) */ | ||
3692 | addr = port_array[port_num]; | ||
3693 | xhci_set_link_state(xhci, port_array, port_num, XDEV_U2); | ||
3694 | |||
3695 | /* wait for ACK */ | ||
3696 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3697 | msleep(10); | ||
3698 | spin_lock_irqsave(&xhci->lock, flags); | ||
3699 | |||
3700 | /* Check L1 Status */ | ||
3701 | ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125); | ||
3702 | if (ret != -ETIMEDOUT) { | ||
3703 | /* enter L1 successfully */ | ||
3704 | temp = xhci_readl(xhci, addr); | ||
3705 | xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n", | ||
3706 | port_num, temp); | ||
3707 | ret = 0; | ||
3708 | } else { | ||
3709 | temp = xhci_readl(xhci, pm_addr); | ||
3710 | xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n", | ||
3711 | port_num, temp & PORT_L1S_MASK); | ||
3712 | ret = -EINVAL; | ||
3713 | } | ||
3714 | |||
3715 | /* Resume the port */ | ||
3716 | xhci_set_link_state(xhci, port_array, port_num, XDEV_U0); | ||
3717 | |||
3718 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3719 | msleep(10); | ||
3720 | spin_lock_irqsave(&xhci->lock, flags); | ||
3721 | |||
3722 | /* Clear PLC */ | ||
3723 | xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC); | ||
3724 | |||
3725 | /* Check PORTSC to make sure the device is in the right state */ | ||
3726 | if (!ret) { | ||
3727 | temp = xhci_readl(xhci, addr); | ||
3728 | xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp); | ||
3729 | if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) || | ||
3730 | (temp & PORT_PLS_MASK) != XDEV_U0) { | ||
3731 | xhci_dbg(xhci, "port L1 resume fail\n"); | ||
3732 | ret = -EINVAL; | ||
3733 | } | ||
3734 | } | ||
3735 | |||
3736 | if (ret) { | ||
3737 | /* Insert dev to lpm_failed_devs list */ | ||
3738 | xhci_warn(xhci, "device LPM test failed, may disconnect and " | ||
3739 | "re-enumerate\n"); | ||
3740 | dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC); | ||
3741 | if (!dev_info) { | ||
3742 | ret = -ENOMEM; | ||
3743 | goto finish; | ||
3744 | } | ||
3745 | dev_info->dev_id = dev_id; | ||
3746 | INIT_LIST_HEAD(&dev_info->list); | ||
3747 | list_add(&dev_info->list, &xhci->lpm_failed_devs); | ||
3748 | } else { | ||
3749 | xhci_ring_device(xhci, udev->slot_id); | ||
3750 | } | ||
3751 | |||
3752 | finish: | ||
3753 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3754 | return ret; | ||
3755 | } | ||
3756 | |||
3757 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, | ||
3758 | struct usb_device *udev, int enable) | ||
3759 | { | ||
3760 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
3761 | __le32 __iomem **port_array; | ||
3762 | __le32 __iomem *pm_addr; | ||
3763 | u32 temp; | ||
3764 | unsigned int port_num; | ||
3765 | unsigned long flags; | ||
3766 | int u2del, hird; | ||
3767 | |||
3768 | if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support || | ||
3769 | !udev->lpm_capable) | ||
3770 | return -EPERM; | ||
3771 | |||
3772 | if (!udev->parent || udev->parent->parent || | ||
3773 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) | ||
3774 | return -EPERM; | ||
3775 | |||
3776 | if (udev->usb2_hw_lpm_capable != 1) | ||
3777 | return -EPERM; | ||
3778 | |||
3779 | spin_lock_irqsave(&xhci->lock, flags); | ||
3780 | |||
3781 | port_array = xhci->usb2_ports; | ||
3782 | port_num = udev->portnum - 1; | ||
3783 | pm_addr = port_array[port_num] + 1; | ||
3784 | temp = xhci_readl(xhci, pm_addr); | ||
3785 | |||
3786 | xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", | ||
3787 | enable ? "enable" : "disable", port_num); | ||
3788 | |||
3789 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); | ||
3790 | if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2)) | ||
3791 | hird = xhci_calculate_hird_besl(u2del, 1); | ||
3792 | else | ||
3793 | hird = xhci_calculate_hird_besl(u2del, 0); | ||
3794 | |||
3795 | if (enable) { | ||
3796 | temp &= ~PORT_HIRD_MASK; | ||
3797 | temp |= PORT_HIRD(hird) | PORT_RWE; | ||
3798 | xhci_writel(xhci, temp, pm_addr); | ||
3799 | temp = xhci_readl(xhci, pm_addr); | ||
3800 | temp |= PORT_HLE; | ||
3801 | xhci_writel(xhci, temp, pm_addr); | ||
3802 | } else { | ||
3803 | temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK); | ||
3804 | xhci_writel(xhci, temp, pm_addr); | ||
3805 | } | ||
3806 | |||
3807 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3808 | return 0; | ||
3809 | } | ||
3810 | |||
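Note that the enable path above writes PORTPMSC twice: HIRD and RWE first, then a second write to flip PORT_HLE, so the hardware latches the latency and remote-wake settings before LPM is actually switched on. A sketch of that two-step sequencing against a fake register (bit positions are illustrative, not the real PORTPMSC layout):

    #include <stdio.h>

    /* Illustrative bit positions only. */
    #define RWE        (1u << 3)
    #define HLE        (1u << 16)
    #define HIRD(x)    (((x) & 0xfu) << 4)
    #define HIRD_MASK  HIRD(0xf)

    static unsigned int portpmsc;  /* stand-in for the mapped register */

    static void lpm_enable(int hird)
    {
        unsigned int v = portpmsc;

        /* First write: program latency + remote wake while LPM is off. */
        v &= ~HIRD_MASK;
        v |= HIRD(hird) | RWE;
        portpmsc = v;

        /* Second write: only now turn hardware LPM on. */
        portpmsc |= HLE;
    }

    int main(void)
    {
        lpm_enable(3);
        printf("portpmsc = 0x%08x\n", portpmsc);
        return 0;
    }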
3811 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) | ||
3812 | { | ||
3813 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
3814 | int ret; | ||
3815 | |||
3816 | ret = xhci_usb2_software_lpm_test(hcd, udev); | ||
3817 | if (!ret) { | ||
3818 | xhci_dbg(xhci, "software LPM test succeeded\n"); | ||
3819 | if (xhci->hw_lpm_support == 1) { | ||
3820 | udev->usb2_hw_lpm_capable = 1; | ||
3821 | ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1); | ||
3822 | if (!ret) | ||
3823 | udev->usb2_hw_lpm_enabled = 1; | ||
3824 | } | ||
3825 | } | ||
3826 | |||
3827 | return 0; | ||
3828 | } | ||
3829 | |||
3830 | #else | ||
3831 | |||
3832 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, | ||
3833 | struct usb_device *udev, int enable) | ||
3834 | { | ||
3835 | return 0; | ||
3836 | } | ||
3837 | |||
3838 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) | ||
3839 | { | ||
3840 | return 0; | ||
3841 | } | ||
3842 | |||
3843 | #endif /* CONFIG_USB_SUSPEND */ | ||
3844 | |||
2960 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's | 3845 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
2961 | * internal data structures for the device. | 3846 | * internal data structures for the device. |
2962 | */ | 3847 | */ |
@@ -2988,6 +3873,14 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
2988 | } | 3873 | } |
2989 | 3874 | ||
2990 | spin_lock_irqsave(&xhci->lock, flags); | 3875 | spin_lock_irqsave(&xhci->lock, flags); |
3876 | if (hdev->speed == USB_SPEED_HIGH && | ||
3877 | xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { | ||
3878 | xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); | ||
3879 | xhci_free_command(xhci, config_cmd); | ||
3880 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3881 | return -ENOMEM; | ||
3882 | } | ||
3883 | |||
2991 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); | 3884 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
2992 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); | 3885 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); |
2993 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); | 3886 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
@@ -3051,22 +3944,108 @@ int xhci_get_frame(struct usb_hcd *hcd)
3051 | return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; | 3944 | return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; |
3052 | } | 3945 | } |
3053 | 3946 | ||
3947 | int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | ||
3948 | { | ||
3949 | struct xhci_hcd *xhci; | ||
3950 | struct device *dev = hcd->self.controller; | ||
3951 | int retval; | ||
3952 | u32 temp; | ||
3953 | |||
3954 | hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2; | ||
3955 | |||
3956 | if (usb_hcd_is_primary_hcd(hcd)) { | ||
3957 | xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL); | ||
3958 | if (!xhci) | ||
3959 | return -ENOMEM; | ||
3960 | *((struct xhci_hcd **) hcd->hcd_priv) = xhci; | ||
3961 | xhci->main_hcd = hcd; | ||
3962 | /* Mark the first roothub as being USB 2.0. | ||
3963 | * The xHCI driver will register the USB 3.0 roothub. | ||
3964 | */ | ||
3965 | hcd->speed = HCD_USB2; | ||
3966 | hcd->self.root_hub->speed = USB_SPEED_HIGH; | ||
3967 | /* | ||
3968 | * USB 2.0 roothub under xHCI has an integrated TT, | ||
3969 | * (rate matching hub) as opposed to having an OHCI/UHCI | ||
3970 | * companion controller. | ||
3971 | */ | ||
3972 | hcd->has_tt = 1; | ||
3973 | } else { | ||
3974 | /* xHCI private pointer was set in xhci_pci_probe for the second | ||
3975 | * registered roothub. | ||
3976 | */ | ||
3977 | xhci = hcd_to_xhci(hcd); | ||
3978 | temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); | ||
3979 | if (HCC_64BIT_ADDR(temp)) { | ||
3980 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); | ||
3981 | dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); | ||
3982 | } else { | ||
3983 | dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); | ||
3984 | } | ||
3985 | return 0; | ||
3986 | } | ||
3987 | |||
3988 | xhci->cap_regs = hcd->regs; | ||
3989 | xhci->op_regs = hcd->regs + | ||
3990 | HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); | ||
3991 | xhci->run_regs = hcd->regs + | ||
3992 | (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK); | ||
3993 | /* Cache read-only capability registers */ | ||
3994 | xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1); | ||
3995 | xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2); | ||
3996 | xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3); | ||
3997 | xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); | ||
3998 | xhci->hci_version = HC_VERSION(xhci->hcc_params); | ||
3999 | xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params); | ||
4000 | xhci_print_registers(xhci); | ||
4001 | |||
4002 | get_quirks(dev, xhci); | ||
4003 | |||
4004 | /* Make sure the HC is halted. */ | ||
4005 | retval = xhci_halt(xhci); | ||
4006 | if (retval) | ||
4007 | goto error; | ||
4008 | |||
4009 | xhci_dbg(xhci, "Resetting HCD\n"); | ||
4010 | /* Reset the internal HC memory state and registers. */ | ||
4011 | retval = xhci_reset(xhci); | ||
4012 | if (retval) | ||
4013 | goto error; | ||
4014 | xhci_dbg(xhci, "Reset complete\n"); | ||
4015 | |||
4016 | temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); | ||
4017 | if (HCC_64BIT_ADDR(temp)) { | ||
4018 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); | ||
4019 | dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); | ||
4020 | } else { | ||
4021 | dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); | ||
4022 | } | ||
4023 | |||
4024 | xhci_dbg(xhci, "Calling HCD init\n"); | ||
4025 | /* Initialize HCD and host controller data structures. */ | ||
4026 | retval = xhci_init(hcd); | ||
4027 | if (retval) | ||
4028 | goto error; | ||
4029 | xhci_dbg(xhci, "Called HCD init\n"); | ||
4030 | return 0; | ||
4031 | error: | ||
4032 | kfree(xhci); | ||
4033 | return retval; | ||
4034 | } | ||
4035 | |||
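xhci_gen_setup() is called once per roothub: only the primary call allocates struct xhci_hcd and runs the halt/reset/init sequence, while the secondary call inherits the same controller state and merely sets the DMA mask. A sketch of the shared-state pattern (the static pointer is a simplification of how xhci_pci_probe wires hcd_priv):

    #include <stdio.h>
    #include <stdlib.h>

    struct xhci { int initialized; };

    /* Controller state is created on the first (primary) registration
     * and shared by the second roothub. */
    static struct xhci *gen_setup(int primary)
    {
        static struct xhci *shared;

        if (primary) {
            shared = calloc(1, sizeof(*shared));
            if (shared)
                shared->initialized = 1;  /* halt+reset+init happen here */
        }
        return shared;                    /* secondary call reuses it */
    }

    int main(void)
    {
        struct xhci *a = gen_setup(1);    /* USB 2.0 roothub */
        struct xhci *b = gen_setup(0);    /* USB 3.0 roothub */

        printf("same controller: %s\n", (a && a == b) ? "yes" : "no");
        free(a);
        return 0;
    }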
3054 | MODULE_DESCRIPTION(DRIVER_DESC); | 4036 | MODULE_DESCRIPTION(DRIVER_DESC); |
3055 | MODULE_AUTHOR(DRIVER_AUTHOR); | 4037 | MODULE_AUTHOR(DRIVER_AUTHOR); |
3056 | MODULE_LICENSE("GPL"); | 4038 | MODULE_LICENSE("GPL"); |
3057 | 4039 | ||
3058 | static int __init xhci_hcd_init(void) | 4040 | static int __init xhci_hcd_init(void) |
3059 | { | 4041 | { |
3060 | #ifdef CONFIG_PCI | 4042 | int retval; |
3061 | int retval = 0; | ||
3062 | 4043 | ||
3063 | retval = xhci_register_pci(); | 4044 | retval = xhci_register_pci(); |
3064 | |||
3065 | if (retval < 0) { | 4045 | if (retval < 0) { |
3066 | printk(KERN_DEBUG "Problem registering PCI driver."); | 4046 | printk(KERN_DEBUG "Problem registering PCI driver."); |
3067 | return retval; | 4047 | return retval; |
3068 | } | 4048 | } |
3069 | #endif | ||
3070 | /* | 4049 | /* |
3071 | * Check the compiler generated sizes of structures that must be laid | 4050 | * Check the compiler generated sizes of structures that must be laid |
3072 | * out in specific ways for hardware access. | 4051 | * out in specific ways for hardware access. |
@@ -3091,8 +4070,6 @@ module_init(xhci_hcd_init);
3091 | 4070 | ||
3092 | static void __exit xhci_hcd_cleanup(void) | 4071 | static void __exit xhci_hcd_cleanup(void) |
3093 | { | 4072 | { |
3094 | #ifdef CONFIG_PCI | ||
3095 | xhci_unregister_pci(); | 4073 | xhci_unregister_pci(); |
3096 | #endif | ||
3097 | } | 4074 | } |
3098 | module_exit(xhci_hcd_cleanup); | 4075 | module_exit(xhci_hcd_cleanup); |
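With registration now unconditional at the call sites, the #ifdef CONFIG_PCI / #endif pairs disappear from module init and exit; presumably the header supplies no-op stubs when PCI support is compiled out, along the lines of this assumed sketch:

    /* Hedged sketch of the header-side pattern that lets callers drop
     * #ifdef CONFIG_PCI (the stub form here is an assumption). */
    #ifdef CONFIG_PCI
    int xhci_register_pci(void);
    void xhci_unregister_pci(void);
    #else
    static inline int xhci_register_pci(void) { return 0; }
    static inline void xhci_unregister_pci(void) {}
    #endif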