Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--	drivers/usb/host/xhci.c	1185
1 file changed, 1082 insertions(+), 103 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3a0f695138f4..aa94c0195791 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -175,28 +175,19 @@ int xhci_reset(struct xhci_hcd *xhci)
 	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
 }
 
-/*
- * Free IRQs
- * free all requested IRQs
- */
-static void xhci_free_irq(struct xhci_hcd *xhci)
+#ifdef CONFIG_PCI
+static int xhci_free_msi(struct xhci_hcd *xhci)
 {
 	int i;
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
-	/* return if using legacy interrupt */
-	if (xhci_to_hcd(xhci)->irq >= 0)
-		return;
-
-	if (xhci->msix_entries) {
-		for (i = 0; i < xhci->msix_count; i++)
-			if (xhci->msix_entries[i].vector)
-				free_irq(xhci->msix_entries[i].vector,
-						xhci_to_hcd(xhci));
-	} else if (pdev->irq >= 0)
-		free_irq(pdev->irq, xhci_to_hcd(xhci));
+	if (!xhci->msix_entries)
+		return -EINVAL;
 
-	return;
+	for (i = 0; i < xhci->msix_count; i++)
+		if (xhci->msix_entries[i].vector)
+			free_irq(xhci->msix_entries[i].vector,
+					xhci_to_hcd(xhci));
+	return 0;
 }
 
 /*
@@ -224,6 +215,28 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 }
 
 /*
+ * Free IRQs
+ * free all requested IRQs
+ */
+static void xhci_free_irq(struct xhci_hcd *xhci)
+{
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	int ret;
+
+	/* return if using legacy interrupt */
+	if (xhci_to_hcd(xhci)->irq >= 0)
+		return;
+
+	ret = xhci_free_msi(xhci);
+	if (!ret)
+		return;
+	if (pdev->irq >= 0)
+		free_irq(pdev->irq, xhci_to_hcd(xhci));
+
+	return;
+}
+
+/*
  * Set up MSI-X
  */
 static int xhci_setup_msix(struct xhci_hcd *xhci)
@@ -302,6 +315,72 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 	return;
 }
 
+static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+	int i;
+
+	if (xhci->msix_entries) {
+		for (i = 0; i < xhci->msix_count; i++)
+			synchronize_irq(xhci->msix_entries[i].vector);
+	}
+}
+
+static int xhci_try_enable_msi(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	int ret;
+
+	/*
+	 * Some Fresco Logic host controllers advertise MSI, but fail to
+	 * generate interrupts.  Don't even try to enable MSI.
+	 */
+	if (xhci->quirks & XHCI_BROKEN_MSI)
+		return 0;
+
+	/* unregister the legacy interrupt */
+	if (hcd->irq)
+		free_irq(hcd->irq, hcd);
+	hcd->irq = -1;
+
+	ret = xhci_setup_msix(xhci);
+	if (ret)
+		/* fall back to msi*/
+		ret = xhci_setup_msi(xhci);
+
+	if (!ret)
+		/* hcd->irq is -1, we have MSI */
+		return 0;
+
+	/* fall back to legacy interrupt*/
+	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+			hcd->irq_descr, hcd);
+	if (ret) {
+		xhci_err(xhci, "request interrupt %d failed\n",
+				pdev->irq);
+		return ret;
+	}
+	hcd->irq = pdev->irq;
+	return 0;
+}
+
+#else
+
+static int xhci_try_enable_msi(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+}
+
+static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+{
+}
+
+#endif
+
 /*
  * Initialize memory for HCD and xHC (one-time init).
  *
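
Note: everything PCI-specific above now sits behind CONFIG_PCI, with empty stubs in the #else branch, so callers such as xhci_run() and xhci_suspend() need no #ifdefs of their own. A minimal sketch of the same pattern, with hypothetical names that are not part of this patch:

    #ifdef CONFIG_PCI
    static int foo_try_enable_msi(struct usb_hcd *hcd)
    {
    	/* real MSI-X -> MSI -> legacy INTx fallback lives here */
    	return 0;
    }
    #else
    static int foo_try_enable_msi(struct usb_hcd *hcd)
    {
    	return 0;	/* non-PCI build: keep whatever IRQ the glue set up */
    }
    #endif
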
@@ -316,7 +395,7 @@ int xhci_init(struct usb_hcd *hcd)
 
 	xhci_dbg(xhci, "xhci_init\n");
 	spin_lock_init(&xhci->lock);
-	if (link_quirk) {
+	if (xhci->hci_version == 0x95 && link_quirk) {
 		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
 		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
 	} else {
@@ -413,9 +492,8 @@ int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
 	u64 temp_64;
-	u32 ret;
+	int ret;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
 	/* Start the xHCI host controller running only after the USB 2.0 roothub
 	 * is setup.
@@ -426,34 +504,10 @@ int xhci_run(struct usb_hcd *hcd)
 		return xhci_run_finished(xhci);
 
 	xhci_dbg(xhci, "xhci_run\n");
-	/* unregister the legacy interrupt */
-	if (hcd->irq)
-		free_irq(hcd->irq, hcd);
-	hcd->irq = -1;
 
-	/* Some Fresco Logic host controllers advertise MSI, but fail to
-	 * generate interrupts.  Don't even try to enable MSI.
-	 */
-	if (xhci->quirks & XHCI_BROKEN_MSI)
-		goto legacy_irq;
-
-	ret = xhci_setup_msix(xhci);
+	ret = xhci_try_enable_msi(hcd);
 	if (ret)
-		/* fall back to msi*/
-		ret = xhci_setup_msi(xhci);
-
-	if (ret) {
-legacy_irq:
-		/* fall back to legacy interrupt*/
-		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
-				hcd->irq_descr, hcd);
-		if (ret) {
-			xhci_err(xhci, "request interrupt %d failed\n",
-					pdev->irq);
-			return ret;
-		}
-		hcd->irq = pdev->irq;
-	}
+		return ret;
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	init_timer(&xhci->event_ring_timer);
@@ -694,7 +748,6 @@ int xhci_suspend(struct xhci_hcd *xhci)
 	int rc = 0;
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
 	u32 command;
-	int i;
 
 	spin_lock_irq(&xhci->lock);
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -730,10 +783,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
 
 	/* step 5: remove core well power */
 	/* synchronize irq when using MSI-X */
-	if (xhci->msix_entries) {
-		for (i = 0; i < xhci->msix_count; i++)
-			synchronize_irq(xhci->msix_entries[i].vector);
-	}
+	xhci_msix_sync_irqs(xhci);
 
 	return rc;
 }
@@ -749,7 +799,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	u32 command, temp = 0;
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
 	struct usb_hcd *secondary_hcd;
-	int retval;
+	int retval = 0;
 
 	/* Wait a bit if either of the roothubs need to settle from the
 	 * transition into bus suspend.
@@ -759,6 +809,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 			xhci->bus_state[1].next_statechange))
 		msleep(100);
 
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
 	spin_lock_irq(&xhci->lock);
 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
 		hibernated = true;
@@ -828,20 +881,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 			return retval;
 		xhci_dbg(xhci, "Start the primary HCD\n");
 		retval = xhci_run(hcd->primary_hcd);
-		if (retval)
-			goto failed_restart;
-
-		xhci_dbg(xhci, "Start the secondary HCD\n");
-		retval = xhci_run(secondary_hcd);
 		if (!retval) {
-			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-			set_bit(HCD_FLAG_HW_ACCESSIBLE,
-					&xhci->shared_hcd->flags);
+			xhci_dbg(xhci, "Start the secondary HCD\n");
+			retval = xhci_run(secondary_hcd);
 		}
-failed_restart:
 		hcd->state = HC_STATE_SUSPENDED;
 		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
-		return retval;
+		goto done;
 	}
 
 	/* step 4: set Run/Stop bit */
@@ -860,11 +906,14 @@ failed_restart:
 	 * Running endpoints by ringing their doorbells
 	 */
 
-	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
-
 	spin_unlock_irq(&xhci->lock);
-	return 0;
+
+ done:
+	if (retval == 0) {
+		usb_hcd_resume_root_hub(hcd);
+		usb_hcd_resume_root_hub(xhci->shared_hcd);
+	}
+	return retval;
 }
 #endif	/* CONFIG_PM */
 
@@ -945,8 +994,7 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
 		return -ENODEV;
 
 	if (check_virt_dev) {
-		if (!udev->slot_id || !xhci->devs
-			|| !xhci->devs[udev->slot_id]) {
+		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
 			printk(KERN_DEBUG "xHCI %s called with unaddressed "
 						"device\n", func);
 			return -EINVAL;
@@ -987,7 +1035,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 	out_ctx = xhci->devs[slot_id]->out_ctx;
 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
 	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
-	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
+	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
 	if (hw_max_packet_size != max_packet_size) {
 		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
 		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
@@ -1035,6 +1083,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct xhci_td *buffer;
 	unsigned long flags;
 	int ret = 0;
 	unsigned int slot_id, ep_index;
@@ -1065,13 +1114,15 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 	if (!urb_priv)
 		return -ENOMEM;
 
+	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
+	if (!buffer) {
+		kfree(urb_priv);
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < size; i++) {
-		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
-		if (!urb_priv->td[i]) {
-			urb_priv->length = i;
-			xhci_urb_free_priv(xhci, urb_priv);
-			return -ENOMEM;
-		}
+		urb_priv->td[i] = buffer;
+		buffer++;
 	}
 
 	urb_priv->length = size;
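
Note: the TDs for one URB are now carved out of a single kzalloc() block, so there is exactly one allocation to check and no partial-failure unwinding loop; the matching free only has to release the first TD. A minimal sketch of the pattern, assuming teardown code that frees via the first element:

    buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
    if (!buffer)
    	return -ENOMEM;		/* one failure point instead of 'size' */
    for (i = 0; i < size; i++)
    	urb_priv->td[i] = &buffer[i];	/* every td points into one block */
    /* teardown: kfree(urb_priv->td[0]) releases all of them at once */
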
@@ -1747,6 +1798,564 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
 				xhci->num_active_eps);
 }
 
+unsigned int xhci_get_block_size(struct usb_device *udev)
+{
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+		return FS_BLOCK;
+	case USB_SPEED_HIGH:
+		return HS_BLOCK;
+	case USB_SPEED_SUPER:
+		return SS_BLOCK;
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+	default:
+		/* Should never happen */
+		return 1;
+	}
+}
+
+unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
+{
+	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
+		return LS_OVERHEAD;
+	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
+		return FS_OVERHEAD;
+	return HS_OVERHEAD;
+}
+
+/* If we are changing a LS/FS device under a HS hub,
+ * make sure (if we are activating a new TT) that the HS bus has enough
+ * bandwidth for this new TT.
+ */
+static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	struct xhci_interval_bw_table *bw_table;
+	struct xhci_tt_bw_info *tt_info;
+
+	/* Find the bandwidth table for the root port this TT is attached to. */
+	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+	tt_info = virt_dev->tt_info;
+	/* If this TT already had active endpoints, the bandwidth for this TT
+	 * has already been added.  Removing all periodic endpoints (and thus
+	 * making the TT inactive) will only decrease the bandwidth used.
+	 */
+	if (old_active_eps)
+		return 0;
+	if (old_active_eps == 0 && tt_info->active_eps != 0) {
+		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
+			return -ENOMEM;
+		return 0;
+	}
+	/* Not sure why we would have no new active endpoints...
+	 *
+	 * Maybe because of an Evaluate Context change for a hub update or a
+	 * control endpoint 0 max packet size change?
+	 * FIXME: skip the bandwidth calculation in that case.
+	 */
+	return 0;
+}
+
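Note: activating a TT costs a fixed amount of high-speed bus time on top of the per-endpoint costs accounted elsewhere, so the check above reduces to a single comparison. Illustrative numbers only; the real constants are defined in xhci.h:

    /* e.g. bw_used = 4900 blocks, TT_HS_OVERHEAD = 200, HS_BW_LIMIT = 5000:
     * 4900 + 200 > 5000, so the new TT does not fit and we return -ENOMEM.
     */
    if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
    	return -ENOMEM;
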
+static int xhci_check_ss_bw(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev)
+{
+	unsigned int bw_reserved;
+
+	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
+	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
+		return -ENOMEM;
+
+	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
+	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * This algorithm is a very conservative estimate of the worst-case scheduling
+ * scenario for any one interval.  The hardware dynamically schedules the
+ * packets, so we can't tell which microframe could be the limiting factor in
+ * the bandwidth scheduling.  This only takes into account periodic endpoints.
+ *
+ * Obviously, we can't solve an NP complete problem to find the minimum worst
+ * case scenario.  Instead, we come up with an estimate that is no less than
+ * the worst case bandwidth used for any one microframe, but may be an
+ * over-estimate.
+ *
+ * We walk the requirements for each endpoint by interval, starting with the
+ * smallest interval, and place packets in the schedule where there is only one
+ * possible way to schedule packets for that interval.  In order to simplify
+ * this algorithm, we record the largest max packet size for each interval, and
+ * assume all packets will be that size.
+ *
+ * For interval 0, we obviously must schedule all packets for each interval.
+ * The bandwidth for interval 0 is just the amount of data to be transmitted
+ * (the sum of all max ESIT payload sizes, plus any overhead per packet times
+ * the number of packets).
+ *
+ * For interval 1, we have two possible microframes to schedule those packets
+ * in.  For this algorithm, if we can schedule the same number of packets for
+ * each possible scheduling opportunity (each microframe), we will do so.  The
+ * remaining number of packets will be saved to be transmitted in the gaps in
+ * the next interval's scheduling sequence.
+ *
+ * As we move those remaining packets to be scheduled with interval 2 packets,
+ * we have to double the number of remaining packets to transmit.  This is
+ * because the intervals are actually powers of 2, and we would be transmitting
+ * the previous interval's packets twice in this interval.  We also have to be
+ * sure that when we look at the largest max packet size for this interval, we
+ * also look at the largest max packet size for the remaining packets and take
+ * the greater of the two.
+ *
+ * The algorithm continues to evenly distribute packets in each scheduling
+ * opportunity, and push the remaining packets out, until we get to the last
+ * interval.  Then those packets and their associated overhead are just added
+ * to the bandwidth used.
+ */
+static int xhci_check_bw_table(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	unsigned int bw_reserved;
+	unsigned int max_bandwidth;
+	unsigned int bw_used;
+	unsigned int block_size;
+	struct xhci_interval_bw_table *bw_table;
+	unsigned int packet_size = 0;
+	unsigned int overhead = 0;
+	unsigned int packets_transmitted = 0;
+	unsigned int packets_remaining = 0;
+	unsigned int i;
+
+	if (virt_dev->udev->speed == USB_SPEED_SUPER)
+		return xhci_check_ss_bw(xhci, virt_dev);
+
+	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
+		max_bandwidth = HS_BW_LIMIT;
+		/* Convert percent of bus BW reserved to blocks reserved */
+		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
+	} else {
+		max_bandwidth = FS_BW_LIMIT;
+		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
+	}
+
+	bw_table = virt_dev->bw_table;
+	/* We need to translate the max packet size and max ESIT payloads into
+	 * the units the hardware uses.
+	 */
+	block_size = xhci_get_block_size(virt_dev->udev);
+
+	/* If we are manipulating a LS/FS device under a HS hub, double check
+	 * that the HS bus has enough bandwidth if we are activating a new TT.
+	 */
+	if (virt_dev->tt_info) {
+		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+				virt_dev->real_port);
+		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
+			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
+					"newly activated TT.\n");
+			return -ENOMEM;
+		}
+		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
+				virt_dev->tt_info->slot_id,
+				virt_dev->tt_info->ttport);
+	} else {
+		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+				virt_dev->real_port);
+	}
+
+	/* Add in how much bandwidth will be used for interval zero, or the
+	 * rounded max ESIT payload + number of packets * largest overhead.
+	 */
+	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
+		bw_table->interval_bw[0].num_packets *
+		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
+
+	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
+		unsigned int bw_added;
+		unsigned int largest_mps;
+		unsigned int interval_overhead;
+
+		/*
+		 * How many packets could we transmit in this interval?
+		 * If packets didn't fit in the previous interval, we will need
+		 * to transmit that many packets twice within this interval.
+		 */
+		packets_remaining = 2 * packets_remaining +
+			bw_table->interval_bw[i].num_packets;
+
+		/* Find the largest max packet size of this or the previous
+		 * interval.
+		 */
+		if (list_empty(&bw_table->interval_bw[i].endpoints))
+			largest_mps = 0;
+		else {
+			struct xhci_virt_ep *virt_ep;
+			struct list_head *ep_entry;
+
+			ep_entry = bw_table->interval_bw[i].endpoints.next;
+			virt_ep = list_entry(ep_entry,
+					struct xhci_virt_ep, bw_endpoint_list);
+			/* Convert to blocks, rounding up */
+			largest_mps = DIV_ROUND_UP(
+					virt_ep->bw_info.max_packet_size,
+					block_size);
+		}
+		if (largest_mps > packet_size)
+			packet_size = largest_mps;
+
+		/* Use the larger overhead of this or the previous interval. */
+		interval_overhead = xhci_get_largest_overhead(
+				&bw_table->interval_bw[i]);
+		if (interval_overhead > overhead)
+			overhead = interval_overhead;
+
+		/* How many packets can we evenly distribute across
+		 * (1 << (i + 1)) possible scheduling opportunities?
+		 */
+		packets_transmitted = packets_remaining >> (i + 1);
+
+		/* Add in the bandwidth used for those scheduled packets */
+		bw_added = packets_transmitted * (overhead + packet_size);
+
+		/* How many packets do we have remaining to transmit? */
+		packets_remaining = packets_remaining % (1 << (i + 1));
+
+		/* What largest max packet size should those packets have? */
+		/* If we've transmitted all packets, don't carry over the
+		 * largest packet size.
+		 */
+		if (packets_remaining == 0) {
+			packet_size = 0;
+			overhead = 0;
+		} else if (packets_transmitted > 0) {
+			/* Otherwise if we do have remaining packets, and we've
+			 * scheduled some packets in this interval, take the
+			 * largest max packet size from endpoints with this
+			 * interval.
+			 */
+			packet_size = largest_mps;
+			overhead = interval_overhead;
+		}
+		/* Otherwise carry over packet_size and overhead from the last
+		 * time we had a remainder.
+		 */
+		bw_used += bw_added;
+		if (bw_used > max_bandwidth) {
+			xhci_warn(xhci, "Not enough bandwidth. "
+					"Proposed: %u, Max: %u\n",
+					bw_used, max_bandwidth);
+			return -ENOMEM;
+		}
+	}
+	/*
+	 * Ok, we know we have some packets left over after even-handedly
+	 * scheduling interval 15.  We don't know which microframes they will
+	 * fit into, so we over-schedule and say they will be scheduled every
+	 * microframe.
+	 */
+	if (packets_remaining > 0)
+		bw_used += overhead + packet_size;
+
+	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
+		unsigned int port_index = virt_dev->real_port - 1;
+
+		/* OK, we're manipulating a HS device attached to a
+		 * root port bandwidth domain.  Include the number of active TTs
+		 * in the bandwidth used.
+		 */
+		bw_used += TT_HS_OVERHEAD *
+			xhci->rh_bw[port_index].num_active_tts;
+	}
+
+	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
+			"Available: %u percent\n",
+			bw_used, max_bandwidth, bw_reserved,
+			(max_bandwidth - bw_used - bw_reserved) * 100 /
+			max_bandwidth);
+
+	bw_used += bw_reserved;
+	if (bw_used > max_bandwidth) {
+		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
+				bw_used, max_bandwidth);
+		return -ENOMEM;
+	}
+
+	bw_table->bw_used = bw_used;
+	return 0;
+}
+
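Note: the comment block above xhci_check_bw_table() describes the carry-forward estimate in prose; a toy trace of one loop step may help. Assume nothing pending from interval 0 and five packets queued at interval index 1 (numbers are illustrative, not from the patch):

    packets_remaining = 2 * 0 + 5;		/* i = 1: 5 packets to place */
    packets_transmitted = 5 >> (1 + 1);	/* 4 slots -> 1 packet per slot */
    bw_added = 1 * (overhead + packet_size);	/* cost of the worst slot */
    packets_remaining = 5 % 4;			/* 1 packet spills over */
    /* at i = 2 the leftover doubles: 2 * 1 + interval_bw[2].num_packets */
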
+static bool xhci_is_async_ep(unsigned int ep_type)
+{
+	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+					ep_type != ISOC_IN_EP &&
+					ep_type != INT_IN_EP);
+}
+
+static bool xhci_is_sync_in_ep(unsigned int ep_type)
+{
+	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
+}
+
+static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
+{
+	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
+
+	if (ep_bw->ep_interval == 0)
+		return SS_OVERHEAD_BURST +
+			(ep_bw->mult * ep_bw->num_packets *
+					(SS_OVERHEAD + mps));
+	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
+				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
+			1 << ep_bw->ep_interval);
+
+}
+
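Note: SuperSpeed bandwidth is a closed-form expression rather than a table walk: interval 0 pays the burst overhead once per service opportunity, while larger intervals amortize the whole cost over 2^ep_interval microframes. A worked instance with placeholder values:

    /* ep_interval = 3: the total cost is spread over 1 << 3 == 8 microframes */
    bw = DIV_ROUND_UP(mult * num_packets *
    		(SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 8);
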
+void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+		struct xhci_bw_info *ep_bw,
+		struct xhci_interval_bw_table *bw_table,
+		struct usb_device *udev,
+		struct xhci_virt_ep *virt_ep,
+		struct xhci_tt_bw_info *tt_info)
+{
+	struct xhci_interval_bw *interval_bw;
+	int normalized_interval;
+
+	if (xhci_is_async_ep(ep_bw->type))
+		return;
+
+	if (udev->speed == USB_SPEED_SUPER) {
+		if (xhci_is_sync_in_ep(ep_bw->type))
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
+				xhci_get_ss_bw_consumed(ep_bw);
+		else
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
+				xhci_get_ss_bw_consumed(ep_bw);
+		return;
+	}
+
+	/* SuperSpeed endpoints never get added to intervals in the table, so
+	 * this check is only valid for HS/FS/LS devices.
+	 */
+	if (list_empty(&virt_ep->bw_endpoint_list))
+		return;
+	/* For LS/FS devices, we need to translate the interval expressed in
+	 * microframes to frames.
+	 */
+	if (udev->speed == USB_SPEED_HIGH)
+		normalized_interval = ep_bw->ep_interval;
+	else
+		normalized_interval = ep_bw->ep_interval - 3;
+
+	if (normalized_interval == 0)
+		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
+	interval_bw = &bw_table->interval_bw[normalized_interval];
+	interval_bw->num_packets -= ep_bw->num_packets;
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_FULL:
+		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_HIGH:
+		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_SUPER:
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+		/* Should never happen because only LS/FS/HS endpoints will get
+		 * added to the endpoint list.
+		 */
+		return;
+	}
+	if (tt_info)
+		tt_info->active_eps -= 1;
+	list_del_init(&virt_ep->bw_endpoint_list);
+}
+
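Note: ep_interval is stored as the xHCI exponent, i.e. the endpoint is serviced every 2^ep_interval microframes. The FS/LS table is indexed in frames, and one frame is eight microframes, so subtracting 3 from the exponent performs the unit conversion:

    /* 2^n microframes == 2^(n-3) frames: ep_interval = 5 (32 microframes)
     * maps to interval_bw[2] (every 4 frames) for a FS/LS endpoint.
     */
    normalized_interval = ep_bw->ep_interval - 3;
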
+static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
+		struct xhci_bw_info *ep_bw,
+		struct xhci_interval_bw_table *bw_table,
+		struct usb_device *udev,
+		struct xhci_virt_ep *virt_ep,
+		struct xhci_tt_bw_info *tt_info)
+{
+	struct xhci_interval_bw *interval_bw;
+	struct xhci_virt_ep *smaller_ep;
+	int normalized_interval;
+
+	if (xhci_is_async_ep(ep_bw->type))
+		return;
+
+	if (udev->speed == USB_SPEED_SUPER) {
+		if (xhci_is_sync_in_ep(ep_bw->type))
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
+				xhci_get_ss_bw_consumed(ep_bw);
+		else
+			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
+				xhci_get_ss_bw_consumed(ep_bw);
+		return;
+	}
+
+	/* For LS/FS devices, we need to translate the interval expressed in
+	 * microframes to frames.
+	 */
+	if (udev->speed == USB_SPEED_HIGH)
+		normalized_interval = ep_bw->ep_interval;
+	else
+		normalized_interval = ep_bw->ep_interval - 3;
+
+	if (normalized_interval == 0)
+		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
+	interval_bw = &bw_table->interval_bw[normalized_interval];
+	interval_bw->num_packets += ep_bw->num_packets;
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_FULL:
+		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_HIGH:
+		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_SUPER:
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+		/* Should never happen because only LS/FS/HS endpoints will get
+		 * added to the endpoint list.
+		 */
+		return;
+	}
+
+	if (tt_info)
+		tt_info->active_eps += 1;
+	/* Insert the endpoint into the list, largest max packet size first. */
+	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
+			bw_endpoint_list) {
+		if (ep_bw->max_packet_size >=
+				smaller_ep->bw_info.max_packet_size) {
+			/* Add the new ep before the smaller endpoint */
+			list_add_tail(&virt_ep->bw_endpoint_list,
+					&smaller_ep->bw_endpoint_list);
+			return;
+		}
+	}
+	/* Add the new endpoint at the end of the list. */
+	list_add_tail(&virt_ep->bw_endpoint_list,
+			&interval_bw->endpoints);
+}
+
+void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	struct xhci_root_port_bw_info *rh_bw_info;
+	if (!virt_dev->tt_info)
+		return;
+
+	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+	if (old_active_eps == 0 &&
+			virt_dev->tt_info->active_eps != 0) {
+		rh_bw_info->num_active_tts += 1;
+		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
+	} else if (old_active_eps != 0 &&
+			virt_dev->tt_info->active_eps == 0) {
+		rh_bw_info->num_active_tts -= 1;
+		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
+	}
+}
+
+static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_bw_info ep_bw_info[31];
+	int i;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	int old_active_eps = 0;
+
+	if (virt_dev->tt_info)
+		old_active_eps = virt_dev->tt_info->active_eps;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+
+	for (i = 0; i < 31; i++) {
+		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+			continue;
+
+		/* Make a copy of the BW info in case we need to revert this */
+		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
+				sizeof(ep_bw_info[i]));
+		/* Drop the endpoint from the interval table if the endpoint is
+		 * being dropped or changed.
+		 */
+		if (EP_IS_DROPPED(ctrl_ctx, i))
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+	/* Overwrite the information stored in the endpoints' bw_info */
+	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
+	for (i = 0; i < 31; i++) {
+		/* Add any changed or added endpoints to the interval table */
+		if (EP_IS_ADDED(ctrl_ctx, i))
+			xhci_add_ep_to_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+
+	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
+		/* Ok, this fits in the bandwidth we have.
+		 * Update the number of active TTs.
+		 */
+		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+		return 0;
+	}
+
+	/* We don't have enough bandwidth for this, revert the stored info. */
+	for (i = 0; i < 31; i++) {
+		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+			continue;
+
+		/* Drop the new copies of any added or changed endpoints from
+		 * the interval table.
+		 */
+		if (EP_IS_ADDED(ctrl_ctx, i)) {
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+		}
+		/* Revert the endpoint back to its old information */
+		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
+				sizeof(ep_bw_info[i]));
+		/* Add any changed or dropped endpoints back into the table */
+		if (EP_IS_DROPPED(ctrl_ctx, i))
+			xhci_add_ep_to_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+	return -ENOMEM;
+}
+
+
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
  */
@@ -1765,17 +2374,30 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	virt_dev = xhci->devs[udev->slot_id];
-	if (command) {
+
+	if (command)
 		in_ctx = command->in_ctx;
-		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
-				xhci_reserve_host_resources(xhci, in_ctx)) {
-			spin_unlock_irqrestore(&xhci->lock, flags);
-			xhci_warn(xhci, "Not enough host resources, "
-					"active endpoint contexts = %u\n",
-					xhci->num_active_eps);
-			return -ENOMEM;
-		}
+	else
+		in_ctx = virt_dev->in_ctx;
 
+	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+			xhci_reserve_host_resources(xhci, in_ctx)) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "Not enough host resources, "
+				"active endpoint contexts = %u\n",
+				xhci->num_active_eps);
+		return -ENOMEM;
+	}
+	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+			xhci_free_host_resources(xhci, in_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "Not enough bandwidth\n");
+		return -ENOMEM;
+	}
+
+	if (command) {
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
 		command->command_trb = xhci->cmd_ring->enqueue;
@@ -1789,15 +2411,6 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
-		in_ctx = virt_dev->in_ctx;
-		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
-				xhci_reserve_host_resources(xhci, in_ctx)) {
-			spin_unlock_irqrestore(&xhci->lock, flags);
-			xhci_warn(xhci, "Not enough host resources, "
-					"active endpoint contexts = %u\n",
-					xhci->num_active_eps);
-			return -ENOMEM;
-		}
 		cmd_completion = &virt_dev->cmd_completion;
 		cmd_status = &virt_dev->cmd_status;
 	}
@@ -1888,6 +2501,12 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
 	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+	/* Don't issue the command if there's no endpoints to update. */
+	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
+			ctrl_ctx->drop_flags == 0)
+		return 0;
+
 	xhci_dbg(xhci, "New Input Control Context:\n");
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
@@ -2525,6 +3144,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 	int timeleft;
 	int last_freed_endpoint;
 	struct xhci_slot_ctx *slot_ctx;
+	int old_active_eps = 0;
 
 	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
 	if (ret <= 0)
@@ -2666,7 +3286,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
 			last_freed_endpoint = i;
 		}
-	}
+		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
+	}
+	/* If necessary, update the number of active TTs on this root port */
+	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+
 	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
 	ret = 0;
@@ -2704,6 +3335,11 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
 	}
 
+	if (udev->usb2_hw_lpm_enabled) {
+		xhci_set_usb2_hardware_lpm(hcd, udev, 0);
+		udev->usb2_hw_lpm_enabled = 0;
+	}
+
 	spin_lock_irqsave(&xhci->lock, flags);
 	/* Don't disable the slot if the host controller is dead. */
 	state = xhci_readl(xhci, &xhci->op_regs->status);
@@ -2867,6 +3503,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	/* Otherwise, update the control endpoint ring enqueue pointer. */
 	else
 		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+	ctrl_ctx->drop_flags = 0;
+
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
@@ -2889,7 +3529,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	 * command on a timeout.
 	 */
 	if (timeleft <= 0) {
-		xhci_warn(xhci, "%s while waiting for a slot\n",
+		xhci_warn(xhci, "%s while waiting for address device command\n",
 				timeleft == 0 ? "Timeout" : "Signal");
 		/* FIXME cancel the address device command */
 		return -ETIME;
@@ -2948,7 +3588,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
 		+ 1;
 	/* Zero the input context control for later use */
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
 	ctrl_ctx->add_flags = 0;
 	ctrl_ctx->drop_flags = 0;
 
@@ -2957,6 +3596,254 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	return 0;
 }
 
+#ifdef CONFIG_USB_SUSPEND
+
+/* BESL to HIRD Encoding array for USB2 LPM */
+static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
+	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
+
+/* Calculate HIRD/BESL for USB2 PORTPMSC*/
+static int xhci_calculate_hird_besl(int u2del, bool use_besl)
+{
+	int hird;
+
+	if (use_besl) {
+		for (hird = 0; hird < 16; hird++) {
+			if (xhci_besl_encoding[hird] >= u2del)
+				break;
+		}
+	} else {
+		if (u2del <= 50)
+			hird = 0;
+		else
+			hird = (u2del - 51) / 75 + 1;
+
+		if (hird > 15)
+			hird = 15;
+	}
+
+	return hird;
+}
+
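Note: u2del is the host's U2 device-exit latency (in microseconds) read from HCSPARAMS3; the helper picks the smallest encoding whose latency covers it. Two worked calls, with results that follow directly from the code above:

    hird = xhci_calculate_hird_besl(300, 1);	/* BESL: first entry >= 300
    						   is xhci_besl_encoding[3] */
    hird = xhci_calculate_hird_besl(300, 0);	/* HIRD: (300 - 51) / 75 + 1 == 4 */
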
+static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
+					struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct dev_info *dev_info;
+	__le32 __iomem **port_array;
+	__le32 __iomem *addr, *pm_addr;
+	u32 temp, dev_id;
+	unsigned int port_num;
+	unsigned long flags;
+	int u2del, hird;
+	int ret;
+
+	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
+			!udev->lpm_capable)
+		return -EINVAL;
+
+	/* we only support lpm for non-hub device connected to root hub yet */
+	if (!udev->parent || udev->parent->parent ||
+			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+		return -EINVAL;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* Look for devices in lpm_failed_devs list */
+	dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
+			le16_to_cpu(udev->descriptor.idProduct);
+	list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
+		if (dev_info->dev_id == dev_id) {
+			ret = -EINVAL;
+			goto finish;
+		}
+	}
+
+	port_array = xhci->usb2_ports;
+	port_num = udev->portnum - 1;
+
+	if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
+		xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
+		ret = -EINVAL;
+		goto finish;
+	}
+
+	/*
+	 * Test USB 2.0 software LPM.
+	 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
+	 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
+	 * in the June 2011 errata release.
+	 */
+	xhci_dbg(xhci, "test port %d software LPM\n", port_num);
+	/*
+	 * Set L1 Device Slot and HIRD/BESL.
+	 * Check device's USB 2.0 extension descriptor to determine whether
+	 * HIRD or BESL should be used. See USB2.0 LPM errata.
+	 */
+	pm_addr = port_array[port_num] + 1;
+	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
+	if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
+		hird = xhci_calculate_hird_besl(u2del, 1);
+	else
+		hird = xhci_calculate_hird_besl(u2del, 0);
+
+	temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
+	xhci_writel(xhci, temp, pm_addr);
+
+	/* Set port link state to U2(L1) */
+	addr = port_array[port_num];
+	xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
+
+	/* wait for ACK */
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	msleep(10);
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* Check L1 Status */
+	ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
+	if (ret != -ETIMEDOUT) {
+		/* enter L1 successfully */
+		temp = xhci_readl(xhci, addr);
+		xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
+				port_num, temp);
+		ret = 0;
+	} else {
+		temp = xhci_readl(xhci, pm_addr);
+		xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
+				port_num, temp & PORT_L1S_MASK);
+		ret = -EINVAL;
+	}
+
+	/* Resume the port */
+	xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	msleep(10);
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	/* Clear PLC */
+	xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
+
+	/* Check PORTSC to make sure the device is in the right state */
+	if (!ret) {
+		temp = xhci_readl(xhci, addr);
+		xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
+		if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
+				(temp & PORT_PLS_MASK) != XDEV_U0) {
+			xhci_dbg(xhci, "port L1 resume fail\n");
+			ret = -EINVAL;
+		}
+	}
+
+	if (ret) {
+		/* Insert dev to lpm_failed_devs list */
+		xhci_warn(xhci, "device LPM test failed, may disconnect and "
+				"re-enumerate\n");
+		dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
+		if (!dev_info) {
+			ret = -ENOMEM;
+			goto finish;
+		}
+		dev_info->dev_id = dev_id;
+		INIT_LIST_HEAD(&dev_info->list);
+		list_add(&dev_info->list, &xhci->lpm_failed_devs);
+	} else {
+		xhci_ring_device(xhci, udev->slot_id);
+	}
+
+finish:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return ret;
+}
+
+int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+			struct usb_device *udev, int enable)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	__le32 __iomem **port_array;
+	__le32 __iomem *pm_addr;
+	u32 temp;
+	unsigned int port_num;
+	unsigned long flags;
+	int u2del, hird;
+
+	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
+			!udev->lpm_capable)
+		return -EPERM;
+
+	if (!udev->parent || udev->parent->parent ||
+			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+		return -EPERM;
+
+	if (udev->usb2_hw_lpm_capable != 1)
+		return -EPERM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	port_array = xhci->usb2_ports;
+	port_num = udev->portnum - 1;
+	pm_addr = port_array[port_num] + 1;
+	temp = xhci_readl(xhci, pm_addr);
+
+	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
+			enable ? "enable" : "disable", port_num);
+
+	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
+	if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
+		hird = xhci_calculate_hird_besl(u2del, 1);
+	else
+		hird = xhci_calculate_hird_besl(u2del, 0);
+
+	if (enable) {
+		temp &= ~PORT_HIRD_MASK;
+		temp |= PORT_HIRD(hird) | PORT_RWE;
+		xhci_writel(xhci, temp, pm_addr);
+		temp = xhci_readl(xhci, pm_addr);
+		temp |= PORT_HLE;
+		xhci_writel(xhci, temp, pm_addr);
+	} else {
+		temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
+		xhci_writel(xhci, temp, pm_addr);
+	}
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return 0;
+}
+
+int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int ret;
+
+	ret = xhci_usb2_software_lpm_test(hcd, udev);
+	if (!ret) {
+		xhci_dbg(xhci, "software LPM test succeed\n");
+		if (xhci->hw_lpm_support == 1) {
+			udev->usb2_hw_lpm_capable = 1;
+			ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
+			if (!ret)
+				udev->usb2_hw_lpm_enabled = 1;
+		}
+	}
+
+	return 0;
+}
+
+#else
+
+int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+				struct usb_device *udev, int enable)
+{
+	return 0;
+}
+
+int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	return 0;
+}
+
+#endif /* CONFIG_USB_SUSPEND */
+
 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
  * internal data structures for the device.
  */
@@ -2988,6 +3875,14 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
 	}
 
 	spin_lock_irqsave(&xhci->lock, flags);
+	if (hdev->speed == USB_SPEED_HIGH &&
+			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
+		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
+		xhci_free_command(xhci, config_cmd);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return -ENOMEM;
+	}
+
 	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
@@ -3051,22 +3946,108 @@ int xhci_get_frame(struct usb_hcd *hcd)
 	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
 }
 
+int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+{
+	struct xhci_hcd *xhci;
+	struct device *dev = hcd->self.controller;
+	int retval;
+	u32 temp;
+
+	hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
+
+	if (usb_hcd_is_primary_hcd(hcd)) {
+		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
+		if (!xhci)
+			return -ENOMEM;
+		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
+		xhci->main_hcd = hcd;
+		/* Mark the first roothub as being USB 2.0.
+		 * The xHCI driver will register the USB 3.0 roothub.
+		 */
+		hcd->speed = HCD_USB2;
+		hcd->self.root_hub->speed = USB_SPEED_HIGH;
+		/*
+		 * USB 2.0 roothub under xHCI has an integrated TT,
+		 * (rate matching hub) as opposed to having an OHCI/UHCI
+		 * companion controller.
+		 */
+		hcd->has_tt = 1;
+	} else {
+		/* xHCI private pointer was set in xhci_pci_probe for the second
+		 * registered roothub.
+		 */
+		xhci = hcd_to_xhci(hcd);
+		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+		if (HCC_64BIT_ADDR(temp)) {
+			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
+		} else {
+			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+		}
+		return 0;
+	}
+
+	xhci->cap_regs = hcd->regs;
+	xhci->op_regs = hcd->regs +
+		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+	xhci->run_regs = hcd->regs +
+		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+	/* Cache read-only capability registers */
+	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+	xhci->hci_version = HC_VERSION(xhci->hcc_params);
+	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+	xhci_print_registers(xhci);
+
+	get_quirks(dev, xhci);
+
+	/* Make sure the HC is halted. */
+	retval = xhci_halt(xhci);
+	if (retval)
+		goto error;
+
+	xhci_dbg(xhci, "Resetting HCD\n");
+	/* Reset the internal HC memory state and registers. */
+	retval = xhci_reset(xhci);
+	if (retval)
+		goto error;
+	xhci_dbg(xhci, "Reset complete\n");
+
+	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+	if (HCC_64BIT_ADDR(temp)) {
+		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
+		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
+	} else {
+		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+	}
+
+	xhci_dbg(xhci, "Calling HCD init\n");
+	/* Initialize HCD and host controller data structures. */
+	retval = xhci_init(hcd);
+	if (retval)
+		goto error;
+	xhci_dbg(xhci, "Called HCD init\n");
+	return 0;
+error:
+	kfree(xhci);
+	return retval;
+}
+
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_LICENSE("GPL");
 
 static int __init xhci_hcd_init(void)
 {
-#ifdef CONFIG_PCI
-	int retval = 0;
+	int retval;
 
 	retval = xhci_register_pci();
-
 	if (retval < 0) {
 		printk(KERN_DEBUG "Problem registering PCI driver.");
 		return retval;
 	}
-#endif
 	/*
 	 * Check the compiler generated sizes of structures that must be laid
 	 * out in specific ways for hardware access.
@@ -3091,8 +4072,6 @@ module_init(xhci_hcd_init);
 
 static void __exit xhci_hcd_cleanup(void)
 {
-#ifdef CONFIG_PCI
 	xhci_unregister_pci();
-#endif
 }
 module_exit(xhci_hcd_cleanup);