Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
 drivers/usb/host/xhci-mem.c | 371 ++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 306 insertions(+), 65 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d446886b22b0..42a22b8e6922 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -61,8 +61,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
 
 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 {
-	if (!seg)
-		return;
 	if (seg->trbs) {
 		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
 				seg->trbs, (unsigned long long)seg->dma);
@@ -81,7 +79,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-		struct xhci_segment *next, bool link_trbs)
+		struct xhci_segment *next, bool link_trbs, bool isoc)
 {
 	u32 val;
 
@@ -97,7 +95,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
-		if (xhci_link_trb_quirk(xhci))
+		/* Set chain bit for isoc rings on AMD 0.96 host */
+		if (xhci_link_trb_quirk(xhci) ||
+				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
 			val |= TRB_CHAIN;
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
@@ -112,18 +112,20 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	struct xhci_segment *seg;
 	struct xhci_segment *first_seg;
 
-	if (!ring || !ring->first_seg)
+	if (!ring)
 		return;
-	first_seg = ring->first_seg;
-	seg = first_seg->next;
-	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
-	while (seg != first_seg) {
-		struct xhci_segment *next = seg->next;
-		xhci_segment_free(xhci, seg);
-		seg = next;
+	if (ring->first_seg) {
+		first_seg = ring->first_seg;
+		seg = first_seg->next;
+		xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+		while (seg != first_seg) {
+			struct xhci_segment *next = seg->next;
+			xhci_segment_free(xhci, seg);
+			seg = next;
+		}
+		xhci_segment_free(xhci, first_seg);
+		ring->first_seg = NULL;
 	}
-	xhci_segment_free(xhci, first_seg);
-	ring->first_seg = NULL;
 	kfree(ring);
 }
 
@@ -152,7 +154,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, bool link_trbs, gfp_t flags)
+		unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
 {
 	struct xhci_ring *ring;
 	struct xhci_segment *prev;
@@ -178,12 +180,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		next = xhci_segment_alloc(xhci, flags);
 		if (!next)
 			goto fail;
-		xhci_link_segments(xhci, prev, next, link_trbs);
+		xhci_link_segments(xhci, prev, next, link_trbs, isoc);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
@@ -229,14 +231,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-		struct xhci_ring *ring)
+		struct xhci_ring *ring, bool isoc)
 {
 	struct xhci_segment *seg = ring->first_seg;
 	do {
 		memset(seg->trbs, 0,
 				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
 		/* All endpoint rings have link TRBs */
-		xhci_link_segments(xhci, seg, seg->next, 1);
+		xhci_link_segments(xhci, seg, seg->next, 1, isoc);
 		seg = seg->next;
 	} while (seg != ring->first_seg);
 	xhci_initialize_ring_info(ring);
@@ -315,7 +317,7 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
 	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
-		pci_free_consistent(pdev,
+		dma_free_coherent(&pdev->dev,
 				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 				stream_ctx, dma);
 	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
@@ -343,9 +345,9 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
 	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
-		return pci_alloc_consistent(pdev,
+		return dma_alloc_coherent(&pdev->dev,
 				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
-				dma);
+				dma, mem_flags);
 	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
 		return dma_pool_alloc(xhci->small_streams_pool,
 				mem_flags, dma);
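
Note: pci_alloc_consistent() and pci_free_consistent(), which this patch replaces throughout, are thin compatibility wrappers that hard-code GFP_ATOMIC. The wrapper in include/asm-generic/pci-dma-compat.h is essentially:

	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
					  size, dma_handle, GFP_ATOMIC);
	}

Calling dma_alloc_coherent() directly lets these paths pass the caller's mem_flags (or GFP_KERNEL) instead of always allocating atomically.
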
@@ -540,7 +542,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 */
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -687,11 +689,103 @@ static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
 	ep->xhci = xhci;
 }
 
-/* All the xhci_tds in the ring's TD list should be freed at this point */
+static void xhci_free_tt_info(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int slot_id)
+{
+	struct list_head *tt;
+	struct list_head *tt_list_head;
+	struct list_head *tt_next;
+	struct xhci_tt_bw_info *tt_info;
+
+	/* If the device never made it past the Set Address stage,
+	 * it may not have the real_port set correctly.
+	 */
+	if (virt_dev->real_port == 0 ||
+			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+		xhci_dbg(xhci, "Bad real port.\n");
+		return;
+	}
+
+	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+	if (list_empty(tt_list_head))
+		return;
+
+	list_for_each(tt, tt_list_head) {
+		tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
+		if (tt_info->slot_id == slot_id)
+			break;
+	}
+	/* Cautionary measure in case the hub was disconnected before we
+	 * stored the TT information.
+	 */
+	if (tt_info->slot_id != slot_id)
+		return;
+
+	tt_next = tt->next;
+	tt_info = list_entry(tt, struct xhci_tt_bw_info,
+			tt_list);
+	/* Multi-TT hubs will have more than one entry */
+	do {
+		list_del(tt);
+		kfree(tt_info);
+		tt = tt_next;
+		if (list_empty(tt_list_head))
+			break;
+		tt_next = tt->next;
+		tt_info = list_entry(tt, struct xhci_tt_bw_info,
+				tt_list);
+	} while (tt_info->slot_id == slot_id);
+}
+
+int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_device *hdev,
+		struct usb_tt *tt, gfp_t mem_flags)
+{
+	struct xhci_tt_bw_info *tt_info;
+	unsigned int num_ports;
+	int i, j;
+
+	if (!tt->multi)
+		num_ports = 1;
+	else
+		num_ports = hdev->maxchild;
+
+	for (i = 0; i < num_ports; i++, tt_info++) {
+		struct xhci_interval_bw_table *bw_table;
+
+		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
+		if (!tt_info)
+			goto free_tts;
+		INIT_LIST_HEAD(&tt_info->tt_list);
+		list_add(&tt_info->tt_list,
+				&xhci->rh_bw[virt_dev->real_port - 1].tts);
+		tt_info->slot_id = virt_dev->udev->slot_id;
+		if (tt->multi)
+			tt_info->ttport = i+1;
+		bw_table = &tt_info->bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+	}
+	return 0;
+
+free_tts:
+	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
+	return -ENOMEM;
+}
+
+
+/* All the xhci_tds in the ring's TD list should be freed at this point.
+ * Should be called with xhci->lock held if there is any chance the TT lists
+ * will be manipulated by the configure endpoint, allocate device, or update
+ * hub functions while this function is removing the TT entries from the list.
+ */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
 	struct xhci_virt_device *dev;
 	int i;
+	int old_active_eps = 0;
 
 	/* Slot ID 0 is reserved */
 	if (slot_id == 0 || !xhci->devs[slot_id])
@@ -702,13 +796,29 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 	if (!dev)
 		return;
 
+	if (dev->tt_info)
+		old_active_eps = dev->tt_info->active_eps;
+
 	for (i = 0; i < 31; ++i) {
 		if (dev->eps[i].ring)
 			xhci_ring_free(xhci, dev->eps[i].ring);
 		if (dev->eps[i].stream_info)
 			xhci_free_stream_info(xhci,
 					dev->eps[i].stream_info);
+		/* Endpoints on the TT/root port lists should have been removed
+		 * when usb_disable_device() was called for the device.
+		 * We can't drop them anyway, because the udev might have gone
+		 * away by this point, and we can't tell what speed it was.
+		 */
+		if (!list_empty(&dev->eps[i].bw_endpoint_list))
+			xhci_warn(xhci, "Slot %u endpoint %u "
+					"not removed from BW list!\n",
+					slot_id, i);
 	}
+	/* If this is a hub, free the TT(s) from the TT list */
+	xhci_free_tt_info(xhci, dev, slot_id);
+	/* If necessary, update the number of active TTs on this root port */
+	xhci_update_tt_active_eps(xhci, dev, old_active_eps);
 
 	if (dev->ring_cache) {
 		for (i = 0; i < dev->num_rings_cached; i++)
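
Note: the open-coded tt/tt_next bookkeeping in xhci_free_tt_info() above exists because entries are freed while the list is being walked. A rough equivalent using the kernel's lookahead iterator would be (a sketch, not what the patch does, since the original also stops early once slot_id stops matching):

	struct xhci_tt_bw_info *tt_info, *next;

	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		if (tt_info->slot_id == slot_id) {
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		}
	}
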
@@ -762,10 +872,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	for (i = 0; i < 31; i++) {
 		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
 		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -921,9 +1032,40 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
 		/* Found device below root hub */;
-	dev->port = top_dev->portnum;
+	dev->fake_port = top_dev->portnum;
+	dev->real_port = port_num;
 	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
-	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->port);
+	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+
+	/* Find the right bandwidth table that this device will be a part of.
+	 * If this is a full speed device attached directly to a root port (or a
+	 * descendant of one), it counts as a primary bandwidth domain, not a
+	 * secondary bandwidth domain under a TT.  An xhci_tt_bw_info structure
+	 * will never be created for the HS root hub.
+	 */
+	if (!udev->tt || !udev->tt->hub->parent) {
+		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+	} else {
+		struct xhci_root_port_bw_info *rh_bw;
+		struct xhci_tt_bw_info *tt_bw;
+
+		rh_bw = &xhci->rh_bw[port_num - 1];
+		/* Find the right TT. */
+		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
+			if (tt_bw->slot_id != udev->tt->hub->slot_id)
+				continue;
+
+			if (!dev->udev->tt->multi ||
+					(udev->tt->multi &&
+					tt_bw->ttport == dev->udev->ttport)) {
+				dev->bw_table = &tt_bw->bw_table;
+				dev->tt_info = tt_bw;
+				break;
+			}
+		}
+		if (!dev->tt_info)
+			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
+	}
 
 	/* Is this a LS/FS device under an external HS hub? */
 	if (udev->tt && udev->tt->hub->parent) {
@@ -1141,8 +1283,8 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 	if (udev->speed == USB_SPEED_SUPER)
 		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
 
-	max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
-	max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11;
+	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
 	/* A 0 in max burst means 1 transfer per ESIT */
 	return max_packet * (max_burst + 1);
 }
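
Note: the usb_endpoint_maxp() conversions here and in the following hunks are behavior-preserving; at this point in history the helper in include/linux/usb/ch9.h is just:

	static inline int
	usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
	{
		return __le16_to_cpu(epd->wMaxPacketSize);
	}

so bits 10:0 still carry the max packet size (what GET_MAX_PACKET masks off) and bits 12:11 the additional-transactions count that the "& 0x1800) >> 11" expression extracts for high-speed periodic endpoints.
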
@@ -1175,10 +1317,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 */
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 8, true, mem_flags);
+			xhci_ring_alloc(xhci, 8, true, true, mem_flags);
 	else
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring) {
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
@@ -1187,7 +1329,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 			virt_dev->ring_cache[virt_dev->num_rings_cached];
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
 		virt_dev->num_rings_cached--;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -1211,7 +1354,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	/* Set the max packet size and max burst */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		max_packet = le16_to_cpu(ep->desc.wMaxPacketSize);
+		max_packet = usb_endpoint_maxp(&ep->desc);
 		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		/* dig out max burst from ep companion desc */
 		max_packet = ep->ss_ep_comp.bMaxBurst;
@@ -1223,14 +1366,14 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		 */
 		if (usb_endpoint_xfer_isoc(&ep->desc) ||
 				usb_endpoint_xfer_int(&ep->desc)) {
-			max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize)
+			max_burst = (usb_endpoint_maxp(&ep->desc)
 					& 0x1800) >> 11;
 			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
 		}
 		/* Fall through */
 	case USB_SPEED_FULL:
 	case USB_SPEED_LOW:
-		max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+		max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
 		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		break;
 	default:
@@ -1286,6 +1429,70 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
 	 */
 }
 
+void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
+{
+	bw_info->ep_interval = 0;
+	bw_info->mult = 0;
+	bw_info->num_packets = 0;
+	bw_info->max_packet_size = 0;
+	bw_info->type = 0;
+	bw_info->max_esit_payload = 0;
+}
+
+void xhci_update_bw_info(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_input_control_ctx *ctrl_ctx,
+		struct xhci_virt_device *virt_dev)
+{
+	struct xhci_bw_info *bw_info;
+	struct xhci_ep_ctx *ep_ctx;
+	unsigned int ep_type;
+	int i;
+
+	for (i = 1; i < 31; ++i) {
+		bw_info = &virt_dev->eps[i].bw_info;
+
+		/* We can't tell what endpoint type is being dropped, but
+		 * unconditionally clearing the bandwidth info for non-periodic
+		 * endpoints should be harmless because the info will never be
+		 * set in the first place.
+		 */
+		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
+			/* Dropped endpoint */
+			xhci_clear_endpoint_bw_info(bw_info);
+			continue;
+		}
+
+		if (EP_IS_ADDED(ctrl_ctx, i)) {
+			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
+			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
+
+			/* Ignore non-periodic endpoints */
+			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+					ep_type != ISOC_IN_EP &&
+					ep_type != INT_IN_EP)
+				continue;
+
+			/* Added or changed endpoint */
+			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
+					le32_to_cpu(ep_ctx->ep_info));
+			/* Number of packets and mult are zero-based in the
+			 * input context, but we want one-based for the
+			 * interval table.
+			 */
+			bw_info->mult = CTX_TO_EP_MULT(
+					le32_to_cpu(ep_ctx->ep_info)) + 1;
+			bw_info->num_packets = CTX_TO_MAX_BURST(
+					le32_to_cpu(ep_ctx->ep_info2)) + 1;
+			bw_info->max_packet_size = MAX_PACKET_DECODED(
+					le32_to_cpu(ep_ctx->ep_info2));
+			bw_info->type = ep_type;
+			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
+					le32_to_cpu(ep_ctx->tx_info));
+		}
+	}
+}
+
 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
  * Useful when you want to change one particular aspect of the endpoint and then
  * issue a configure endpoint command.
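
Note: the CTX_TO_* helpers used by xhci_update_bw_info() above decode endpoint context fields laid out in section 6.2.3 of the xHCI specification. The definitions in xhci.h are roughly:

	/* ep_info (dword 0): Mult in bits 9:8, Interval in bits 23:16 */
	#define CTX_TO_EP_MULT(p)		(((p) >> 8) & 0x3)
	#define CTX_TO_EP_INTERVAL(p)		(((p) >> 16) & 0xff)
	/* ep_info2 (dword 1): EP Type in bits 5:3, Max Burst in 15:8,
	 * Max Packet Size in 31:16 */
	#define CTX_TO_EP_TYPE(p)		(((p) >> 3) & 0x7)
	#define CTX_TO_MAX_BURST(p)		(((p) >> 8) & 0xff)
	#define MAX_PACKET_DECODED(p)		(((p) >> 16) & 0xffff)
	/* tx_info: Max ESIT Payload in bits 31:16 */
	#define CTX_TO_MAX_ESIT_PAYLOAD(p)	(((p) >> 16) & 0xffff)
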
@@ -1344,10 +1551,9 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->scratchpad)
 		goto fail_sp;
 
-	xhci->scratchpad->sp_array =
-		pci_alloc_consistent(to_pci_dev(dev),
+	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
 			num_sp * sizeof(u64),
-			&xhci->scratchpad->sp_dma);
+			&xhci->scratchpad->sp_dma, flags);
 	if (!xhci->scratchpad->sp_array)
 		goto fail_sp2;
 
@@ -1364,8 +1570,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
-		void *buf = pci_alloc_consistent(to_pci_dev(dev),
-				xhci->page_size, &dma);
+		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+				flags);
 		if (!buf)
 			goto fail_sp5;
 
@@ -1378,7 +1584,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 
 fail_sp5:
 	for (i = i - 1; i >= 0; i--) {
-		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
+		dma_free_coherent(dev, xhci->page_size,
 				xhci->scratchpad->sp_buffers[i],
 				xhci->scratchpad->sp_dma_buffers[i]);
 	}
@@ -1388,7 +1594,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	kfree(xhci->scratchpad->sp_buffers);
 
 fail_sp3:
-	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
+	dma_free_coherent(dev, num_sp * sizeof(u64),
 			xhci->scratchpad->sp_array,
 			xhci->scratchpad->sp_dma);
 
@@ -1412,13 +1618,13 @@ static void scratchpad_free(struct xhci_hcd *xhci)
 	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
 
 	for (i = 0; i < num_sp; i++) {
-		pci_free_consistent(pdev, xhci->page_size,
+		dma_free_coherent(&pdev->dev, xhci->page_size,
 				xhci->scratchpad->sp_buffers[i],
 				xhci->scratchpad->sp_dma_buffers[i]);
 	}
 	kfree(xhci->scratchpad->sp_dma_buffers);
 	kfree(xhci->scratchpad->sp_buffers);
-	pci_free_consistent(pdev, num_sp * sizeof(u64),
+	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
 			xhci->scratchpad->sp_array,
 			xhci->scratchpad->sp_dma);
 	kfree(xhci->scratchpad);
@@ -1463,18 +1669,10 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 
 void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
 {
-	int last;
-
-	if (!urb_priv)
-		return;
-
-	last = urb_priv->length - 1;
-	if (last >= 0) {
-		int i;
-		for (i = 0; i <= last; i++)
-			kfree(urb_priv->td[i]);
+	if (urb_priv) {
+		kfree(urb_priv->td[0]);
+		kfree(urb_priv);
 	}
-	kfree(urb_priv);
 }
 
 void xhci_free_command(struct xhci_hcd *xhci,
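
Note: freeing only td[0] works because, with the matching enqueue-side change in this series, all xhci_td structures for an URB come from a single allocation hung off td[0]. A sketch of that allocation pattern (paraphrased, not the exact driver code):

	/* one block holds every TD; td[i] are just pointers into it */
	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}
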
@@ -1489,6 +1687,8 @@ void xhci_free_command(struct xhci_hcd *xhci,
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct dev_info *dev_info, *next;
+	unsigned long flags;
 	int size;
 	int i;
 
@@ -1500,7 +1700,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	}
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
-		pci_free_consistent(pdev, size,
+		dma_free_coherent(&pdev->dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
 	xhci_dbg(xhci, "Freed ERST\n");
@@ -1540,17 +1740,25 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
 	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
-		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
+		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
 	xhci->dcbaa = NULL;
 
 	scratchpad_free(xhci);
 
+	spin_lock_irqsave(&xhci->lock, flags);
+	list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
+		list_del(&dev_info->list);
+		kfree(dev_info);
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
 	xhci->num_usb2_ports = 0;
 	xhci->num_usb3_ports = 0;
 	kfree(xhci->usb2_ports);
 	kfree(xhci->usb3_ports);
 	kfree(xhci->port_array);
+	kfree(xhci->rh_bw);
 
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
@@ -1762,6 +1970,23 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
 		/* WTF? "Valid values are ‘1’ to MaxPorts" */
 		return;
+
+	/* Check the host's USB2 LPM capability */
+	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
+			(temp & XHCI_L1C)) {
+		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+		xhci->sw_lpm_support = 1;
+	}
+
+	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
+		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+		xhci->sw_lpm_support = 1;
+		if (temp & XHCI_HLC) {
+			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+			xhci->hw_lpm_support = 1;
+		}
+	}
+
 	port_offset--;
 	for (i = port_offset; i < (port_offset + port_count); i++) {
 		/* Duplicate entry.  Ignore the port if the revisions differ. */
@@ -1806,7 +2031,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 	__le32 __iomem *addr;
 	u32 offset;
 	unsigned int num_ports;
-	int i, port_index;
+	int i, j, port_index;
 
 	addr = &xhci->cap_regs->hcc_params;
 	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
@@ -1821,6 +2046,18 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->port_array)
 		return -ENOMEM;
 
+	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
+	if (!xhci->rh_bw)
+		return -ENOMEM;
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_interval_bw_table *bw_table;
+
+		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
+		bw_table = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+	}
+
 	/*
 	 * For whatever reason, the first capability offset is from the
 	 * capability register base, not from the HCCPARAMS register.
@@ -1959,8 +2196,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * Section 5.4.8 - doorbell array must be
 	 * "physically contiguous and 64-byte (cache line) aligned".
 	 */
-	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
-			sizeof(*xhci->dcbaa), &dma);
+	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+			GFP_KERNEL);
 	if (!xhci->dcbaa)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
@@ -1994,14 +2231,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		dma_pool_create("xHCI 1KB stream ctx arrays",
 			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
 	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
-	 * will be allocated with pci_alloc_consistent()
+	 * will be allocated with dma_alloc_coherent()
 	 */
 
 	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
 		goto fail;
 
 	/* Set up the command ring to have one segment for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2032,14 +2269,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
 	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+			flags);
 	if (!xhci->event_ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
 		goto fail;
 
-	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
-			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+	xhci->erst.entries = dma_alloc_coherent(dev,
+			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+			GFP_KERNEL);
 	if (!xhci->erst.entries)
 		goto fail;
 	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
@@ -2102,6 +2341,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	if (xhci_setup_port_arrays(xhci, flags))
 		goto fail;
 
+	INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+
 	return 0;
 
 fail: