Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--  drivers/usb/host/xhci-mem.c  376
1 files changed, 306 insertions, 70 deletions
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d446886b22b0..0e4b25fa3bcd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -61,8 +61,6 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
 
 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 {
-	if (!seg)
-		return;
 	if (seg->trbs) {
 		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
 				seg->trbs, (unsigned long long)seg->dma);
@@ -81,7 +79,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
 static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-		struct xhci_segment *next, bool link_trbs)
+		struct xhci_segment *next, bool link_trbs, bool isoc)
 {
 	u32 val;
 
@@ -97,7 +95,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
-		if (xhci_link_trb_quirk(xhci))
+		/* Set chain bit for isoc rings on AMD 0.96 host */
+		if (xhci_link_trb_quirk(xhci) ||
+				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
 			val |= TRB_CHAIN;
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
@@ -112,18 +112,20 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	struct xhci_segment *seg;
 	struct xhci_segment *first_seg;
 
-	if (!ring || !ring->first_seg)
+	if (!ring)
 		return;
-	first_seg = ring->first_seg;
-	seg = first_seg->next;
-	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
-	while (seg != first_seg) {
-		struct xhci_segment *next = seg->next;
-		xhci_segment_free(xhci, seg);
-		seg = next;
+	if (ring->first_seg) {
+		first_seg = ring->first_seg;
+		seg = first_seg->next;
+		xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+		while (seg != first_seg) {
+			struct xhci_segment *next = seg->next;
+			xhci_segment_free(xhci, seg);
+			seg = next;
+		}
+		xhci_segment_free(xhci, first_seg);
+		ring->first_seg = NULL;
 	}
-	xhci_segment_free(xhci, first_seg);
-	ring->first_seg = NULL;
 	kfree(ring);
 }
 
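The restructured free path above also makes xhci_ring_free() safe to call on a ring whose segment list was never populated. A minimal sketch of the call pattern this permits (illustrative only, not part of the patch):

	struct xhci_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);

	/* ring->first_seg is still NULL, so the segment walk is skipped
	 * and only the ring structure itself is freed.
	 */
	xhci_ring_free(xhci, ring);
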
@@ -152,7 +154,7 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
  * See section 4.9.1 and figures 15 and 16.
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
-		unsigned int num_segs, bool link_trbs, gfp_t flags)
+		unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
 {
 	struct xhci_ring *ring;
 	struct xhci_segment *prev;
@@ -178,12 +180,12 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		next = xhci_segment_alloc(xhci, flags);
 		if (!next)
 			goto fail;
-		xhci_link_segments(xhci, prev, next, link_trbs);
+		xhci_link_segments(xhci, prev, next, link_trbs, isoc);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
@@ -229,14 +231,14 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
  * pointers to the beginning of the ring.
  */
 static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-		struct xhci_ring *ring)
+		struct xhci_ring *ring, bool isoc)
 {
 	struct xhci_segment *seg = ring->first_seg;
 	do {
 		memset(seg->trbs, 0,
 				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
 		/* All endpoint rings have link TRBs */
-		xhci_link_segments(xhci, seg, seg->next, 1);
+		xhci_link_segments(xhci, seg, seg->next, 1, isoc);
 		seg = seg->next;
 	} while (seg != ring->first_seg);
 	xhci_initialize_ring_info(ring);
@@ -315,7 +317,7 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
 	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
-		pci_free_consistent(pdev,
+		dma_free_coherent(&pdev->dev,
 				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
 				stream_ctx, dma);
 	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
@@ -343,9 +345,9 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 
 	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
-		return pci_alloc_consistent(pdev,
+		return dma_alloc_coherent(&pdev->dev,
 				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
-				dma);
+				dma, mem_flags);
 	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
 		return dma_pool_alloc(xhci->small_streams_pool,
 				mem_flags, dma);
@@ -540,7 +542,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 */
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -687,11 +689,103 @@ static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
 	ep->xhci = xhci;
 }
 
-/* All the xhci_tds in the ring's TD list should be freed at this point */
+static void xhci_free_tt_info(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int slot_id)
+{
+	struct list_head *tt;
+	struct list_head *tt_list_head;
+	struct list_head *tt_next;
+	struct xhci_tt_bw_info *tt_info;
+
+	/* If the device never made it past the Set Address stage,
+	 * it may not have the real_port set correctly.
+	 */
+	if (virt_dev->real_port == 0 ||
+			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+		xhci_dbg(xhci, "Bad real port.\n");
+		return;
+	}
+
+	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+	if (list_empty(tt_list_head))
+		return;
+
+	list_for_each(tt, tt_list_head) {
+		tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
+		if (tt_info->slot_id == slot_id)
+			break;
+	}
+	/* Cautionary measure in case the hub was disconnected before we
+	 * stored the TT information.
+	 */
+	if (tt_info->slot_id != slot_id)
+		return;
+
+	tt_next = tt->next;
+	tt_info = list_entry(tt, struct xhci_tt_bw_info,
+			tt_list);
+	/* Multi-TT hubs will have more than one entry */
+	do {
+		list_del(tt);
+		kfree(tt_info);
+		tt = tt_next;
+		if (list_empty(tt_list_head))
+			break;
+		tt_next = tt->next;
+		tt_info = list_entry(tt, struct xhci_tt_bw_info,
+				tt_list);
+	} while (tt_info->slot_id == slot_id);
+}
+
+int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct usb_device *hdev,
+		struct usb_tt *tt, gfp_t mem_flags)
+{
+	struct xhci_tt_bw_info *tt_info;
+	unsigned int num_ports;
+	int i, j;
+
+	if (!tt->multi)
+		num_ports = 1;
+	else
+		num_ports = hdev->maxchild;
+
+	for (i = 0; i < num_ports; i++, tt_info++) {
+		struct xhci_interval_bw_table *bw_table;
+
+		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
+		if (!tt_info)
+			goto free_tts;
+		INIT_LIST_HEAD(&tt_info->tt_list);
+		list_add(&tt_info->tt_list,
+				&xhci->rh_bw[virt_dev->real_port - 1].tts);
+		tt_info->slot_id = virt_dev->udev->slot_id;
+		if (tt->multi)
+			tt_info->ttport = i+1;
+		bw_table = &tt_info->bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+	}
+	return 0;
+
+free_tts:
+	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
+	return -ENOMEM;
+}
+
+
+/* All the xhci_tds in the ring's TD list should be freed at this point.
+ * Should be called with xhci->lock held if there is any chance the TT lists
+ * will be manipulated by the configure endpoint, allocate device, or update
+ * hub functions while this function is removing the TT entries from the list.
+ */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
 	struct xhci_virt_device *dev;
 	int i;
+	int old_active_eps = 0;
 
 	/* Slot ID 0 is reserved */
 	if (slot_id == 0 || !xhci->devs[slot_id])
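Two small observations on the new TT bookkeeping above. In xhci_alloc_tt_info(), the tt_info++ in the for-header is dead (tt_info is reassigned by kzalloc() on every pass), though harmless. And the removal loop in xhci_free_tt_info() could be written with the deletion-safe list idiom instead of the manual tt_next bookkeeping; a hedged alternative sketch, not the committed code:

	struct xhci_tt_bw_info *tt_info, *next;

	/* Multi-TT hubs contribute one entry per port, so scan the whole
	 * list rather than stopping at the first match; no separate
	 * "found" check is needed.
	 */
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		if (tt_info->slot_id == slot_id) {
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		}
	}
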
@@ -702,13 +796,29 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 	if (!dev)
 		return;
 
+	if (dev->tt_info)
+		old_active_eps = dev->tt_info->active_eps;
+
 	for (i = 0; i < 31; ++i) {
 		if (dev->eps[i].ring)
 			xhci_ring_free(xhci, dev->eps[i].ring);
 		if (dev->eps[i].stream_info)
 			xhci_free_stream_info(xhci,
 					dev->eps[i].stream_info);
+		/* Endpoints on the TT/root port lists should have been removed
+		 * when usb_disable_device() was called for the device.
+		 * We can't drop them anyway, because the udev might have gone
+		 * away by this point, and we can't tell what speed it was.
+		 */
+		if (!list_empty(&dev->eps[i].bw_endpoint_list))
+			xhci_warn(xhci, "Slot %u endpoint %u "
+					"not removed from BW list!\n",
+					slot_id, i);
 	}
+	/* If this is a hub, free the TT(s) from the TT list */
+	xhci_free_tt_info(xhci, dev, slot_id);
+	/* If necessary, update the number of active TTs on this root port */
+	xhci_update_tt_active_eps(xhci, dev, old_active_eps);
 
 	if (dev->ring_cache) {
 		for (i = 0; i < dev->num_rings_cached; i++)
@@ -762,10 +872,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	for (i = 0; i < 31; i++) {
 		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
 		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -871,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	struct xhci_virt_device *dev;
 	struct xhci_ep_ctx *ep0_ctx;
 	struct xhci_slot_ctx *slot_ctx;
-	struct xhci_input_control_ctx *ctrl_ctx;
 	u32 port_num;
 	struct usb_device *top_dev;
 
@@ -883,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 		return -EINVAL;
 	}
 	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
-	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
-	/* 2) New slot context and endpoint 0 context are valid*/
-	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
-
 	/* 3) Only the control endpoint is valid - one endpoint context */
 	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
 	switch (udev->speed) {
@@ -921,9 +1027,40 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
 		/* Found device below root hub */;
-	dev->port = top_dev->portnum;
+	dev->fake_port = top_dev->portnum;
+	dev->real_port = port_num;
 	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
-	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->port);
+	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+
+	/* Find the right bandwidth table that this device will be a part of.
+	 * If this is a full speed device attached directly to a root port (or a
+	 * decendent of one), it counts as a primary bandwidth domain, not a
+	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
+	 * will never be created for the HS root hub.
+	 */
+	if (!udev->tt || !udev->tt->hub->parent) {
+		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+	} else {
+		struct xhci_root_port_bw_info *rh_bw;
+		struct xhci_tt_bw_info *tt_bw;
+
+		rh_bw = &xhci->rh_bw[port_num - 1];
+		/* Find the right TT. */
+		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
+			if (tt_bw->slot_id != udev->tt->hub->slot_id)
+				continue;
+
+			if (!dev->udev->tt->multi ||
+					(udev->tt->multi &&
+					 tt_bw->ttport == dev->udev->ttport)) {
+				dev->bw_table = &tt_bw->bw_table;
+				dev->tt_info = tt_bw;
+				break;
+			}
+		}
+		if (!dev->tt_info)
+			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
+	}
 
 	/* Is this a LS/FS device under an external HS hub? */
 	if (udev->tt && udev->tt->hub->parent) {
@@ -1141,8 +1278,8 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 	if (udev->speed == USB_SPEED_SUPER)
 		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
 
-	max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
-	max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11;
+	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
 	/* A 0 in max burst means 1 transfer per ESIT */
 	return max_packet * (max_burst + 1);
 }
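Both forms decode the same USB 2.0 wMaxPacketSize layout; usb_endpoint_maxp() just replaces the open-coded le16_to_cpu(). For reference, a sketch of what the two masks extract (maxp is the value returned by usb_endpoint_maxp()):

	/* bits 10:0  - max packet size in bytes
	 * bits 12:11 - additional transactions per microframe (0-2) for
	 *              high-bandwidth isoc/interrupt endpoints
	 */
	max_packet = maxp & 0x7ff;		/* what GET_MAX_PACKET() yields */
	max_burst = (maxp & 0x1800) >> 11;
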
@@ -1175,10 +1312,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 */
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 8, true, mem_flags);
+			xhci_ring_alloc(xhci, 8, true, true, mem_flags);
 	else
 		virt_dev->eps[ep_index].new_ring =
-			xhci_ring_alloc(xhci, 1, true, mem_flags);
+			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
 	if (!virt_dev->eps[ep_index].new_ring) {
 		/* Attempt to use the ring cache */
 		if (virt_dev->num_rings_cached == 0)
@@ -1187,7 +1324,8 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 			virt_dev->ring_cache[virt_dev->num_rings_cached];
 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
 		virt_dev->num_rings_cached--;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -1211,7 +1349,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	/* Set the max packet size and max burst */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		max_packet = le16_to_cpu(ep->desc.wMaxPacketSize);
+		max_packet = usb_endpoint_maxp(&ep->desc);
 		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		/* dig out max burst from ep companion desc */
 		max_packet = ep->ss_ep_comp.bMaxBurst;
@@ -1223,14 +1361,14 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		 */
 		if (usb_endpoint_xfer_isoc(&ep->desc) ||
 				usb_endpoint_xfer_int(&ep->desc)) {
-			max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize)
+			max_burst = (usb_endpoint_maxp(&ep->desc)
 					& 0x1800) >> 11;
 			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
 		}
 		/* Fall through */
 	case USB_SPEED_FULL:
 	case USB_SPEED_LOW:
-		max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+		max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
 		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		break;
 	default:
@@ -1286,6 +1424,70 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
 	 */
 }
 
+void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
+{
+	bw_info->ep_interval = 0;
+	bw_info->mult = 0;
+	bw_info->num_packets = 0;
+	bw_info->max_packet_size = 0;
+	bw_info->type = 0;
+	bw_info->max_esit_payload = 0;
+}
+
+void xhci_update_bw_info(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *in_ctx,
+		struct xhci_input_control_ctx *ctrl_ctx,
+		struct xhci_virt_device *virt_dev)
+{
+	struct xhci_bw_info *bw_info;
+	struct xhci_ep_ctx *ep_ctx;
+	unsigned int ep_type;
+	int i;
+
+	for (i = 1; i < 31; ++i) {
+		bw_info = &virt_dev->eps[i].bw_info;
+
+		/* We can't tell what endpoint type is being dropped, but
+		 * unconditionally clearing the bandwidth info for non-periodic
+		 * endpoints should be harmless because the info will never be
+		 * set in the first place.
+		 */
+		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
+			/* Dropped endpoint */
+			xhci_clear_endpoint_bw_info(bw_info);
+			continue;
+		}
+
+		if (EP_IS_ADDED(ctrl_ctx, i)) {
+			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
+			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
+
+			/* Ignore non-periodic endpoints */
+			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+					ep_type != ISOC_IN_EP &&
+					ep_type != INT_IN_EP)
+				continue;
+
+			/* Added or changed endpoint */
+			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
+					le32_to_cpu(ep_ctx->ep_info));
+			/* Number of packets and mult are zero-based in the
+			 * input context, but we want one-based for the
+			 * interval table.
+			 */
+			bw_info->mult = CTX_TO_EP_MULT(
+					le32_to_cpu(ep_ctx->ep_info)) + 1;
+			bw_info->num_packets = CTX_TO_MAX_BURST(
+					le32_to_cpu(ep_ctx->ep_info2)) + 1;
+			bw_info->max_packet_size = MAX_PACKET_DECODED(
+					le32_to_cpu(ep_ctx->ep_info2));
+			bw_info->type = ep_type;
+			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
+					le32_to_cpu(ep_ctx->tx_info));
+		}
+	}
+}
+
 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
  * Useful when you want to change one particular aspect of the endpoint and then
  * issue a configure endpoint command.
@@ -1344,10 +1546,9 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->scratchpad)
 		goto fail_sp;
 
-	xhci->scratchpad->sp_array =
-		pci_alloc_consistent(to_pci_dev(dev),
+	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
 			num_sp * sizeof(u64),
-			&xhci->scratchpad->sp_dma);
+			&xhci->scratchpad->sp_dma, flags);
 	if (!xhci->scratchpad->sp_array)
 		goto fail_sp2;
 
@@ -1364,8 +1565,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
-		void *buf = pci_alloc_consistent(to_pci_dev(dev),
-				xhci->page_size, &dma);
+		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+				flags);
 		if (!buf)
 			goto fail_sp5;
 
@@ -1378,7 +1579,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 
 fail_sp5:
 	for (i = i - 1; i >= 0; i--) {
-		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
+		dma_free_coherent(dev, xhci->page_size,
 				xhci->scratchpad->sp_buffers[i],
 				xhci->scratchpad->sp_dma_buffers[i]);
 	}
@@ -1388,7 +1589,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	kfree(xhci->scratchpad->sp_buffers);
 
 fail_sp3:
-	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
+	dma_free_coherent(dev, num_sp * sizeof(u64),
 			xhci->scratchpad->sp_array,
 			xhci->scratchpad->sp_dma);
 
@@ -1412,13 +1613,13 @@ static void scratchpad_free(struct xhci_hcd *xhci)
 	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
 
 	for (i = 0; i < num_sp; i++) {
-		pci_free_consistent(pdev, xhci->page_size,
+		dma_free_coherent(&pdev->dev, xhci->page_size,
 				xhci->scratchpad->sp_buffers[i],
 				xhci->scratchpad->sp_dma_buffers[i]);
 	}
 	kfree(xhci->scratchpad->sp_dma_buffers);
 	kfree(xhci->scratchpad->sp_buffers);
-	pci_free_consistent(pdev, num_sp * sizeof(u64),
+	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
 			xhci->scratchpad->sp_array,
 			xhci->scratchpad->sp_dma);
 	kfree(xhci->scratchpad);
@@ -1463,18 +1664,10 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 
 void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
 {
-	int last;
-
-	if (!urb_priv)
-		return;
-
-	last = urb_priv->length - 1;
-	if (last >= 0) {
-		int i;
-		for (i = 0; i <= last; i++)
-			kfree(urb_priv->td[i]);
+	if (urb_priv) {
+		kfree(urb_priv->td[0]);
+		kfree(urb_priv);
 	}
-	kfree(urb_priv);
 }
 
 void xhci_free_command(struct xhci_hcd *xhci,
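Freeing only td[0] works because the enqueue side of this series allocates all TDs for an URB from a single block, so every td[] pointer indexes into one kzalloc()'d buffer. A sketch of that companion pattern (from the xhci_urb_enqueue path, not shown in this diff):

	struct xhci_td *buffer;
	int i;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer)
		return -ENOMEM;	/* the real code also frees urb_priv here */
	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}
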
@@ -1489,6 +1682,8 @@ void xhci_free_command(struct xhci_hcd *xhci,
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct dev_info *dev_info, *next;
+	unsigned long flags;
 	int size;
 	int i;
 
@@ -1500,7 +1695,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	}
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
-		pci_free_consistent(pdev, size,
+		dma_free_coherent(&pdev->dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
 	xhci_dbg(xhci, "Freed ERST\n");
@@ -1540,17 +1735,25 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
 	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
-		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
+		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
 	xhci->dcbaa = NULL;
 
 	scratchpad_free(xhci);
 
+	spin_lock_irqsave(&xhci->lock, flags);
+	list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
+		list_del(&dev_info->list);
+		kfree(dev_info);
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
 	xhci->num_usb2_ports = 0;
 	xhci->num_usb3_ports = 0;
 	kfree(xhci->usb2_ports);
 	kfree(xhci->usb3_ports);
 	kfree(xhci->port_array);
+	kfree(xhci->rh_bw);
 
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
@@ -1762,6 +1965,23 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
 		/* WTF? "Valid values are ‘1’ to MaxPorts" */
 		return;
+
+	/* Check the host's USB2 LPM capability */
+	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
+			(temp & XHCI_L1C)) {
+		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+		xhci->sw_lpm_support = 1;
+	}
+
+	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
+		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+		xhci->sw_lpm_support = 1;
+		if (temp & XHCI_HLC) {
+			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+			xhci->hw_lpm_support = 1;
+		}
+	}
+
 	port_offset--;
 	for (i = port_offset; i < (port_offset + port_count); i++) {
 		/* Duplicate entry.  Ignore the port if the revisions differ. */
@@ -1806,7 +2026,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 	__le32 __iomem *addr;
 	u32 offset;
 	unsigned int num_ports;
-	int i, port_index;
+	int i, j, port_index;
 
 	addr = &xhci->cap_regs->hcc_params;
 	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
@@ -1821,6 +2041,18 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->port_array)
 		return -ENOMEM;
 
+	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
+	if (!xhci->rh_bw)
+		return -ENOMEM;
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_interval_bw_table *bw_table;
+
+		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
+		bw_table = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
+			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
+	}
+
 	/*
 	 * For whatever reason, the first capability offset is from the
 	 * capability register base, not from the HCCPARAMS register.
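The per-root-port initialization above gives every primary bandwidth domain the same interval-table shape that xhci_alloc_tt_info() gives each TT. A hypothetical accessor, purely to show how the structures nest (field names follow this patch; the helper itself is not part of it):

	static struct list_head *root_port_interval_eps(struct xhci_hcd *xhci,
			unsigned int port, unsigned int interval)
	{
		/* rh_bw[] is indexed by port - 1; each root port owns one
		 * bw_table with one endpoint list per interval bucket.
		 */
		return &xhci->rh_bw[port - 1].bw_table
				.interval_bw[interval].endpoints;
	}
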
@@ -1959,8 +2191,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * Section 5.4.8 - doorbell array must be
 	 * "physically contiguous and 64-byte (cache line) aligned".
 	 */
-	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
-			sizeof(*xhci->dcbaa), &dma);
+	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
+			GFP_KERNEL);
 	if (!xhci->dcbaa)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
@@ -1994,14 +2226,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			dma_pool_create("xHCI 1KB stream ctx arrays",
 			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
 	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
-	 * will be allocated with pci_alloc_consistent()
+	 * will be allocated with dma_alloc_coherent()
 	 */
 
 	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2032,14 +2264,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
 	xhci_dbg(xhci, "// Allocating event ring\n");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
+						flags);
 	if (!xhci->event_ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
 		goto fail;
 
-	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
-			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+	xhci->erst.entries = dma_alloc_coherent(dev,
+			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+			GFP_KERNEL);
 	if (!xhci->erst.entries)
 		goto fail;
 	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
@@ -2102,6 +2336,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	if (xhci_setup_port_arrays(xhci, flags))
 		goto fail;
 
+	INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+
 	return 0;
 
 fail: