path: root/drivers/usb/host/xhci-ring.c
author    Sarah Sharp <sarah.a.sharp@linux.intel.com>    2010-04-02 18:34:43 -0400
committer Greg Kroah-Hartman <gregkh@suse.de>    2010-05-20 16:21:38 -0400
commit  e9df17eb1408cfafa3d1844bfc7f22c7237b31b8 (patch)
tree    175badf12e5098bd15bc5d6a37642badd7c6f4b9 /drivers/usb/host/xhci-ring.c
parent  8df75f42f8e67e2851cdcf6da91640fb881defd1 (diff)
USB: xhci: Correct assumptions about number of rings per endpoint.
Much of the xHCI driver code assumes that endpoints only have one ring.
Now an endpoint can have one ring per enabled stream ID, so correct that
assumption.  Use functions that translate the stream_id field in the URB
or the DMA address of a TRB into the correct stream ring.

Correct the polling loop to print out all enabled stream rings.  Make the
URB cancellation routine find the correct stream ring if the URB has
stream_id set.  Make sure the URB enqueueing routine does the same.  Also
correct the code that handles stalled/halted endpoints.

Check that commands and registers that can take stream IDs handle them
properly.  That includes ringing an endpoint doorbell, resetting a
stalled/halted endpoint, and setting a transfer ring dequeue pointer
(since that command can set the dequeue pointer in a stream context or an
endpoint context).

Correct the transfer event handler to translate a TRB DMA address into
the stream ring it was enqueued to.  Make the code that allocates and
prepares TD structures add the TD to the right td_list for the stream
ring.  Make sure the code that gives the first TRB in a TD to the
hardware manipulates the correct stream ring.

When an endpoint stalls, store the stream ID of the stream ring that
stalled in the xhci_virt_ep structure.  Use that instead of the stream ID
in the URB, since a URB may be re-used after it is given back following a
non-control endpoint stall.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
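As background for the helpers used in the diff below (xhci_stream_id_to_ring(), xhci_urb_to_transfer_ring(), xhci_triad_to_transfer_ring(), xhci_dma_to_transfer_ring()), here is a minimal sketch of the stream-ID-to-ring lookup this patch relies on.  It is an illustration only, not code from the driver: the example_* names and structure layout are invented, and only the overall idea is taken from the patch (an endpoint without streams has a single ring; an endpoint with streams enabled keeps one ring per enabled stream ID, with stream ID 0 reserved).

/* Simplified sketch (not from this patch): how a stream ID selects a
 * transfer ring.  The example_* types are hypothetical; struct xhci_ring
 * is left opaque.
 */
#include <stddef.h>

struct xhci_ring;

#define EXAMPLE_EP_HAS_STREAMS  (1 << 0)

struct example_stream_info {
        unsigned int num_streams;               /* array size; entry 0 is reserved */
        struct xhci_ring **stream_rings;        /* indexed by stream ID */
};

struct example_virt_ep {
        unsigned int ep_state;
        struct xhci_ring *ring;                 /* used when streams are not enabled */
        struct example_stream_info *stream_info;        /* used when streams are enabled */
};

static struct xhci_ring *example_stream_id_to_ring(struct example_virt_ep *ep,
                unsigned int stream_id)
{
        if (!(ep->ep_state & EXAMPLE_EP_HAS_STREAMS))
                /* No streams: only stream ID 0 is valid, and there is one ring. */
                return stream_id ? NULL : ep->ring;
        if (stream_id == 0 || stream_id >= ep->stream_info->num_streams)
                /* Stream ID 0 is reserved; out-of-range IDs have no ring. */
                return NULL;
        return ep->stream_info->stream_rings[stream_id];
}

In the patch itself, every place that used to dereference ep->ring directly (URB enqueue, the transfer event handler, URB cancellation, and the Set TR Dequeue Pointer completion) now goes through a lookup of this shape and bails out when it returns NULL.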
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  |  192
1 file changed, 147 insertions(+), 45 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a14f657e279b..16ef5fd77ce2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -312,7 +312,8 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 
 static void ring_ep_doorbell(struct xhci_hcd *xhci,
                 unsigned int slot_id,
-                unsigned int ep_index)
+                unsigned int ep_index,
+                unsigned int stream_id)
 {
         struct xhci_virt_ep *ep;
         unsigned int ep_state;
@@ -331,7 +332,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
         if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
                         && !(ep_state & EP_HALTED)) {
                 field = xhci_readl(xhci, db_addr) & DB_MASK;
-                xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+                field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
+                xhci_writel(xhci, field, db_addr);
                 /* Flush PCI posted writes - FIXME Matthew Wilcox says this
                  * isn't time-critical and we shouldn't make the CPU wait for
                  * the flush.
@@ -340,6 +342,31 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
         }
 }
 
+/* Ring the doorbell for any rings with pending URBs */
+static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+                unsigned int slot_id,
+                unsigned int ep_index)
+{
+        unsigned int stream_id;
+        struct xhci_virt_ep *ep;
+
+        ep = &xhci->devs[slot_id]->eps[ep_index];
+
+        /* A ring has pending URBs if its TD list is not empty */
+        if (!(ep->ep_state & EP_HAS_STREAMS)) {
+                if (!(list_empty(&ep->ring->td_list)))
+                        ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+                return;
+        }
+
+        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+                        stream_id++) {
+                struct xhci_stream_info *stream_info = ep->stream_info;
+                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
+                        ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+        }
+}
+
 /*
  * Find the segment that trb is in.  Start searching in start_seg.
  * If we must move past a segment that has a link TRB with a toggle cycle state
@@ -382,14 +409,23 @@ static struct xhci_segment *find_trb_seg(
  */
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                 unsigned int slot_id, unsigned int ep_index,
-                struct xhci_td *cur_td, struct xhci_dequeue_state *state)
+                unsigned int stream_id, struct xhci_td *cur_td,
+                struct xhci_dequeue_state *state)
 {
         struct xhci_virt_device *dev = xhci->devs[slot_id];
-        struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
+        struct xhci_ring *ep_ring;
         struct xhci_generic_trb *trb;
         struct xhci_ep_ctx *ep_ctx;
         dma_addr_t addr;
 
+        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+                        ep_index, stream_id);
+        if (!ep_ring) {
+                xhci_warn(xhci, "WARN can't find new dequeue state "
+                                "for invalid stream ID %u.\n",
+                                stream_id);
+                return;
+        }
         state->new_cycle_state = 0;
         xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
         state->new_deq_seg = find_trb_seg(cur_td->start_seg,
@@ -469,11 +505,13 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 }
 
 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-                unsigned int ep_index, struct xhci_segment *deq_seg,
+                unsigned int ep_index, unsigned int stream_id,
+                struct xhci_segment *deq_seg,
                 union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
                 unsigned int slot_id, unsigned int ep_index,
+                unsigned int stream_id,
                 struct xhci_dequeue_state *deq_state)
 {
         struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
@@ -485,7 +523,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
                         deq_state->new_deq_ptr,
                         (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
                         deq_state->new_cycle_state);
-        queue_set_tr_deq(xhci, slot_id, ep_index,
+        queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
                         deq_state->new_deq_seg,
                         deq_state->new_deq_ptr,
                         (u32) deq_state->new_cycle_state);
@@ -553,11 +591,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
         slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
         ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
         ep = &xhci->devs[slot_id]->eps[ep_index];
-        ep_ring = ep->ring;
 
         if (list_empty(&ep->cancelled_td_list)) {
                 xhci_stop_watchdog_timer_in_irq(xhci, ep);
-                ring_ep_doorbell(xhci, slot_id, ep_index);
+                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                 return;
         }
 
@@ -571,15 +608,36 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
                                 cur_td->first_trb,
                                 (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+                if (!ep_ring) {
+                        /* This shouldn't happen unless a driver is mucking
+                         * with the stream ID after submission.  This will
+                         * leave the TD on the hardware ring, and the hardware
+                         * will try to execute it, and may access a buffer
+                         * that has already been freed.  In the best case, the
+                         * hardware will execute it, and the event handler will
+                         * ignore the completion event for that TD, since it was
+                         * removed from the td_list for that endpoint.  In
+                         * short, don't muck with the stream ID after
+                         * submission.
+                         */
+                        xhci_warn(xhci, "WARN Cancelled URB %p "
+                                        "has invalid stream ID %u.\n",
+                                        cur_td->urb,
+                                        cur_td->urb->stream_id);
+                        goto remove_finished_td;
+                }
                 /*
                  * If we stopped on the TD we need to cancel, then we have to
                  * move the xHC endpoint ring dequeue pointer past this TD.
                  */
                 if (cur_td == ep->stopped_td)
-                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
-                                        &deq_state);
+                        xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
                                        cur_td->urb->stream_id,
+                                        cur_td, &deq_state);
                 else
                         td_to_noop(xhci, ep_ring, cur_td);
+remove_finished_td:
                 /*
                  * The event handler won't see a completion for this TD anymore,
                  * so remove it from the endpoint ring's TD list.  Keep it in
@@ -593,11 +651,13 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
         /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
         if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
                 xhci_queue_new_dequeue_state(xhci,
-                                slot_id, ep_index, &deq_state);
+                                slot_id, ep_index,
+                                ep->stopped_td->urb->stream_id,
+                                &deq_state);
                 xhci_ring_cmd_db(xhci);
         } else {
-                /* Otherwise just ring the doorbell to restart the ring */
-                ring_ep_doorbell(xhci, slot_id, ep_index);
+                /* Otherwise ring the doorbell(s) to restart queued transfers */
+                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
         }
         ep->stopped_td = NULL;
         ep->stopped_trb = NULL;
@@ -757,6 +817,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 {
         unsigned int slot_id;
         unsigned int ep_index;
+        unsigned int stream_id;
         struct xhci_ring *ep_ring;
         struct xhci_virt_device *dev;
         struct xhci_ep_ctx *ep_ctx;
@@ -764,8 +825,19 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 
         slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
         ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+        stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
         dev = xhci->devs[slot_id];
-        ep_ring = dev->eps[ep_index].ring;
+
+        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+        if (!ep_ring) {
+                xhci_warn(xhci, "WARN Set TR deq ptr command for "
+                                "freed stream ID %u\n",
+                                stream_id);
+                /* XXX: Harmless??? */
+                dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+                return;
+        }
+
         ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
         slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
@@ -810,7 +882,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
         }
 
         dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
-        ring_ep_doorbell(xhci, slot_id, ep_index);
+        /* Restart any rings with pending URBs */
+        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
 static void handle_reset_ep_completion(struct xhci_hcd *xhci,
@@ -819,11 +892,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 {
         int slot_id;
         unsigned int ep_index;
-        struct xhci_ring *ep_ring;
 
         slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
         ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
         /* This command will only fail if the endpoint wasn't halted,
          * but we don't care.
          */
@@ -841,9 +912,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
                                 false);
                 xhci_ring_cmd_db(xhci);
         } else {
-                /* Clear our internal halted state and restart the ring */
+                /* Clear our internal halted state and restart the ring(s) */
                 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
-                ring_ep_doorbell(xhci, slot_id, ep_index);
+                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
         }
 }
 
@@ -929,8 +1000,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 /* Input ctx add_flags are the endpoint index plus one */
                 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
                 /* A usb_set_interface() call directly after clearing a halted
-                 * condition may race on this quirky hardware.
-                 * Not worth worrying about, since this is prototype hardware.
+                 * condition may race on this quirky hardware.  Not worth
+                 * worrying about, since this is prototype hardware.  Not sure
+                 * if this will work for streams, but streams support was
+                 * untested on this prototype.
                  */
                 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
                                 ep_index != (unsigned int) -1 &&
@@ -943,10 +1016,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                         xhci_dbg(xhci, "Completed config ep cmd - "
                                         "last ep index = %d, state = %d\n",
                                         ep_index, ep_state);
-                        /* Clear our internal halted state and restart ring */
+                        /* Clear internal halted state and restart ring(s) */
                         xhci->devs[slot_id]->eps[ep_index].ep_state &=
                                 ~EP_HALTED;
-                        ring_ep_doorbell(xhci, slot_id, ep_index);
+                        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                         break;
                 }
 bandwidth_change:
@@ -1079,12 +1152,14 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 
 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
                 unsigned int slot_id, unsigned int ep_index,
+                unsigned int stream_id,
                 struct xhci_td *td, union xhci_trb *event_trb)
 {
         struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
         ep->ep_state |= EP_HALTED;
         ep->stopped_td = td;
         ep->stopped_trb = event_trb;
+        ep->stopped_stream = stream_id;
 
         xhci_queue_reset_ep(xhci, slot_id, ep_index);
         xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
@@ -1169,10 +1244,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         ep_index = TRB_TO_EP_ID(event->flags) - 1;
         xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
         ep = &xdev->eps[ep_index];
-        ep_ring = ep->ring;
+        ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
         if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
-                xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+                xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
+                                "or incorrect stream ring\n");
                 return -ENODEV;
         }
 
@@ -1303,7 +1379,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         td->urb->actual_length = 0;
 
                         xhci_cleanup_halted_endpoint(xhci,
-                                        slot_id, ep_index, td, event_trb);
+                                        slot_id, ep_index, 0, td, event_trb);
                         goto td_cleanup;
                 }
                 /*
@@ -1452,6 +1528,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                          */
                         ep->stopped_td = td;
                         ep->stopped_trb = event_trb;
+                        ep->stopped_stream = ep_ring->stream_id;
                 } else if (xhci_requires_manual_halt_cleanup(xhci,
                                         ep_ctx, trb_comp_code)) {
                         /* Other types of errors halt the endpoint, but the
@@ -1460,7 +1537,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                          * xHCI hardware manually.
                          */
                         xhci_cleanup_halted_endpoint(xhci,
-                                        slot_id, ep_index, td, event_trb);
+                                        slot_id, ep_index, ep_ring->stream_id, td, event_trb);
                 } else {
                         /* Update ring dequeue pointer */
                         while (ep_ring->dequeue != td->last_trb)
@@ -1656,14 +1733,24 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 static int prepare_transfer(struct xhci_hcd *xhci,
                 struct xhci_virt_device *xdev,
                 unsigned int ep_index,
+                unsigned int stream_id,
                 unsigned int num_trbs,
                 struct urb *urb,
                 struct xhci_td **td,
                 gfp_t mem_flags)
 {
         int ret;
+        struct xhci_ring *ep_ring;
         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-        ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
+
+        ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+        if (!ep_ring) {
+                xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
+                                stream_id);
+                return -EINVAL;
+        }
+
+        ret = prepare_ring(xhci, ep_ring,
                         ep_ctx->ep_info & EP_STATE_MASK,
                         num_trbs, mem_flags);
         if (ret)
@@ -1683,9 +1770,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
         (*td)->urb = urb;
         urb->hcpriv = (void *) (*td);
         /* Add this TD to the tail of the endpoint ring's TD list */
-        list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
-        (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
-        (*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
+        list_add_tail(&(*td)->td_list, &ep_ring->td_list);
+        (*td)->start_seg = ep_ring->enq_seg;
+        (*td)->first_trb = ep_ring->enqueue;
 
         return 0;
 }
@@ -1751,7 +1838,7 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 }
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
-                unsigned int ep_index, int start_cycle,
+                unsigned int ep_index, unsigned int stream_id, int start_cycle,
                 struct xhci_generic_trb *start_trb, struct xhci_td *td)
 {
         /*
@@ -1760,7 +1847,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
          */
         wmb();
         start_trb->field[3] |= start_cycle;
-        ring_ep_doorbell(xhci, slot_id, ep_index);
+        ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
 /*
@@ -1834,12 +1921,16 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         struct xhci_generic_trb *start_trb;
         int start_cycle;
 
-        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+        if (!ep_ring)
+                return -EINVAL;
+
         num_trbs = count_sg_trbs_needed(xhci, urb);
         num_sgs = urb->num_sgs;
 
         trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
-                        ep_index, num_trbs, urb, &td, mem_flags);
+                        ep_index, urb->stream_id,
+                        num_trbs, urb, &td, mem_flags);
         if (trb_buff_len < 0)
                 return trb_buff_len;
         /*
@@ -1948,7 +2039,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         } while (running_total < urb->transfer_buffer_length);
 
         check_trb_math(urb, num_trbs, running_total);
-        giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+                        start_cycle, start_trb, td);
         return 0;
 }
 
@@ -1970,7 +2062,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         if (urb->num_sgs)
                 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
 
-        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+        if (!ep_ring)
+                return -EINVAL;
 
         num_trbs = 0;
         /* How much data is (potentially) left before the 64KB boundary? */
@@ -1997,7 +2091,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         (unsigned long long)urb->transfer_dma,
                         num_trbs);
 
-        ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+        ret = prepare_transfer(xhci, xhci->devs[slot_id],
+                        ep_index, urb->stream_id,
                         num_trbs, urb, &td, mem_flags);
         if (ret < 0)
                 return ret;
@@ -2067,7 +2162,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         } while (running_total < urb->transfer_buffer_length);
 
         check_trb_math(urb, num_trbs, running_total);
-        giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+                        start_cycle, start_trb, td);
         return 0;
 }
 
@@ -2084,7 +2180,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         u32 field, length_field;
         struct xhci_td *td;
 
-        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+        if (!ep_ring)
+                return -EINVAL;
 
         /*
          * Need to copy setup packet into setup TRB, so we can't use the setup
@@ -2105,8 +2203,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
          */
         if (urb->transfer_buffer_length > 0)
                 num_trbs++;
-        ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
-                        urb, &td, mem_flags);
+        ret = prepare_transfer(xhci, xhci->devs[slot_id],
+                        ep_index, urb->stream_id,
+                        num_trbs, urb, &td, mem_flags);
         if (ret < 0)
                 return ret;
 
@@ -2161,7 +2260,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         /* Event on completion */
                         field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
-        giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+        giveback_first_trb(xhci, slot_id, ep_index, 0,
+                        start_cycle, start_trb, td);
         return 0;
 }
 
@@ -2273,12 +2373,14 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
  * This should not be used for endpoints that have streams enabled.
  */
 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-                unsigned int ep_index, struct xhci_segment *deq_seg,
+                unsigned int ep_index, unsigned int stream_id,
+                struct xhci_segment *deq_seg,
                 union xhci_trb *deq_ptr, u32 cycle_state)
 {
         dma_addr_t addr;
         u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+        u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
         u32 type = TRB_TYPE(TRB_SET_DEQ);
 
         addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
@@ -2289,7 +2391,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                 return 0;
         }
         return queue_command(xhci, lower_32_bits(addr) | cycle_state,
-                        upper_32_bits(addr), 0,
+                        upper_32_bits(addr), trb_stream_id,
                         trb_slot_id | trb_ep_index | type, false);
 }
2295 2397