Diffstat (limited to 'drivers/usb/host/xhci-ring.c')

 drivers/usb/host/xhci-ring.c | 329 +++++++++++++++++++++++++++++------------
 1 file changed, 254 insertions(+), 75 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 85d7e8f2085e..36c858e5b529 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -112,6 +112,12 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
 }
 
+static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+{
+	struct xhci_link_trb *link = &ring->enqueue->link;
+	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment. This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -193,20 +199,15 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
 		if (!consumer) {
 			if (ring != xhci->event_ring) {
-				/* If we're not dealing with 0.95 hardware,
-				 * carry over the chain bit of the previous TRB
-				 * (which may mean the chain bit is cleared).
-				 */
-				if (!xhci_link_trb_quirk(xhci)) {
-					next->link.control &= ~TRB_CHAIN;
-					next->link.control |= chain;
+				if (chain) {
+					next->link.control |= TRB_CHAIN;
+
+					/* Give this link TRB to the hardware */
+					wmb();
+					next->link.control ^= TRB_CYCLE;
+				} else {
+					break;
 				}
-				/* Give this link TRB to the hardware */
-				wmb();
-				if (next->link.control & TRB_CYCLE)
-					next->link.control &= (u32) ~TRB_CYCLE;
-				else
-					next->link.control |= (u32) TRB_CYCLE;
 			}
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
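
A note on the hunk above: a producer hands a link TRB to the hardware by flipping its cycle bit, and the open-coded toggle being removed is exactly equivalent to the new XOR. A minimal sketch, with "control" standing in for next->link.control:

	/* The old form... */
	if (control & TRB_CYCLE)
		control &= (u32) ~TRB_CYCLE;
	else
		control |= (u32) TRB_CYCLE;

	/* ...and the new form flip the same single bit: */
	control ^= (u32) TRB_CYCLE;
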
@@ -242,10 +243,34 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	int i;
 	union xhci_trb *enq = ring->enqueue;
 	struct xhci_segment *enq_seg = ring->enq_seg;
+	struct xhci_segment *cur_seg;
+	unsigned int left_on_ring;
+
+	/* If we are currently pointing to a link TRB, advance the
+	 * enqueue pointer before checking for space */
+	while (last_trb(xhci, ring, enq_seg, enq)) {
+		enq_seg = enq_seg->next;
+		enq = enq_seg->trbs;
+	}
 
 	/* Check if ring is empty */
-	if (enq == ring->dequeue)
+	if (enq == ring->dequeue) {
+		/* Can't use link trbs */
+		left_on_ring = TRBS_PER_SEGMENT - 1;
+		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
+				cur_seg = cur_seg->next)
+			left_on_ring += TRBS_PER_SEGMENT - 1;
+
+		/* Always need one TRB free in the ring. */
+		left_on_ring -= 1;
+		if (num_trbs > left_on_ring) {
+			xhci_warn(xhci, "Not enough room on ring; "
+					"need %u TRBs, %u TRBs left\n",
+					num_trbs, left_on_ring);
+			return 0;
+		}
 		return 1;
+	}
 	/* Make sure there's an extra empty TRB available */
 	for (i = 0; i <= num_trbs; ++i) {
 		if (enq == ring->dequeue)
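
A worked example of the empty-ring capacity computed above, assuming the driver's usual TRBS_PER_SEGMENT of 64 and a two-segment ring:

	/*
	 * left_on_ring for an empty two-segment ring (TRBS_PER_SEGMENT == 64):
	 *   enqueue segment:          64 - 1 =  63   (its link TRB is unusable)
	 *   plus the other segment:   + 63   = 126
	 *   keep one TRB free:        - 1    = 125 TRBs usable
	 */

The final minus one keeps enqueue from ever catching up to dequeue, which is how an empty ring stays distinguishable from a full one.
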
@@ -295,7 +320,8 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 
 static void ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int slot_id,
-		unsigned int ep_index)
+		unsigned int ep_index,
+		unsigned int stream_id)
 {
 	struct xhci_virt_ep *ep;
 	unsigned int ep_state;
@@ -306,11 +332,16 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 	ep_state = ep->ep_state;
 	/* Don't ring the doorbell for this endpoint if there are pending
 	 * cancellations because the we don't want to interrupt processing.
+	 * We don't want to restart any stream rings if there's a set dequeue
+	 * pointer command pending because the device can choose to start any
+	 * stream once the endpoint is on the HW schedule.
+	 * FIXME - check all the stream rings for pending cancellations.
 	 */
 	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
 			&& !(ep_state & EP_HALTED)) {
 		field = xhci_readl(xhci, db_addr) & DB_MASK;
-		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
+		xhci_writel(xhci, field, db_addr);
 		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
 		 * isn't time-critical and we shouldn't make the CPU wait for
 		 * the flush.
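
STREAM_ID_TO_DB() is new with this series. Per the xHCI spec's doorbell register layout, the endpoint target lives in the low byte and the stream ID in the upper half of the doorbell write. A hedged sketch of what the two macros are expected to expand to (the real definitions live in xhci.h, outside this view):

	/* Hypothetical equivalents of the doorbell macros:
	 * DB Target in bits 7:0, DB Stream ID in bits 31:16.
	 */
	#define EPI_TO_DB(p)		(((p) + 1) & 0xff)
	#define STREAM_ID_TO_DB(p)	(((p) & 0xffff) << 16)
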
@@ -319,6 +350,31 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 	}
 }
 
+/* Ring the doorbell for any rings with pending URBs */
+static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+		unsigned int slot_id,
+		unsigned int ep_index)
+{
+	unsigned int stream_id;
+	struct xhci_virt_ep *ep;
+
+	ep = &xhci->devs[slot_id]->eps[ep_index];
+
+	/* A ring has pending URBs if its TD list is not empty */
+	if (!(ep->ep_state & EP_HAS_STREAMS)) {
+		if (!(list_empty(&ep->ring->td_list)))
+			ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+		return;
+	}
+
+	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+			stream_id++) {
+		struct xhci_stream_info *stream_info = ep->stream_info;
+		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
+			ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+	}
+}
+
 /*
  * Find the segment that trb is in.  Start searching in start_seg.
  * If we must move past a segment that has a link TRB with a toggle cycle state
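
The loop above starts at stream ID 1 because stream 0 is reserved; for an endpoint without streams, the lone transfer ring is addressed with a stream ID of 0. The xhci_stream_id_to_ring(), xhci_urb_to_transfer_ring(), and xhci_triad_to_transfer_ring() helpers this file now calls are added outside xhci-ring.c (this view is limited to that file); a minimal sketch of the lookup they presumably perform:

	/* Sketch only, not the driver's implementation. */
	static struct xhci_ring *stream_id_to_ring_sketch(struct xhci_virt_ep *ep,
			unsigned int stream_id)
	{
		if (!(ep->ep_state & EP_HAS_STREAMS))
			return stream_id == 0 ? ep->ring : NULL;
		if (stream_id == 0 || stream_id >= ep->stream_info->num_streams)
			return NULL;	/* stream 0 is reserved */
		return ep->stream_info->stream_rings[stream_id];
	}
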
@@ -334,13 +390,14 @@ static struct xhci_segment *find_trb_seg(
 	while (cur_seg->trbs > trb ||
 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
+		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
+				TRB_TYPE(TRB_LINK) &&
 				(generic_trb->field[3] & LINK_TOGGLE))
 			*cycle_state = ~(*cycle_state) & 0x1;
 		cur_seg = cur_seg->next;
 		if (cur_seg == start_seg)
 			/* Looped over the entire list.  Oops! */
-			return 0;
+			return NULL;
 	}
 	return cur_seg;
 }
@@ -361,14 +418,23 @@ static struct xhci_segment *find_trb_seg(
  */
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
-		struct xhci_td *cur_td, struct xhci_dequeue_state *state)
+		unsigned int stream_id, struct xhci_td *cur_td,
+		struct xhci_dequeue_state *state)
 {
 	struct xhci_virt_device *dev = xhci->devs[slot_id];
-	struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
+	struct xhci_ring *ep_ring;
 	struct xhci_generic_trb *trb;
 	struct xhci_ep_ctx *ep_ctx;
 	dma_addr_t addr;
 
+	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+			ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_warn(xhci, "WARN can't find new dequeue state "
+				"for invalid stream ID %u.\n",
+				stream_id);
+		return;
+	}
 	state->new_cycle_state = 0;
 	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
@@ -390,7 +456,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		BUG();
 
 	trb = &state->new_deq_ptr->generic;
-	if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
 			(trb->field[3] & LINK_TOGGLE))
 		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -448,11 +514,13 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 }
 
 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, struct xhci_segment *deq_seg,
+		unsigned int ep_index, unsigned int stream_id,
+		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state)
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
@@ -464,7 +532,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 			deq_state->new_deq_ptr,
 			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
 			deq_state->new_cycle_state);
-	queue_set_tr_deq(xhci, slot_id, ep_index,
+	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
 			deq_state->new_deq_seg,
 			deq_state->new_deq_ptr,
 			(u32) deq_state->new_cycle_state);
@@ -523,7 +591,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	struct xhci_ring *ep_ring;
 	struct xhci_virt_ep *ep;
 	struct list_head *entry;
-	struct xhci_td *cur_td = 0;
+	struct xhci_td *cur_td = NULL;
 	struct xhci_td *last_unlinked_td;
 
 	struct xhci_dequeue_state deq_state;
@@ -532,11 +600,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
 	ep = &xhci->devs[slot_id]->eps[ep_index];
-	ep_ring = ep->ring;
 
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
-		ring_ep_doorbell(xhci, slot_id, ep_index);
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
 
@@ -550,15 +617,36 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
 				cur_td->first_trb,
 				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+		if (!ep_ring) {
+			/* This shouldn't happen unless a driver is mucking
+			 * with the stream ID after submission. This will
+			 * leave the TD on the hardware ring, and the hardware
+			 * will try to execute it, and may access a buffer
+			 * that has already been freed. In the best case, the
+			 * hardware will execute it, and the event handler will
+			 * ignore the completion event for that TD, since it was
+			 * removed from the td_list for that endpoint. In
+			 * short, don't muck with the stream ID after
+			 * submission.
+			 */
+			xhci_warn(xhci, "WARN Cancelled URB %p "
+					"has invalid stream ID %u.\n",
+					cur_td->urb,
+					cur_td->urb->stream_id);
+			goto remove_finished_td;
+		}
 		/*
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
 		 */
 		if (cur_td == ep->stopped_td)
-			xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
-					&deq_state);
+			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
+					cur_td->urb->stream_id,
+					cur_td, &deq_state);
 		else
 			td_to_noop(xhci, ep_ring, cur_td);
+remove_finished_td:
 		/*
 		 * The event handler won't see a completion for this TD anymore,
 		 * so remove it from the endpoint ring's TD list.  Keep it in
@@ -572,12 +660,16 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
 		xhci_queue_new_dequeue_state(xhci,
-				slot_id, ep_index, &deq_state);
+				slot_id, ep_index,
+				ep->stopped_td->urb->stream_id,
+				&deq_state);
 		xhci_ring_cmd_db(xhci);
 	} else {
-		/* Otherwise just ring the doorbell to restart the ring */
-		ring_ep_doorbell(xhci, slot_id, ep_index);
+		/* Otherwise ring the doorbell(s) to restart queued transfers */
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
+	ep->stopped_td = NULL;
+	ep->stopped_trb = NULL;
 
 	/*
 	 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -734,6 +826,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 {
 	unsigned int slot_id;
 	unsigned int ep_index;
+	unsigned int stream_id;
 	struct xhci_ring *ep_ring;
 	struct xhci_virt_device *dev;
 	struct xhci_ep_ctx *ep_ctx;
@@ -741,8 +834,19 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
 	dev = xhci->devs[slot_id];
-	ep_ring = dev->eps[ep_index].ring;
+
+	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_warn(xhci, "WARN Set TR deq ptr command for "
+				"freed stream ID %u\n",
+				stream_id);
+		/* XXX: Harmless??? */
+		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+		return;
+	}
+
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
@@ -787,7 +891,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	}
 
 	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
-	ring_ep_doorbell(xhci, slot_id, ep_index);
+	/* Restart any rings with pending URBs */
+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
 static void handle_reset_ep_completion(struct xhci_hcd *xhci,
@@ -796,11 +901,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 {
 	int slot_id;
 	unsigned int ep_index;
-	struct xhci_ring *ep_ring;
 
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
 	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
@@ -818,9 +921,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 				false);
 		xhci_ring_cmd_db(xhci);
 	} else {
-		/* Clear our internal halted state and restart the ring */
+		/* Clear our internal halted state and restart the ring(s) */
 		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
-		ring_ep_doorbell(xhci, slot_id, ep_index);
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
 }
 
@@ -897,16 +1000,19 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		 * Configure endpoint commands can come from the USB core
 		 * configuration or alt setting changes, or because the HW
 		 * needed an extra configure endpoint command after a reset
-		 * endpoint command.  In the latter case, the xHCI driver is
-		 * not waiting on the configure endpoint command.
+		 * endpoint command or streams were being configured.
+		 * If the command was for a halted endpoint, the xHCI driver
+		 * is not waiting on the configure endpoint command.
 		 */
 		ctrl_ctx = xhci_get_input_control_ctx(xhci,
 				virt_dev->in_ctx);
 		/* Input ctx add_flags are the endpoint index plus one */
 		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
 		/* A usb_set_interface() call directly after clearing a halted
-		 * condition may race on this quirky hardware.
-		 * Not worth worrying about, since this is prototype hardware.
+		 * condition may race on this quirky hardware.  Not worth
+		 * worrying about, since this is prototype hardware.  Not sure
+		 * if this will work for streams, but streams support was
+		 * untested on this prototype.
 		 */
 		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
 				ep_index != (unsigned int) -1 &&
@@ -919,10 +1025,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			xhci_dbg(xhci, "Completed config ep cmd - "
 					"last ep index = %d, state = %d\n",
 					ep_index, ep_state);
-			/* Clear our internal halted state and restart ring */
+			/* Clear internal halted state and restart ring(s) */
 			xhci->devs[slot_id]->eps[ep_index].ep_state &=
 				~EP_HALTED;
-			ring_ep_doorbell(xhci, slot_id, ep_index);
+			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 			break;
 		}
 bandwidth_change:
@@ -1018,7 +1124,7 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 
 	do {
 		if (start_dma == 0)
-			return 0;
+			return NULL;
 		/* We may get an event for a Link TRB in the middle of a TD */
 		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
 				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
@@ -1040,7 +1146,7 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 					suspect_dma <= end_trb_dma))
 				return cur_seg;
 		}
-		return 0;
+		return NULL;
 	} else {
 		/* Might still be somewhere in this segment */
 		if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
@@ -1050,19 +1156,27 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
 	} while (cur_seg != start_seg);
 
-	return 0;
+	return NULL;
 }
 
 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
+		unsigned int stream_id,
 		struct xhci_td *td, union xhci_trb *event_trb)
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	ep->ep_state |= EP_HALTED;
 	ep->stopped_td = td;
 	ep->stopped_trb = event_trb;
+	ep->stopped_stream = stream_id;
+
 	xhci_queue_reset_ep(xhci, slot_id, ep_index);
 	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+
+	ep->stopped_td = NULL;
+	ep->stopped_trb = NULL;
+	ep->stopped_stream = 0;
+
 	xhci_ring_cmd_db(xhci);
 }
 
@@ -1119,11 +1233,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	struct xhci_ring *ep_ring;
 	unsigned int slot_id;
 	int ep_index;
-	struct xhci_td *td = 0;
+	struct xhci_td *td = NULL;
 	dma_addr_t event_dma;
 	struct xhci_segment *event_seg;
 	union xhci_trb *event_trb;
-	struct urb *urb = 0;
+	struct urb *urb = NULL;
 	int status = -EINPROGRESS;
 	struct xhci_ep_ctx *ep_ctx;
 	u32 trb_comp_code;
@@ -1140,10 +1254,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	ep_index = TRB_TO_EP_ID(event->flags) - 1;
 	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
 	ep = &xdev->eps[ep_index];
-	ep_ring = ep->ring;
+	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
-		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
+				"or incorrect stream ring\n");
 		return -ENODEV;
 	}
 
@@ -1274,7 +1389,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				td->urb->actual_length = 0;
 
 			xhci_cleanup_halted_endpoint(xhci,
-					slot_id, ep_index, td, event_trb);
+					slot_id, ep_index, 0, td, event_trb);
 			goto td_cleanup;
 		}
 		/*
@@ -1390,8 +1505,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
 					cur_trb != event_trb;
 					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-				if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
-						TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+				if ((cur_trb->generic.field[3] &
+						TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+						(cur_trb->generic.field[3] &
+						TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
 					td->urb->actual_length +=
 						TRB_LEN(cur_trb->generic.field[2]);
 			}
@@ -1423,6 +1540,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 */
 			ep->stopped_td = td;
 			ep->stopped_trb = event_trb;
+			ep->stopped_stream = ep_ring->stream_id;
 		} else if (xhci_requires_manual_halt_cleanup(xhci,
 					ep_ctx, trb_comp_code)) {
 			/* Other types of errors halt the endpoint, but the
@@ -1431,7 +1549,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 * xHCI hardware manually.
 			 */
 			xhci_cleanup_halted_endpoint(xhci,
-					slot_id, ep_index, td, event_trb);
+					slot_id, ep_index, ep_ring->stream_id, td, event_trb);
 		} else {
 			/* Update ring dequeue pointer */
 			while (ep_ring->dequeue != td->last_trb)
@@ -1621,20 +1739,66 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		xhci_err(xhci, "ERROR no room on ep ring\n");
 		return -ENOMEM;
 	}
+
+	if (enqueue_is_link_trb(ep_ring)) {
+		struct xhci_ring *ring = ep_ring;
+		union xhci_trb *next;
+
+		xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
+		next = ring->enqueue;
+
+		while (last_trb(xhci, ring, ring->enq_seg, next)) {
+
+			/* If we're not dealing with 0.95 hardware,
+			 * clear the chain bit.
+			 */
+			if (!xhci_link_trb_quirk(xhci))
+				next->link.control &= ~TRB_CHAIN;
+			else
+				next->link.control |= TRB_CHAIN;
+
+			wmb();
+			next->link.control ^= (u32) TRB_CYCLE;
+
+			/* Toggle the cycle bit after the last ring segment. */
+			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+				ring->cycle_state = (ring->cycle_state ? 0 : 1);
+				if (!in_interrupt()) {
+					xhci_dbg(xhci, "queue_trb: Toggle cycle "
+							"state for ring %p = %i\n",
+							ring, (unsigned int)ring->cycle_state);
+				}
+			}
+			ring->enq_seg = ring->enq_seg->next;
+			ring->enqueue = ring->enq_seg->trbs;
+			next = ring->enqueue;
+		}
+	}
+
 	return 0;
 }
 
 static int prepare_transfer(struct xhci_hcd *xhci,
 		struct xhci_virt_device *xdev,
 		unsigned int ep_index,
+		unsigned int stream_id,
 		unsigned int num_trbs,
 		struct urb *urb,
 		struct xhci_td **td,
 		gfp_t mem_flags)
 {
 	int ret;
+	struct xhci_ring *ep_ring;
 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
+
+	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+	if (!ep_ring) {
+		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
+				stream_id);
+		return -EINVAL;
+	}
+
+	ret = prepare_ring(xhci, ep_ring,
 			ep_ctx->ep_info & EP_STATE_MASK,
 			num_trbs, mem_flags);
 	if (ret)
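
On the chain-bit choice in the new prepare_ring() block: 0.95-based hardware (the xhci_link_trb_quirk() case) expects the chain bit to stay set in link TRBs, while later hardware wants it to reflect whether a TD actually continues across the segment boundary. Since prepare_ring() only crosses a link TRB between TDs, nothing chains over it, so compliant hardware gets the bit cleared:

	/* Chain bit in a link TRB crossed *between* TDs:
	 *   0.95 hardware (xhci_link_trb_quirk):  TRB_CHAIN left set
	 *   later hardware:                       TRB_CHAIN cleared
	 * Inside a TD (inc_enq with chain != 0) the bit is always set so the
	 * TD is not torn apart at the segment boundary.
	 */
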
@@ -1654,9 +1818,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	(*td)->urb = urb;
 	urb->hcpriv = (void *) (*td);
 	/* Add this TD to the tail of the endpoint ring's TD list */
-	list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
-	(*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
-	(*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
+	list_add_tail(&(*td)->td_list, &ep_ring->td_list);
+	(*td)->start_seg = ep_ring->enq_seg;
+	(*td)->first_trb = ep_ring->enqueue;
 
 	return 0;
 }
@@ -1672,7 +1836,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 
 	xhci_dbg(xhci, "count sg list trbs: \n");
 	num_trbs = 0;
-	for_each_sg(urb->sg->sg, sg, num_sgs, i) {
+	for_each_sg(urb->sg, sg, num_sgs, i) {
 		unsigned int previous_total_trbs = num_trbs;
 		unsigned int len = sg_dma_len(sg);
 
@@ -1722,7 +1886,7 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 }
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, int start_cycle,
+		unsigned int ep_index, unsigned int stream_id, int start_cycle,
 		struct xhci_generic_trb *start_trb, struct xhci_td *td)
 {
 	/*
@@ -1731,7 +1895,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 	 */
 	wmb();
 	start_trb->field[3] |= start_cycle;
-	ring_ep_doorbell(xhci, slot_id, ep_index);
+	ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
 /*
@@ -1805,12 +1969,16 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
 
-	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
+
 	num_trbs = count_sg_trbs_needed(xhci, urb);
 	num_sgs = urb->num_sgs;
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
-			ep_index, num_trbs, urb, &td, mem_flags);
+			ep_index, urb->stream_id,
+			num_trbs, urb, &td, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
 	/*
@@ -1831,7 +1999,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * the amount of memory allocated for this scatter-gather list.
 	 * 3. TRBs buffers can't cross 64KB boundaries.
 	 */
-	sg = urb->sg->sg;
+	sg = urb->sg;
 	addr = (u64) sg_dma_address(sg);
 	this_sg_len = sg_dma_len(sg);
 	trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -1919,7 +2087,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	} while (running_total < urb->transfer_buffer_length);
 
 	check_trb_math(urb, num_trbs, running_total);
-	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb, td);
 	return 0;
 }
 
@@ -1938,10 +2107,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	int running_total, trb_buff_len, ret;
 	u64 addr;
 
-	if (urb->sg)
+	if (urb->num_sgs)
 		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
 
-	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
 
 	num_trbs = 0;
 	/* How much data is (potentially) left before the 64KB boundary? */
@@ -1968,7 +2139,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			(unsigned long long)urb->transfer_dma,
 			num_trbs);
 
-	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
 			num_trbs, urb, &td, mem_flags);
 	if (ret < 0)
 		return ret;
@@ -2038,7 +2210,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	} while (running_total < urb->transfer_buffer_length);
 
 	check_trb_math(urb, num_trbs, running_total);
-	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb, td);
 	return 0;
 }
 
@@ -2055,7 +2228,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	u32 field, length_field;
 	struct xhci_td *td;
 
-	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ep_ring)
+		return -EINVAL;
 
 	/*
 	 * Need to copy setup packet into setup TRB, so we can't use the setup
@@ -2076,8 +2251,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 */
 	if (urb->transfer_buffer_length > 0)
 		num_trbs++;
-	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
-			urb, &td, mem_flags);
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+			ep_index, urb->stream_id,
+			num_trbs, urb, &td, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -2132,7 +2308,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			/* Event on completion */
 			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
-	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+	giveback_first_trb(xhci, slot_id, ep_index, 0,
+			start_cycle, start_trb, td);
 	return 0;
 }
 
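
The literal stream ID of 0 in the control path above is deliberate: the USB 3.0 spec only defines streams for bulk endpoints, so a control transfer always runs on the endpoint's default ring.

	/* Control endpoints never have streams; stream ID 0 addresses the
	 * default transfer ring in ring_ep_doorbell().
	 */
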
@@ -2244,12 +2421,14 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
  * This should not be used for endpoints that have streams enabled.
  */
 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, struct xhci_segment *deq_seg,
+		unsigned int ep_index, unsigned int stream_id,
+		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state)
 {
 	dma_addr_t addr;
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 
 	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
@@ -2260,7 +2439,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		return 0;
 	}
 	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
-			upper_32_bits(addr), trb_stream_id,
+			upper_32_bits(addr), trb_stream_id,
 			trb_slot_id | trb_ep_index | type, false);
 }
 
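
For reference, a hedged sketch of how the Set TR Dequeue Pointer command assembled above lays out in the command TRB; field[2] carrying the stream ID is what TRB_TO_STREAM_ID() decodes in handle_set_deq_completion():

	/* Set TR Dequeue Pointer command TRB, as queued by queue_command():
	 *   field[0]: lower_32_bits(addr) | cycle_state  (new dequeue pointer
	 *             plus the dequeue cycle state bit)
	 *   field[1]: upper_32_bits(addr)
	 *   field[2]: STREAM_ID_FOR_TRB(stream_id)       (stream ID, bits 31:16)
	 *   field[3]: trb_slot_id | trb_ep_index | TRB_TYPE(TRB_SET_DEQ)
	 */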