Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  377
1 file changed, 311 insertions(+), 66 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index aa88a067148b..173c39c76489 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -172,8 +172,9 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * have their chain bit cleared (so that each Link TRB is a separate TD).
  *
  * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set.
- * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ * set, but other sections talk about dealing with the chain bit set.  This was
+ * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+ * xHCI hardware can't handle the chain bit being cleared on a link TRB.
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 {
@@ -191,8 +192,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
         while (last_trb(xhci, ring, ring->enq_seg, next)) {
                 if (!consumer) {
                         if (ring != xhci->event_ring) {
-                                next->link.control &= ~TRB_CHAIN;
-                                next->link.control |= chain;
+                                /* If we're not dealing with 0.95 hardware,
+                                 * carry over the chain bit of the previous TRB
+                                 * (which may mean the chain bit is cleared).
+                                 */
+                                if (!xhci_link_trb_quirk(xhci)) {
+                                        next->link.control &= ~TRB_CHAIN;
+                                        next->link.control |= chain;
+                                }
                                 /* Give this link TRB to the hardware */
                                 wmb();
                                 if (next->link.control & TRB_CYCLE)
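Note: xhci_link_trb_quirk() is defined outside this file; a minimal sketch of how such a predicate is typically written (the XHCI_LINK_TRB_QUIRK flag name is an assumption here, set at probe time for 0.95-era controllers) might be:

/* Sketch only, not part of this patch: the quirk bit gates whether the
 * driver may clear the chain bit on link TRBs for this host.
 */
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
        return xhci->quirks & XHCI_LINK_TRB_QUIRK;  /* assumed flag name */
}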
@@ -289,16 +296,18 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
                 unsigned int slot_id,
                 unsigned int ep_index)
 {
-        struct xhci_ring *ep_ring;
+        struct xhci_virt_ep *ep;
+        unsigned int ep_state;
         u32 field;
         __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 
-        ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+        ep = &xhci->devs[slot_id]->eps[ep_index];
+        ep_state = ep->ep_state;
         /* Don't ring the doorbell for this endpoint if there are pending
          * cancellations because the we don't want to interrupt processing.
          */
-        if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
-                        && !(ep_ring->state & EP_HALTED)) {
+        if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+                        && !(ep_state & EP_HALTED)) {
                 field = xhci_readl(xhci, db_addr) & DB_MASK;
                 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
                 /* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -354,7 +363,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                 struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
         struct xhci_virt_device *dev = xhci->devs[slot_id];
-        struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+        struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
         struct xhci_generic_trb *trb;
         struct xhci_ep_ctx *ep_ctx;
         dma_addr_t addr;
@@ -362,7 +371,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         state->new_cycle_state = 0;
         xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
         state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-                        ep_ring->stopped_trb,
+                        dev->eps[ep_index].stopped_trb,
                         &state->new_cycle_state);
         if (!state->new_deq_seg)
                 BUG();
@@ -442,9 +451,11 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                 union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-                struct xhci_ring *ep_ring, unsigned int slot_id,
-                unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+                unsigned int slot_id, unsigned int ep_index,
+                struct xhci_dequeue_state *deq_state)
 {
+        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+
         xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
                         "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
                         deq_state->new_deq_seg,
@@ -461,8 +472,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
          * if the ring is running, and ringing the doorbell starts the
          * ring running.
          */
-        ep_ring->state |= SET_DEQ_PENDING;
-        xhci_ring_cmd_db(xhci);
+        ep->ep_state |= SET_DEQ_PENDING;
 }
 
 /*
@@ -481,6 +491,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
         unsigned int slot_id;
         unsigned int ep_index;
         struct xhci_ring *ep_ring;
+        struct xhci_virt_ep *ep;
         struct list_head *entry;
         struct xhci_td *cur_td = 0;
         struct xhci_td *last_unlinked_td;
@@ -493,9 +504,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
         memset(&deq_state, 0, sizeof(deq_state));
         slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
         ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-        ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+        ep = &xhci->devs[slot_id]->eps[ep_index];
+        ep_ring = ep->ring;
 
-        if (list_empty(&ep_ring->cancelled_td_list))
+        if (list_empty(&ep->cancelled_td_list))
                 return;
 
         /* Fix up the ep ring first, so HW stops executing cancelled TDs.
@@ -503,7 +515,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
          * it.  We're also in the event handler, so we can't get re-interrupted
          * if another Stop Endpoint command completes
          */
-        list_for_each(entry, &ep_ring->cancelled_td_list) {
+        list_for_each(entry, &ep->cancelled_td_list) {
                 cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
                 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
                                 cur_td->first_trb,
@@ -512,7 +524,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                  * If we stopped on the TD we need to cancel, then we have to
                  * move the xHC endpoint ring dequeue pointer past this TD.
                  */
-                if (cur_td == ep_ring->stopped_td)
+                if (cur_td == ep->stopped_td)
                         xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
                                         &deq_state);
                 else
@@ -523,14 +535,15 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
                  * the cancelled TD list for URB completion later.
                  */
                 list_del(&cur_td->td_list);
-                ep_ring->cancels_pending--;
+                ep->cancels_pending--;
         }
         last_unlinked_td = cur_td;
 
         /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
         if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-                xhci_queue_new_dequeue_state(xhci, ep_ring,
+                xhci_queue_new_dequeue_state(xhci,
                                 slot_id, ep_index, &deq_state);
+                xhci_ring_cmd_db(xhci);
         } else {
                 /* Otherwise just ring the doorbell to restart the ring */
                 ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -543,7 +556,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
          * So stop when we've completed the URB for the last TD we unlinked.
          */
         do {
-                cur_td = list_entry(ep_ring->cancelled_td_list.next,
+                cur_td = list_entry(ep->cancelled_td_list.next,
                                 struct xhci_td, cancelled_td_list);
                 list_del(&cur_td->cancelled_td_list);
 
@@ -590,7 +603,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
         slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
         ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
         dev = xhci->devs[slot_id];
-        ep_ring = dev->ep_rings[ep_index];
+        ep_ring = dev->eps[ep_index].ring;
         ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
         slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
@@ -634,7 +647,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
                                 ep_ctx->deq);
         }
 
-        ep_ring->state &= ~SET_DEQ_PENDING;
+        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
         ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
@@ -644,18 +657,60 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 {
         int slot_id;
         unsigned int ep_index;
+        struct xhci_ring *ep_ring;
 
         slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
         ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
         /* This command will only fail if the endpoint wasn't halted,
          * but we don't care.
          */
         xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
                         (unsigned int) GET_COMP_CODE(event->status));
 
-        /* Clear our internal halted state and restart the ring */
-        xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
-        ring_ep_doorbell(xhci, slot_id, ep_index);
+        /* HW with the reset endpoint quirk needs to have a configure endpoint
+         * command complete before the endpoint can be used.  Queue that here
+         * because the HW can't handle two commands being queued in a row.
+         */
+        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+                xhci_dbg(xhci, "Queueing configure endpoint command\n");
+                xhci_queue_configure_endpoint(xhci,
+                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
+                                false);
+                xhci_ring_cmd_db(xhci);
+        } else {
+                /* Clear our internal halted state and restart the ring */
+                xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+                ring_ep_doorbell(xhci, slot_id, ep_index);
+        }
+}
+
+/* Check to see if a command in the device's command queue matches this one.
+ * Signal the completion or free the command, and return 1.  Return 0 if the
+ * completed command isn't at the head of the command list.
+ */
+static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+                struct xhci_virt_device *virt_dev,
+                struct xhci_event_cmd *event)
+{
+        struct xhci_command *command;
+
+        if (list_empty(&virt_dev->cmd_list))
+                return 0;
+
+        command = list_entry(virt_dev->cmd_list.next,
+                        struct xhci_command, cmd_list);
+        if (xhci->cmd_ring->dequeue != command->command_trb)
+                return 0;
+
+        command->status =
+                GET_COMP_CODE(event->status);
+        list_del(&command->cmd_list);
+        if (command->completion)
+                complete(command->completion);
+        else
+                xhci_free_command(xhci, command);
+        return 1;
 }
 
 static void handle_cmd_completion(struct xhci_hcd *xhci,
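For context, the submission side that pairs with handle_cmd_in_cmd_wait_list() is not part of this file. A hedged sketch of the expected pattern, using only the xhci_command fields visible above (locking, error handling, and any allocation helper are omitted or assumed):

/* Sketch: queue a configure endpoint command and wait for the event
 * handler to complete it via the device's cmd_list.
 */
static int submit_and_wait_for_command(struct xhci_hcd *xhci,
                struct xhci_virt_device *virt_dev, int slot_id)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct xhci_command command = { .completion = &done };

        /* Remember which TRB this command will occupy so the event
         * handler can match it against xhci->cmd_ring->dequeue.
         */
        command.command_trb = xhci->cmd_ring->enqueue;
        list_add_tail(&command.cmd_list, &virt_dev->cmd_list);

        xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
                        slot_id, false);
        xhci_ring_cmd_db(xhci);

        wait_for_completion(&done);     /* signalled by handle_cmd_in_cmd_wait_list() */
        return command.status;          /* GET_COMP_CODE() of the completion event */
}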
@@ -664,6 +719,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
         int slot_id = TRB_TO_SLOT_ID(event->flags);
         u64 cmd_dma;
         dma_addr_t cmd_dequeue_dma;
+        struct xhci_input_control_ctx *ctrl_ctx;
+        struct xhci_virt_device *virt_dev;
+        unsigned int ep_index;
+        struct xhci_ring *ep_ring;
+        unsigned int ep_state;
 
         cmd_dma = event->cmd_trb;
         cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
@@ -691,6 +751,47 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                 xhci_free_virt_device(xhci, slot_id);
                 break;
         case TRB_TYPE(TRB_CONFIG_EP):
+                virt_dev = xhci->devs[slot_id];
+                if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+                        break;
+                /*
+                 * Configure endpoint commands can come from the USB core
+                 * configuration or alt setting changes, or because the HW
+                 * needed an extra configure endpoint command after a reset
+                 * endpoint command.  In the latter case, the xHCI driver is
+                 * not waiting on the configure endpoint command.
+                 */
+                ctrl_ctx = xhci_get_input_control_ctx(xhci,
+                                virt_dev->in_ctx);
+                /* Input ctx add_flags are the endpoint index plus one */
+                ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
+                ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+                if (!ep_ring) {
+                        /* This must have been an initial configure endpoint */
+                        xhci->devs[slot_id]->cmd_status =
+                                GET_COMP_CODE(event->status);
+                        complete(&xhci->devs[slot_id]->cmd_completion);
+                        break;
+                }
+                ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+                xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
+                                "state = %d\n", ep_index, ep_state);
+                if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
+                                ep_state & EP_HALTED) {
+                        /* Clear our internal halted state and restart ring */
+                        xhci->devs[slot_id]->eps[ep_index].ep_state &=
+                                ~EP_HALTED;
+                        ring_ep_doorbell(xhci, slot_id, ep_index);
+                } else {
+                        xhci->devs[slot_id]->cmd_status =
+                                GET_COMP_CODE(event->status);
+                        complete(&xhci->devs[slot_id]->cmd_completion);
+                }
+                break;
+        case TRB_TYPE(TRB_EVAL_CONTEXT):
+                virt_dev = xhci->devs[slot_id];
+                if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+                        break;
                 xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
                 complete(&xhci->devs[slot_id]->cmd_completion);
                 break;
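The xhci_last_valid_endpoint() helper used above is defined elsewhere; the intent is to find the highest set bit in the input context add_flags, since flag bit N (N >= 1) describes the endpoint with index N - 1 and bit 0 is the slot context. A minimal sketch of what such a helper presumably looks like (the exact implementation is an assumption):

/* Sketch only: fls() returns the 1-based position of the highest set
 * bit, so fls(add_flags) - 1 is the bit number of the last context
 * touched, i.e. "endpoint index plus one"; the caller subtracts one
 * more to get the zero-based endpoint index.
 */
static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
        return fls(added_ctxs) - 1;     /* assumed implementation */
}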
@@ -805,7 +906,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 struct xhci_transfer_event *event)
 {
         struct xhci_virt_device *xdev;
+        struct xhci_virt_ep *ep;
         struct xhci_ring *ep_ring;
+        unsigned int slot_id;
         int ep_index;
         struct xhci_td *td = 0;
         dma_addr_t event_dma;
@@ -814,9 +917,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         struct urb *urb = 0;
         int status = -EINPROGRESS;
         struct xhci_ep_ctx *ep_ctx;
+        u32 trb_comp_code;
 
         xhci_dbg(xhci, "In %s\n", __func__);
-        xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+        slot_id = TRB_TO_SLOT_ID(event->flags);
+        xdev = xhci->devs[slot_id];
         if (!xdev) {
                 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
                 return -ENODEV;
@@ -825,7 +930,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         /* Endpoint ID is 1 based, our index is zero based */
         ep_index = TRB_TO_EP_ID(event->flags) - 1;
         xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
-        ep_ring = xdev->ep_rings[ep_index];
+        ep = &xdev->eps[ep_index];
+        ep_ring = ep->ring;
         ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
         if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
                 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
@@ -870,7 +976,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         (unsigned int) event->flags);
 
         /* Look for common error cases */
-        switch (GET_COMP_CODE(event->transfer_len)) {
+        trb_comp_code = GET_COMP_CODE(event->transfer_len);
+        switch (trb_comp_code) {
         /* Skip codes that require special handling depending on
          * transfer type
          */
@@ -885,7 +992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 break;
         case COMP_STALL:
                 xhci_warn(xhci, "WARN: Stalled endpoint\n");
-                ep_ring->state |= EP_HALTED;
+                ep->ep_state |= EP_HALTED;
                 status = -EPIPE;
                 break;
         case COMP_TRB_ERR:
@@ -913,7 +1020,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         /* Was this a control transfer? */
         if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
                 xhci_debug_trb(xhci, xhci->event_ring->dequeue);
-                switch (GET_COMP_CODE(event->transfer_len)) {
+                switch (trb_comp_code) {
                 case COMP_SUCCESS:
                         if (event_trb == ep_ring->dequeue) {
                                 xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
@@ -928,8 +1035,37 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         break;
                 case COMP_SHORT_TX:
                         xhci_warn(xhci, "WARN: short transfer on control ep\n");
-                        status = -EREMOTEIO;
+                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+                                status = -EREMOTEIO;
+                        else
+                                status = 0;
                         break;
+                case COMP_BABBLE:
+                        /* The 0.96 spec says a babbling control endpoint
+                         * is not halted. The 0.96 spec says it is.  Some HW
+                         * claims to be 0.95 compliant, but it halts the control
+                         * endpoint anyway.  Check if a babble halted the
+                         * endpoint.
+                         */
+                        if (ep_ctx->ep_info != EP_STATE_HALTED)
+                                break;
+                        /* else fall through */
+                case COMP_STALL:
+                        /* Did we transfer part of the data (middle) phase? */
+                        if (event_trb != ep_ring->dequeue &&
+                                        event_trb != td->last_trb)
+                                td->urb->actual_length =
+                                        td->urb->transfer_buffer_length
+                                        - TRB_LEN(event->transfer_len);
+                        else
+                                td->urb->actual_length = 0;
+
+                        ep->stopped_td = td;
+                        ep->stopped_trb = event_trb;
+                        xhci_queue_reset_ep(xhci, slot_id, ep_index);
+                        xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+                        xhci_ring_cmd_db(xhci);
+                        goto td_cleanup;
                 default:
                         /* Others already handled above */
                         break;
@@ -943,7 +1079,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 if (event_trb == td->last_trb) {
                         if (td->urb->actual_length != 0) {
                                 /* Don't overwrite a previously set error code */
-                                if (status == -EINPROGRESS || status == 0)
+                                if ((status == -EINPROGRESS ||
+                                                        status == 0) &&
+                                                (td->urb->transfer_flags
+                                                 & URB_SHORT_NOT_OK))
                                         /* Did we already see a short data stage? */
                                         status = -EREMOTEIO;
                         } else {
@@ -952,7 +1091,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         }
                 } else {
                         /* Maybe the event was for the data stage? */
-                        if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
+                        if (trb_comp_code != COMP_STOP_INVAL) {
                                 /* We didn't stop on a link TRB in the middle */
                                 td->urb->actual_length =
                                         td->urb->transfer_buffer_length -
@@ -964,7 +1103,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         }
                 }
         } else {
-                switch (GET_COMP_CODE(event->transfer_len)) {
+                switch (trb_comp_code) {
                 case COMP_SUCCESS:
                         /* Double check that the HW transferred everything. */
                         if (event_trb != td->last_trb) {
@@ -975,7 +1114,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                 else
                                         status = 0;
                         } else {
-                                xhci_dbg(xhci, "Successful bulk transfer!\n");
+                                if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
+                                        xhci_dbg(xhci, "Successful bulk "
+                                                        "transfer!\n");
+                                else
+                                        xhci_dbg(xhci, "Successful interrupt "
+                                                        "transfer!\n");
                                 status = 0;
                         }
                         break;
@@ -1001,11 +1145,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         td->urb->actual_length =
                                 td->urb->transfer_buffer_length -
                                 TRB_LEN(event->transfer_len);
-                        if (td->urb->actual_length < 0) {
+                        if (td->urb->transfer_buffer_length <
+                                        td->urb->actual_length) {
                                 xhci_warn(xhci, "HC gave bad length "
                                                 "of %d bytes left\n",
                                                 TRB_LEN(event->transfer_len));
                                 td->urb->actual_length = 0;
+                                if (td->urb->transfer_flags &
+                                                URB_SHORT_NOT_OK)
+                                        status = -EREMOTEIO;
+                                else
+                                        status = 0;
                         }
                         /* Don't overwrite a previously set error code */
                         if (status == -EINPROGRESS) {
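The rewritten check matters because urb->actual_length is unsigned, so the old "actual_length < 0" test could never fire. A standalone illustration of the wraparound being guarded against (sketch only, values are made up):

/* Sketch: unsigned "buffer length minus residue" wraps instead of
 * going negative, so the bogus case must be caught by comparing the
 * two unsigned values directly.
 */
u32 transfer_buffer_length = 512;
u32 residue_from_event = 600;   /* bogus: xHC reports more "left" than was queued */
u32 actual_length = transfer_buffer_length - residue_from_event;

/* actual_length is now ~4 billion, not -88, so (actual_length < 0) is false */
if (transfer_buffer_length < actual_length)
        actual_length = 0;      /* treat as "nothing transferred" */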
@@ -1041,30 +1191,31 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                         /* If the ring didn't stop on a Link or No-op TRB, add
                          * in the actual bytes transferred from the Normal TRB
                          */
-                        if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+                        if (trb_comp_code != COMP_STOP_INVAL)
                                 td->urb->actual_length +=
                                         TRB_LEN(cur_trb->generic.field[2]) -
                                         TRB_LEN(event->transfer_len);
                 }
         }
-        if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
-                        GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+        if (trb_comp_code == COMP_STOP_INVAL ||
+                        trb_comp_code == COMP_STOP) {
                 /* The Endpoint Stop Command completion will take care of any
                  * stopped TDs.  A stopped TD may be restarted, so don't update
                  * the ring dequeue pointer or take this TD off any lists yet.
                  */
-                ep_ring->stopped_td = td;
-                ep_ring->stopped_trb = event_trb;
+                ep->stopped_td = td;
+                ep->stopped_trb = event_trb;
         } else {
-                if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
+                if (trb_comp_code == COMP_STALL ||
+                                trb_comp_code == COMP_BABBLE) {
                         /* The transfer is completed from the driver's
                          * perspective, but we need to issue a set dequeue
                          * command for this stalled endpoint to move the dequeue
                          * pointer past the TD.  We can't do that here because
                          * the halt condition must be cleared first.
                          */
-                        ep_ring->stopped_td = td;
-                        ep_ring->stopped_trb = event_trb;
+                        ep->stopped_td = td;
+                        ep->stopped_trb = event_trb;
                 } else {
                         /* Update ring dequeue pointer */
                         while (ep_ring->dequeue != td->last_trb)
@@ -1072,16 +1223,41 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                 inc_deq(xhci, ep_ring, false);
                 }
 
+td_cleanup:
                 /* Clean up the endpoint's TD list */
                 urb = td->urb;
+                /* Do one last check of the actual transfer length.
+                 * If the host controller said we transferred more data than
+                 * the buffer length, urb->actual_length will be a very big
+                 * number (since it's unsigned).  Play it safe and say we didn't
+                 * transfer anything.
+                 */
+                if (urb->actual_length > urb->transfer_buffer_length) {
+                        xhci_warn(xhci, "URB transfer length is wrong, "
+                                        "xHC issue? req. len = %u, "
+                                        "act. len = %u\n",
+                                        urb->transfer_buffer_length,
+                                        urb->actual_length);
+                        urb->actual_length = 0;
+                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+                                status = -EREMOTEIO;
+                        else
+                                status = 0;
+                }
                 list_del(&td->td_list);
                 /* Was this TD slated to be cancelled but completed anyway? */
                 if (!list_empty(&td->cancelled_td_list)) {
                         list_del(&td->cancelled_td_list);
-                        ep_ring->cancels_pending--;
+                        ep->cancels_pending--;
                 }
-                /* Leave the TD around for the reset endpoint function to use */
-                if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
+                /* Leave the TD around for the reset endpoint function to use
+                 * (but only if it's not a control endpoint, since we already
+                 * queued the Set TR dequeue pointer command for stalled
+                 * control endpoints).
+                 */
+                if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+                        (trb_comp_code != COMP_STALL &&
+                         trb_comp_code != COMP_BABBLE)) {
                         kfree(td);
                 }
                 urb->hcpriv = NULL;
@@ -1094,7 +1270,7 @@ cleanup:
         if (urb) {
                 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
                 xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
-                                urb, td->urb->actual_length, status);
+                                urb, urb->actual_length, status);
                 spin_unlock(&xhci->lock);
                 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
                 spin_lock(&xhci->lock);
@@ -1235,7 +1411,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 {
         int ret;
         struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-        ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
+        ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
                         ep_ctx->ep_info & EP_STATE_MASK,
                         num_trbs, mem_flags);
         if (ret)
@@ -1255,9 +1431,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
         (*td)->urb = urb;
         urb->hcpriv = (void *) (*td);
         /* Add this TD to the tail of the endpoint ring's TD list */
-        list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
-        (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
-        (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
+        list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
+        (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
+        (*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
 
         return 0;
 }
@@ -1335,6 +1511,47 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
         ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
+/*
+ * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
+ * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+                struct urb *urb, int slot_id, unsigned int ep_index)
+{
+        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
+                        xhci->devs[slot_id]->out_ctx, ep_index);
+        int xhci_interval;
+        int ep_interval;
+
+        xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+        ep_interval = urb->interval;
+        /* Convert to microframes */
+        if (urb->dev->speed == USB_SPEED_LOW ||
+                        urb->dev->speed == USB_SPEED_FULL)
+                ep_interval *= 8;
+        /* FIXME change this to a warning and a suggestion to use the new API
+         * to set the polling interval (once the API is added).
+         */
+        if (xhci_interval != ep_interval) {
+                if (!printk_ratelimit())
+                        dev_dbg(&urb->dev->dev, "Driver uses different interval"
+                                        " (%d microframe%s) than xHCI "
+                                        "(%d microframe%s)\n",
+                                        ep_interval,
+                                        ep_interval == 1 ? "" : "s",
+                                        xhci_interval,
+                                        xhci_interval == 1 ? "" : "s");
+                urb->interval = xhci_interval;
+                /* Convert back to frames for LS/FS devices */
+                if (urb->dev->speed == USB_SPEED_LOW ||
+                                urb->dev->speed == USB_SPEED_FULL)
+                        urb->interval /= 8;
+        }
+        return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+}
+
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 struct urb *urb, int slot_id, unsigned int ep_index)
 {
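As a worked example of the interval comparison above: a full-speed driver asking for urb->interval = 4 frames becomes 32 microframes after the LS/FS "*= 8" conversion; if the endpoint context encodes the same 2^5 = 32 microframes the URB is left alone, but if the xHC rounded up to 2^6 = 64, urb->interval is rewritten to 64 and converted back to 8 frames. A sketch of the decode, assuming the macro extracts a power-of-two exponent from the interval field of ep_info (the exact bit layout here is an assumption):

/* Sketch only: the endpoint context stores the service interval as an
 * exponent n, meaning 2^n microframes.
 */
#define EP_INTERVAL_TO_UFRAMES(ep_info)  (1 << (((ep_info) >> 16) & 0xff))

int ep_interval = 4 * 8;                                /* 32 uframes from the driver */
int xhci_interval = EP_INTERVAL_TO_UFRAMES(5 << 16);    /* 2^5 = 32 uframes from the xHC */
/* equal here, so urb->interval keeps its original value */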
@@ -1350,7 +1567,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         struct xhci_generic_trb *start_trb;
         int start_cycle;
 
-        ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
         num_trbs = count_sg_trbs_needed(xhci, urb);
         num_sgs = urb->num_sgs;
 
@@ -1483,7 +1700,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         if (urb->sg)
                 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
 
-        ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
         num_trbs = 0;
         /* How much data is (potentially) left before the 64KB boundary? */
@@ -1594,7 +1811,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         u32 field, length_field;
         struct xhci_td *td;
 
-        ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
         /*
          * Need to copy setup packet into setup TRB, so we can't use the setup
@@ -1677,12 +1894,27 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 /**** Command Ring Operations ****/
 
-/* Generic function for queueing a command TRB on the command ring */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+/* Generic function for queueing a command TRB on the command ring.
+ * Check to make sure there's room on the command ring for one command TRB.
+ * Also check that there's room reserved for commands that must not fail.
+ * If this is a command that must not fail, meaning command_must_succeed = TRUE,
+ * then only check for the number of reserved spots.
+ * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
+ * because the command event handler may want to resubmit a failed command.
+ */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
+                u32 field3, u32 field4, bool command_must_succeed)
 {
-        if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+        if (!command_must_succeed)
+                reserved_trbs++;
+
+        if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
                 if (!in_interrupt())
                         xhci_err(xhci, "ERR: No room for command on command ring\n");
+                if (command_must_succeed)
+                        xhci_err(xhci, "ERR: Reserved TRB counting for "
+                                        "unfailable commands failed.\n");
                 return -ENOMEM;
         }
         queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
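The command_must_succeed flag pairs with xhci->cmd_ring_reserved_trbs: an ordinary command must leave the reserved slots untouched (so it asks for reserved + 1 free TRBs), while a command whose slot was reserved in advance only needs the reservation itself to still be there. A hedged sketch of the two caller patterns (in_ctx_dma is a hypothetical variable name):

/* Normal path - may fail with -ENOMEM if the ring is nearly full */
ret = xhci_queue_configure_endpoint(xhci, in_ctx_dma, slot_id, false);

/* "Must succeed" path - assumes xhci->cmd_ring_reserved_trbs was
 * incremented earlier, e.g. when the endpoint was set up, so this call
 * consumes one of the reserved TRBs instead of competing for free space.
 */
ret = xhci_queue_configure_endpoint(xhci, in_ctx_dma, slot_id, true);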
@@ -1693,7 +1925,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 fiel
 /* Queue a no-op command on the command ring */
 static int queue_cmd_noop(struct xhci_hcd *xhci)
 {
-        return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+        return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
 }
 
 /*
@@ -1712,7 +1944,7 @@ void *xhci_setup_one_noop(struct xhci_hcd *xhci)
 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 {
         return queue_command(xhci, 0, 0, 0,
-                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
+                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
 }
 
 /* Queue an address device command TRB */
@@ -1721,16 +1953,28 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 {
         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
                         upper_32_bits(in_ctx_ptr), 0,
-                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
+                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
+                        false);
 }
 
 /* Queue a configure endpoint command TRB */
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+                u32 slot_id, bool command_must_succeed)
+{
+        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+                        upper_32_bits(in_ctx_ptr), 0,
+                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
+                        command_must_succeed);
+}
+
+/* Queue an evaluate context command TRB */
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                 u32 slot_id)
 {
         return queue_command(xhci, lower_32_bits(in_ctx_ptr),
                         upper_32_bits(in_ctx_ptr), 0,
-                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
+                        TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
+                        false);
 }
 
 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
@@ -1741,7 +1985,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
         u32 type = TRB_TYPE(TRB_STOP_RING);
 
         return queue_command(xhci, 0, 0, 0,
-                        trb_slot_id | trb_ep_index | type);
+                        trb_slot_id | trb_ep_index | type, false);
 }
 
 /* Set Transfer Ring Dequeue Pointer command.
@@ -1765,7 +2009,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
         }
         return queue_command(xhci, lower_32_bits(addr) | cycle_state,
                         upper_32_bits(addr), 0,
-                        trb_slot_id | trb_ep_index | type);
+                        trb_slot_id | trb_ep_index | type, false);
 }
 
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -1775,5 +2019,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
         u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
         u32 type = TRB_TYPE(TRB_RESET_EP);
 
-        return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
+        return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
+                        false);
 }