Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	| 371
1 file changed, 310 insertions(+), 61 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 821b7b4709de..85d7e8f2085e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -65,6 +65,7 @@
  */
 
 #include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include "xhci.h"
 
 /*
@@ -306,7 +307,7 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 */
-	if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
			&& !(ep_state & EP_HALTED)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
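
Side note: the three flag tests in the new condition could equally be folded into a single mask test. A hypothetical equivalent, not what the patch uses:

	if (!(ep_state & (EP_HALT_PENDING | SET_DEQ_PENDING | EP_HALTED))) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
	}

The patch keeps the tests separate, matching the surrounding style.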
@@ -475,6 +476,35 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
	ep->ep_state |= SET_DEQ_PENDING;
 }
 
+static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+		struct xhci_virt_ep *ep)
+{
+	ep->ep_state &= ~EP_HALT_PENDING;
+	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
+	 * timer is running on another CPU, we don't decrement stop_cmds_pending
+	 * (since we didn't successfully stop the watchdog timer).
+	 */
+	if (del_timer(&ep->stop_cmd_timer))
+		ep->stop_cmds_pending--;
+}
+
+/* Must be called with xhci->lock held in interrupt context */
+static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+		struct xhci_td *cur_td, int status, char *adjective)
+{
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+	cur_td->urb->hcpriv = NULL;
+	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
+	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+
+	spin_unlock(&xhci->lock);
+	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
+	kfree(cur_td);
+	spin_lock(&xhci->lock);
+	xhci_dbg(xhci, "%s URB given back\n", adjective);
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
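
Why del_timer() rather than del_timer_sync() in xhci_stop_watchdog_timer_in_irq(): the watchdog handler itself takes xhci->lock, which is already held here. A sketch of the ordering being avoided (illustrative only, not part of the patch):

	/*
	 * CPU0 (IRQ path, holds xhci->lock)      CPU1 (watchdog fires)
	 *
	 * spin_lock(&xhci->lock);
	 *                                        spin_lock(&xhci->lock);
	 *                                            ...spins on CPU0...
	 * del_timer_sync(&ep->stop_cmd_timer);
	 *     ...waits for CPU1's handler to finish: deadlock.
	 */

del_timer() merely attempts the cancel; when it fails, stop_cmds_pending is deliberately left alone so the running handler's own decrement stays balanced.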
@@ -497,9 +527,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
	struct xhci_td *last_unlinked_td;
 
	struct xhci_dequeue_state deq_state;
-#ifdef CONFIG_USB_HCD_STAT
-	ktime_t stop_time = ktime_get();
-#endif
 
	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
@@ -507,8 +534,11 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
	ep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = ep->ring;
 
-	if (list_empty(&ep->cancelled_td_list))
+	if (list_empty(&ep->cancelled_td_list)) {
+		xhci_stop_watchdog_timer_in_irq(xhci, ep);
+		ring_ep_doorbell(xhci, slot_id, ep_index);
		return;
+	}
 
	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
@@ -535,9 +565,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
-		ep->cancels_pending--;
	}
	last_unlinked_td = cur_td;
+	xhci_stop_watchdog_timer_in_irq(xhci, ep);
 
	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
@@ -561,27 +591,136 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		list_del(&cur_td->cancelled_td_list);
 
		/* Clean up the cancelled URB */
-#ifdef CONFIG_USB_HCD_STAT
-		hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
-				ktime_sub(stop_time, cur_td->start_time));
-#endif
-		cur_td->urb->hcpriv = NULL;
-		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
-
-		xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
-		spin_unlock(&xhci->lock);
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
-		usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
-		kfree(cur_td);
+		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
 
-		spin_lock(&xhci->lock);
+		/* Stop processing the cancelled list if the watchdog timer is
+		 * running.
+		 */
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			return;
	} while (cur_td != last_unlinked_td);
 
	/* Return to the event handler with xhci->lock re-acquired */
 }
 
+/* Watchdog timer function for when a stop endpoint command fails to complete.
+ * In this case, we assume the host controller is broken or dying or dead.  The
+ * host may still be completing some other events, so we have to be careful to
+ * let the event ring handler and the URB dequeueing/enqueueing functions know
+ * through xhci->xhc_state.
+ *
+ * The timer may also fire if the host takes a very long time to respond to the
+ * command, and the stop endpoint command completion handler cannot delete the
+ * timer before the timer function is called.  Another endpoint cancellation may
+ * sneak in before the timer function can grab the lock, and that may queue
+ * another stop endpoint command and add the timer back.  So we cannot use a
+ * simple flag to say whether there is a pending stop endpoint command for a
+ * particular endpoint.
+ *
+ * Instead we use a combination of that flag and a counter for the number of
+ * pending stop endpoint commands.  If the timer is the tail end of the last
+ * stop endpoint command, and the endpoint's command is still pending, we assume
+ * the host is dying.
+ */
+void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_virt_ep *ep;
+	struct xhci_virt_ep *temp_ep;
+	struct xhci_ring *ring;
+	struct xhci_td *cur_td;
+	int ret, i, j;
+
+	ep = (struct xhci_virt_ep *) arg;
+	xhci = ep->xhci;
+
+	spin_lock(&xhci->lock);
+
+	ep->stop_cmds_pending--;
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
+				"xHCI as DYING, exiting.\n");
+		spin_unlock(&xhci->lock);
+		return;
+	}
+	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
+		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
+				"exiting.\n");
+		spin_unlock(&xhci->lock);
+		return;
+	}
+
+	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
+	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
+	/* Oops, HC is dead or dying or at least not responding to the stop
+	 * endpoint command.
+	 */
+	xhci->xhc_state |= XHCI_STATE_DYING;
+	/* Disable interrupts from the host controller and start halting it */
+	xhci_quiesce(xhci);
+	spin_unlock(&xhci->lock);
+
+	ret = xhci_halt(xhci);
+
+	spin_lock(&xhci->lock);
+	if (ret < 0) {
+		/* This is bad; the host is not responding to commands and it's
+		 * not allowing itself to be halted.  At least interrupts are
+		 * disabled, so we can set HC_STATE_HALT and notify the
+		 * USB core.  But if we call usb_hc_died(), it will attempt to
+		 * disconnect all device drivers under this host.  Those
+		 * disconnect() methods will wait for all URBs to be unlinked,
+		 * so we must complete them.
+		 */
+		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
+		xhci_warn(xhci, "Completing active URBs anyway.\n");
+		/* We could turn all TDs on the rings to no-ops.  This won't
+		 * help if the host has cached part of the ring, and is slow if
+		 * we want to preserve the cycle bit.  Skip it and hope the host
+		 * doesn't touch the memory.
+		 */
+	}
+	for (i = 0; i < MAX_HC_SLOTS; i++) {
+		if (!xhci->devs[i])
+			continue;
+		for (j = 0; j < 31; j++) {
+			temp_ep = &xhci->devs[i]->eps[j];
+			ring = temp_ep->ring;
+			if (!ring)
+				continue;
+			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
+					"ep index %u\n", i, j);
+			while (!list_empty(&ring->td_list)) {
+				cur_td = list_first_entry(&ring->td_list,
+						struct xhci_td,
+						td_list);
+				list_del(&cur_td->td_list);
+				if (!list_empty(&cur_td->cancelled_td_list))
+					list_del(&cur_td->cancelled_td_list);
+				xhci_giveback_urb_in_irq(xhci, cur_td,
+						-ESHUTDOWN, "killed");
+			}
+			while (!list_empty(&temp_ep->cancelled_td_list)) {
+				cur_td = list_first_entry(
+						&temp_ep->cancelled_td_list,
+						struct xhci_td,
+						cancelled_td_list);
+				list_del(&cur_td->cancelled_td_list);
+				xhci_giveback_urb_in_irq(xhci, cur_td,
+						-ESHUTDOWN, "killed");
+			}
+		}
+	}
+	spin_unlock(&xhci->lock);
+	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+	xhci_dbg(xhci, "Calling usb_hc_died()\n");
+	usb_hc_died(xhci_to_hcd(xhci));
+	xhci_dbg(xhci, "xHCI host controller is dead.\n");
+}
+
 /*
  * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
  * we need to clear the set deq pending flag in the endpoint ring state, so that
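
Only the expiry side of the watchdog appears in this file. For orientation, a sketch of the arming side that would live in the URB-dequeue path in xhci.c (not part of this diff; the timeout constant and the exact call sequence are assumptions):

	/* Hypothetical sketch: arm the watchdog when a Stop Endpoint
	 * command is queued for an URB cancellation.
	 */
	ep->ep_state |= EP_HALT_PENDING;
	ep->stop_cmds_pending++;
	ep->stop_cmd_timer.expires = jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ;
	add_timer(&ep->stop_cmd_timer);
	xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
	xhci_ring_cmd_db(xhci);

This pairs with the stop_cmds_pending-- and EP_HALT_PENDING tests in the timer function above.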
@@ -765,28 +904,32 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
-		ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
-		if (!ep_ring) {
-			/* This must have been an initial configure endpoint */
-			xhci->devs[slot_id]->cmd_status =
-				GET_COMP_CODE(event->status);
-			complete(&xhci->devs[slot_id]->cmd_completion);
-			break;
-		}
-		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
-		xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
-				"state = %d\n", ep_index, ep_state);
+		/* A usb_set_interface() call directly after clearing a halted
+		 * condition may race on this quirky hardware.
+		 * Not worth worrying about, since this is prototype hardware.
+		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
-				ep_state & EP_HALTED) {
+				ep_index != (unsigned int) -1 &&
+				ctrl_ctx->add_flags - SLOT_FLAG ==
+					ctrl_ctx->drop_flags) {
+			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+			if (!(ep_state & EP_HALTED))
+				goto bandwidth_change;
+			xhci_dbg(xhci, "Completed config ep cmd - "
+					"last ep index = %d, state = %d\n",
+					ep_index, ep_state);
			/* Clear our internal halted state and restart ring */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_ep_doorbell(xhci, slot_id, ep_index);
-		} else {
-			xhci->devs[slot_id]->cmd_status =
-				GET_COMP_CODE(event->status);
-			complete(&xhci->devs[slot_id]->cmd_completion);
+			break;
		}
+bandwidth_change:
+		xhci_dbg(xhci, "Completed config ep cmd\n");
+		xhci->devs[slot_id]->cmd_status =
+			GET_COMP_CODE(event->status);
+		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
@@ -811,6 +954,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
+	case TRB_TYPE(TRB_RESET_DEV):
+		xhci_dbg(xhci, "Completed reset device command.\n");
+		slot_id = TRB_TO_SLOT_ID(
+				xhci->cmd_ring->dequeue->generic.field[3]);
+		virt_dev = xhci->devs[slot_id];
+		if (virt_dev)
+			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+		else
+			xhci_warn(xhci, "Reset device command completion "
+					"for disabled slot %u\n", slot_id);
+		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
@@ -849,8 +1003,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
  * TRB in this TD, this function returns that TRB's segment.  Otherwise it
  * returns 0.
  */
-static struct xhci_segment *trb_in_td(
-		struct xhci_segment *start_seg,
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma)
@@ -900,6 +1053,59 @@ static struct xhci_segment *trb_in_td(
	return 0;
 }
 
+static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_td *td, union xhci_trb *event_trb)
+{
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep->ep_state |= EP_HALTED;
+	ep->stopped_td = td;
+	ep->stopped_trb = event_trb;
+	xhci_queue_reset_ep(xhci, slot_id, ep_index);
+	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+	xhci_ring_cmd_db(xhci);
+}
+
+/* Check if an error has halted the endpoint ring.  The class driver will
+ * clean up the halt for a non-default control endpoint if we indicate a stall.
+ * However, a babble and other errors also halt the endpoint ring, and the class
+ * driver won't clear the halt in that case, so we need to issue a Set Transfer
+ * Ring Dequeue Pointer command manually.
+ */
+static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		unsigned int trb_comp_code)
+{
+	/* TRB completion codes that may require a manual halt cleanup */
+	if (trb_comp_code == COMP_TX_ERR ||
+			trb_comp_code == COMP_BABBLE ||
+			trb_comp_code == COMP_SPLIT_ERR)
+		/* The 0.95 spec says a babbling control endpoint
+		 * is not halted.  The 0.96 spec says it is.  Some HW
+		 * claims to be 0.95 compliant, but it halts the control
+		 * endpoint anyway.  Check if a babble halted the
+		 * endpoint.
+		 */
+		if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
+			return 1;
+
+	return 0;
+}
+
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+{
+	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+		/* Vendor defined "informational" completion code,
+		 * treat as not-an-error.
+		 */
+		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+				trb_comp_code);
+		xhci_dbg(xhci, "Treating code as success.\n");
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * If this function returns an error condition, it means it got a Transfer
  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
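
Condensed from the handle_tx_event() hunks further down, the three helpers above combine roughly like this:

	if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
		status = 0;	/* codes 224-255 are vendor info, not errors */
	} else if (xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
				trb_comp_code)) {
		/* Babble/transaction/split errors halt the ring, but class
		 * drivers only recover from -EPIPE, so reset it ourselves.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
				td, event_trb);
	}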
@@ -1002,6 +1208,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
+	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
		status = -EPROTO;
@@ -1015,6 +1222,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		status = -ENOSR;
		break;
	default:
+		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
+			status = 0;
+			break;
+		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
		urb = NULL;
		goto cleanup;
@@ -1043,15 +1254,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		else
			status = 0;
		break;
-	case COMP_BABBLE:
-		/* The 0.95 spec says a babbling control endpoint
-		 * is not halted.  The 0.96 spec says it is.  Some HW
-		 * claims to be 0.95 compliant, but it halts the control
-		 * endpoint anyway.  Check if a babble halted the
-		 * endpoint.
-		 */
-		if (ep_ctx->ep_info != EP_STATE_HALTED)
+
+	default:
+		if (!xhci_requires_manual_halt_cleanup(xhci,
+					ep_ctx, trb_comp_code))
			break;
+		xhci_dbg(xhci, "TRB error code %u, "
+				"halted endpoint index = %u\n",
+				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
@@ -1063,15 +1273,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			else
				td->urb->actual_length = 0;
 
-			ep->stopped_td = td;
-			ep->stopped_trb = event_trb;
-			xhci_queue_reset_ep(xhci, slot_id, ep_index);
-			xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
-			xhci_ring_cmd_db(xhci);
+			xhci_cleanup_halted_endpoint(xhci,
+					slot_id, ep_index, td, event_trb);
			goto td_cleanup;
-		default:
-			/* Others already handled above */
-			break;
		}
		/*
		 * Did we transfer any data, despite the errors that might have
@@ -1209,16 +1413,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
		} else {
-			if (trb_comp_code == COMP_STALL ||
-					trb_comp_code == COMP_BABBLE) {
+			if (trb_comp_code == COMP_STALL) {
				/* The transfer is completed from the driver's
				 * perspective, but we need to issue a set dequeue
				 * command for this stalled endpoint to move the dequeue
				 * pointer past the TD.  We can't do that here because
-				 * the halt condition must be cleared first.
+				 * the halt condition must be cleared first.  Let the
+				 * USB class driver clear the stall later.
				 */
				ep->stopped_td = td;
				ep->stopped_trb = event_trb;
+			} else if (xhci_requires_manual_halt_cleanup(xhci,
+						ep_ctx, trb_comp_code)) {
+				/* Other types of errors halt the endpoint, but the
+				 * class driver doesn't call usb_reset_endpoint() unless
+				 * the error is -EPIPE.  Clear the halted status in the
+				 * xHCI hardware manually.
+				 */
+				xhci_cleanup_halted_endpoint(xhci,
+						slot_id, ep_index, td, event_trb);
			} else {
				/* Update ring dequeue pointer */
				while (ep_ring->dequeue != td->last_trb)
@@ -1249,10 +1462,9 @@ td_cleanup:
		}
		list_del(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
-		if (!list_empty(&td->cancelled_td_list)) {
+		if (!list_empty(&td->cancelled_td_list))
			list_del(&td->cancelled_td_list);
-			ep->cancels_pending--;
-		}
+
		/* Leave the TD around for the reset endpoint function to use
		 * (but only if it's not a control endpoint, since we already
		 * queued the Set TR dequeue pointer command for stalled
@@ -1331,6 +1543,14 @@ void xhci_handle_event(struct xhci_hcd *xhci)
	default:
		xhci->error_bitmask |= 1 << 3;
	}
+	/* Any of the above functions may drop and re-acquire the lock, so check
+	 * to make sure a watchdog timer didn't mark the host as non-responsive.
+	 */
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "xHCI host dying, returning from "
+				"event handler.\n");
+		return;
+	}
 
	if (update_ptrs) {
		/* Update SW and HC event ring dequeue pointer */
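
The same XHCI_STATE_DYING test has to guard every path that might touch the rings, including URB submission. A plausible sketch for the enqueue side, which lives in xhci.c and is outside this diff (treat the exact placement as an assumption):

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		/* Watchdog declared the host dead; refuse new I/O */
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ESHUTDOWN;
	}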
@@ -1555,6 +1775,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
 }
 
+/*
+ * The TD size is the number of bytes remaining in the TD (including this TRB),
+ * right shifted by 10.
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ */
+static u32 xhci_td_remainder(unsigned int remainder)
+{
+	u32 max = (1 << (21 - 17 + 1)) - 1;
+
+	if ((remainder >> 10) >= max)
+		return max << 17;
+	else
+		return (remainder >> 10) << 17;
+}
+
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
 {
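
A few worked values for xhci_td_remainder(), following the comment above (max = (1 << 5) - 1 = 31):

	xhci_td_remainder(0);		/* 0 >> 10 = 0, field = 0 << 17 */
	xhci_td_remainder(1024);	/* 1024 >> 10 = 1, field = 1 << 17 */
	xhci_td_remainder(70000);	/* 70000 >> 10 = 68 >= 31, capped:
					 * field = 31 << 17 */

So any TD with roughly 31 KB or more still to transfer reports the maximum value of 31.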
@@ -1612,6 +1847,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
	do {
		u32 field = 0;
		u32 length_field = 0;
+		u32 remainder = 0;
 
		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
@@ -1641,8 +1877,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
				(unsigned int) addr + trb_buff_len);
		}
+		remainder = xhci_td_remainder(urb->transfer_buffer_length -
+				running_total);
		length_field = TRB_LEN(trb_buff_len) |
-			TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+			remainder |
			TRB_INTR_TARGET(0);
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(addr),
@@ -1755,6 +1993,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
	/* Queue the first TRB, even if it's zero-length */
	do {
+		u32 remainder = 0;
		field = 0;
 
		/* Don't change the cycle bit of the first TRB until later */
@@ -1773,8 +2012,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
+		remainder = xhci_td_remainder(urb->transfer_buffer_length -
+				running_total);
		length_field = TRB_LEN(trb_buff_len) |
-			TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+			remainder |
			TRB_INTR_TARGET(0);
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(addr),
@@ -1862,7 +2103,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
	/* If there's data, queue data TRBs */
	field = 0;
	length_field = TRB_LEN(urb->transfer_buffer_length) |
-		TD_REMAINDER(urb->transfer_buffer_length) |
+		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
@@ -1960,6 +2201,14 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
			false);
 }
 
+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+{
+	return queue_command(xhci, 0, 0, 0,
+			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+			false);
+}
+
 /* Queue a configure endpoint command TRB */
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
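
A minimal usage sketch for the new command (locking and error handling elided; the completion arrives via the TRB_RESET_DEV case added to handle_cmd_completion() above):

	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret == 0)
		xhci_ring_cmd_db(xhci);	/* let the HC fetch the new TRB */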