Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c | 343
1 file changed, 282 insertions(+), 61 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 821b7b4709de..ee7bc7ecbc59 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -306,7 +306,7 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
 	/* Don't ring the doorbell for this endpoint if there are pending
 	 * cancellations because we don't want to interrupt processing.
 	 */
-	if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
 			&& !(ep_state & EP_HALTED)) {
 		field = xhci_readl(xhci, db_addr) & DB_MASK;
 		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
@@ -475,6 +475,35 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 	ep->ep_state |= SET_DEQ_PENDING;
 }
 
+static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+		struct xhci_virt_ep *ep)
+{
+	ep->ep_state &= ~EP_HALT_PENDING;
+	/* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
+	 * timer is running on another CPU, we don't decrement stop_cmds_pending
+	 * (since we didn't successfully stop the watchdog timer).
+	 */
+	if (del_timer(&ep->stop_cmd_timer))
+		ep->stop_cmds_pending--;
+}
+
+/* Must be called with xhci->lock held in interrupt context */
+static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+		struct xhci_td *cur_td, int status, char *adjective)
+{
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+	cur_td->urb->hcpriv = NULL;
+	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
+	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+
+	spin_unlock(&xhci->lock);
+	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
+	kfree(cur_td);
+	spin_lock(&xhci->lock);
+	xhci_dbg(xhci, "%s URB given back\n", adjective);
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring. There are two ways to do that:
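
Note: the xhci_giveback_urb_in_irq() helper added above drops xhci->lock
around usb_hcd_giveback_urb() because URB completion handlers may resubmit
from completion context, re-entering the HCD's enqueue path, which takes
xhci->lock again. A minimal sketch of a class-driver completion handler that
relies on this (hypothetical driver code, not part of this patch):

	static void example_complete(struct urb *urb)
	{
		/* Runs from xhci_giveback_urb_in_irq() with xhci->lock
		 * released; resubmitting re-enters the HCD's urb_enqueue
		 * method, which acquires xhci->lock.
		 */
		if (urb->status == 0)
			usb_submit_urb(urb, GFP_ATOMIC);
	}
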
@@ -497,9 +526,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	struct xhci_td *last_unlinked_td;
 
 	struct xhci_dequeue_state deq_state;
-#ifdef CONFIG_USB_HCD_STAT
-	ktime_t stop_time = ktime_get();
-#endif
 
 	memset(&deq_state, 0, sizeof(deq_state));
 	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
@@ -507,8 +533,11 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	ep = &xhci->devs[slot_id]->eps[ep_index];
 	ep_ring = ep->ring;
 
-	if (list_empty(&ep->cancelled_td_list))
+	if (list_empty(&ep->cancelled_td_list)) {
+		xhci_stop_watchdog_timer_in_irq(xhci, ep);
+		ring_ep_doorbell(xhci, slot_id, ep_index);
 		return;
+	}
 
 	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
 	 * We have the xHCI lock, so nothing can modify this list until we drop
@@ -535,9 +564,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		 * the cancelled TD list for URB completion later.
 		 */
 		list_del(&cur_td->td_list);
-		ep->cancels_pending--;
 	}
 	last_unlinked_td = cur_td;
+	xhci_stop_watchdog_timer_in_irq(xhci, ep);
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
@@ -561,27 +590,136 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		list_del(&cur_td->cancelled_td_list);
 
 		/* Clean up the cancelled URB */
-#ifdef CONFIG_USB_HCD_STAT
-		hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
-				ktime_sub(stop_time, cur_td->start_time));
-#endif
-		cur_td->urb->hcpriv = NULL;
-		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
-
-		xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
-		spin_unlock(&xhci->lock);
 		/* Doesn't matter what we pass for status, since the core will
 		 * just overwrite it (because the URB has been unlinked).
 		 */
-		usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
-		kfree(cur_td);
+		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
 
-		spin_lock(&xhci->lock);
+		/* Stop processing the cancelled list if the watchdog timer is
+		 * running.
+		 */
+		if (xhci->xhc_state & XHCI_STATE_DYING)
+			return;
 	} while (cur_td != last_unlinked_td);
 
 	/* Return to the event handler with xhci->lock re-acquired */
 }
 
+/* Watchdog timer function for when a stop endpoint command fails to complete.
+ * In this case, we assume the host controller is broken or dying or dead. The
+ * host may still be completing some other events, so we have to be careful to
+ * let the event ring handler and the URB dequeueing/enqueueing functions know
+ * through xhci->state.
+ *
+ * The timer may also fire if the host takes a very long time to respond to the
+ * command, and the stop endpoint command completion handler cannot delete the
+ * timer before the timer function is called. Another endpoint cancellation may
+ * sneak in before the timer function can grab the lock, and that may queue
+ * another stop endpoint command and add the timer back. So we cannot use a
+ * simple flag to say whether there is a pending stop endpoint command for a
+ * particular endpoint.
+ *
+ * Instead we use a combination of that flag and a counter for the number of
+ * pending stop endpoint commands. If the timer is the tail end of the last
+ * stop endpoint command, and the endpoint's command is still pending, we assume
+ * the host is dying.
+ */
+void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_virt_ep *ep;
+	struct xhci_virt_ep *temp_ep;
+	struct xhci_ring *ring;
+	struct xhci_td *cur_td;
+	int ret, i, j;
+
+	ep = (struct xhci_virt_ep *) arg;
+	xhci = ep->xhci;
+
+	spin_lock(&xhci->lock);
+
+	ep->stop_cmds_pending--;
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
+				"xHCI as DYING, exiting.\n");
+		spin_unlock(&xhci->lock);
+		return;
+	}
+	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
+		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
+				"exiting.\n");
+		spin_unlock(&xhci->lock);
+		return;
+	}
+
+	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
+	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
+	/* Oops, HC is dead or dying or at least not responding to the stop
+	 * endpoint command.
+	 */
+	xhci->xhc_state |= XHCI_STATE_DYING;
+	/* Disable interrupts from the host controller and start halting it */
+	xhci_quiesce(xhci);
+	spin_unlock(&xhci->lock);
+
+	ret = xhci_halt(xhci);
+
+	spin_lock(&xhci->lock);
+	if (ret < 0) {
+		/* This is bad; the host is not responding to commands and it's
+		 * not allowing itself to be halted. At least interrupts are
+		 * disabled, so we can set HC_STATE_HALT and notify the
+		 * USB core. But if we call usb_hc_died(), it will attempt to
+		 * disconnect all device drivers under this host. Those
+		 * disconnect() methods will wait for all URBs to be unlinked,
+		 * so we must complete them.
+		 */
+		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
+		xhci_warn(xhci, "Completing active URBs anyway.\n");
+		/* We could turn all TDs on the rings to no-ops. This won't
+		 * help if the host has cached part of the ring, and is slow if
+		 * we want to preserve the cycle bit. Skip it and hope the host
+		 * doesn't touch the memory.
+		 */
+	}
+	for (i = 0; i < MAX_HC_SLOTS; i++) {
+		if (!xhci->devs[i])
+			continue;
+		for (j = 0; j < 31; j++) {
+			temp_ep = &xhci->devs[i]->eps[j];
+			ring = temp_ep->ring;
+			if (!ring)
+				continue;
+			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
+					"ep index %u\n", i, j);
+			while (!list_empty(&ring->td_list)) {
+				cur_td = list_first_entry(&ring->td_list,
+						struct xhci_td,
+						td_list);
+				list_del(&cur_td->td_list);
+				if (!list_empty(&cur_td->cancelled_td_list))
+					list_del(&cur_td->cancelled_td_list);
+				xhci_giveback_urb_in_irq(xhci, cur_td,
+						-ESHUTDOWN, "killed");
+			}
+			while (!list_empty(&temp_ep->cancelled_td_list)) {
+				cur_td = list_first_entry(
+						&temp_ep->cancelled_td_list,
+						struct xhci_td,
+						cancelled_td_list);
+				list_del(&cur_td->cancelled_td_list);
+				xhci_giveback_urb_in_irq(xhci, cur_td,
+						-ESHUTDOWN, "killed");
+			}
+		}
+	}
+	spin_unlock(&xhci->lock);
+	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+	xhci_dbg(xhci, "Calling usb_hc_died()\n");
+	usb_hc_died(xhci_to_hcd(xhci));
+	xhci_dbg(xhci, "xHCI host controller is dead.\n");
+}
+
 /*
  * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
  * we need to clear the set deq pending flag in the endpoint ring state, so that
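
Note: the arming side of stop_cmd_timer is not visible in this patch's hunks.
The path that queues a Stop Endpoint command is assumed to bump the counter
and (re)arm the timer, roughly as sketched below (the timeout constant name
and the exact call site are assumptions, not taken from this hunk):

	/* In the URB-dequeue path, just before queueing the command: */
	ep->stop_cmds_pending++;
	ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;	/* assumed name */
	add_timer(&ep->stop_cmd_timer);

Every queued command increments stop_cmds_pending, and every successful
del_timer() or expired timer decrements it. So when the watchdog body sees
stop_cmds_pending == 0 with EP_HALT_PENDING still set, no later Stop Endpoint
command superseded this one, and the host has genuinely stopped responding.
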
@@ -765,28 +903,32 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 				virt_dev->in_ctx);
 		/* Input ctx add_flags are the endpoint index plus one */
 		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
-		ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
-		if (!ep_ring) {
-			/* This must have been an initial configure endpoint */
-			xhci->devs[slot_id]->cmd_status =
-				GET_COMP_CODE(event->status);
-			complete(&xhci->devs[slot_id]->cmd_completion);
-			break;
-		}
-		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
-		xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
-				"state = %d\n", ep_index, ep_state);
+		/* A usb_set_interface() call directly after clearing a halted
+		 * condition may race on this quirky hardware.
+		 * Not worth worrying about, since this is prototype hardware.
+		 */
 		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
-				ep_state & EP_HALTED) {
+				ep_index != (unsigned int) -1 &&
+				ctrl_ctx->add_flags - SLOT_FLAG ==
+				ctrl_ctx->drop_flags) {
+			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+			if (!(ep_state & EP_HALTED))
+				goto bandwidth_change;
+			xhci_dbg(xhci, "Completed config ep cmd - "
+					"last ep index = %d, state = %d\n",
+					ep_index, ep_state);
 			/* Clear our internal halted state and restart ring */
 			xhci->devs[slot_id]->eps[ep_index].ep_state &=
 				~EP_HALTED;
 			ring_ep_doorbell(xhci, slot_id, ep_index);
-		} else {
-			xhci->devs[slot_id]->cmd_status =
-				GET_COMP_CODE(event->status);
-			complete(&xhci->devs[slot_id]->cmd_completion);
+			break;
 		}
+bandwidth_change:
+		xhci_dbg(xhci, "Completed config ep cmd\n");
+		xhci->devs[slot_id]->cmd_status =
+			GET_COMP_CODE(event->status);
+		complete(&xhci->devs[slot_id]->cmd_completion);
 		break;
 	case TRB_TYPE(TRB_EVAL_CONTEXT):
 		virt_dev = xhci->devs[slot_id];
@@ -849,8 +991,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
  * TRB in this TD, this function returns that TRB's segment. Otherwise it
  * returns 0.
  */
-static struct xhci_segment *trb_in_td(
-		struct xhci_segment *start_seg,
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 		union xhci_trb	*start_trb,
 		union xhci_trb	*end_trb,
 		dma_addr_t	suspect_dma)
@@ -900,6 +1041,45 @@ static struct xhci_segment *trb_in_td(
 	return 0;
 }
 
+static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_td *td, union xhci_trb *event_trb)
+{
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep->ep_state |= EP_HALTED;
+	ep->stopped_td = td;
+	ep->stopped_trb = event_trb;
+	xhci_queue_reset_ep(xhci, slot_id, ep_index);
+	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+	xhci_ring_cmd_db(xhci);
+}
+
+/* Check if an error has halted the endpoint ring. The class driver will
+ * clean up the halt for a non-default control endpoint if we indicate a stall.
+ * However, a babble and other errors also halt the endpoint ring, and the class
+ * driver won't clear the halt in that case, so we need to issue a Set Transfer
+ * Ring Dequeue Pointer command manually.
+ */
+static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
+		struct xhci_ep_ctx *ep_ctx,
+		unsigned int trb_comp_code)
+{
+	/* TRB completion codes that may require a manual halt cleanup */
+	if (trb_comp_code == COMP_TX_ERR ||
+			trb_comp_code == COMP_BABBLE ||
+			trb_comp_code == COMP_SPLIT_ERR)
+		/* The 0.95 spec says a babbling control endpoint
+		 * is not halted. The 0.96 spec says it is. Some HW
+		 * claims to be 0.95 compliant, but it halts the control
+		 * endpoint anyway. Check if a babble halted the
+		 * endpoint.
+		 */
+		if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
+			return 1;
+
+	return 0;
+}
+
 /*
  * If this function returns an error condition, it means it got a Transfer
  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
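
Note: for a stall, the cleanup above is intentionally left to the USB class
driver. The usual driver-side pattern (generic USB driver code, not from this
patch; it must run in process context because usb_clear_halt() blocks):

	/* After an URB completes with -EPIPE, e.g. from a workqueue: */
	int ret = usb_clear_halt(urb->dev, urb->pipe);

That request is what eventually leads to usb_reset_endpoint(), mentioned in a
comment later in this patch, letting the HCD clear its own halted state. No
such request arrives for babble, split-transaction, or transaction errors,
which is why xhci_requires_manual_halt_cleanup() is needed.
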
@@ -1002,6 +1182,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
 		status = -EILSEQ;
 		break;
+	case COMP_SPLIT_ERR:
 	case COMP_TX_ERR:
 		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
 		status = -EPROTO;
@@ -1015,6 +1196,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		status = -ENOSR;
 		break;
 	default:
+		if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+			/* Vendor defined "informational" completion code,
+			 * treat as not-an-error.
+			 */
+			xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+					trb_comp_code);
+			xhci_dbg(xhci, "Treating code as success.\n");
+			status = 0;
+			break;
+		}
 		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
 		urb = NULL;
 		goto cleanup;
@@ -1043,15 +1234,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		else
 			status = 0;
 		break;
-	case COMP_BABBLE:
-		/* The 0.95 spec says a babbling control endpoint
-		 * is not halted. The 0.96 spec says it is. Some HW
-		 * claims to be 0.95 compliant, but it halts the control
-		 * endpoint anyway. Check if a babble halted the
-		 * endpoint.
-		 */
-		if (ep_ctx->ep_info != EP_STATE_HALTED)
+
+	default:
+		if (!xhci_requires_manual_halt_cleanup(xhci,
+				ep_ctx, trb_comp_code))
 			break;
+		xhci_dbg(xhci, "TRB error code %u, "
+				"halted endpoint index = %u\n",
+				trb_comp_code, ep_index);
 		/* else fall through */
 	case COMP_STALL:
 		/* Did we transfer part of the data (middle) phase? */
@@ -1063,15 +1253,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		else
 			td->urb->actual_length = 0;
 
-		ep->stopped_td = td;
-		ep->stopped_trb = event_trb;
-		xhci_queue_reset_ep(xhci, slot_id, ep_index);
-		xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
-		xhci_ring_cmd_db(xhci);
+		xhci_cleanup_halted_endpoint(xhci,
+				slot_id, ep_index, td, event_trb);
 		goto td_cleanup;
-	default:
-		/* Others already handled above */
-		break;
 	}
 	/*
 	 * Did we transfer any data, despite the errors that might have
@@ -1209,16 +1393,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			ep->stopped_td = td;
 			ep->stopped_trb = event_trb;
 		} else {
-			if (trb_comp_code == COMP_STALL ||
-					trb_comp_code == COMP_BABBLE) {
+			if (trb_comp_code == COMP_STALL) {
 				/* The transfer is completed from the driver's
 				 * perspective, but we need to issue a set dequeue
 				 * command for this stalled endpoint to move the dequeue
 				 * pointer past the TD. We can't do that here because
-				 * the halt condition must be cleared first.
+				 * the halt condition must be cleared first. Let the
+				 * USB class driver clear the stall later.
 				 */
 				ep->stopped_td = td;
 				ep->stopped_trb = event_trb;
+			} else if (xhci_requires_manual_halt_cleanup(xhci,
+					ep_ctx, trb_comp_code)) {
+				/* Other types of errors halt the endpoint, but the
+				 * class driver doesn't call usb_reset_endpoint() unless
+				 * the error is -EPIPE. Clear the halted status in the
+				 * xHCI hardware manually.
+				 */
+				xhci_cleanup_halted_endpoint(xhci,
+						slot_id, ep_index, td, event_trb);
 			} else {
 				/* Update ring dequeue pointer */
 				while (ep_ring->dequeue != td->last_trb)
@@ -1249,10 +1442,9 @@ td_cleanup:
 		}
 		list_del(&td->td_list);
 		/* Was this TD slated to be cancelled but completed anyway? */
-		if (!list_empty(&td->cancelled_td_list)) {
+		if (!list_empty(&td->cancelled_td_list))
 			list_del(&td->cancelled_td_list);
-			ep->cancels_pending--;
-		}
+
 		/* Leave the TD around for the reset endpoint function to use
 		 * (but only if it's not a control endpoint, since we already
 		 * queued the Set TR dequeue pointer command for stalled
@@ -1331,6 +1523,14 @@ void xhci_handle_event(struct xhci_hcd *xhci)
 	default:
 		xhci->error_bitmask |= 1 << 3;
 	}
+	/* Any of the above functions may drop and re-acquire the lock, so check
+	 * to make sure a watchdog timer didn't mark the host as non-responsive.
+	 */
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_dbg(xhci, "xHCI host dying, returning from "
+				"event handler.\n");
+		return;
+	}
 
 	if (update_ptrs) {
 		/* Update SW and HC event ring dequeue pointer */
@@ -1555,6 +1755,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
 }
 
+/*
+ * The TD size is the number of bytes remaining in the TD (including this TRB),
+ * right shifted by 10.
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ */
+static u32 xhci_td_remainder(unsigned int remainder)
+{
+	u32 max = (1 << (21 - 17 + 1)) - 1;
+
+	if ((remainder >> 10) >= max)
+		return max << 17;
+	else
+		return (remainder >> 10) << 17;
+}
+
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
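
Note: a worked example of the saturation in xhci_td_remainder(), with values
chosen purely for illustration. Here max is (1 << (21 - 17 + 1)) - 1 = 31:

	xhci_td_remainder(20480);	/* 20480 >> 10 = 20 < 31: returns 20 << 17 */
	xhci_td_remainder(70000);	/* 70000 >> 10 = 68 >= 31: returns 31 << 17 */

Any TD with roughly 31 KB or more still outstanding therefore reports the
capped value 31 in bits 21:17 of the length field.
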
@@ -1612,6 +1827,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	do {
 		u32 field = 0;
 		u32 length_field = 0;
+		u32 remainder = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
 		if (first_trb)
@@ -1641,8 +1857,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
 					(unsigned int) addr + trb_buff_len);
 		}
+		remainder = xhci_td_remainder(urb->transfer_buffer_length -
+				running_total);
 		length_field = TRB_LEN(trb_buff_len) |
-			TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+			remainder |
 			TRB_INTR_TARGET(0);
 		queue_trb(xhci, ep_ring, false,
 				lower_32_bits(addr),
@@ -1755,6 +1973,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	/* Queue the first TRB, even if it's zero-length */
 	do {
+		u32 remainder = 0;
 		field = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
@@ -1773,8 +1992,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			td->last_trb = ep_ring->enqueue;
 			field |= TRB_IOC;
 		}
+		remainder = xhci_td_remainder(urb->transfer_buffer_length -
+				running_total);
 		length_field = TRB_LEN(trb_buff_len) |
-			TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+			remainder |
 			TRB_INTR_TARGET(0);
 		queue_trb(xhci, ep_ring, false,
 				lower_32_bits(addr),
@@ -1862,7 +2083,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* If there's data, queue data TRBs */
 	field = 0;
 	length_field = TRB_LEN(urb->transfer_buffer_length) |
-		TD_REMAINDER(urb->transfer_buffer_length) |
+		xhci_td_remainder(urb->transfer_buffer_length) |
 		TRB_INTR_TARGET(0);
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)