about summary refs log tree commit diff stats
path: root/drivers/usb/host/xhci-ring.c
diff options
context:
space:
mode:
author: Sarah Sharp <sarah.a.sharp@linux.intel.com> 2009-08-07 17:04:52 -0400
committer: Greg Kroah-Hartman <gregkh@suse.de> 2009-09-23 09:46:17 -0400
commit: 82d1009f537c2a43be0a410abd33521f76ee3a5a (patch)
tree: 98ff36d6d21b627fd42d0d59b1fc507fd5c92347 /drivers/usb/host/xhci-ring.c
parent: 2d3f1fac7ee8bb4c6fad40f838488edbeabb0c50 (diff)
USB: xhci: Handle stalled control endpoints.
When a control endpoint stalls, the next control transfer will clear the stall. The USB core doesn't call down to the host controller driver's endpoint_reset() method when control endpoints stall, so the xHCI driver has to do all its stall handling for internal state in its interrupt handler. When the host stalls on a control endpoint, it may stop on the data phase or status phase of the control transfer. Like other stalled endpoints, the xHCI driver needs to queue a Reset Endpoint command and move the hardware's control endpoint ring dequeue pointer past the failed control transfer (with a Set TR Dequeue Pointer or a Configure Endpoint command). Since the USB core doesn't call usb_hcd_reset_endpoint() for control endpoints, we need to do this in interrupt context when we get notified of the stalled transfer. URBs may be queued to the hardware before these two commands complete. The endpoint queue will be restarted once both commands complete. Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Cc: stable <stable@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c | 33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ee7fc4500dfb..c831194b0966 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -817,6 +817,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
817{ 817{
818 struct xhci_virt_device *xdev; 818 struct xhci_virt_device *xdev;
819 struct xhci_ring *ep_ring; 819 struct xhci_ring *ep_ring;
820 unsigned int slot_id;
820 int ep_index; 821 int ep_index;
821 struct xhci_td *td = 0; 822 struct xhci_td *td = 0;
822 dma_addr_t event_dma; 823 dma_addr_t event_dma;
@@ -827,7 +828,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
827 struct xhci_ep_ctx *ep_ctx; 828 struct xhci_ep_ctx *ep_ctx;
828 829
829 xhci_dbg(xhci, "In %s\n", __func__); 830 xhci_dbg(xhci, "In %s\n", __func__);
830 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; 831 slot_id = TRB_TO_SLOT_ID(event->flags);
832 xdev = xhci->devs[slot_id];
831 if (!xdev) { 833 if (!xdev) {
832 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 834 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
833 return -ENODEV; 835 return -ENODEV;
@@ -941,6 +943,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
941 xhci_warn(xhci, "WARN: short transfer on control ep\n"); 943 xhci_warn(xhci, "WARN: short transfer on control ep\n");
942 status = -EREMOTEIO; 944 status = -EREMOTEIO;
943 break; 945 break;
946 case COMP_STALL:
947 /* Did we transfer part of the data (middle) phase? */
948 if (event_trb != ep_ring->dequeue &&
949 event_trb != td->last_trb)
950 td->urb->actual_length =
951 td->urb->transfer_buffer_length
952 - TRB_LEN(event->transfer_len);
953 else
954 td->urb->actual_length = 0;
955
956 ep_ring->stopped_td = td;
957 ep_ring->stopped_trb = event_trb;
958 xhci_queue_reset_ep(xhci, slot_id, ep_index);
959 xhci_cleanup_stalled_ring(xhci,
960 td->urb->dev,
961 td->urb->ep,
962 ep_index, ep_ring);
963 xhci_ring_cmd_db(xhci);
964 goto td_cleanup;
944 default: 965 default:
945 /* Others already handled above */ 966 /* Others already handled above */
946 break; 967 break;
@@ -1083,6 +1104,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1083 inc_deq(xhci, ep_ring, false); 1104 inc_deq(xhci, ep_ring, false);
1084 } 1105 }
1085 1106
1107td_cleanup:
1086 /* Clean up the endpoint's TD list */ 1108 /* Clean up the endpoint's TD list */
1087 urb = td->urb; 1109 urb = td->urb;
1088 list_del(&td->td_list); 1110 list_del(&td->td_list);
@@ -1091,8 +1113,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1091 list_del(&td->cancelled_td_list); 1113 list_del(&td->cancelled_td_list);
1092 ep_ring->cancels_pending--; 1114 ep_ring->cancels_pending--;
1093 } 1115 }
1094 /* Leave the TD around for the reset endpoint function to use */ 1116 /* Leave the TD around for the reset endpoint function to use
1095 if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) { 1117 * (but only if it's not a control endpoint, since we already
1118 * queued the Set TR dequeue pointer command for stalled
1119 * control endpoints).
1120 */
1121 if (usb_endpoint_xfer_control(&urb->ep->desc) ||
1122 GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
1096 kfree(td); 1123 kfree(td);
1097 } 1124 }
1098 urb->hcpriv = NULL; 1125 urb->hcpriv = NULL;