author		Sarah Sharp <sarah.a.sharp@linux.intel.com>	2011-07-29 15:44:32 -0400
committer	Sarah Sharp <sarah.a.sharp@linux.intel.com>	2011-08-09 17:49:05 -0400
commit		522989a27c7badb608155b1f1dea3487ed431f74 (patch)
tree		04da1b5508c22f3e262c787f27e8b02fde6633cf
parent		d13565c12828ce0cd2a3862bf6260164a0653352 (diff)
xhci: Fix failed enqueue in the middle of isoch TD.
When an isochronous transfer is enqueued, xhci_queue_isoc_tx_prepare() will ensure that there is enough room on the transfer rings for all of the isochronous TDs for that URB. However, when xhci_queue_isoc_tx() is enqueueing individual isoc TDs, the prepare_transfer() function can fail if the endpoint state has changed to disabled, error, or some other unknown state.

With the current code, if the Nth TD (not the first TD) fails, the ring is left in a sorry state. The partially enqueued TDs are left on the ring, and the first TRB of the TD is not given back to the hardware. The enqueue pointer is left on the TRB after the last successfully enqueued TD. This means the ring is basically useless. Any new transfers will be enqueued after the failed TDs, which the hardware will never read because the cycle bit indicates it does not own them. The ring will fill up with untransferred TDs, and the endpoint will be basically unusable.

The untransferred TDs will also remain on the TD list. Since the td_list is a FIFO, this basically means the ring handler will be waiting on TDs that will never be completed (or worse, dereference memory that doesn't exist any more).

Change the code to clean up the isochronous ring after a failed transfer. If the first TD failed, simply return and allow the xhci_urb_enqueue function to free the urb_priv. If the Nth TD failed, first remove the TDs from the td_list. Then convert the TRBs that were enqueued into No-op TRBs. Make sure to flip the cycle bit on all enqueued TRBs (including any link TRBs in the middle or between TDs), but leave the cycle bit of the first TRB (which will show software-owned) intact. Then move the ring enqueue pointer back to the first TRB and make sure to change the xhci_ring's cycle state to what is appropriate for that ring segment.

This ensures that the No-op TRBs will be overwritten by subsequent TDs, and the hardware will not start executing random TRBs because the cycle bit was left as hardware-owned.

This bug is unlikely to be hit, but it was something I noticed while tracking down the watchdog timer issue. I verified that the fix works by injecting some errors on the 250th isochronous URB queued, although I could not verify that the ring is in the correct state because uvcvideo refused to talk to the device after the first usb_submit_urb() failed. Ring debugging shows that the ring looks correct, however.

This patch should be backported to kernels as old as 2.6.36.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Cc: Andiry Xu <andiry.xu@amd.com>
Cc: stable@kernel.org
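To illustrate the cycle-bit mechanics the fix relies on, here is a minimal user-space sketch. It is not the driver's real code: struct trb, struct ring, queue_trb(), unwind_partial_enqueue(), and RING_SIZE are hypothetical stand-ins, and link TRBs and TRB types are omitted; only the ownership handoff is modeled. The first TRB of a transfer is written with the inverted cycle bit so the hardware ignores the whole chain until it is committed; on a mid-transfer failure, flipping the cycle bit on every TRB except that first one hands the ring back to software, after which the enqueue pointer and cycle state can simply be rewound.

/* Hypothetical, simplified model of the cleanup this patch performs.
 * None of these types or helpers are the real xHCI driver's. */
#include <stdbool.h>
#include <stdio.h>

#define TRB_CYCLE 0x1		/* bit 0: cycle (ownership) bit */
#define RING_SIZE 8

struct trb { unsigned int field3; };

struct ring {
	struct trb trbs[RING_SIZE];
	unsigned int enqueue;		/* index of the next TRB to fill */
	unsigned int cycle_state;	/* producer's current cycle bit */
};

/* Write one TRB.  The first TRB of a transfer gets the inverted cycle
 * bit, so the hardware won't consume the chain until the transfer is
 * committed by flipping that single bit (what giveback_first_trb()
 * does in the real driver). */
static void queue_trb(struct ring *r, bool first)
{
	r->trbs[r->enqueue].field3 = first ? !r->cycle_state : r->cycle_state;
	r->enqueue = (r->enqueue + 1) % RING_SIZE;
}

/* Undo a partial enqueue: flip the cycle bit on every TRB except the
 * first (which is already software-owned), then rewind the enqueue
 * pointer and restore the producer's cycle state. */
static void unwind_partial_enqueue(struct ring *r, unsigned int first,
				   unsigned int start_cycle)
{
	for (unsigned int i = first; i != r->enqueue; i = (i + 1) % RING_SIZE)
		if (i != first)
			r->trbs[i].field3 ^= TRB_CYCLE;	/* back to software */
	r->enqueue = first;
	r->cycle_state = start_cycle;
}

int main(void)
{
	struct ring r = { .enqueue = 0, .cycle_state = 1 };
	unsigned int first = r.enqueue, start_cycle = r.cycle_state;

	/* Three TRBs make it onto the ring before the Nth TD fails. */
	queue_trb(&r, true);
	queue_trb(&r, false);
	queue_trb(&r, false);

	unwind_partial_enqueue(&r, first, start_cycle);
	printf("enqueue rewound to %u, cycle state %u\n",
	       r.enqueue, r.cycle_state);
	return 0;
}

In the real driver the TRB the enqueue pointer sits on is also skipped (it is not part of the TD), and td_to_noop() additionally rewrites each TRB's type to No-op; the sketch models only the cycle-bit handoff.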
-rw-r--r--	drivers/usb/host/xhci-ring.c	50
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9d3f9dd1ad28..f72149b666b1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 			(unsigned long long) addr);
 }
 
+/* flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
+ */
 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		struct xhci_td *cur_td)
+		struct xhci_td *cur_td, bool flip_cycle)
 {
 	struct xhci_segment *cur_seg;
 	union xhci_trb *cur_trb;
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			 * leave the pointers intact.
 			 */
 			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+			/* Flip the cycle bit (link TRBs can't be the first
+			 * or last TRB).
+			 */
+			if (flip_cycle)
+				cur_trb->generic.field[3] ^=
+					cpu_to_le32(TRB_CYCLE);
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			cur_trb->generic.field[2] = 0;
 			/* Preserve only the cycle bit of this TRB */
 			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+			/* Flip the cycle bit except on the first or last TRB */
+			if (flip_cycle && cur_trb != cur_td->first_trb &&
+					cur_trb != cur_td->last_trb)
+				cur_trb->generic.field[3] ^=
+					cpu_to_le32(TRB_CYCLE);
 			cur_trb->generic.field[3] |= cpu_to_le32(
 				TRB_TYPE(TRB_TR_NOOP));
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
@@ -719,7 +734,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 					cur_td->urb->stream_id,
 					cur_td, &deq_state);
 		else
-			td_to_noop(xhci, ep_ring, cur_td);
+			td_to_noop(xhci, ep_ring, cur_td, false);
 remove_finished_td:
 		/*
 		 * The event handler won't see a completion for this TD anymore,
@@ -3223,6 +3238,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	start_trb = &ep_ring->enqueue->generic;
 	start_cycle = ep_ring->cycle_state;
 
+	urb_priv = urb->hcpriv;
 	/* Queue the first TRB, even if it's zero-length */
 	for (i = 0; i < num_tds; i++) {
 		unsigned int total_packet_count;
@@ -3246,12 +3262,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
 				urb->stream_id, trbs_per_td, urb, i, mem_flags);
-		if (ret < 0)
-			return ret;
+		if (ret < 0) {
+			if (i == 0)
+				return ret;
+			goto cleanup;
+		}
 
-		urb_priv = urb->hcpriv;
 		td = urb_priv->td[i];
-
 		for (j = 0; j < trbs_per_td; j++) {
 			u32 remainder = 0;
 			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3341,6 +3358,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
+cleanup:
+	/* Clean up a partially enqueued isoc transfer. */
+
+	for (i--; i >= 0; i--)
+		list_del(&urb_priv->td[i]->td_list);
+
+	/* Use the first TD as a temporary variable to turn the TDs we've queued
+	 * into No-ops with a software-owned cycle bit.  That way the hardware
+	 * won't accidentally start executing bogus TDs when we partially
+	 * overwrite them.  td->first_trb and td->start_seg are already set.
+	 */
+	urb_priv->td[0]->last_trb = ep_ring->enqueue;
+	/* Every TRB except the first & last will have its cycle bit flipped. */
+	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
+
+	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
+	ep_ring->enqueue = urb_priv->td[0]->first_trb;
+	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
+	ep_ring->cycle_state = start_cycle;
+	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+	return ret;
 }
 
 /*