about summary refs log tree commit diff stats
path: root/drivers/usb
diff options
context:
space:
mode:
author    Sarah Sharp <sarah.a.sharp@linux.intel.com>  2011-07-29 15:44:32 -0400
committer Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>  2011-10-17 13:32:27 -0400
commit    237b7ccc3193856fe660253a99381ed279775167 (patch)
tree      950f0300dedfd95bde7c1e00da39f2c49d4cdaff /drivers/usb
parent    e3f522d4674a2637c57c0d228c6f48f74922b380 (diff)
xhci: Fix failed enqueue in the middle of isoch TD.
BugLink: http://bugs.launchpad.net/bugs/868628

commit 522989a27c7badb608155b1f1dea3487ed431f74 upstream.

When an isochronous transfer is enqueued, xhci_queue_isoc_tx_prepare() will ensure that there is enough room on the transfer rings for all of the isochronous TDs for that URB. However, when xhci_queue_isoc_tx() is enqueueing individual isoc TDs, the prepare_transfer() function can fail if the endpoint state has changed to disabled, error, or some other unknown state.

With the current code, if the Nth TD (not the first TD) fails, the ring is left in a sorry state. The partially enqueued TDs are left on the ring, and the first TRB of the TD is not given back to the hardware. The enqueue pointer is left on the TRB after the last successfully enqueued TD. This means the ring is basically useless. Any new transfers will be enqueued after the failed TDs, which the hardware will never read because the cycle bit indicates it does not own them. The ring will fill up with untransferred TDs, and the endpoint will be basically unusable.

The untransferred TDs will also remain on the TD list. Since the td_list is a FIFO, this basically means the ring handler will be waiting on TDs that will never be completed (or worse, dereference memory that doesn't exist any more).

Change the code to clean up the isochronous ring after a failed transfer. If the first TD failed, simply return and allow the xhci_urb_enqueue function to free the urb_priv. If the Nth TD failed, first remove the TDs from the td_list. Then convert the TRBs that were enqueued into No-op TRBs. Make sure to flip the cycle bit on all enqueued TRBs (including any link TRBs in the middle or between TDs), but leave the cycle bit of the first TRB (which will show software-owned) intact. Then move the ring enqueue pointer back to the first TRB and make sure to change the xhci_ring's cycle state to what is appropriate for that ring segment.
This ensures that the No-op TRBs will be overwritten by subsequent TDs, and the hardware will not start executing random TRBs because the cycle bit was left as hardware-owned.

This bug is unlikely to be hit, but it was something I noticed while tracking down the watchdog timer issue. I verified that the fix works by injecting some errors on the 250th isochronous URB queued, although I could not verify that the ring is in the correct state because uvcvideo refused to talk to the device after the first usb_submit_urb() failed. Ring debugging shows that the ring looks correct, however.

This patch should be backported to kernels as old as 2.6.36.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Cc: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/host/xhci-ring.c | 50
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 53e117c38b8..80e51ba72cd 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -516,8 +516,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
516 (unsigned long long) addr); 516 (unsigned long long) addr);
517} 517}
518 518
519/* flip_cycle means flip the cycle bit of all but the first and last TRB.
520 * (The last TRB actually points to the ring enqueue pointer, which is not part
521 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
522 */
519static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 523static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
520 struct xhci_td *cur_td) 524 struct xhci_td *cur_td, bool flip_cycle)
521{ 525{
522 struct xhci_segment *cur_seg; 526 struct xhci_segment *cur_seg;
523 union xhci_trb *cur_trb; 527 union xhci_trb *cur_trb;
@@ -531,6 +535,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
531 * leave the pointers intact. 535 * leave the pointers intact.
532 */ 536 */
533 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); 537 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
538 /* Flip the cycle bit (link TRBs can't be the first
539 * or last TRB).
540 */
541 if (flip_cycle)
542 cur_trb->generic.field[3] ^=
543 cpu_to_le32(TRB_CYCLE);
534 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 544 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
535 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 545 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
536 "in seg %p (0x%llx dma)\n", 546 "in seg %p (0x%llx dma)\n",
@@ -544,6 +554,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
544 cur_trb->generic.field[2] = 0; 554 cur_trb->generic.field[2] = 0;
545 /* Preserve only the cycle bit of this TRB */ 555 /* Preserve only the cycle bit of this TRB */
546 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); 556 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
557 /* Flip the cycle bit except on the first or last TRB */
558 if (flip_cycle && cur_trb != cur_td->first_trb &&
559 cur_trb != cur_td->last_trb)
560 cur_trb->generic.field[3] ^=
561 cpu_to_le32(TRB_CYCLE);
547 cur_trb->generic.field[3] |= cpu_to_le32( 562 cur_trb->generic.field[3] |= cpu_to_le32(
548 TRB_TYPE(TRB_TR_NOOP)); 563 TRB_TYPE(TRB_TR_NOOP));
549 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " 564 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
@@ -722,7 +737,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
722 cur_td->urb->stream_id, 737 cur_td->urb->stream_id,
723 cur_td, &deq_state); 738 cur_td, &deq_state);
724 else 739 else
725 td_to_noop(xhci, ep_ring, cur_td); 740 td_to_noop(xhci, ep_ring, cur_td, false);
726remove_finished_td: 741remove_finished_td:
727 /* 742 /*
728 * The event handler won't see a completion for this TD anymore, 743 * The event handler won't see a completion for this TD anymore,
@@ -3231,6 +3246,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3231 start_trb = &ep_ring->enqueue->generic; 3246 start_trb = &ep_ring->enqueue->generic;
3232 start_cycle = ep_ring->cycle_state; 3247 start_cycle = ep_ring->cycle_state;
3233 3248
3249 urb_priv = urb->hcpriv;
3234 /* Queue the first TRB, even if it's zero-length */ 3250 /* Queue the first TRB, even if it's zero-length */
3235 for (i = 0; i < num_tds; i++) { 3251 for (i = 0; i < num_tds; i++) {
3236 unsigned int total_packet_count; 3252 unsigned int total_packet_count;
@@ -3254,12 +3270,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3254 3270
3255 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 3271 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3256 urb->stream_id, trbs_per_td, urb, i, mem_flags); 3272 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3257 if (ret < 0) 3273 if (ret < 0) {
3258 return ret; 3274 if (i == 0)
3275 return ret;
3276 goto cleanup;
3277 }
3259 3278
3260 urb_priv = urb->hcpriv;
3261 td = urb_priv->td[i]; 3279 td = urb_priv->td[i];
3262
3263 for (j = 0; j < trbs_per_td; j++) { 3280 for (j = 0; j < trbs_per_td; j++) {
3264 u32 remainder = 0; 3281 u32 remainder = 0;
3265 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3282 field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3349,6 +3366,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3349 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3366 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3350 start_cycle, start_trb); 3367 start_cycle, start_trb);
3351 return 0; 3368 return 0;
3369cleanup:
3370 /* Clean up a partially enqueued isoc transfer. */
3371
3372 for (i--; i >= 0; i--)
3373 list_del(&urb_priv->td[i]->td_list);
3374
3375 /* Use the first TD as a temporary variable to turn the TDs we've queued
3376 * into No-ops with a software-owned cycle bit. That way the hardware
3377 * won't accidentally start executing bogus TDs when we partially
3378 * overwrite them. td->first_trb and td->start_seg are already set.
3379 */
3380 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3381 /* Every TRB except the first & last will have its cycle bit flipped. */
3382 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3383
3384 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3385 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3386 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3387 ep_ring->cycle_state = start_cycle;
3388 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3389 return ret;
3352} 3390}
3353 3391
3354/* 3392/*