author	Mathias Nyman <mathias.nyman@linux.intel.com>	2014-08-19 08:17:58 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-08-19 12:27:36 -0400
commit	365038d83313951d6ace15342eb24624bbef1666 (patch)
tree	0c0605519c22cb31360617dd58887e8d685f486c /drivers/usb/host
parent	2597fe99bb0259387111d0431691f5daac84f5a5 (diff)
xhci: rework cycle bit checking for new dequeue pointers
When we manually need to move the TR dequeue pointer, we need to set the correct cycle bit as well. Previously we used the TRB pointer from the last event received as a base, but this was changed in commit 1f81b6d22a59 ("usb: xhci: Prefer endpoint context dequeue pointer") to use the dequeue pointer from the endpoint context instead.

It turns out some Asmedia controllers advance the dequeue pointer stored in the endpoint context past the event-triggering TRB, and this messed up the way the cycle bit was calculated.

Instead of adding a quirk or complicating the already hard-to-follow cycle bit code, the whole cycle bit calculation is now simplified and adapted to handle differences between the event and endpoint context dequeue pointers.

Fixes: 1f81b6d22a59 ("usb: xhci: Prefer endpoint context dequeue pointer")
Reported-by: Maciej Puzio <mx34567@gmail.com>
Reported-by: Evan Langlois <uudruid74@gmail.com>
Reviewed-by: Julius Werner <jwerner@chromium.org>
Tested-by: Maciej Puzio <mx34567@gmail.com>
Tested-by: Evan Langlois <uudruid74@gmail.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Cc: stable@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
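For illustration only, here is a minimal, self-contained sketch of the reworked walk; it is not the kernel code. It models a hypothetical single-segment ring of eight slots whose last slot acts as the toggling link TRB, and the helper name find_new_cycle_state and the RING_SIZE constant are invented for this example. The loop mirrors the patch's do/while: it flips the cycle bit only for link TRBs crossed after the hw_dequeue position has been found, and it stops once both hw_dequeue and the TD's last TRB have been seen, which also covers the case where hw_dequeue has already advanced past last_trb.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8	/* hypothetical: one segment, slot 7 acts as the toggling link TRB */

/* Walk the ring from 'start' until both the hw_dequeue slot and the TD's
 * last TRB slot have been seen; return the cycle state at the stop point. */
static int find_new_cycle_state(int start, int hw_dequeue, int last_trb,
				int hw_cycle)
{
	bool cycle_found = false;
	bool last_trb_found = false;
	int cycle = hw_cycle;
	int pos = start;

	do {
		if (!cycle_found && pos == hw_dequeue) {
			cycle_found = true;
			if (last_trb_found)
				break;
		}
		if (pos == last_trb)
			last_trb_found = true;

		/* Crossing the toggling link TRB flips the cycle bit, but only
		 * once the cycle state is known (i.e. past hw_dequeue). */
		if (cycle_found && pos == RING_SIZE - 1)
			cycle ^= 0x1;

		pos = (pos + 1) % RING_SIZE;
	} while (!cycle_found || !last_trb_found);

	return cycle;
}

int main(void)
{
	/* The problematic case: hw_dequeue (slot 4) already advanced past the
	 * TD's last TRB (slot 2); the walk still finds a consistent cycle bit. */
	printf("new cycle state: %d\n", find_new_cycle_state(0, 4, 2, 1));
	return 0;
}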
Diffstat (limited to 'drivers/usb/host')
-rw-r--r--	drivers/usb/host/xhci-ring.c	101
-rw-r--r--	drivers/usb/host/xhci.c	3
2 files changed, 42 insertions, 62 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ac8cf2374046..abed30b82905 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -364,32 +364,6 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 	}
 }
 
-/*
- * Find the segment that trb is in. Start searching in start_seg.
- * If we must move past a segment that has a link TRB with a toggle cycle state
- * bit set, then we will toggle the value pointed at by cycle_state.
- */
-static struct xhci_segment *find_trb_seg(
-		struct xhci_segment *start_seg,
-		union xhci_trb *trb, int *cycle_state)
-{
-	struct xhci_segment *cur_seg = start_seg;
-	struct xhci_generic_trb *generic_trb;
-
-	while (cur_seg->trbs > trb ||
-			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
-		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
-			*cycle_state ^= 0x1;
-		cur_seg = cur_seg->next;
-		if (cur_seg == start_seg)
-			/* Looped over the entire list. Oops! */
-			return NULL;
-	}
-	return cur_seg;
-}
-
-
 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id)
@@ -459,9 +433,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	struct xhci_virt_device *dev = xhci->devs[slot_id];
 	struct xhci_virt_ep *ep = &dev->eps[ep_index];
 	struct xhci_ring *ep_ring;
-	struct xhci_generic_trb *trb;
+	struct xhci_segment *new_seg;
+	union xhci_trb *new_deq;
 	dma_addr_t addr;
 	u64 hw_dequeue;
+	bool cycle_found = false;
+	bool td_last_trb_found = false;
 
 	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
 			ep_index, stream_id);
@@ -486,45 +463,45 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		hw_dequeue = le64_to_cpu(ep_ctx->deq);
 	}
 
-	/* Find virtual address and segment of hardware dequeue pointer */
-	state->new_deq_seg = ep_ring->deq_seg;
-	state->new_deq_ptr = ep_ring->dequeue;
-	while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
-			!= (dma_addr_t)(hw_dequeue & ~0xf)) {
-		next_trb(xhci, ep_ring, &state->new_deq_seg,
-					&state->new_deq_ptr);
-		if (state->new_deq_ptr == ep_ring->dequeue) {
-			WARN_ON(1);
-			return;
-		}
-	}
+	new_seg = ep_ring->deq_seg;
+	new_deq = ep_ring->dequeue;
+	state->new_cycle_state = hw_dequeue & 0x1;
+
 	/*
-	 * Find cycle state for last_trb, starting at old cycle state of
-	 * hw_dequeue. If there is only one segment ring, find_trb_seg() will
-	 * return immediately and cannot toggle the cycle state if this search
-	 * wraps around, so add one more toggle manually in that case.
+	 * We want to find the pointer, segment and cycle state of the new trb
+	 * (the one after current TD's last_trb). We know the cycle state at
+	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+	 * found.
 	 */
-	state->new_cycle_state = hw_dequeue & 0x1;
-	if (ep_ring->first_seg == ep_ring->first_seg->next &&
-			cur_td->last_trb < state->new_deq_ptr)
-		state->new_cycle_state ^= 0x1;
+	do {
+		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
+		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
+			cycle_found = true;
+			if (td_last_trb_found)
+				break;
+		}
+		if (new_deq == cur_td->last_trb)
+			td_last_trb_found = true;
 
-	state->new_deq_ptr = cur_td->last_trb;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-			"Finding segment containing last TRB in TD.");
-	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
-			state->new_deq_ptr, &state->new_cycle_state);
-	if (!state->new_deq_seg) {
-		WARN_ON(1);
-		return;
-	}
+		if (cycle_found &&
+		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
+		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
+			state->new_cycle_state ^= 0x1;
+
+		next_trb(xhci, ep_ring, &new_seg, &new_deq);
+
+		/* Search wrapped around, bail out */
+		if (new_deq == ep->ring->dequeue) {
+			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
+			state->new_deq_seg = NULL;
+			state->new_deq_ptr = NULL;
+			return;
+		}
+
+	} while (!cycle_found || !td_last_trb_found);
 
-	/* Increment to find next TRB after last_trb. Cycle if appropriate. */
-	trb = &state->new_deq_ptr->generic;
-	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
-	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
-		state->new_cycle_state ^= 0x1;
-	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+	state->new_deq_seg = new_seg;
+	state->new_deq_ptr = new_deq;
 
 	/* Don't update the ring cycle state for the producer (us). */
 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b6f21175b872..c020b094fe7d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2880,6 +2880,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 			ep_index, ep->stopped_stream, ep->stopped_td,
 			&deq_state);
 
+	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
+		return;
+
 	/* HW with the reset endpoint quirk will use the saved dequeue state to
 	 * issue a configure endpoint command later.
 	 */