diff options
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 67 |
1 file changed, 31 insertions, 36 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5f926bea5ab1..7a0e3c720c00 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -550,6 +550,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
550 | struct xhci_ring *ep_ring; | 550 | struct xhci_ring *ep_ring; |
551 | struct xhci_generic_trb *trb; | 551 | struct xhci_generic_trb *trb; |
552 | dma_addr_t addr; | 552 | dma_addr_t addr; |
553 | u64 hw_dequeue; | ||
553 | 554 | ||
554 | ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, | 555 | ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, |
555 | ep_index, stream_id); | 556 | ep_index, stream_id); |
@@ -559,16 +560,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
559 | stream_id); | 560 | stream_id); |
560 | return; | 561 | return; |
561 | } | 562 | } |
562 | state->new_cycle_state = 0; | ||
563 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | ||
564 | "Finding segment containing stopped TRB."); | ||
565 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, | ||
566 | dev->eps[ep_index].stopped_trb, | ||
567 | &state->new_cycle_state); | ||
568 | if (!state->new_deq_seg) { | ||
569 | WARN_ON(1); | ||
570 | return; | ||
571 | } | ||
572 | 563 | ||
573 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ | 564 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ |
574 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | 565 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
@@ -577,46 +568,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
577 | if (ep->ep_state & EP_HAS_STREAMS) { | 568 | if (ep->ep_state & EP_HAS_STREAMS) { |
578 | struct xhci_stream_ctx *ctx = | 569 | struct xhci_stream_ctx *ctx = |
579 | &ep->stream_info->stream_ctx_array[stream_id]; | 570 | &ep->stream_info->stream_ctx_array[stream_id]; |
580 | state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring); | 571 | hw_dequeue = le64_to_cpu(ctx->stream_ring); |
581 | } else { | 572 | } else { |
582 | struct xhci_ep_ctx *ep_ctx | 573 | struct xhci_ep_ctx *ep_ctx |
583 | = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | 574 | = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); |
584 | state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); | 575 | hw_dequeue = le64_to_cpu(ep_ctx->deq); |
585 | } | 576 | } |
586 | 577 | ||
578 | /* Find virtual address and segment of hardware dequeue pointer */ | ||
579 | state->new_deq_seg = ep_ring->deq_seg; | ||
580 | state->new_deq_ptr = ep_ring->dequeue; | ||
581 | while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr) | ||
582 | != (dma_addr_t)(hw_dequeue & ~0xf)) { | ||
583 | next_trb(xhci, ep_ring, &state->new_deq_seg, | ||
584 | &state->new_deq_ptr); | ||
585 | if (state->new_deq_ptr == ep_ring->dequeue) { | ||
586 | WARN_ON(1); | ||
587 | return; | ||
588 | } | ||
589 | } | ||
590 | /* | ||
591 | * Find cycle state for last_trb, starting at old cycle state of | ||
592 | * hw_dequeue. If there is only one segment ring, find_trb_seg() will | ||
593 | * return immediately and cannot toggle the cycle state if this search | ||
594 | * wraps around, so add one more toggle manually in that case. | ||
595 | */ | ||
596 | state->new_cycle_state = hw_dequeue & 0x1; | ||
597 | if (ep_ring->first_seg == ep_ring->first_seg->next && | ||
598 | cur_td->last_trb < state->new_deq_ptr) | ||
599 | state->new_cycle_state ^= 0x1; | ||
600 | |||
587 | state->new_deq_ptr = cur_td->last_trb; | 601 | state->new_deq_ptr = cur_td->last_trb; |
588 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | 602 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
589 | "Finding segment containing last TRB in TD."); | 603 | "Finding segment containing last TRB in TD."); |
590 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, | 604 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, |
591 | state->new_deq_ptr, | 605 | state->new_deq_ptr, &state->new_cycle_state); |
592 | &state->new_cycle_state); | ||
593 | if (!state->new_deq_seg) { | 606 | if (!state->new_deq_seg) { |
594 | WARN_ON(1); | 607 | WARN_ON(1); |
595 | return; | 608 | return; |
596 | } | 609 | } |
597 | 610 | ||
611 | /* Increment to find next TRB after last_trb. Cycle if appropriate. */ | ||
598 | trb = &state->new_deq_ptr->generic; | 612 | trb = &state->new_deq_ptr->generic; |
599 | if (TRB_TYPE_LINK_LE32(trb->field[3]) && | 613 | if (TRB_TYPE_LINK_LE32(trb->field[3]) && |
600 | (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) | 614 | (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) |
601 | state->new_cycle_state ^= 0x1; | 615 | state->new_cycle_state ^= 0x1; |
602 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); | 616 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); |
603 | 617 | ||
604 | /* | 618 | /* Don't update the ring cycle state for the producer (us). */ |
605 | * If there is only one segment in a ring, find_trb_seg()'s while loop | ||
606 | * will not run, and it will return before it has a chance to see if it | ||
607 | * needs to toggle the cycle bit. It can't tell if the stalled transfer | ||
608 | * ended just before the link TRB on a one-segment ring, or if the TD | ||
609 | * wrapped around the top of the ring, because it doesn't have the TD in | ||
610 | * question. Look for the one-segment case where stalled TRB's address | ||
611 | * is greater than the new dequeue pointer address. | ||
612 | */ | ||
613 | if (ep_ring->first_seg == ep_ring->first_seg->next && | ||
614 | state->new_deq_ptr < dev->eps[ep_index].stopped_trb) | ||
615 | state->new_cycle_state ^= 0x1; | ||
616 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | 619 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
617 | "Cycle state = 0x%x", state->new_cycle_state); | 620 | "Cycle state = 0x%x", state->new_cycle_state); |
618 | 621 | ||
619 | /* Don't update the ring cycle state for the producer (us). */ | ||
620 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | 622 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
621 | "New dequeue segment = %p (virtual)", | 623 | "New dequeue segment = %p (virtual)", |
622 | state->new_deq_seg); | 624 | state->new_deq_seg); |
@@ -799,7 +801,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, | |||
799 | if (list_empty(&ep->cancelled_td_list)) { | 801 | if (list_empty(&ep->cancelled_td_list)) { |
800 | xhci_stop_watchdog_timer_in_irq(xhci, ep); | 802 | xhci_stop_watchdog_timer_in_irq(xhci, ep); |
801 | ep->stopped_td = NULL; | 803 | ep->stopped_td = NULL; |
802 | ep->stopped_trb = NULL; | ||
803 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); | 804 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
804 | return; | 805 | return; |
805 | } | 806 | } |
@@ -867,11 +868,9 @@ remove_finished_td: | |||
867 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); | 868 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
868 | } | 869 | } |
869 | 870 | ||
870 | /* Clear stopped_td and stopped_trb if endpoint is not halted */ | 871 | /* Clear stopped_td if endpoint is not halted */ |
871 | if (!(ep->ep_state & EP_HALTED)) { | 872 | if (!(ep->ep_state & EP_HALTED)) |
872 | ep->stopped_td = NULL; | 873 | ep->stopped_td = NULL; |
873 | ep->stopped_trb = NULL; | ||
874 | } | ||
875 | 874 | ||
876 | /* | 875 | /* |
877 | * Drop the lock and complete the URBs in the cancelled TD list. | 876 | * Drop the lock and complete the URBs in the cancelled TD list. |
@@ -1941,14 +1940,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, | |||
1941 | struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; | 1940 | struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; |
1942 | ep->ep_state |= EP_HALTED; | 1941 | ep->ep_state |= EP_HALTED; |
1943 | ep->stopped_td = td; | 1942 | ep->stopped_td = td; |
1944 | ep->stopped_trb = event_trb; | ||
1945 | ep->stopped_stream = stream_id; | 1943 | ep->stopped_stream = stream_id; |
1946 | 1944 | ||
1947 | xhci_queue_reset_ep(xhci, slot_id, ep_index); | 1945 | xhci_queue_reset_ep(xhci, slot_id, ep_index); |
1948 | xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); | 1946 | xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); |
1949 | 1947 | ||
1950 | ep->stopped_td = NULL; | 1948 | ep->stopped_td = NULL; |
1951 | ep->stopped_trb = NULL; | ||
1952 | ep->stopped_stream = 0; | 1949 | ep->stopped_stream = 0; |
1953 | 1950 | ||
1954 | xhci_ring_cmd_db(xhci); | 1951 | xhci_ring_cmd_db(xhci); |
@@ -2030,7 +2027,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2030 | * the ring dequeue pointer or take this TD off any lists yet. | 2027 | * the ring dequeue pointer or take this TD off any lists yet. |
2031 | */ | 2028 | */ |
2032 | ep->stopped_td = td; | 2029 | ep->stopped_td = td; |
2033 | ep->stopped_trb = event_trb; | ||
2034 | return 0; | 2030 | return 0; |
2035 | } else { | 2031 | } else { |
2036 | if (trb_comp_code == COMP_STALL) { | 2032 | if (trb_comp_code == COMP_STALL) { |
@@ -2042,7 +2038,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2042 | * USB class driver clear the stall later. | 2038 | * USB class driver clear the stall later. |
2043 | */ | 2039 | */ |
2044 | ep->stopped_td = td; | 2040 | ep->stopped_td = td; |
2045 | ep->stopped_trb = event_trb; | ||
2046 | ep->stopped_stream = ep_ring->stream_id; | 2041 | ep->stopped_stream = ep_ring->stream_id; |
2047 | } else if (xhci_requires_manual_halt_cleanup(xhci, | 2042 | } else if (xhci_requires_manual_halt_cleanup(xhci, |
2048 | ep_ctx, trb_comp_code)) { | 2043 | ep_ctx, trb_comp_code)) { |