diff options
author | Xenia Ragiadakou <burzalodowa@gmail.com> | 2013-08-13 23:33:54 -0400 |
---|---|---|
committer | Sarah Sharp <sarah.a.sharp@linux.intel.com> | 2013-08-14 00:14:42 -0400 |
commit | aa50b29061d3df896c494d92e9c8c2e1f295cc6e (patch) | |
tree | 886f57b61cf8d9d20965832492ef36022ae66d36 /drivers/usb/host/xhci-ring.c | |
parent | 63a23b9a7451660525c90b08219e14e701e294f1 (diff) |
xhci: trace debug statements for urb cancellation
This patch defines a new trace event, which is called xhci_dbg_cancel_urb
and belongs to the event class xhci_log_msg, and adds tracepoints that
trace the debug messages related to the removal of a cancelled URB from
the endpoint's transfer ring.
Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 64 |
1 file changed, 41 insertions, 23 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 86971ac5851b..f908e205f7d5 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -556,7 +556,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
556 | return; | 556 | return; |
557 | } | 557 | } |
558 | state->new_cycle_state = 0; | 558 | state->new_cycle_state = 0; |
559 | xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); | 559 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
560 | "Finding segment containing stopped TRB."); | ||
560 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, | 561 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, |
561 | dev->eps[ep_index].stopped_trb, | 562 | dev->eps[ep_index].stopped_trb, |
562 | &state->new_cycle_state); | 563 | &state->new_cycle_state); |
@@ -566,12 +567,14 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
566 | } | 567 | } |
567 | 568 | ||
568 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ | 569 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ |
569 | xhci_dbg(xhci, "Finding endpoint context\n"); | 570 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
571 | "Finding endpoint context"); | ||
570 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | 572 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); |
571 | state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); | 573 | state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); |
572 | 574 | ||
573 | state->new_deq_ptr = cur_td->last_trb; | 575 | state->new_deq_ptr = cur_td->last_trb; |
574 | xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); | 576 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
577 | "Finding segment containing last TRB in TD."); | ||
575 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, | 578 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, |
576 | state->new_deq_ptr, | 579 | state->new_deq_ptr, |
577 | &state->new_cycle_state); | 580 | &state->new_cycle_state); |
@@ -598,13 +601,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
598 | if (ep_ring->first_seg == ep_ring->first_seg->next && | 601 | if (ep_ring->first_seg == ep_ring->first_seg->next && |
599 | state->new_deq_ptr < dev->eps[ep_index].stopped_trb) | 602 | state->new_deq_ptr < dev->eps[ep_index].stopped_trb) |
600 | state->new_cycle_state ^= 0x1; | 603 | state->new_cycle_state ^= 0x1; |
601 | xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); | 604 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
605 | "Cycle state = 0x%x", state->new_cycle_state); | ||
602 | 606 | ||
603 | /* Don't update the ring cycle state for the producer (us). */ | 607 | /* Don't update the ring cycle state for the producer (us). */ |
604 | xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", | 608 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
609 | "New dequeue segment = %p (virtual)", | ||
605 | state->new_deq_seg); | 610 | state->new_deq_seg); |
606 | addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); | 611 | addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); |
607 | xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", | 612 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
613 | "New dequeue pointer = 0x%llx (DMA)", | ||
608 | (unsigned long long) addr); | 614 | (unsigned long long) addr); |
609 | } | 615 | } |
610 | 616 | ||
@@ -632,9 +638,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
632 | if (flip_cycle) | 638 | if (flip_cycle) |
633 | cur_trb->generic.field[3] ^= | 639 | cur_trb->generic.field[3] ^= |
634 | cpu_to_le32(TRB_CYCLE); | 640 | cpu_to_le32(TRB_CYCLE); |
635 | xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); | 641 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
636 | xhci_dbg(xhci, "Address = %p (0x%llx dma); " | 642 | "Cancel (unchain) link TRB"); |
637 | "in seg %p (0x%llx dma)\n", | 643 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
644 | "Address = %p (0x%llx dma); " | ||
645 | "in seg %p (0x%llx dma)", | ||
638 | cur_trb, | 646 | cur_trb, |
639 | (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), | 647 | (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), |
640 | cur_seg, | 648 | cur_seg, |
@@ -652,7 +660,8 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
652 | cpu_to_le32(TRB_CYCLE); | 660 | cpu_to_le32(TRB_CYCLE); |
653 | cur_trb->generic.field[3] |= cpu_to_le32( | 661 | cur_trb->generic.field[3] |= cpu_to_le32( |
654 | TRB_TYPE(TRB_TR_NOOP)); | 662 | TRB_TYPE(TRB_TR_NOOP)); |
655 | xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n", | 663 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
664 | "TRB to noop at offset 0x%llx", | ||
656 | (unsigned long long) | 665 | (unsigned long long) |
657 | xhci_trb_virt_to_dma(cur_seg, cur_trb)); | 666 | xhci_trb_virt_to_dma(cur_seg, cur_trb)); |
658 | } | 667 | } |
@@ -673,8 +682,9 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | |||
673 | { | 682 | { |
674 | struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; | 683 | struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; |
675 | 684 | ||
676 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " | 685 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
677 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", | 686 | "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " |
687 | "new deq ptr = %p (0x%llx dma), new cycle = %u", | ||
678 | deq_state->new_deq_seg, | 688 | deq_state->new_deq_seg, |
679 | (unsigned long long)deq_state->new_deq_seg->dma, | 689 | (unsigned long long)deq_state->new_deq_seg->dma, |
680 | deq_state->new_deq_ptr, | 690 | deq_state->new_deq_ptr, |
@@ -794,7 +804,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
794 | */ | 804 | */ |
795 | list_for_each(entry, &ep->cancelled_td_list) { | 805 | list_for_each(entry, &ep->cancelled_td_list) { |
796 | cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); | 806 | cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); |
797 | xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n", | 807 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
808 | "Removing canceled TD starting at 0x%llx (dma).", | ||
798 | (unsigned long long)xhci_trb_virt_to_dma( | 809 | (unsigned long long)xhci_trb_virt_to_dma( |
799 | cur_td->start_seg, cur_td->first_trb)); | 810 | cur_td->start_seg, cur_td->first_trb)); |
800 | ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); | 811 | ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); |
@@ -914,14 +925,16 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
914 | 925 | ||
915 | ep->stop_cmds_pending--; | 926 | ep->stop_cmds_pending--; |
916 | if (xhci->xhc_state & XHCI_STATE_DYING) { | 927 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
917 | xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " | 928 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
918 | "xHCI as DYING, exiting.\n"); | 929 | "Stop EP timer ran, but another timer marked " |
930 | "xHCI as DYING, exiting."); | ||
919 | spin_unlock_irqrestore(&xhci->lock, flags); | 931 | spin_unlock_irqrestore(&xhci->lock, flags); |
920 | return; | 932 | return; |
921 | } | 933 | } |
922 | if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { | 934 | if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { |
923 | xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " | 935 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
924 | "exiting.\n"); | 936 | "Stop EP timer ran, but no command pending, " |
937 | "exiting."); | ||
925 | spin_unlock_irqrestore(&xhci->lock, flags); | 938 | spin_unlock_irqrestore(&xhci->lock, flags); |
926 | return; | 939 | return; |
927 | } | 940 | } |
@@ -963,8 +976,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
963 | ring = temp_ep->ring; | 976 | ring = temp_ep->ring; |
964 | if (!ring) | 977 | if (!ring) |
965 | continue; | 978 | continue; |
966 | xhci_dbg(xhci, "Killing URBs for slot ID %u, " | 979 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
967 | "ep index %u\n", i, j); | 980 | "Killing URBs for slot ID %u, " |
981 | "ep index %u", i, j); | ||
968 | while (!list_empty(&ring->td_list)) { | 982 | while (!list_empty(&ring->td_list)) { |
969 | cur_td = list_first_entry(&ring->td_list, | 983 | cur_td = list_first_entry(&ring->td_list, |
970 | struct xhci_td, | 984 | struct xhci_td, |
@@ -987,9 +1001,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
987 | } | 1001 | } |
988 | } | 1002 | } |
989 | spin_unlock_irqrestore(&xhci->lock, flags); | 1003 | spin_unlock_irqrestore(&xhci->lock, flags); |
990 | xhci_dbg(xhci, "Calling usb_hc_died()\n"); | 1004 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
1005 | "Calling usb_hc_died()"); | ||
991 | usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); | 1006 | usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); |
992 | xhci_dbg(xhci, "xHCI host controller is dead.\n"); | 1007 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
1008 | "xHCI host controller is dead."); | ||
993 | } | 1009 | } |
994 | 1010 | ||
995 | 1011 | ||
@@ -1093,7 +1109,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
1093 | ep_state &= EP_STATE_MASK; | 1109 | ep_state &= EP_STATE_MASK; |
1094 | slot_state = le32_to_cpu(slot_ctx->dev_state); | 1110 | slot_state = le32_to_cpu(slot_ctx->dev_state); |
1095 | slot_state = GET_SLOT_STATE(slot_state); | 1111 | slot_state = GET_SLOT_STATE(slot_state); |
1096 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", | 1112 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
1113 | "Slot state = %u, EP state = %u", | ||
1097 | slot_state, ep_state); | 1114 | slot_state, ep_state); |
1098 | break; | 1115 | break; |
1099 | case COMP_EBADSLT: | 1116 | case COMP_EBADSLT: |
@@ -1113,7 +1130,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
1113 | * cancelling URBs, which might not be an error... | 1130 | * cancelling URBs, which might not be an error... |
1114 | */ | 1131 | */ |
1115 | } else { | 1132 | } else { |
1116 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", | 1133 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
1134 | "Successful Set TR Deq Ptr cmd, deq = @%08llx", | ||
1117 | le64_to_cpu(ep_ctx->deq)); | 1135 | le64_to_cpu(ep_ctx->deq)); |
1118 | if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, | 1136 | if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, |
1119 | dev->eps[ep_index].queued_deq_ptr) == | 1137 | dev->eps[ep_index].queued_deq_ptr) == |