author     Xenia Ragiadakou <burzalodowa@gmail.com>       2013-08-13 23:33:54 -0400
committer  Sarah Sharp <sarah.a.sharp@linux.intel.com>    2013-08-14 00:14:42 -0400
commit     aa50b29061d3df896c494d92e9c8c2e1f295cc6e (patch)
tree       886f57b61cf8d9d20965832492ef36022ae66d36 /drivers
parent     63a23b9a7451660525c90b08219e14e701e294f1 (diff)
xhci: trace debug statements for urb cancellation
This patch defines a new trace event, which is called xhci_dbg_cancel_urb and
belongs to the event class xhci_log_msg, and adds tracepoints that trace the
debug messages related to the removal of a cancelled URB from the endpoint's
transfer ring.

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
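For context: the new event reuses the existing xhci_log_msg event class, and the
converted call sites go through the xhci_dbg_trace() helper introduced earlier in
this series instead of calling xhci_dbg() directly. The sketch below is not part of
this patch; it only approximates how that class and helper fit together, and the
class body, the XHCI_MSG_MAX bound, and the helper's exact implementation are
assumptions based on that earlier work.

/*
 * Sketch only -- not part of this patch. Approximates the xhci_log_msg
 * event class (xhci-trace.h) and the xhci_dbg_trace() helper (xhci.c)
 * that the new tracepoints rely on.
 */
DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
	TP_fast_assign(
		/* Render the printf-style message into the trace buffer. */
		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);

void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	xhci_dbg(xhci, "%pV\n", &vaf);	/* still emits the dmesg line */
	trace(&vaf);			/* and fires the given tracepoint */
	va_end(args);
}

With that wiring, every converted message still reaches the kernel log via
xhci_dbg() and can additionally be captured by enabling the xhci_dbg_cancel_urb
tracepoint (under the xhci-hcd trace system) at run time.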
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/usb/host/xhci-ring.c   64
-rw-r--r--  drivers/usb/host/xhci-trace.h   5
-rw-r--r--  drivers/usb/host/xhci.c        13
3 files changed, 54 insertions(+), 28 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 86971ac5851b..f908e205f7d5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -556,7 +556,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		return;
 	}
 	state->new_cycle_state = 0;
-	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding segment containing stopped TRB.");
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
 			dev->eps[ep_index].stopped_trb,
 			&state->new_cycle_state);
@@ -566,12 +567,14 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
-	xhci_dbg(xhci, "Finding endpoint context\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding endpoint context");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
 
 	state->new_deq_ptr = cur_td->last_trb;
-	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding segment containing last TRB in TD.");
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
 			state->new_deq_ptr,
 			&state->new_cycle_state);
@@ -598,13 +601,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	if (ep_ring->first_seg == ep_ring->first_seg->next &&
 			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
 		state->new_cycle_state ^= 0x1;
-	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Cycle state = 0x%x", state->new_cycle_state);
 
 	/* Don't update the ring cycle state for the producer (us). */
-	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"New dequeue segment = %p (virtual)",
 			state->new_deq_seg);
 	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
-	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"New dequeue pointer = 0x%llx (DMA)",
 			(unsigned long long) addr);
 }
 
@@ -632,9 +638,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		if (flip_cycle)
 			cur_trb->generic.field[3] ^=
 				cpu_to_le32(TRB_CYCLE);
-		xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
-		xhci_dbg(xhci, "Address = %p (0x%llx dma); "
-				"in seg %p (0x%llx dma)\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Cancel (unchain) link TRB");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Address = %p (0x%llx dma); "
+				"in seg %p (0x%llx dma)",
 				cur_trb,
 				(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 				cur_seg,
@@ -652,7 +660,8 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 				cpu_to_le32(TRB_CYCLE);
 			cur_trb->generic.field[3] |= cpu_to_le32(
 				TRB_TYPE(TRB_TR_NOOP));
-			xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"TRB to noop at offset 0x%llx",
 					(unsigned long long)
 					xhci_trb_virt_to_dma(cur_seg, cur_trb));
 		}
@@ -673,8 +682,9 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 
-	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
-			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+			"new deq ptr = %p (0x%llx dma), new cycle = %u",
 			deq_state->new_deq_seg,
 			(unsigned long long)deq_state->new_deq_seg->dma,
 			deq_state->new_deq_ptr,
@@ -794,7 +804,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 */
 	list_for_each(entry, &ep->cancelled_td_list) {
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
-		xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Removing canceled TD starting at 0x%llx (dma).",
 				(unsigned long long)xhci_trb_virt_to_dma(
 					cur_td->start_seg, cur_td->first_trb));
 		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
@@ -914,14 +925,16 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 
 	ep->stop_cmds_pending--;
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
-		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
-				"xHCI as DYING, exiting.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Stop EP timer ran, but another timer marked "
+				"xHCI as DYING, exiting.");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
-		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
-				"exiting.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Stop EP timer ran, but no command pending, "
+				"exiting.");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
@@ -963,8 +976,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 			ring = temp_ep->ring;
 			if (!ring)
 				continue;
-			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
-					"ep index %u\n", i, j);
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Killing URBs for slot ID %u, "
+					"ep index %u", i, j);
 			while (!list_empty(&ring->td_list)) {
 				cur_td = list_first_entry(&ring->td_list,
 						struct xhci_td,
@@ -987,9 +1001,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 		}
 	}
 	spin_unlock_irqrestore(&xhci->lock, flags);
-	xhci_dbg(xhci, "Calling usb_hc_died()\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Calling usb_hc_died()");
 	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
-	xhci_dbg(xhci, "xHCI host controller is dead.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"xHCI host controller is dead.");
 }
 
 
@@ -1093,7 +1109,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		ep_state &= EP_STATE_MASK;
 		slot_state = le32_to_cpu(slot_ctx->dev_state);
 		slot_state = GET_SLOT_STATE(slot_state);
-		xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Slot state = %u, EP state = %u",
 				slot_state, ep_state);
 		break;
 	case COMP_EBADSLT:
@@ -1113,7 +1130,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		 * cancelling URBs, which might not be an error...
 		 */
 	} else {
-		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Successful Set TR Deq Ptr cmd, deq = @%08llx",
 			le64_to_cpu(ep_ctx->deq));
 		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
 				dev->eps[ep_index].queued_deq_ptr) ==
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index d6c222916858..c0eaccc10a3c 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -52,6 +52,11 @@ DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
 	TP_ARGS(vaf)
 );
 
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
 DECLARE_EVENT_CLASS(xhci_log_ctx,
 	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
 		 unsigned int ep_num),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 9f4f73bfd01b..4adf5e19918a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1434,7 +1434,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		goto done;
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
-		xhci_dbg(xhci, "HW died, freeing TD.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"HW died, freeing TD.");
 		urb_priv = urb->hcpriv;
 		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
 			td = urb_priv->td[i];
@@ -1452,8 +1453,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	}
 	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
 			(xhci->xhc_state & XHCI_STATE_HALTED)) {
-		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
-				"non-responsive xHCI host.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Ep 0x%x: URB %p to be canceled on "
+				"non-responsive xHCI host.",
 				urb->ep->desc.bEndpointAddress, urb);
 		/* Let the stop endpoint command watchdog timer (which set this
 		 * state) finish cleaning up the endpoint TD lists. We must
@@ -1474,8 +1476,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	urb_priv = urb->hcpriv;
 	i = urb_priv->td_cnt;
 	if (i < urb_priv->length)
-		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
-				"starting at offset 0x%llx\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Cancel URB %p, dev %s, ep 0x%x, "
+				"starting at offset 0x%llx",
 				urb, urb->dev->devpath,
 				urb->ep->desc.bEndpointAddress,
 				(unsigned long long) xhci_trb_virt_to_dma(