Diffstat (limited to 'drivers')
 -rw-r--r-- drivers/usb/host/Kconfig      |   9
 -rw-r--r-- drivers/usb/host/Makefile     |   4
 -rw-r--r-- drivers/usb/host/xhci-dbg.c   |  14
 -rw-r--r-- drivers/usb/host/xhci-hub.c   |   7
 -rw-r--r-- drivers/usb/host/xhci-mem.c   | 187
 -rw-r--r-- drivers/usb/host/xhci-pci.c   |  14
 -rw-r--r-- drivers/usb/host/xhci-plat.c  |  20
 -rw-r--r-- drivers/usb/host/xhci-ring.c  |  89
 -rw-r--r-- drivers/usb/host/xhci-trace.c |  15
 -rw-r--r-- drivers/usb/host/xhci-trace.h | 151
 -rw-r--r-- drivers/usb/host/xhci.c       | 337
 -rw-r--r-- drivers/usb/host/xhci.h       |  17
 12 files changed, 510 insertions(+), 354 deletions(-)
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index c41005a47802..5be0326aae38 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -29,15 +29,6 @@ if USB_XHCI_HCD
 config USB_XHCI_PLATFORM
 	tristate
 
-config USB_XHCI_HCD_DEBUGGING
-	bool "Debugging for the xHCI host controller"
-	---help---
-	  Say 'Y' to turn on debugging for the xHCI host controller driver.
-	  This will spew debugging output, even in interrupt context.
-	  This should only be used for debugging xHCI driver bugs.
-
-	  If unsure, say N.
-
 endif # USB_XHCI_HCD
 
 config USB_EHCI_HCD
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 829a3397882a..50b0041c09a9 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -4,6 +4,9 @@
 
 ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
 
+# tell define_trace.h where to find the xhci trace header
+CFLAGS_xhci-trace.o := -I$(src)
+
 isp1760-y := isp1760-hcd.o isp1760-if.o
 
 fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o
@@ -13,6 +16,7 @@ fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
 
 xhci-hcd-y := xhci.o xhci-mem.o
 xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
+xhci-hcd-y += xhci-trace.o
 xhci-hcd-$(CONFIG_PCI)	+= xhci-pci.o
 
 ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
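
(A note on the new CFLAGS_xhci-trace.o line: the generic tracepoint machinery in <trace/define_trace.h> re-includes the trace header by name, using the TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE macros that xhci-trace.h defines at its tail:

	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH .
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE xhci-trace

Because TRACE_INCLUDE_PATH is the current directory rather than the default include/trace/events/, drivers/usb/host must be on the include path when xhci-trace.o is compiled, which is exactly what -I$(src) arranges.)
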
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 5d5e58fdeccc..73503a81ee81 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -580,3 +580,17 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
 	xhci_dbg_slot_ctx(xhci, ctx);
 	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
 }
+
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+			const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	xhci_dbg(xhci, "%pV\n", &vaf);
+	trace(&vaf);
+	va_end(args);
+}
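
(This helper is the pivot of the whole conversion: it formats the message once into a struct va_format, prints it to the kernel log through xhci_dbg() -- the "%pV" printk extension expands the saved va_list -- and then fires the caller-supplied tracepoint with the same formatted text. Call sites therefore change from

	xhci_dbg(xhci, "// Halt the HC\n");

to

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");

dropping the trailing newline, which xhci_dbg_trace() adds back on the dmesg path.)
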
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 93f3fdf0ff0a..fae697ed0b70 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -24,6 +24,7 @@
 #include <asm/unaligned.h>
 
 #include "xhci.h"
+#include "xhci-trace.h"
 
 #define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
 #define	PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
@@ -535,8 +536,10 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
 			xhci->port_status_u0 |= 1 << wIndex;
 			if (xhci->port_status_u0 == all_ports_seen_u0) {
 				del_timer_sync(&xhci->comp_mode_recovery_timer);
-				xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
-				xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
+				xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+					"All USB3 ports have entered U0 already!");
+				xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+					"Compliance Mode Recovery Timer Deleted.");
 			}
 	}
 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index df6978abd7e6..b150360d1e78 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -26,6 +26,7 @@
 #include <linux/dmapool.h>
 
 #include "xhci.h"
+#include "xhci-trace.h"
 
 /*
  * Allocates a generic ring segment from the ring pool, sets the dma address,
@@ -347,7 +348,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		return -ENOMEM;
 
 	xhci_link_rings(xhci, ring, first, last, num_segs);
-	xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+			"ring expansion succeed, now has %d segments",
 			ring->num_segs);
 
 	return 0;
@@ -481,17 +483,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
 	return ep->ring;
 }
 
-/* Only use this when you know stream_info is valid */
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static struct xhci_ring *dma_to_stream_ring(
-		struct xhci_stream_info *stream_info,
-		u64 address)
-{
-	return radix_tree_lookup(&stream_info->trb_address_map,
-			address >> TRB_SEGMENT_SHIFT);
-}
-#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
 struct xhci_ring *xhci_stream_id_to_ring(
 		struct xhci_virt_device *dev,
 		unsigned int ep_index,
@@ -509,58 +500,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
 	return ep->stream_info->stream_rings[stream_id];
 }
 
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static int xhci_test_radix_tree(struct xhci_hcd *xhci,
-		unsigned int num_streams,
-		struct xhci_stream_info *stream_info)
-{
-	u32 cur_stream;
-	struct xhci_ring *cur_ring;
-	u64 addr;
-
-	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
-		struct xhci_ring *mapped_ring;
-		int trb_size = sizeof(union xhci_trb);
-
-		cur_ring = stream_info->stream_rings[cur_stream];
-		for (addr = cur_ring->first_seg->dma;
-				addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE;
-				addr += trb_size) {
-			mapped_ring = dma_to_stream_ring(stream_info, addr);
-			if (cur_ring != mapped_ring) {
-				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
-						"didn't map to stream ID %u; "
-						"mapped to ring %p\n",
-						(unsigned long long) addr,
-						cur_stream,
-						mapped_ring);
-				return -EINVAL;
-			}
-		}
-		/* One TRB after the end of the ring segment shouldn't return a
-		 * pointer to the current ring (although it may be a part of a
-		 * different ring).
-		 */
-		mapped_ring = dma_to_stream_ring(stream_info, addr);
-		if (mapped_ring != cur_ring) {
-			/* One TRB before should also fail */
-			addr = cur_ring->first_seg->dma - trb_size;
-			mapped_ring = dma_to_stream_ring(stream_info, addr);
-		}
-		if (mapped_ring == cur_ring) {
-			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
-					"mapped to valid stream ID %u; "
-					"mapped ring = %p\n",
-					(unsigned long long) addr,
-					cur_stream,
-					mapped_ring);
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
 /*
  * Change an endpoint's internal structure so it supports stream IDs.  The
  * number of requested streams includes stream 0, which cannot be used by device
@@ -687,13 +626,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 * was any other way, the host controller would assume the ring is
 	 * "empty" and wait forever for data to be queued to that stream ID).
 	 */
-#if XHCI_DEBUG
-	/* Do a little test on the radix tree to make sure it returns the
-	 * correct values.
-	 */
-	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
-		goto cleanup_rings;
-#endif
 
 	return stream_info;
 
@@ -731,7 +663,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
 	 */
 	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
-	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+			"Setting number of stream ctx array entries to %u",
 			1 << (max_primary_streams + 1));
 	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
 	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
@@ -1613,7 +1546,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
 
-	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Allocating %d scratchpad buffers", num_sp);
 
 	if (!num_sp)
 		return 0;
@@ -1770,11 +1704,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 		dma_free_coherent(&pdev->dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
-	xhci_dbg(xhci, "Freed ERST\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
-	xhci_dbg(xhci, "Freed event ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
 
 	if (xhci->lpm_command)
 		xhci_free_command(xhci, xhci->lpm_command);
@@ -1782,7 +1716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
-	xhci_dbg(xhci, "Freed command ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
 	list_for_each_entry_safe(cur_cd, next_cd,
 			&xhci->cancel_cmd_list, cancel_cmd_list) {
 		list_del(&cur_cd->cancel_cmd_list);
@@ -1795,22 +1729,24 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (xhci->segment_pool)
 		dma_pool_destroy(xhci->segment_pool);
 	xhci->segment_pool = NULL;
-	xhci_dbg(xhci, "Freed segment pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
 
 	if (xhci->device_pool)
 		dma_pool_destroy(xhci->device_pool);
 	xhci->device_pool = NULL;
-	xhci_dbg(xhci, "Freed device context pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
 
 	if (xhci->small_streams_pool)
 		dma_pool_destroy(xhci->small_streams_pool);
 	xhci->small_streams_pool = NULL;
-	xhci_dbg(xhci, "Freed small stream array pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Freed small stream array pool");
 
 	if (xhci->medium_streams_pool)
 		dma_pool_destroy(xhci->medium_streams_pool);
 	xhci->medium_streams_pool = NULL;
-	xhci_dbg(xhci, "Freed medium stream array pool\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Freed medium stream array pool");
 
 	if (xhci->dcbaa)
 		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
@@ -2036,8 +1972,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 	 * there might be more events to service.
 	 */
 	temp &= ~ERST_EHB;
-	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
-			"preserving EHB bit\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write event ring dequeue pointer, "
+			"preserving EHB bit");
 	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
 			&xhci->ir_set->erst_dequeue);
 }
@@ -2060,8 +1997,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 	temp = xhci_readl(xhci, addr + 2);
 	port_offset = XHCI_EXT_PORT_OFF(temp);
 	port_count = XHCI_EXT_PORT_COUNT(temp);
-	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
-			"count = %u, revision = 0x%x\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Ext Cap %p, port offset = %u, "
+			"count = %u, revision = 0x%x",
 			addr, port_offset, port_count, major_revision);
 	/* Port count includes the current port offset */
 	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
@@ -2075,15 +2013,18 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 	/* Check the host's USB2 LPM capability */
 	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
 			(temp & XHCI_L1C)) {
-		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI 0.96: support USB2 software lpm");
 		xhci->sw_lpm_support = 1;
 	}
 
 	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
-		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"xHCI 1.0: support USB2 software lpm");
 		xhci->sw_lpm_support = 1;
 		if (temp & XHCI_HLC) {
-			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+					"xHCI 1.0: support USB2 hardware lpm");
 			xhci->hw_lpm_support = 1;
 		}
 	}
@@ -2207,18 +2148,21 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 		xhci_warn(xhci, "No ports on the roothubs?\n");
 		return -ENODEV;
 	}
-	xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
 			xhci->num_usb2_ports, xhci->num_usb3_ports);
 
 	/* Place limits on the number of roothub ports so that the hub
 	 * descriptors aren't longer than the USB core will allocate.
 	 */
 	if (xhci->num_usb3_ports > 15) {
-		xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Limiting USB 3.0 roothub ports to 15.");
 		xhci->num_usb3_ports = 15;
 	}
 	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
-		xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Limiting USB 2.0 roothub ports to %u.",
 				USB_MAXCHILDREN);
 		xhci->num_usb2_ports = USB_MAXCHILDREN;
 	}
@@ -2243,8 +2187,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 			xhci->usb2_ports[port_index] =
 				&xhci->op_regs->port_status_base +
 				NUM_PORT_REGS*i;
-			xhci_dbg(xhci, "USB 2.0 port at index %u, "
-					"addr = %p\n", i,
+			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+					"USB 2.0 port at index %u, "
+					"addr = %p", i,
 					xhci->usb2_ports[port_index]);
 			port_index++;
 			if (port_index == xhci->num_usb2_ports)
@@ -2263,8 +2208,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 			xhci->usb3_ports[port_index] =
 				&xhci->op_regs->port_status_base +
 				NUM_PORT_REGS*i;
-			xhci_dbg(xhci, "USB 3.0 port at index %u, "
-					"addr = %p\n", i,
+			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+					"USB 3.0 port at index %u, "
+					"addr = %p", i,
 					xhci->usb3_ports[port_index]);
 			port_index++;
 			if (port_index == xhci->num_usb3_ports)
@@ -2288,32 +2234,35 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	INIT_LIST_HEAD(&xhci->cancel_cmd_list);
 
 	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
-	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Supported page size register = 0x%x", page_size);
 	for (i = 0; i < 16; i++) {
 		if ((0x1 & page_size) != 0)
 			break;
 		page_size = page_size >> 1;
 	}
 	if (i < 16)
-		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Supported page size of %iK", (1 << (i+12)) / 1024);
 	else
 		xhci_warn(xhci, "WARN: no supported page size\n");
 	/* Use 4K pages, since that's common and the minimum the HC supports */
 	xhci->page_shift = 12;
 	xhci->page_size = 1 << xhci->page_shift;
-	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"HCD page size set to %iK", xhci->page_size / 1024);
 
 	/*
 	 * Program the Number of Device Slots Enabled field in the CONFIG
 	 * register with the max value of slots the HC can handle.
 	 */
 	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
-	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
-			(unsigned int) val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// xHC can handle at most %d device slots.", val);
 	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
 	val |= (val2 & ~HCS_SLOTS_MASK);
-	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
-			(unsigned int) val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting Max device slots reg = 0x%x.", val);
 	xhci_writel(xhci, val, &xhci->op_regs->config_reg);
 
 	/*
@@ -2326,7 +2275,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
 	xhci->dcbaa->dma = dma;
-	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Device context base array address = 0x%llx (DMA), %p (virt)",
 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
 	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
 
@@ -2365,8 +2315,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
-	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
-	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Allocated command ring at %p", xhci->cmd_ring);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
@@ -2374,7 +2325,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
 		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Setting command ring address to 0x%x", val);
 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 
@@ -2390,8 +2342,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
 	val &= DBOFF_MASK;
-	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
-			" from cap regs base addr\n", val);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Doorbell array is located at offset 0x%x"
+			" from cap regs base addr", val);
 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
@@ -2402,7 +2355,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg(xhci, "// Allocating event ring\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
 						flags);
 	if (!xhci->event_ring)
@@ -2415,13 +2368,15 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			GFP_KERNEL);
 	if (!xhci->erst.entries)
 		goto fail;
-	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Allocated event ring segment table at 0x%llx",
 			(unsigned long long)dma);
 
 	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
 	xhci->erst.num_entries = ERST_NUM_SEGS;
 	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
 			xhci->erst.num_entries,
 			xhci->erst.entries,
 			(unsigned long long)xhci->erst.erst_dma_addr);
@@ -2439,13 +2394,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
 	val &= ERST_SIZE_MASK;
 	val |= ERST_NUM_SEGS;
-	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
 			val);
 	xhci_writel(xhci, val, &xhci->ir_set->erst_size);
 
-	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST entries to point to event ring.");
 	/* set the segment table base address */
-	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"// Set ERST base address for ir_set 0 = 0x%llx",
 			(unsigned long long)xhci->erst.erst_dma_addr);
 	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
 	val_64 &= ERST_PTR_MASK;
@@ -2454,7 +2412,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 	/* Set the event ring dequeue address */
 	xhci_set_hc_event_deq(xhci);
-	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set 0.");
 	xhci_print_ir_set(xhci, 0);
 
 	/*
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 72960684a942..c2d495057eb5 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 
 #include "xhci.h"
+#include "xhci-trace.h"
 
 /* Device for a quirk */
 #define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
@@ -64,16 +65,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
 				pdev->revision == 0x0) {
 			xhci->quirks |= XHCI_RESET_EP_QUIRK;
-			xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
-					" endpoint cmd after reset endpoint\n");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Fresco Logic xHC needs configure"
+				" endpoint cmd after reset endpoint");
 		}
 		/* Fresco Logic confirms: all revisions of this chip do not
 		 * support MSI, even though some of them claim to in their PCI
 		 * capabilities.
 		 */
 		xhci->quirks |= XHCI_BROKEN_MSI;
-		xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
-				"has broken MSI implementation\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Fresco Logic revision %u "
+				"has broken MSI implementation",
 				pdev->revision);
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	}
@@ -110,7 +113,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
 			pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
-		xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"QUIRK: Resetting on resume");
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_VIA)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 412fe8d167cf..be5e70d2300c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
 
 #include "xhci.h"
 
@@ -104,6 +106,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENODEV;
 
+	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	else
+		dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
 	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
 	if (!hcd)
 		return -ENOMEM;
@@ -211,12 +222,21 @@ static const struct dev_pm_ops xhci_plat_pm_ops = {
 #define DEV_PM_OPS	NULL
 #endif /* CONFIG_PM */
 
+#ifdef CONFIG_OF
+static const struct of_device_id usb_xhci_of_match[] = {
+	{ .compatible = "xhci-platform" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+#endif
+
 static struct platform_driver usb_xhci_driver = {
 	.probe	= xhci_plat_probe,
 	.remove	= xhci_plat_remove,
 	.driver	= {
 		.name = "xhci-hcd",
 		.pm = DEV_PM_OPS,
+		.of_match_table = of_match_ptr(usb_xhci_of_match),
 	},
 };
 MODULE_ALIAS("platform:xhci-hcd");
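
(Two independent fixes ride along in the platform glue: the probe path now seeds a 32-bit coherent DMA mask and, when the platform code left dev->dma_mask unset -- as is the case for device-tree-created platform devices -- points it at coherent_dma_mask so the core DMA API works; and the new of_device_id table lets the driver bind to DT nodes carrying compatible = "xhci-platform", wrapped in of_match_ptr() so non-OF builds compile the table away.)
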
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5b08cd85f8e7..7b35af167e55 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -67,6 +67,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include "xhci.h"
+#include "xhci-trace.h"
 
 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
@@ -555,7 +556,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		return;
 	}
 	state->new_cycle_state = 0;
-	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding segment containing stopped TRB.");
 	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
 			dev->eps[ep_index].stopped_trb,
 			&state->new_cycle_state);
@@ -565,12 +567,14 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
-	xhci_dbg(xhci, "Finding endpoint context\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding endpoint context");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
 
 	state->new_deq_ptr = cur_td->last_trb;
-	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Finding segment containing last TRB in TD.");
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
 			state->new_deq_ptr,
 			&state->new_cycle_state);
@@ -597,13 +601,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	if (ep_ring->first_seg == ep_ring->first_seg->next &&
 			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
 		state->new_cycle_state ^= 0x1;
-	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Cycle state = 0x%x", state->new_cycle_state);
 
 	/* Don't update the ring cycle state for the producer (us). */
-	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"New dequeue segment = %p (virtual)",
 			state->new_deq_seg);
 	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
-	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"New dequeue pointer = 0x%llx (DMA)",
 			(unsigned long long) addr);
 }
@@ -631,9 +638,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			if (flip_cycle)
 				cur_trb->generic.field[3] ^=
 					cpu_to_le32(TRB_CYCLE);
-			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
-			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
-					"in seg %p (0x%llx dma)\n",
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Cancel (unchain) link TRB");
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Address = %p (0x%llx dma); "
+					"in seg %p (0x%llx dma)",
 					cur_trb,
 					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
 					cur_seg,
@@ -651,7 +660,8 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 					cpu_to_le32(TRB_CYCLE);
 			cur_trb->generic.field[3] |= cpu_to_le32(
 				TRB_TYPE(TRB_TR_NOOP));
-			xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"TRB to noop at offset 0x%llx",
 					(unsigned long long)
 					xhci_trb_virt_to_dma(cur_seg, cur_trb));
 		}
@@ -672,8 +682,9 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 
-	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
-			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+			"new deq ptr = %p (0x%llx dma), new cycle = %u",
 			deq_state->new_deq_seg,
 			(unsigned long long)deq_state->new_deq_seg->dma,
 			deq_state->new_deq_ptr,
@@ -793,7 +804,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 */
 	list_for_each(entry, &ep->cancelled_td_list) {
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
-		xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Removing canceled TD starting at 0x%llx (dma).",
 				(unsigned long long)xhci_trb_virt_to_dma(
 					cur_td->start_seg, cur_td->first_trb));
 		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
@@ -913,14 +925,16 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 
 	ep->stop_cmds_pending--;
 	if (xhci->xhc_state & XHCI_STATE_DYING) {
-		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
-				"xHCI as DYING, exiting.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Stop EP timer ran, but another timer marked "
+				"xHCI as DYING, exiting.");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
-		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
-				"exiting.\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Stop EP timer ran, but no command pending, "
+				"exiting.");
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
@@ -962,8 +976,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 			ring = temp_ep->ring;
 			if (!ring)
 				continue;
-			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
-					"ep index %u\n", i, j);
+			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+					"Killing URBs for slot ID %u, "
+					"ep index %u", i, j);
 			while (!list_empty(&ring->td_list)) {
 				cur_td = list_first_entry(&ring->td_list,
 						struct xhci_td,
@@ -986,9 +1001,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 		}
 	}
 	spin_unlock_irqrestore(&xhci->lock, flags);
-	xhci_dbg(xhci, "Calling usb_hc_died()\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Calling usb_hc_died()");
 	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
-	xhci_dbg(xhci, "xHCI host controller is dead.\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"xHCI host controller is dead.");
 }
 
 
@@ -1092,7 +1109,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		ep_state &= EP_STATE_MASK;
 		slot_state = le32_to_cpu(slot_ctx->dev_state);
 		slot_state = GET_SLOT_STATE(slot_state);
-		xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+				"Slot state = %u, EP state = %u",
 				slot_state, ep_state);
 		break;
 	case COMP_EBADSLT:
@@ -1112,7 +1130,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		 * cancelling URBs, which might not be an error...
 		 */
 	} else {
-		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+			"Successful Set TR Deq Ptr cmd, deq = @%08llx",
 			le64_to_cpu(ep_ctx->deq));
 		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
 					 dev->eps[ep_index].queued_deq_ptr) ==
@@ -1150,7 +1169,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
-	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+		"Ignoring reset ep completion code of %u",
 		GET_COMP_CODE(le32_to_cpu(event->status)));
 
 	/* HW with the reset endpoint quirk needs to have a configure endpoint
@@ -1158,7 +1178,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	 * because the HW can't handle two commands being queued in a row.
 	 */
 	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
-		xhci_dbg(xhci, "Queueing configure endpoint command\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Queueing configure endpoint command");
 		xhci_queue_configure_endpoint(xhci,
 				xhci->devs[slot_id]->in_ctx->dma, slot_id,
 				false);
@@ -1377,6 +1398,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		return;
 	}
 
+	trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic,
+			(struct xhci_generic_trb *) event);
+
 	if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
 		(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
 		/* If the return value is 0, we think the trb pointed by
@@ -1444,8 +1468,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
 		if (!(ep_state & EP_HALTED))
 			goto bandwidth_change;
-		xhci_dbg(xhci, "Completed config ep cmd - "
-				"last ep index = %d, state = %d\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Completed config ep cmd - "
+				"last ep index = %d, state = %d",
 				ep_index, ep_state);
 		/* Clear internal halted state and restart ring(s) */
 		xhci->devs[slot_id]->eps[ep_index].ep_state &=
@@ -1454,7 +1479,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			break;
 		}
 bandwidth_change:
-		xhci_dbg(xhci, "Completed config ep cmd\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+				"Completed config ep cmd");
 		xhci->devs[slot_id]->cmd_status =
 			GET_COMP_CODE(le32_to_cpu(event->status));
 		complete(&xhci->devs[slot_id]->cmd_completion);
@@ -1497,7 +1523,8 @@ bandwidth_change:
 			xhci->error_bitmask |= 1 << 6;
 			break;
 		}
-		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"NEC firmware version %2x.%02x",
 			NEC_FW_MAJOR(le32_to_cpu(event->status)),
 			NEC_FW_MINOR(le32_to_cpu(event->status)));
 		break;
@@ -2877,8 +2904,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			return -ENOMEM;
 		}
 
-		xhci_dbg(xhci, "ERROR no room on ep ring, "
-					"try ring expansion\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+				"ERROR no room on ep ring, try ring expansion");
 		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
 		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
 					mem_flags)) {
diff --git a/drivers/usb/host/xhci-trace.c b/drivers/usb/host/xhci-trace.c
new file mode 100644
index 000000000000..7cf30c83dcf3
--- /dev/null
+++ b/drivers/usb/host/xhci-trace.c
@@ -0,0 +1,15 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "xhci-trace.h"
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
new file mode 100644
index 000000000000..20364cc8d2fb
--- /dev/null
+++ b/drivers/usb/host/xhci-trace.h
@@ -0,0 +1,151 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xhci-hcd
+
+#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __XHCI_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "xhci.h"
+
+#define XHCI_MSG_MAX	500
+
+DECLARE_EVENT_CLASS(xhci_log_msg,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf),
+	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
+	TP_fast_assign(
+		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_ctx,
+	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+		 unsigned int ep_num),
+	TP_ARGS(xhci, ctx, ep_num),
+	TP_STRUCT__entry(
+		__field(int, ctx_64)
+		__field(unsigned, ctx_type)
+		__field(dma_addr_t, ctx_dma)
+		__field(u8 *, ctx_va)
+		__field(unsigned, ctx_ep_num)
+		__field(int, slot_id)
+		__dynamic_array(u32, ctx_data,
+			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
+			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
+	),
+	TP_fast_assign(
+		struct usb_device *udev;
+
+		udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
+		__entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+		__entry->ctx_type = ctx->type;
+		__entry->ctx_dma = ctx->dma;
+		__entry->ctx_va = ctx->bytes;
+		__entry->slot_id = udev->slot_id;
+		__entry->ctx_ep_num = ep_num;
+		memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
+			((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
+			((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
+	),
+	TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
+		__entry->ctx_64, __entry->ctx_type,
+		(unsigned long long) __entry->ctx_dma, __entry->ctx_va
+	)
+);
+
+DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
+	TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+		 unsigned int ep_num),
+	TP_ARGS(xhci, ctx, ep_num)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_event,
+	TP_PROTO(void *trb_va, struct xhci_generic_trb *ev),
+	TP_ARGS(trb_va, ev),
+	TP_STRUCT__entry(
+		__field(void *, va)
+		__field(u64, dma)
+		__field(u32, status)
+		__field(u32, flags)
+		__dynamic_array(__le32, trb, 4)
+	),
+	TP_fast_assign(
+		__entry->va = trb_va;
+		__entry->dma = le64_to_cpu(((u64)ev->field[1]) << 32 |
+				ev->field[0]);
+		__entry->status = le32_to_cpu(ev->field[2]);
+		__entry->flags = le32_to_cpu(ev->field[3]);
+		memcpy(__get_dynamic_array(trb), trb_va,
+			sizeof(struct xhci_generic_trb));
+	),
+	TP_printk("\ntrb_dma=@%llx, trb_va=@%p, status=%08x, flags=%08x",
+		(unsigned long long) __entry->dma, __entry->va,
+		__entry->status, __entry->flags
+	)
+);
+
+DEFINE_EVENT(xhci_log_event, xhci_cmd_completion,
+	TP_PROTO(void *trb_va, struct xhci_generic_trb *ev),
+	TP_ARGS(trb_va, ev)
+);
+
+#endif /* __XHCI_TRACE_H */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE xhci-trace
+
+#include <trace/define_trace.h>
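
(xhci-trace.c compiles these definitions exactly once: defining CREATE_TRACE_POINTS before including the header makes <trace/define_trace.h> expand the DECLARE_EVENT_CLASS/DEFINE_EVENT macros into real tracepoints rather than mere declarations. At run time the events should then appear under the tracing filesystem -- with debugfs in its usual place, /sys/kernel/debug/tracing/events/xhci-hcd/ -- where each event, or the whole group, can be switched on by writing 1 to its enable file; the exact path depends on how tracefs is mounted.)
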
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 246de8905db1..7299b591a341 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -29,6 +29,7 @@
 #include <linux/dmi.h>
 
 #include "xhci.h"
+#include "xhci-trace.h"
 
 #define DRIVER_AUTHOR "Sarah Sharp"
 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -100,7 +101,7 @@ void xhci_quiesce(struct xhci_hcd *xhci)
 int xhci_halt(struct xhci_hcd *xhci)
 {
 	int ret;
-	xhci_dbg(xhci, "// Halt the HC\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
 	xhci_quiesce(xhci);
 
 	ret = xhci_handshake(xhci, &xhci->op_regs->status,
@@ -124,7 +125,7 @@ static int xhci_start(struct xhci_hcd *xhci)
 
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
-	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
 			temp);
 	xhci_writel(xhci, temp, &xhci->op_regs->command);
 
@@ -162,7 +163,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 		return 0;
 	}
 
-	xhci_dbg(xhci, "// Reset the HC\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_RESET;
 	xhci_writel(xhci, command, &xhci->op_regs->command);
@@ -172,7 +173,8 @@ int xhci_reset(struct xhci_hcd *xhci)
 	if (ret)
 		return ret;
 
-	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wait for controller to be ready for doorbell rings");
 	/*
 	 * xHCI cannot write to any doorbells or operational registers other
 	 * than status until the "Controller Not Ready" flag is cleared.
@@ -214,14 +216,16 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 
 	ret = pci_enable_msi(pdev);
 	if (ret) {
-		xhci_dbg(xhci, "failed to allocate MSI entry\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"failed to allocate MSI entry");
 		return ret;
 	}
 
 	ret = request_irq(pdev->irq, xhci_msi_irq,
 				0, "xhci_hcd", xhci_to_hcd(xhci));
 	if (ret) {
-		xhci_dbg(xhci, "disable MSI interrupt\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"disable MSI interrupt");
 		pci_disable_msi(pdev);
 	}
 
@@ -284,7 +288,8 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
 
 	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
 	if (ret) {
-		xhci_dbg(xhci, "Failed to enable MSI-X\n");
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Failed to enable MSI-X");
 		goto free_entries;
 	}
 
@@ -300,7 +305,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
300 return ret; 305 return ret;
301 306
302disable_msix: 307disable_msix:
303 xhci_dbg(xhci, "disable MSI-X interrupt\n"); 308 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
304 xhci_free_irq(xhci); 309 xhci_free_irq(xhci);
305 pci_disable_msix(pdev); 310 pci_disable_msix(pdev);
306free_entries: 311free_entries:
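
A note on the disable_msix/free_entries labels above: this is the kernel's standard goto-unwind idiom, tearing down in exact reverse order of setup so that a failure at any step releases only what was already acquired. A self-contained sketch of the same ordering against the PCI MSI-X API of this era; all names here are hypothetical:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_msix(struct pci_dev *pdev,
			      struct msix_entry *entries, int count)
{
	int i, ret;

	for (i = 0; i < count; i++)
		entries[i].entry = i;	/* ask for vectors 0..count-1 */

	ret = pci_enable_msix(pdev, entries, count);
	if (ret)
		return ret;		/* nothing to unwind yet */

	for (i = 0; i < count; i++) {
		ret = request_irq(entries[i].vector, example_irq, 0,
				  "example", pdev);
		if (ret)
			goto free_irqs;
	}
	return 0;

free_irqs:
	while (--i >= 0)		/* release in reverse order */
		free_irq(entries[i].vector, pdev);
	pci_disable_msix(pdev);
	return ret;
}
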
@@ -417,9 +422,11 @@ static void compliance_mode_recovery(unsigned long arg)
417 * Compliance Mode Detected. Letting USB Core 422 * Compliance Mode Detected. Letting USB Core
418 * handle the Warm Reset 423 * handle the Warm Reset
419 */ 424 */
420 xhci_dbg(xhci, "Compliance mode detected->port %d\n", 425 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
426 "Compliance mode detected->port %d",
421 i + 1); 427 i + 1);
422 xhci_dbg(xhci, "Attempting compliance mode recovery\n"); 428 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
429 "Attempting compliance mode recovery");
423 hcd = xhci->shared_hcd; 430 hcd = xhci->shared_hcd;
424 431
425 if (hcd->state == HC_STATE_SUSPENDED) 432 if (hcd->state == HC_STATE_SUSPENDED)
@@ -457,7 +464,8 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
457 set_timer_slack(&xhci->comp_mode_recovery_timer, 464 set_timer_slack(&xhci->comp_mode_recovery_timer,
458 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); 465 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
459 add_timer(&xhci->comp_mode_recovery_timer); 466 add_timer(&xhci->comp_mode_recovery_timer);
460 xhci_dbg(xhci, "Compliance mode recovery timer initialized\n"); 467 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
468 "Compliance mode recovery timer initialized");
461} 469}
462 470
463/* 471/*
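
The recovery timer initialized in the hunk above uses the classic pre-timer_setup() timer idiom: init_timer(), hand-filled .data/.function/.expires, and set_timer_slack() to let the kernel coalesce wakeups. A generic, self-contained sketch of that idiom with hypothetical names, re-arming itself periodically the way the recovery timer does:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_ctx {
	struct timer_list timer;
};

static void example_timeout(unsigned long arg)
{
	struct example_ctx *ctx = (struct example_ctx *)arg;

	/* periodic work would go here; then re-arm for another 2 s */
	mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(2000));
}

static void example_timer_start(struct example_ctx *ctx)
{
	init_timer(&ctx->timer);
	ctx->timer.data = (unsigned long)ctx;
	ctx->timer.function = example_timeout;
	ctx->timer.expires = jiffies + msecs_to_jiffies(2000);
	/* allow up to 100 ms of slack so the wakeup can be batched */
	set_timer_slack(&ctx->timer, msecs_to_jiffies(100));
	add_timer(&ctx->timer);
}
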
@@ -505,16 +513,18 @@ int xhci_init(struct usb_hcd *hcd)
505 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 513 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
506 int retval = 0; 514 int retval = 0;
507 515
508 xhci_dbg(xhci, "xhci_init\n"); 516 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
509 spin_lock_init(&xhci->lock); 517 spin_lock_init(&xhci->lock);
510 if (xhci->hci_version == 0x95 && link_quirk) { 518 if (xhci->hci_version == 0x95 && link_quirk) {
511 xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n"); 519 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
520 "QUIRK: Not clearing Link TRB chain bits.");
512 xhci->quirks |= XHCI_LINK_TRB_QUIRK; 521 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
513 } else { 522 } else {
514 xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n"); 523 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
524 "xHCI doesn't need link TRB QUIRK");
515 } 525 }
516 retval = xhci_mem_init(xhci, GFP_KERNEL); 526 retval = xhci_mem_init(xhci, GFP_KERNEL);
517 xhci_dbg(xhci, "Finished xhci_init\n"); 527 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
518 528
519 /* Initializing Compliance Mode Recovery Data If Needed */ 529 /* Initializing Compliance Mode Recovery Data If Needed */
520 if (xhci_compliance_mode_recovery_timer_quirk_check()) { 530 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
@@ -528,57 +538,6 @@ int xhci_init(struct usb_hcd *hcd)
528/*-------------------------------------------------------------------------*/ 538/*-------------------------------------------------------------------------*/
529 539
530 540
531#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
532static void xhci_event_ring_work(unsigned long arg)
533{
534 unsigned long flags;
535 int temp;
536 u64 temp_64;
537 struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
538 int i, j;
539
540 xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
541
542 spin_lock_irqsave(&xhci->lock, flags);
543 temp = xhci_readl(xhci, &xhci->op_regs->status);
544 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
545 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
546 (xhci->xhc_state & XHCI_STATE_HALTED)) {
547 xhci_dbg(xhci, "HW died, polling stopped.\n");
548 spin_unlock_irqrestore(&xhci->lock, flags);
549 return;
550 }
551
552 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
553 xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
554 xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
555 xhci->error_bitmask = 0;
556 xhci_dbg(xhci, "Event ring:\n");
557 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
558 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
559 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
560 temp_64 &= ~ERST_PTR_MASK;
561 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
562 xhci_dbg(xhci, "Command ring:\n");
563 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
564 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
565 xhci_dbg_cmd_ptrs(xhci);
566 for (i = 0; i < MAX_HC_SLOTS; ++i) {
567 if (!xhci->devs[i])
568 continue;
569 for (j = 0; j < 31; ++j) {
570 xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
571 }
572 }
573 spin_unlock_irqrestore(&xhci->lock, flags);
574
575 if (!xhci->zombie)
576 mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
577 else
578 xhci_dbg(xhci, "Quit polling the event ring.\n");
579}
580#endif
581
582static int xhci_run_finished(struct xhci_hcd *xhci) 541static int xhci_run_finished(struct xhci_hcd *xhci)
583{ 542{
584 if (xhci_start(xhci)) { 543 if (xhci_start(xhci)) {
@@ -591,7 +550,8 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
591 if (xhci->quirks & XHCI_NEC_HOST) 550 if (xhci->quirks & XHCI_NEC_HOST)
592 xhci_ring_cmd_db(xhci); 551 xhci_ring_cmd_db(xhci);
593 552
594 xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n"); 553 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
554 "Finished xhci_run for USB3 roothub");
595 return 0; 555 return 0;
596} 556}
597 557
@@ -622,23 +582,12 @@ int xhci_run(struct usb_hcd *hcd)
622 if (!usb_hcd_is_primary_hcd(hcd)) 582 if (!usb_hcd_is_primary_hcd(hcd))
623 return xhci_run_finished(xhci); 583 return xhci_run_finished(xhci);
624 584
625 xhci_dbg(xhci, "xhci_run\n"); 585 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
626 586
627 ret = xhci_try_enable_msi(hcd); 587 ret = xhci_try_enable_msi(hcd);
628 if (ret) 588 if (ret)
629 return ret; 589 return ret;
630 590
631#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
632 init_timer(&xhci->event_ring_timer);
633 xhci->event_ring_timer.data = (unsigned long) xhci;
634 xhci->event_ring_timer.function = xhci_event_ring_work;
635 /* Poll the event ring */
636 xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
637 xhci->zombie = 0;
638 xhci_dbg(xhci, "Setting event ring polling timer\n");
639 add_timer(&xhci->event_ring_timer);
640#endif
641
642 xhci_dbg(xhci, "Command ring memory map follows:\n"); 591 xhci_dbg(xhci, "Command ring memory map follows:\n");
643 xhci_debug_ring(xhci, xhci->cmd_ring); 592 xhci_debug_ring(xhci, xhci->cmd_ring);
644 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); 593 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -651,9 +600,11 @@ int xhci_run(struct usb_hcd *hcd)
651 xhci_dbg_ring_ptrs(xhci, xhci->event_ring); 600 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
652 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 601 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
653 temp_64 &= ~ERST_PTR_MASK; 602 temp_64 &= ~ERST_PTR_MASK;
654 xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); 603 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
604 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
655 605
656 xhci_dbg(xhci, "// Set the interrupt modulation register\n"); 606 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
607 "// Set the interrupt modulation register");
657 temp = xhci_readl(xhci, &xhci->ir_set->irq_control); 608 temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
658 temp &= ~ER_IRQ_INTERVAL_MASK; 609 temp &= ~ER_IRQ_INTERVAL_MASK;
659 temp |= (u32) 160; 610 temp |= (u32) 160;
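
Background on the magic 160 written into irq_control above: the xHCI specification expresses the interrupt moderation interval in 250 ns units, so 160 imposes a 40 us floor between event-ring interrupts. The resulting ceiling of 25,000 interrupts per second is arithmetic, shown here rather than taken from the driver:

static unsigned int example_max_event_irqs_per_sec(void)
{
	unsigned int min_gap_ns = 160U * 250U;	/* 40,000 ns between irqs */

	return 1000000000U / min_gap_ns;	/* at most 25,000 irqs/s */
}
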
@@ -662,12 +613,13 @@ int xhci_run(struct usb_hcd *hcd)
662 /* Set the HCD state before we enable the irqs */ 613 /* Set the HCD state before we enable the irqs */
663 temp = xhci_readl(xhci, &xhci->op_regs->command); 614 temp = xhci_readl(xhci, &xhci->op_regs->command);
664 temp |= (CMD_EIE); 615 temp |= (CMD_EIE);
665 xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n", 616 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
666 temp); 617 "// Enable interrupts, cmd = 0x%x.", temp);
667 xhci_writel(xhci, temp, &xhci->op_regs->command); 618 xhci_writel(xhci, temp, &xhci->op_regs->command);
668 619
669 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 620 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
670 xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n", 621 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
622 "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
671 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); 623 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
672 xhci_writel(xhci, ER_IRQ_ENABLE(temp), 624 xhci_writel(xhci, ER_IRQ_ENABLE(temp),
673 &xhci->ir_set->irq_pending); 625 &xhci->ir_set->irq_pending);
@@ -677,7 +629,8 @@ int xhci_run(struct usb_hcd *hcd)
677 xhci_queue_vendor_command(xhci, 0, 0, 0, 629 xhci_queue_vendor_command(xhci, 0, 0, 0,
678 TRB_TYPE(TRB_NEC_GET_FW)); 630 TRB_TYPE(TRB_NEC_GET_FW));
679 631
680 xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n"); 632 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
633 "Finished xhci_run for USB2 roothub");
681 return 0; 634 return 0;
682} 635}
683 636
@@ -725,24 +678,20 @@ void xhci_stop(struct usb_hcd *hcd)
725 678
726 xhci_cleanup_msix(xhci); 679 xhci_cleanup_msix(xhci);
727 680
728#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
729 /* Tell the event ring poll function not to reschedule */
730 xhci->zombie = 1;
731 del_timer_sync(&xhci->event_ring_timer);
732#endif
733
734 /* Deleting Compliance Mode Recovery Timer */ 681 /* Deleting Compliance Mode Recovery Timer */
735 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 682 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
736 (!(xhci_all_ports_seen_u0(xhci)))) { 683 (!(xhci_all_ports_seen_u0(xhci)))) {
737 del_timer_sync(&xhci->comp_mode_recovery_timer); 684 del_timer_sync(&xhci->comp_mode_recovery_timer);
738 xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n", 685 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
686 "%s: compliance mode recovery timer deleted",
739 __func__); 687 __func__);
740 } 688 }
741 689
742 if (xhci->quirks & XHCI_AMD_PLL_FIX) 690 if (xhci->quirks & XHCI_AMD_PLL_FIX)
743 usb_amd_dev_put(); 691 usb_amd_dev_put();
744 692
745 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); 693 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
694 "// Disabling event ring interrupts");
746 temp = xhci_readl(xhci, &xhci->op_regs->status); 695 temp = xhci_readl(xhci, &xhci->op_regs->status);
747 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); 696 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
748 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 697 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
@@ -750,10 +699,11 @@ void xhci_stop(struct usb_hcd *hcd)
750 &xhci->ir_set->irq_pending); 699 &xhci->ir_set->irq_pending);
751 xhci_print_ir_set(xhci, 0); 700 xhci_print_ir_set(xhci, 0);
752 701
753 xhci_dbg(xhci, "cleaning up memory\n"); 702 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
754 xhci_mem_cleanup(xhci); 703 xhci_mem_cleanup(xhci);
755 xhci_dbg(xhci, "xhci_stop completed - status = %x\n", 704 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
756 xhci_readl(xhci, &xhci->op_regs->status)); 705 "xhci_stop completed - status = %x",
706 xhci_readl(xhci, &xhci->op_regs->status));
757} 707}
758 708
759/* 709/*
@@ -778,8 +728,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
778 728
779 xhci_cleanup_msix(xhci); 729 xhci_cleanup_msix(xhci);
780 730
781 xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n", 731 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
782 xhci_readl(xhci, &xhci->op_regs->status)); 732 "xhci_shutdown completed - status = %x",
733 xhci_readl(xhci, &xhci->op_regs->status));
783} 734}
784 735
785#ifdef CONFIG_PM 736#ifdef CONFIG_PM
@@ -820,7 +771,8 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
820 xhci->cmd_ring->dequeue) & 771 xhci->cmd_ring->dequeue) &
821 (u64) ~CMD_RING_RSVD_BITS) | 772 (u64) ~CMD_RING_RSVD_BITS) |
822 xhci->cmd_ring->cycle_state; 773 xhci->cmd_ring->cycle_state;
823 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", 774 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
775 "// Setting command ring address to 0x%llx",
824 (long unsigned long) val_64); 776 (long unsigned long) val_64);
825 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 777 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
826} 778}
@@ -933,7 +885,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
933 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 885 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
934 (!(xhci_all_ports_seen_u0(xhci)))) { 886 (!(xhci_all_ports_seen_u0(xhci)))) {
935 del_timer_sync(&xhci->comp_mode_recovery_timer); 887 del_timer_sync(&xhci->comp_mode_recovery_timer);
936 xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n", 888 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
889 "%s: compliance mode recovery timer deleted",
937 __func__); 890 __func__);
938 } 891 }
939 892
@@ -998,7 +951,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
998 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 951 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
999 !(xhci_all_ports_seen_u0(xhci))) { 952 !(xhci_all_ports_seen_u0(xhci))) {
1000 del_timer_sync(&xhci->comp_mode_recovery_timer); 953 del_timer_sync(&xhci->comp_mode_recovery_timer);
1001 xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n"); 954 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
955 "Compliance Mode Recovery Timer deleted!");
1002 } 956 }
1003 957
1004 /* Let the USB core know _both_ roothubs lost power. */ 958 /* Let the USB core know _both_ roothubs lost power. */
@@ -1011,12 +965,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1011 spin_unlock_irq(&xhci->lock); 965 spin_unlock_irq(&xhci->lock);
1012 xhci_cleanup_msix(xhci); 966 xhci_cleanup_msix(xhci);
1013 967
1014#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1015 /* Tell the event ring poll function not to reschedule */
1016 xhci->zombie = 1;
1017 del_timer_sync(&xhci->event_ring_timer);
1018#endif
1019
1020 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); 968 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1021 temp = xhci_readl(xhci, &xhci->op_regs->status); 969 temp = xhci_readl(xhci, &xhci->op_regs->status);
1022 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); 970 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -1170,27 +1118,25 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1170 struct xhci_virt_device *virt_dev; 1118 struct xhci_virt_device *virt_dev;
1171 1119
1172 if (!hcd || (check_ep && !ep) || !udev) { 1120 if (!hcd || (check_ep && !ep) || !udev) {
1173 printk(KERN_DEBUG "xHCI %s called with invalid args\n", 1121 pr_debug("xHCI %s called with invalid args\n", func);
1174 func);
1175 return -EINVAL; 1122 return -EINVAL;
1176 } 1123 }
1177 if (!udev->parent) { 1124 if (!udev->parent) {
1178 printk(KERN_DEBUG "xHCI %s called for root hub\n", 1125 pr_debug("xHCI %s called for root hub\n", func);
1179 func);
1180 return 0; 1126 return 0;
1181 } 1127 }
1182 1128
1183 xhci = hcd_to_xhci(hcd); 1129 xhci = hcd_to_xhci(hcd);
1184 if (check_virt_dev) { 1130 if (check_virt_dev) {
1185 if (!udev->slot_id || !xhci->devs[udev->slot_id]) { 1131 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1186 printk(KERN_DEBUG "xHCI %s called with unaddressed " 1132 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1187 "device\n", func); 1133 func);
1188 return -EINVAL; 1134 return -EINVAL;
1189 } 1135 }
1190 1136
1191 virt_dev = xhci->devs[udev->slot_id]; 1137 virt_dev = xhci->devs[udev->slot_id];
1192 if (virt_dev->udev != udev) { 1138 if (virt_dev->udev != udev) {
1193 printk(KERN_DEBUG "xHCI %s called with udev and " 1139 xhci_dbg(xhci, "xHCI %s called with udev and "
1194 "virt_dev does not match\n", func); 1140 "virt_dev does not match\n", func);
1195 return -EINVAL; 1141 return -EINVAL;
1196 } 1142 }
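
The printk(KERN_DEBUG ...) calls above are replaced rather than merely reformatted: pr_debug() and dev_dbg() compile to nothing unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled, in which case each call site can be switched on and off at runtime, and dev_dbg()/xhci_dbg() additionally prefix the message with the device name. A sketch contrasting the three forms; the function and message text are illustrative only:

#include <linux/device.h>
#include <linux/printk.h>

static void example_dbg(struct device *dev)
{
	/* old style: always compiled in, logged at KERN_DEBUG level */
	printk(KERN_DEBUG "always present in the kernel image\n");

	/* compiled out, or controllable per call site via dynamic debug */
	pr_debug("compiled out or runtime-controlled\n");

	/* same semantics as pr_debug(), plus driver/device prefix */
	dev_dbg(dev, "with device context\n");
}
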
@@ -1228,12 +1174,16 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1228 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); 1174 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1229 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); 1175 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1230 if (hw_max_packet_size != max_packet_size) { 1176 if (hw_max_packet_size != max_packet_size) {
1231 xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); 1177 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1232 xhci_dbg(xhci, "Max packet size in usb_device = %d\n", 1178 "Max Packet Size for ep 0 changed.");
1179 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1180 "Max packet size in usb_device = %d",
1233 max_packet_size); 1181 max_packet_size);
1234 xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n", 1182 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1183 "Max packet size in xHCI HW = %d",
1235 hw_max_packet_size); 1184 hw_max_packet_size);
1236 xhci_dbg(xhci, "Issuing evaluate context command.\n"); 1185 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1186 "Issuing evaluate context command.");
1237 1187
1238 /* Set up the input context flags for the command */ 1188 /* Set up the input context flags for the command */
1239 /* FIXME: This won't work if a non-default control endpoint 1189 /* FIXME: This won't work if a non-default control endpoint
@@ -1498,7 +1448,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1498 goto done; 1448 goto done;
1499 temp = xhci_readl(xhci, &xhci->op_regs->status); 1449 temp = xhci_readl(xhci, &xhci->op_regs->status);
1500 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1450 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1501 xhci_dbg(xhci, "HW died, freeing TD.\n"); 1451 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1452 "HW died, freeing TD.");
1502 urb_priv = urb->hcpriv; 1453 urb_priv = urb->hcpriv;
1503 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { 1454 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1504 td = urb_priv->td[i]; 1455 td = urb_priv->td[i];
@@ -1516,8 +1467,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1516 } 1467 }
1517 if ((xhci->xhc_state & XHCI_STATE_DYING) || 1468 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1518 (xhci->xhc_state & XHCI_STATE_HALTED)) { 1469 (xhci->xhc_state & XHCI_STATE_HALTED)) {
1519 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " 1470 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1520 "non-responsive xHCI host.\n", 1471 "Ep 0x%x: URB %p to be canceled on "
1472 "non-responsive xHCI host.",
1521 urb->ep->desc.bEndpointAddress, urb); 1473 urb->ep->desc.bEndpointAddress, urb);
1522 /* Let the stop endpoint command watchdog timer (which set this 1474 /* Let the stop endpoint command watchdog timer (which set this
1523 * state) finish cleaning up the endpoint TD lists. We must 1475 * state) finish cleaning up the endpoint TD lists. We must
@@ -1538,8 +1490,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1538 urb_priv = urb->hcpriv; 1490 urb_priv = urb->hcpriv;
1539 i = urb_priv->td_cnt; 1491 i = urb_priv->td_cnt;
1540 if (i < urb_priv->length) 1492 if (i < urb_priv->length)
1541 xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, " 1493 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1542 "starting at offset 0x%llx\n", 1494 "Cancel URB %p, dev %s, ep 0x%x, "
1495 "starting at offset 0x%llx",
1543 urb, urb->dev->devpath, 1496 urb, urb->dev->devpath,
1544 urb->ep->desc.bEndpointAddress, 1497 urb->ep->desc.bEndpointAddress,
1545 (unsigned long long) xhci_trb_virt_to_dma( 1498 (unsigned long long) xhci_trb_virt_to_dma(
@@ -1851,7 +1804,8 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1851 ret = -ENODEV; 1804 ret = -ENODEV;
1852 break; 1805 break;
1853 case COMP_SUCCESS: 1806 case COMP_SUCCESS:
1854 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); 1807 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1808 "Successful Endpoint Configure command");
1855 ret = 0; 1809 ret = 0;
1856 break; 1810 break;
1857 default: 1811 default:
@@ -1897,7 +1851,8 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1897 ret = -EINVAL; 1851 ret = -EINVAL;
1898 break; 1852 break;
1899 case COMP_SUCCESS: 1853 case COMP_SUCCESS:
1900 dev_dbg(&udev->dev, "Successful evaluate context command\n"); 1854 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1855 "Successful evaluate context command");
1901 ret = 0; 1856 ret = 0;
1902 break; 1857 break;
1903 default: 1858 default:
@@ -1963,14 +1918,16 @@ static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1963 1918
1964 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); 1919 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1965 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { 1920 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1966 xhci_dbg(xhci, "Not enough ep ctxs: " 1921 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1967 "%u active, need to add %u, limit is %u.\n", 1922 "Not enough ep ctxs: "
1923 "%u active, need to add %u, limit is %u.",
1968 xhci->num_active_eps, added_eps, 1924 xhci->num_active_eps, added_eps,
1969 xhci->limit_active_eps); 1925 xhci->limit_active_eps);
1970 return -ENOMEM; 1926 return -ENOMEM;
1971 } 1927 }
1972 xhci->num_active_eps += added_eps; 1928 xhci->num_active_eps += added_eps;
1973 xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, 1929 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1930 "Adding %u ep ctxs, %u now active.", added_eps,
1974 xhci->num_active_eps); 1931 xhci->num_active_eps);
1975 return 0; 1932 return 0;
1976} 1933}
@@ -1988,7 +1945,8 @@ static void xhci_free_host_resources(struct xhci_hcd *xhci,
1988 1945
1989 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); 1946 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1990 xhci->num_active_eps -= num_failed_eps; 1947 xhci->num_active_eps -= num_failed_eps;
1991 xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", 1948 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1949 "Removing %u failed ep ctxs, %u now active.",
1992 num_failed_eps, 1950 num_failed_eps,
1993 xhci->num_active_eps); 1951 xhci->num_active_eps);
1994} 1952}
@@ -2007,7 +1965,8 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2007 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); 1965 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2008 xhci->num_active_eps -= num_dropped_eps; 1966 xhci->num_active_eps -= num_dropped_eps;
2009 if (num_dropped_eps) 1967 if (num_dropped_eps)
2010 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", 1968 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1969 "Removing %u dropped ep ctxs, %u now active.",
2011 num_dropped_eps, 1970 num_dropped_eps,
2012 xhci->num_active_eps); 1971 xhci->num_active_eps);
2013} 1972}
@@ -2168,18 +2127,21 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
2168 * that the HS bus has enough bandwidth if we are activating a new TT. 2127 * that the HS bus has enough bandwidth if we are activating a new TT.
2169 */ 2128 */
2170 if (virt_dev->tt_info) { 2129 if (virt_dev->tt_info) {
2171 xhci_dbg(xhci, "Recalculating BW for rootport %u\n", 2130 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2131 "Recalculating BW for rootport %u",
2172 virt_dev->real_port); 2132 virt_dev->real_port);
2173 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { 2133 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2174 xhci_warn(xhci, "Not enough bandwidth on HS bus for " 2134 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2175 "newly activated TT.\n"); 2135 "newly activated TT.\n");
2176 return -ENOMEM; 2136 return -ENOMEM;
2177 } 2137 }
2178 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n", 2138 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2139 "Recalculating BW for TT slot %u port %u",
2179 virt_dev->tt_info->slot_id, 2140 virt_dev->tt_info->slot_id,
2180 virt_dev->tt_info->ttport); 2141 virt_dev->tt_info->ttport);
2181 } else { 2142 } else {
2182 xhci_dbg(xhci, "Recalculating BW for rootport %u\n", 2143 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2144 "Recalculating BW for rootport %u",
2183 virt_dev->real_port); 2145 virt_dev->real_port);
2184 } 2146 }
2185 2147
@@ -2287,8 +2249,9 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
2287 xhci->rh_bw[port_index].num_active_tts; 2249 xhci->rh_bw[port_index].num_active_tts;
2288 } 2250 }
2289 2251
2290 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2252 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2291 "Available: %u " "percent\n", 2253 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2254 "Available: %u " "percent",
2292 bw_used, max_bandwidth, bw_reserved, 2255 bw_used, max_bandwidth, bw_reserved,
2293 (max_bandwidth - bw_used - bw_reserved) * 100 / 2256 (max_bandwidth - bw_used - bw_reserved) * 100 /
2294 max_bandwidth); 2257 max_bandwidth);
@@ -2658,7 +2621,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2658 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2621 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2659 xhci_free_host_resources(xhci, ctrl_ctx); 2622 xhci_free_host_resources(xhci, ctrl_ctx);
2660 spin_unlock_irqrestore(&xhci->lock, flags); 2623 spin_unlock_irqrestore(&xhci->lock, flags);
2661 xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); 2624 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2625 "FIXME allocate a new ring segment");
2662 return -ENOMEM; 2626 return -ENOMEM;
2663 } 2627 }
2664 xhci_ring_cmd_db(xhci); 2628 xhci_ring_cmd_db(xhci);
@@ -2871,7 +2835,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2871 struct xhci_dequeue_state deq_state; 2835 struct xhci_dequeue_state deq_state;
2872 struct xhci_virt_ep *ep; 2836 struct xhci_virt_ep *ep;
2873 2837
2874 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); 2838 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2839 "Cleaning up stalled endpoint ring");
2875 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2840 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2876 /* We need to move the HW's dequeue pointer past this TD, 2841 /* We need to move the HW's dequeue pointer past this TD,
2877 * or it will attempt to resend it on the next doorbell ring. 2842 * or it will attempt to resend it on the next doorbell ring.
@@ -2884,7 +2849,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2884 * issue a configure endpoint command later. 2849 * issue a configure endpoint command later.
2885 */ 2850 */
2886 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 2851 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2887 xhci_dbg(xhci, "Queueing new dequeue state\n"); 2852 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2853 "Queueing new dequeue state");
2888 xhci_queue_new_dequeue_state(xhci, udev->slot_id, 2854 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2889 ep_index, ep->stopped_stream, &deq_state); 2855 ep_index, ep->stopped_stream, &deq_state);
2890 } else { 2856 } else {
@@ -2893,8 +2859,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2893 * XXX: No idea how this hardware will react when stream rings 2859 * XXX: No idea how this hardware will react when stream rings
2894 * are enabled. 2860 * are enabled.
2895 */ 2861 */
2896 xhci_dbg(xhci, "Setting up input context for " 2862 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2897 "configure endpoint command\n"); 2863 "Setting up input context for "
2864 "configure endpoint command");
2898 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, 2865 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2899 ep_index, &deq_state); 2866 ep_index, &deq_state);
2900 } 2867 }
@@ -2926,16 +2893,19 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
2926 ep_index = xhci_get_endpoint_index(&ep->desc); 2893 ep_index = xhci_get_endpoint_index(&ep->desc);
2927 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2894 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2928 if (!virt_ep->stopped_td) { 2895 if (!virt_ep->stopped_td) {
2929 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", 2896 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2930 ep->desc.bEndpointAddress); 2897 "Endpoint 0x%x not halted, refusing to reset.",
2898 ep->desc.bEndpointAddress);
2931 return; 2899 return;
2932 } 2900 }
2933 if (usb_endpoint_xfer_control(&ep->desc)) { 2901 if (usb_endpoint_xfer_control(&ep->desc)) {
2934 xhci_dbg(xhci, "Control endpoint stall already handled.\n"); 2902 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2903 "Control endpoint stall already handled.");
2935 return; 2904 return;
2936 } 2905 }
2937 2906
2938 xhci_dbg(xhci, "Queueing reset endpoint command\n"); 2907 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2908 "Queueing reset endpoint command");
2939 spin_lock_irqsave(&xhci->lock, flags); 2909 spin_lock_irqsave(&xhci->lock, flags);
2940 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); 2910 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
2941 /* 2911 /*
@@ -3373,8 +3343,9 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3373 } 3343 }
3374 xhci->num_active_eps -= num_dropped_eps; 3344 xhci->num_active_eps -= num_dropped_eps;
3375 if (num_dropped_eps) 3345 if (num_dropped_eps)
3376 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " 3346 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3377 "%u now active.\n", 3347 "Dropped %u ep ctxs, flags = 0x%x, "
3348 "%u now active.",
3378 num_dropped_eps, drop_flags, 3349 num_dropped_eps, drop_flags,
3379 xhci->num_active_eps); 3350 xhci->num_active_eps);
3380} 3351}
@@ -3508,10 +3479,10 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3508 switch (ret) { 3479 switch (ret) {
3509 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ 3480 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3510 case COMP_CTX_STATE: /* 0.96 completion code for same thing */ 3481 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3511 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", 3482 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3512 slot_id, 3483 slot_id,
3513 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 3484 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3514 xhci_info(xhci, "Not freeing device rings.\n"); 3485 xhci_dbg(xhci, "Not freeing device rings.\n");
3515 /* Don't treat this as an error. May change my mind later. */ 3486 /* Don't treat this as an error. May change my mind later. */
3516 ret = 0; 3487 ret = 0;
3517 goto command_cleanup; 3488 goto command_cleanup;
@@ -3636,13 +3607,15 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3636static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 3607static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3637{ 3608{
3638 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 3609 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3639 xhci_dbg(xhci, "Not enough ep ctxs: " 3610 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3640 "%u active, need to add 1, limit is %u.\n", 3611 "Not enough ep ctxs: "
3612 "%u active, need to add 1, limit is %u.",
3641 xhci->num_active_eps, xhci->limit_active_eps); 3613 xhci->num_active_eps, xhci->limit_active_eps);
3642 return -ENOMEM; 3614 return -ENOMEM;
3643 } 3615 }
3644 xhci->num_active_eps += 1; 3616 xhci->num_active_eps += 1;
3645 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", 3617 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3618 "Adding 1 ep ctx, %u now active.",
3646 xhci->num_active_eps); 3619 xhci->num_active_eps);
3647 return 0; 3620 return 0;
3648} 3621}
@@ -3742,7 +3715,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3742 union xhci_trb *cmd_trb; 3715 union xhci_trb *cmd_trb;
3743 3716
3744 if (!udev->slot_id) { 3717 if (!udev->slot_id) {
3745 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); 3718 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3719 "Bad Slot ID %d", udev->slot_id);
3746 return -EINVAL; 3720 return -EINVAL;
3747 } 3721 }
3748 3722
@@ -3781,6 +3755,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3781 3755
3782 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3756 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3783 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3757 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3758 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3759 slot_ctx->dev_info >> 27);
3784 3760
3785 spin_lock_irqsave(&xhci->lock, flags); 3761 spin_lock_irqsave(&xhci->lock, flags);
3786 cmd_trb = xhci->cmd_ring->dequeue; 3762 cmd_trb = xhci->cmd_ring->dequeue;
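
The new trace_xhci_address_ctx() calls pass slot_ctx->dev_info >> 27 as their final argument: in the xHCI specification, bits 31:27 of the slot context's first dword hold the Context Entries field, i.e. the index of the last valid endpoint context, which tells the trace consumer how much of the device context is worth dumping. A hypothetical helper spelling that out, with the byte-order conversion written explicitly:

/* Hypothetical helper, not from the driver: extract the slot-context
 * "Context Entries" field (dword 0, bits 31:27). */
static inline unsigned int example_ctx_entries(__le32 dev_info)
{
	return (le32_to_cpu(dev_info) >> 27) & 0x1f;
}
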
@@ -3788,7 +3764,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3788 udev->slot_id); 3764 udev->slot_id);
3789 if (ret) { 3765 if (ret) {
3790 spin_unlock_irqrestore(&xhci->lock, flags); 3766 spin_unlock_irqrestore(&xhci->lock, flags);
3791 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3767 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3768 "FIXME: allocate a command ring segment");
3792 return ret; 3769 return ret;
3793 } 3770 }
3794 xhci_ring_cmd_db(xhci); 3771 xhci_ring_cmd_db(xhci);
@@ -3828,13 +3805,15 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3828 ret = -ENODEV; 3805 ret = -ENODEV;
3829 break; 3806 break;
3830 case COMP_SUCCESS: 3807 case COMP_SUCCESS:
3831 xhci_dbg(xhci, "Successful Address Device command\n"); 3808 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3809 "Successful Address Device command");
3832 break; 3810 break;
3833 default: 3811 default:
3834 xhci_err(xhci, "ERROR: unexpected command completion " 3812 xhci_err(xhci, "ERROR: unexpected command completion "
3835 "code 0x%x.\n", virt_dev->cmd_status); 3813 "code 0x%x.\n", virt_dev->cmd_status);
3836 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3814 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3837 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3815 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3816 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
3838 ret = -EINVAL; 3817 ret = -EINVAL;
3839 break; 3818 break;
3840 } 3819 }
@@ -3842,16 +3821,21 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3842 return ret; 3821 return ret;
3843 } 3822 }
3844 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 3823 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3845 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); 3824 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3846 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", 3825 "Op regs DCBAA ptr = %#016llx", temp_64);
3847 udev->slot_id, 3826 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3848 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 3827 "Slot ID %d dcbaa entry @%p = %#016llx",
3849 (unsigned long long) 3828 udev->slot_id,
3850 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); 3829 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3851 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 3830 (unsigned long long)
3831 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3832 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3833 "Output Context DMA address = %#08llx",
3852 (unsigned long long)virt_dev->out_ctx->dma); 3834 (unsigned long long)virt_dev->out_ctx->dma);
3853 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3835 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3854 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3836 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3837 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
3838 slot_ctx->dev_info >> 27);
3855 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3839 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3856 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3840 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3857 /* 3841 /*
@@ -3859,6 +3843,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3859 * address given back to us by the HC. 3843 * address given back to us by the HC.
3860 */ 3844 */
3861 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3845 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3846 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
3847 slot_ctx->dev_info >> 27);
3862 /* Use kernel assigned address for devices; store xHC assigned 3848 /* Use kernel assigned address for devices; store xHC assigned
3863 * address locally. */ 3849 * address locally. */
3864 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) 3850 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
@@ -3867,7 +3853,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3867 ctrl_ctx->add_flags = 0; 3853 ctrl_ctx->add_flags = 0;
3868 ctrl_ctx->drop_flags = 0; 3854 ctrl_ctx->drop_flags = 0;
3869 3855
3870 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address); 3856 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3857 "Internal device address = %d", virt_dev->address);
3871 3858
3872 return 0; 3859 return 0;
3873} 3860}
@@ -3933,7 +3920,8 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
3933 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); 3920 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
3934 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); 3921 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
3935 3922
3936 xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n"); 3923 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
3924 "Set up evaluate context for LPM MEL change.");
3937 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); 3925 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
3938 xhci_dbg_ctx(xhci, command->in_ctx, 0); 3926 xhci_dbg_ctx(xhci, command->in_ctx, 0);
3939 3927
@@ -4837,7 +4825,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4837 struct xhci_hcd *xhci; 4825 struct xhci_hcd *xhci;
4838 struct device *dev = hcd->self.controller; 4826 struct device *dev = hcd->self.controller;
4839 int retval; 4827 int retval;
4840 u32 temp;
4841 4828
4842 /* Accept arbitrarily long scatter-gather lists */ 4829 /* Accept arbitrarily long scatter-gather lists */
4843 hcd->self.sg_tablesize = ~0; 4830 hcd->self.sg_tablesize = ~0;
@@ -4869,14 +4856,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4869 /* xHCI private pointer was set in xhci_pci_probe for the second 4856 /* xHCI private pointer was set in xhci_pci_probe for the second
4870 * registered roothub. 4857 * registered roothub.
4871 */ 4858 */
4872 xhci = hcd_to_xhci(hcd);
4873 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4874 if (HCC_64BIT_ADDR(temp)) {
4875 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4876 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4877 } else {
4878 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4879 }
4880 return 0; 4859 return 0;
4881 } 4860 }
4882 4861
@@ -4915,12 +4894,12 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4915 goto error; 4894 goto error;
4916 xhci_dbg(xhci, "Reset complete\n"); 4895 xhci_dbg(xhci, "Reset complete\n");
4917 4896
4918 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 4897 /* Set dma_mask and coherent_dma_mask to 64-bits,
4919 if (HCC_64BIT_ADDR(temp)) { 4898 * if xHC supports 64-bit addressing */
4899 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
4900 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
4920 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 4901 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4921 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); 4902 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
4922 } else {
4923 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4924 } 4903 }
4925 4904
4926 xhci_dbg(xhci, "Calling HCD init\n"); 4905 xhci_dbg(xhci, "Calling HCD init\n");
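
The rewritten block above changes behavior, not just logging: dma_set_mask() is now probed instead of assumed to succeed, since a platform may reject a 64-bit mask even when the controller advertises HCC_64BIT_ADDR, and the coherent mask is widened only when the streaming mask took. On failure the device simply keeps its default 32-bit masks. A generic sketch of the pattern, with hypothetical names:

#include <linux/dma-mapping.h>

static void example_set_dma_masks(struct device *dev, bool hw_is_64bit)
{
	/* dma_set_mask() fails if the platform cannot address 64 bits,
	 * so its return value gates widening the coherent mask too. */
	if (hw_is_64bit && !dma_set_mask(dev, DMA_BIT_MASK(64)))
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	/* otherwise the default 32-bit masks remain in effect */
}
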
@@ -4945,12 +4924,12 @@ static int __init xhci_hcd_init(void)
4945 4924
4946 retval = xhci_register_pci(); 4925 retval = xhci_register_pci();
4947 if (retval < 0) { 4926 if (retval < 0) {
4948 printk(KERN_DEBUG "Problem registering PCI driver."); 4927 pr_debug("Problem registering PCI driver.\n");
4949 return retval; 4928 return retval;
4950 } 4929 }
4951 retval = xhci_register_plat(); 4930 retval = xhci_register_plat();
4952 if (retval < 0) { 4931 if (retval < 0) {
4953 printk(KERN_DEBUG "Problem registering platform driver."); 4932 pr_debug("Problem registering platform driver.\n");
4954 goto unreg_pci; 4933 goto unreg_pci;
4955 } 4934 }
4956 /* 4935 /*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index c338741a675d..d2045916531b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1490,11 +1490,6 @@ struct xhci_hcd {
1490 struct dma_pool *small_streams_pool; 1490 struct dma_pool *small_streams_pool;
1491 struct dma_pool *medium_streams_pool; 1491 struct dma_pool *medium_streams_pool;
1492 1492
1493#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1494 /* Poll the rings - for debugging */
1495 struct timer_list event_ring_timer;
1496 int zombie;
1497#endif
1498 /* Host controller watchdog timer structures */ 1493 /* Host controller watchdog timer structures */
1499 unsigned int xhc_state; 1494 unsigned int xhc_state;
1500 1495
@@ -1579,16 +1574,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
1579 return xhci->main_hcd; 1574 return xhci->main_hcd;
1580} 1575}
1581 1576
1582#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1583#define XHCI_DEBUG 1
1584#else
1585#define XHCI_DEBUG 0
1586#endif
1587
1588#define xhci_dbg(xhci, fmt, args...) \ 1577#define xhci_dbg(xhci, fmt, args...) \
1589 do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0) 1578 dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1590#define xhci_info(xhci, fmt, args...) \
1591 do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
1592#define xhci_err(xhci, fmt, args...) \ 1579#define xhci_err(xhci, fmt, args...) \
1593 dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args) 1580 dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1594#define xhci_warn(xhci, fmt, args...) \ 1581#define xhci_warn(xhci, fmt, args...) \
@@ -1660,6 +1647,8 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
1660void xhci_dbg_ep_rings(struct xhci_hcd *xhci, 1647void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
1661 unsigned int slot_id, unsigned int ep_index, 1648 unsigned int slot_id, unsigned int ep_index,
1662 struct xhci_virt_ep *ep); 1649 struct xhci_virt_ep *ep);
1650void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
1651 const char *fmt, ...);
1663 1652
1664/* xHCI memory management */ 1653/* xHCI memory management */
1665void xhci_mem_cleanup(struct xhci_hcd *xhci); 1654void xhci_mem_cleanup(struct xhci_hcd *xhci);
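
The xhci_dbg_trace() declaration added above is the single entry point that every conversion in this patch funnels through. A hypothetical call site, to show the shape: the second argument selects the trace event, and, as every converted message in this diff shows, the format string carries no trailing newline:

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"example message, slot %u", slot_id);	/* slot_id is a stand-in */
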