about summary refs log tree commit diff stats
path: root/drivers/usb/host/xhci-mem.c
diff options
context:
space:
mode:
author: Xenia Ragiadakou <burzalodowa@gmail.com> 2013-08-13 23:33:55 -0400
committer: Sarah Sharp <sarah.a.sharp@linux.intel.com> 2013-08-14 00:14:43 -0400
commit: d195fcffe4c82cc813cc43df5f27ab99ab38bc07 (patch)
tree: fab955664be3befccf0444f87148b05d5eac8931 /drivers/usb/host/xhci-mem.c
parent: aa50b29061d3df896c494d92e9c8c2e1f295cc6e (diff)
xhci: trace debug messages related to driver initialization and unload
This patch defines a new trace event, which is called xhci_dbg_init and belongs to the event class xhci_log_msg, and adds tracepoints that trace the debug statements in the functions used to start and stop the xhci-hcd driver. Also, it removes an unnecessary cast of variable val to unsigned int in xhci_mem_init(), since val is already declared as unsigned int. Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com> Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r-- drivers/usb/host/xhci-mem.c | 110
1 file changed, 68 insertions(+), 42 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b1bb59b58b25..ef27470b99e3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1545,7 +1545,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1545 struct device *dev = xhci_to_hcd(xhci)->self.controller; 1545 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1546 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); 1546 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1547 1547
1548 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); 1548 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1549 "Allocating %d scratchpad buffers", num_sp);
1549 1550
1550 if (!num_sp) 1551 if (!num_sp)
1551 return 0; 1552 return 0;
@@ -1702,11 +1703,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1702 dma_free_coherent(&pdev->dev, size, 1703 dma_free_coherent(&pdev->dev, size,
1703 xhci->erst.entries, xhci->erst.erst_dma_addr); 1704 xhci->erst.entries, xhci->erst.erst_dma_addr);
1704 xhci->erst.entries = NULL; 1705 xhci->erst.entries = NULL;
1705 xhci_dbg(xhci, "Freed ERST\n"); 1706 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
1706 if (xhci->event_ring) 1707 if (xhci->event_ring)
1707 xhci_ring_free(xhci, xhci->event_ring); 1708 xhci_ring_free(xhci, xhci->event_ring);
1708 xhci->event_ring = NULL; 1709 xhci->event_ring = NULL;
1709 xhci_dbg(xhci, "Freed event ring\n"); 1710 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1710 1711
1711 if (xhci->lpm_command) 1712 if (xhci->lpm_command)
1712 xhci_free_command(xhci, xhci->lpm_command); 1713 xhci_free_command(xhci, xhci->lpm_command);
@@ -1714,7 +1715,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1714 if (xhci->cmd_ring) 1715 if (xhci->cmd_ring)
1715 xhci_ring_free(xhci, xhci->cmd_ring); 1716 xhci_ring_free(xhci, xhci->cmd_ring);
1716 xhci->cmd_ring = NULL; 1717 xhci->cmd_ring = NULL;
1717 xhci_dbg(xhci, "Freed command ring\n"); 1718 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1718 list_for_each_entry_safe(cur_cd, next_cd, 1719 list_for_each_entry_safe(cur_cd, next_cd,
1719 &xhci->cancel_cmd_list, cancel_cmd_list) { 1720 &xhci->cancel_cmd_list, cancel_cmd_list) {
1720 list_del(&cur_cd->cancel_cmd_list); 1721 list_del(&cur_cd->cancel_cmd_list);
@@ -1727,22 +1728,24 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1727 if (xhci->segment_pool) 1728 if (xhci->segment_pool)
1728 dma_pool_destroy(xhci->segment_pool); 1729 dma_pool_destroy(xhci->segment_pool);
1729 xhci->segment_pool = NULL; 1730 xhci->segment_pool = NULL;
1730 xhci_dbg(xhci, "Freed segment pool\n"); 1731 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1731 1732
1732 if (xhci->device_pool) 1733 if (xhci->device_pool)
1733 dma_pool_destroy(xhci->device_pool); 1734 dma_pool_destroy(xhci->device_pool);
1734 xhci->device_pool = NULL; 1735 xhci->device_pool = NULL;
1735 xhci_dbg(xhci, "Freed device context pool\n"); 1736 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1736 1737
1737 if (xhci->small_streams_pool) 1738 if (xhci->small_streams_pool)
1738 dma_pool_destroy(xhci->small_streams_pool); 1739 dma_pool_destroy(xhci->small_streams_pool);
1739 xhci->small_streams_pool = NULL; 1740 xhci->small_streams_pool = NULL;
1740 xhci_dbg(xhci, "Freed small stream array pool\n"); 1741 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1742 "Freed small stream array pool");
1741 1743
1742 if (xhci->medium_streams_pool) 1744 if (xhci->medium_streams_pool)
1743 dma_pool_destroy(xhci->medium_streams_pool); 1745 dma_pool_destroy(xhci->medium_streams_pool);
1744 xhci->medium_streams_pool = NULL; 1746 xhci->medium_streams_pool = NULL;
1745 xhci_dbg(xhci, "Freed medium stream array pool\n"); 1747 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1748 "Freed medium stream array pool");
1746 1749
1747 if (xhci->dcbaa) 1750 if (xhci->dcbaa)
1748 dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa), 1751 dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
@@ -1968,8 +1971,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
1968 * there might be more events to service. 1971 * there might be more events to service.
1969 */ 1972 */
1970 temp &= ~ERST_EHB; 1973 temp &= ~ERST_EHB;
1971 xhci_dbg(xhci, "// Write event ring dequeue pointer, " 1974 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1972 "preserving EHB bit\n"); 1975 "// Write event ring dequeue pointer, "
1976 "preserving EHB bit");
1973 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, 1977 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
1974 &xhci->ir_set->erst_dequeue); 1978 &xhci->ir_set->erst_dequeue);
1975} 1979}
@@ -1992,8 +1996,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
1992 temp = xhci_readl(xhci, addr + 2); 1996 temp = xhci_readl(xhci, addr + 2);
1993 port_offset = XHCI_EXT_PORT_OFF(temp); 1997 port_offset = XHCI_EXT_PORT_OFF(temp);
1994 port_count = XHCI_EXT_PORT_COUNT(temp); 1998 port_count = XHCI_EXT_PORT_COUNT(temp);
1995 xhci_dbg(xhci, "Ext Cap %p, port offset = %u, " 1999 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1996 "count = %u, revision = 0x%x\n", 2000 "Ext Cap %p, port offset = %u, "
2001 "count = %u, revision = 0x%x",
1997 addr, port_offset, port_count, major_revision); 2002 addr, port_offset, port_count, major_revision);
1998 /* Port count includes the current port offset */ 2003 /* Port count includes the current port offset */
1999 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) 2004 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
@@ -2007,15 +2012,18 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2007 /* Check the host's USB2 LPM capability */ 2012 /* Check the host's USB2 LPM capability */
2008 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) && 2013 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
2009 (temp & XHCI_L1C)) { 2014 (temp & XHCI_L1C)) {
2010 xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n"); 2015 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2016 "xHCI 0.96: support USB2 software lpm");
2011 xhci->sw_lpm_support = 1; 2017 xhci->sw_lpm_support = 1;
2012 } 2018 }
2013 2019
2014 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) { 2020 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
2015 xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n"); 2021 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2022 "xHCI 1.0: support USB2 software lpm");
2016 xhci->sw_lpm_support = 1; 2023 xhci->sw_lpm_support = 1;
2017 if (temp & XHCI_HLC) { 2024 if (temp & XHCI_HLC) {
2018 xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n"); 2025 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2026 "xHCI 1.0: support USB2 hardware lpm");
2019 xhci->hw_lpm_support = 1; 2027 xhci->hw_lpm_support = 1;
2020 } 2028 }
2021 } 2029 }
@@ -2139,18 +2147,21 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2139 xhci_warn(xhci, "No ports on the roothubs?\n"); 2147 xhci_warn(xhci, "No ports on the roothubs?\n");
2140 return -ENODEV; 2148 return -ENODEV;
2141 } 2149 }
2142 xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n", 2150 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2151 "Found %u USB 2.0 ports and %u USB 3.0 ports.",
2143 xhci->num_usb2_ports, xhci->num_usb3_ports); 2152 xhci->num_usb2_ports, xhci->num_usb3_ports);
2144 2153
2145 /* Place limits on the number of roothub ports so that the hub 2154 /* Place limits on the number of roothub ports so that the hub
2146 * descriptors aren't longer than the USB core will allocate. 2155 * descriptors aren't longer than the USB core will allocate.
2147 */ 2156 */
2148 if (xhci->num_usb3_ports > 15) { 2157 if (xhci->num_usb3_ports > 15) {
2149 xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n"); 2158 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2159 "Limiting USB 3.0 roothub ports to 15.");
2150 xhci->num_usb3_ports = 15; 2160 xhci->num_usb3_ports = 15;
2151 } 2161 }
2152 if (xhci->num_usb2_ports > USB_MAXCHILDREN) { 2162 if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2153 xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n", 2163 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2164 "Limiting USB 2.0 roothub ports to %u.",
2154 USB_MAXCHILDREN); 2165 USB_MAXCHILDREN);
2155 xhci->num_usb2_ports = USB_MAXCHILDREN; 2166 xhci->num_usb2_ports = USB_MAXCHILDREN;
2156 } 2167 }
@@ -2175,8 +2186,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2175 xhci->usb2_ports[port_index] = 2186 xhci->usb2_ports[port_index] =
2176 &xhci->op_regs->port_status_base + 2187 &xhci->op_regs->port_status_base +
2177 NUM_PORT_REGS*i; 2188 NUM_PORT_REGS*i;
2178 xhci_dbg(xhci, "USB 2.0 port at index %u, " 2189 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2179 "addr = %p\n", i, 2190 "USB 2.0 port at index %u, "
2191 "addr = %p", i,
2180 xhci->usb2_ports[port_index]); 2192 xhci->usb2_ports[port_index]);
2181 port_index++; 2193 port_index++;
2182 if (port_index == xhci->num_usb2_ports) 2194 if (port_index == xhci->num_usb2_ports)
@@ -2195,8 +2207,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2195 xhci->usb3_ports[port_index] = 2207 xhci->usb3_ports[port_index] =
2196 &xhci->op_regs->port_status_base + 2208 &xhci->op_regs->port_status_base +
2197 NUM_PORT_REGS*i; 2209 NUM_PORT_REGS*i;
2198 xhci_dbg(xhci, "USB 3.0 port at index %u, " 2210 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2199 "addr = %p\n", i, 2211 "USB 3.0 port at index %u, "
2212 "addr = %p", i,
2200 xhci->usb3_ports[port_index]); 2213 xhci->usb3_ports[port_index]);
2201 port_index++; 2214 port_index++;
2202 if (port_index == xhci->num_usb3_ports) 2215 if (port_index == xhci->num_usb3_ports)
@@ -2220,32 +2233,35 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2220 INIT_LIST_HEAD(&xhci->cancel_cmd_list); 2233 INIT_LIST_HEAD(&xhci->cancel_cmd_list);
2221 2234
2222 page_size = xhci_readl(xhci, &xhci->op_regs->page_size); 2235 page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
2223 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); 2236 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2237 "Supported page size register = 0x%x", page_size);
2224 for (i = 0; i < 16; i++) { 2238 for (i = 0; i < 16; i++) {
2225 if ((0x1 & page_size) != 0) 2239 if ((0x1 & page_size) != 0)
2226 break; 2240 break;
2227 page_size = page_size >> 1; 2241 page_size = page_size >> 1;
2228 } 2242 }
2229 if (i < 16) 2243 if (i < 16)
2230 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); 2244 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2245 "Supported page size of %iK", (1 << (i+12)) / 1024);
2231 else 2246 else
2232 xhci_warn(xhci, "WARN: no supported page size\n"); 2247 xhci_warn(xhci, "WARN: no supported page size\n");
2233 /* Use 4K pages, since that's common and the minimum the HC supports */ 2248 /* Use 4K pages, since that's common and the minimum the HC supports */
2234 xhci->page_shift = 12; 2249 xhci->page_shift = 12;
2235 xhci->page_size = 1 << xhci->page_shift; 2250 xhci->page_size = 1 << xhci->page_shift;
2236 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); 2251 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2252 "HCD page size set to %iK", xhci->page_size / 1024);
2237 2253
2238 /* 2254 /*
2239 * Program the Number of Device Slots Enabled field in the CONFIG 2255 * Program the Number of Device Slots Enabled field in the CONFIG
2240 * register with the max value of slots the HC can handle. 2256 * register with the max value of slots the HC can handle.
2241 */ 2257 */
2242 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); 2258 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
2243 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", 2259 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2244 (unsigned int) val); 2260 "// xHC can handle at most %d device slots.", val);
2245 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); 2261 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
2246 val |= (val2 & ~HCS_SLOTS_MASK); 2262 val |= (val2 & ~HCS_SLOTS_MASK);
2247 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", 2263 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2248 (unsigned int) val); 2264 "// Setting Max device slots reg = 0x%x.", val);
2249 xhci_writel(xhci, val, &xhci->op_regs->config_reg); 2265 xhci_writel(xhci, val, &xhci->op_regs->config_reg);
2250 2266
2251 /* 2267 /*
@@ -2258,7 +2274,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2258 goto fail; 2274 goto fail;
2259 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); 2275 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
2260 xhci->dcbaa->dma = dma; 2276 xhci->dcbaa->dma = dma;
2261 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 2277 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2278 "// Device context base array address = 0x%llx (DMA), %p (virt)",
2262 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 2279 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2263 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); 2280 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2264 2281
@@ -2297,8 +2314,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2297 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags); 2314 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
2298 if (!xhci->cmd_ring) 2315 if (!xhci->cmd_ring)
2299 goto fail; 2316 goto fail;
2300 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); 2317 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2301 xhci_dbg(xhci, "First segment DMA is 0x%llx\n", 2318 "Allocated command ring at %p", xhci->cmd_ring);
2319 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
2302 (unsigned long long)xhci->cmd_ring->first_seg->dma); 2320 (unsigned long long)xhci->cmd_ring->first_seg->dma);
2303 2321
2304 /* Set the address in the Command Ring Control register */ 2322 /* Set the address in the Command Ring Control register */
@@ -2306,7 +2324,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2306 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 2324 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2307 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 2325 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2308 xhci->cmd_ring->cycle_state; 2326 xhci->cmd_ring->cycle_state;
2309 xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); 2327 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2328 "// Setting command ring address to 0x%x", val);
2310 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); 2329 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2311 xhci_dbg_cmd_ptrs(xhci); 2330 xhci_dbg_cmd_ptrs(xhci);
2312 2331
@@ -2322,8 +2341,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2322 2341
2323 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 2342 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
2324 val &= DBOFF_MASK; 2343 val &= DBOFF_MASK;
2325 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" 2344 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2326 " from cap regs base addr\n", val); 2345 "// Doorbell array is located at offset 0x%x"
2346 " from cap regs base addr", val);
2327 xhci->dba = (void __iomem *) xhci->cap_regs + val; 2347 xhci->dba = (void __iomem *) xhci->cap_regs + val;
2328 xhci_dbg_regs(xhci); 2348 xhci_dbg_regs(xhci);
2329 xhci_print_run_regs(xhci); 2349 xhci_print_run_regs(xhci);
@@ -2334,7 +2354,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2334 * Event ring setup: Allocate a normal ring, but also setup 2354 * Event ring setup: Allocate a normal ring, but also setup
2335 * the event ring segment table (ERST). Section 4.9.3. 2355 * the event ring segment table (ERST). Section 4.9.3.
2336 */ 2356 */
2337 xhci_dbg(xhci, "// Allocating event ring\n"); 2357 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
2338 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 2358 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
2339 flags); 2359 flags);
2340 if (!xhci->event_ring) 2360 if (!xhci->event_ring)
@@ -2347,13 +2367,15 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2347 GFP_KERNEL); 2367 GFP_KERNEL);
2348 if (!xhci->erst.entries) 2368 if (!xhci->erst.entries)
2349 goto fail; 2369 goto fail;
2350 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", 2370 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2371 "// Allocated event ring segment table at 0x%llx",
2351 (unsigned long long)dma); 2372 (unsigned long long)dma);
2352 2373
2353 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); 2374 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
2354 xhci->erst.num_entries = ERST_NUM_SEGS; 2375 xhci->erst.num_entries = ERST_NUM_SEGS;
2355 xhci->erst.erst_dma_addr = dma; 2376 xhci->erst.erst_dma_addr = dma;
2356 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", 2377 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2378 "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
2357 xhci->erst.num_entries, 2379 xhci->erst.num_entries,
2358 xhci->erst.entries, 2380 xhci->erst.entries,
2359 (unsigned long long)xhci->erst.erst_dma_addr); 2381 (unsigned long long)xhci->erst.erst_dma_addr);
@@ -2371,13 +2393,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2371 val = xhci_readl(xhci, &xhci->ir_set->erst_size); 2393 val = xhci_readl(xhci, &xhci->ir_set->erst_size);
2372 val &= ERST_SIZE_MASK; 2394 val &= ERST_SIZE_MASK;
2373 val |= ERST_NUM_SEGS; 2395 val |= ERST_NUM_SEGS;
2374 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", 2396 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2397 "// Write ERST size = %i to ir_set 0 (some bits preserved)",
2375 val); 2398 val);
2376 xhci_writel(xhci, val, &xhci->ir_set->erst_size); 2399 xhci_writel(xhci, val, &xhci->ir_set->erst_size);
2377 2400
2378 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); 2401 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2402 "// Set ERST entries to point to event ring.");
2379 /* set the segment table base address */ 2403 /* set the segment table base address */
2380 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 2404 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2405 "// Set ERST base address for ir_set 0 = 0x%llx",
2381 (unsigned long long)xhci->erst.erst_dma_addr); 2406 (unsigned long long)xhci->erst.erst_dma_addr);
2382 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); 2407 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2383 val_64 &= ERST_PTR_MASK; 2408 val_64 &= ERST_PTR_MASK;
@@ -2386,7 +2411,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2386 2411
2387 /* Set the event ring dequeue address */ 2412 /* Set the event ring dequeue address */
2388 xhci_set_hc_event_deq(xhci); 2413 xhci_set_hc_event_deq(xhci);
2389 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); 2414 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2415 "Wrote ERST address to ir_set 0.");
2390 xhci_print_ir_set(xhci, 0); 2416 xhci_print_ir_set(xhci, 0);
2391 2417
2392 /* 2418 /*