author     Sarah Sharp <sarah.a.sharp@linux.intel.com>  2009-04-27 22:53:56 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>          2009-06-16 00:44:48 -0400
commit     7f84eef0dafb1d318263d8b71c38700aaf2d530d (patch)
tree       d7de1ac3d91fb206a5cec2e85b0ad7f4a7b78b21
parent     a74588f94655263b96dacbbf14aac0958d8b7409 (diff)
USB: xhci: No-op command queueing and irq handler.
xHCI host controllers can optionally implement a no-op test.  This simple
test ensures the OS has correctly set up all basic data structures and can
correctly respond to interrupts from the host controller hardware.

There are two rings exercised by the no-op test: the command ring and the
event ring.  The host controller driver writes a no-op command TRB to the
command ring, and rings the doorbell for the command ring (the first entry
in the doorbell array).  The hardware executes the command, places a command
completion event on the event ring, and fires an interrupt.  The host
controller driver sees the interrupt, checks the event ring for TRBs it can
process, and sees the command completion event.

(See the rules in xhci-ring.c for who "owns" a TRB.  This is a simplified
set of rules, and may not contain all the details that are in the xHCI 0.95
spec.)

A timer fires every 60 seconds to dump the state of the hardware and of the
command and event rings for debugging.  This timer only runs if
CONFIG_USB_XHCI_HCD_DEBUGGING is 'y'.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
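In outline, the flow exercised by the test looks roughly like the sketch
below.  This is a minimal sketch rather than verbatim driver code: it uses
only the helpers this patch adds (setup_one_noop(), ring_cmd_db(),
handle_event()), and omits locking and error handling.

	/* Queue one no-op command TRB; setup_one_noop() returns the
	 * doorbell routine to call once the controller is running,
	 * or NULL if the command could not be queued. */
	void (*doorbell)(struct xhci_hcd *) = NULL;

	if (NUM_TEST_NOOPS > 0)
		doorbell = setup_one_noop(xhci);

	/* ... set CMD_RUN so the host controller starts running ... */

	if (doorbell)
		(*doorbell)(xhci);	/* ring the command ring doorbell */

	/* The hardware executes the no-op, queues a command completion
	 * event, and interrupts.  xhci_irq() -> xhci_work() ->
	 * handle_event() then checks the event TRB's cycle bit against
	 * the ring cycle state to confirm ownership, bumps
	 * xhci->noops_handled, and advances both the software and
	 * hardware copies of the event ring dequeue pointer. */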
-rw-r--r--  drivers/usb/host/xhci-dbg.c   |  76
-rw-r--r--  drivers/usb/host/xhci-hcd.c   | 152
-rw-r--r--  drivers/usb/host/xhci-mem.c   |  11
-rw-r--r--  drivers/usb/host/xhci-pci.c   |   1
-rw-r--r--  drivers/usb/host/xhci-ring.c  | 367
-rw-r--r--  drivers/usb/host/xhci.h       |  53
6 files changed, 647 insertions(+), 13 deletions(-)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 5724683cef16..6dbf7d856f80 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -230,6 +230,64 @@ void xhci_print_registers(struct xhci_hcd *xhci)
 	xhci_print_op_regs(xhci);
 }
 
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+	int i;
+	for (i = 0; i < 4; ++i)
+		xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
+				i*4, trb->generic.field[i]);
+}
+
+/**
+ * Debug a transfer request block (TRB).
+ */
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+	u64 address;
+	u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+
+	switch (type) {
+	case TRB_TYPE(TRB_LINK):
+		xhci_dbg(xhci, "Link TRB:\n");
+		xhci_print_trb_offsets(xhci, trb);
+
+		address = trb->link.segment_ptr[0] +
+			(((u64) trb->link.segment_ptr[1]) << 32);
+		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
+
+		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
+				GET_INTR_TARGET(trb->link.intr_target));
+		xhci_dbg(xhci, "Cycle bit = %u\n",
+				(unsigned int) (trb->link.control & TRB_CYCLE));
+		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
+				(unsigned int) (trb->link.control & LINK_TOGGLE));
+		xhci_dbg(xhci, "No Snoop bit = %u\n",
+				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
+		break;
+	case TRB_TYPE(TRB_TRANSFER):
+		address = trb->trans_event.buffer[0] +
+			(((u64) trb->trans_event.buffer[1]) << 32);
+		/*
+		 * FIXME: look at flags to figure out if it's an address or if
+		 * the data is directly in the buffer field.
+		 */
+		xhci_dbg(xhci, "DMA address or buffer contents = %llu\n", address);
+		break;
+	case TRB_TYPE(TRB_COMPLETION):
+		address = trb->event_cmd.cmd_trb[0] +
+			(((u64) trb->event_cmd.cmd_trb[1]) << 32);
+		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
+		xhci_dbg(xhci, "Completion status = %u\n",
+				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
+		xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+		break;
+	default:
+		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
+				(unsigned int) type >> 10);
+		xhci_print_trb_offsets(xhci, trb);
+		break;
+	}
+}
 
 /**
  * Debug a segment with an xHCI ring.
@@ -261,6 +319,20 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	}
 }
 
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	xhci_dbg(xhci, "Ring deq = 0x%x (virt), 0x%x (dma)\n",
+			(unsigned int) ring->dequeue,
+			trb_virt_to_dma(ring->deq_seg, ring->dequeue));
+	xhci_dbg(xhci, "Ring deq updated %u times\n",
+			ring->deq_updates);
+	xhci_dbg(xhci, "Ring enq = 0x%x (virt), 0x%x (dma)\n",
+			(unsigned int) ring->enqueue,
+			trb_virt_to_dma(ring->enq_seg, ring->enqueue));
+	xhci_dbg(xhci, "Ring enq updated %u times\n",
+			ring->enq_updates);
+}
+
 /**
  * Debugging for an xHCI ring, which is a queue broken into multiple segments.
  *
@@ -277,6 +349,10 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
 	struct xhci_segment *first_seg = ring->first_seg;
 	xhci_debug_segment(xhci, first_seg);
 
+	if (!ring->enq_updates && !ring->deq_updates) {
+		xhci_dbg(xhci, "  Ring has not been updated\n");
+		return;
+	}
 	for (seg = first_seg->next; seg != first_seg; seg = seg->next)
 		xhci_debug_segment(xhci, seg);
 }
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 011f47810665..a99c119e9fd9 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -218,6 +218,120 @@ int xhci_init(struct usb_hcd *hcd)
 }
 
 /*
+ * Called in interrupt context when there might be work
+ * queued on the event ring
+ *
+ * xhci->lock must be held by caller.
+ */
+static void xhci_work(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	/*
+	 * Clear the op reg interrupt status first,
+	 * so we can receive interrupts from other MSI-X interrupters.
+	 * Write 1 to clear the interrupt status.
+	 */
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	temp |= STS_EINT;
+	xhci_writel(xhci, temp, &xhci->op_regs->status);
+	/* FIXME when MSI-X is supported and there are multiple vectors */
+	/* Clear the MSI-X event interrupt status */
+
+	/* Acknowledge the interrupt */
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	temp |= 0x3;
+	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
+	/* Flush posted writes */
+	xhci_readl(xhci, &xhci->ir_set->irq_pending);
+
+	/* FIXME this should be a delayed service routine that clears the EHB */
+	handle_event(xhci);
+
+	/* Clear the event handler busy flag; the event ring should be empty. */
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+	/* Flush posted writes -- FIXME is this necessary? */
+	xhci_readl(xhci, &xhci->ir_set->irq_pending);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	u32 temp, temp2;
+
+	spin_lock(&xhci->lock);
+	/* Check if the xHC generated the interrupt, or the irq is shared */
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
+		spin_unlock(&xhci->lock);
+		return IRQ_NONE;
+	}
+
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	if (temp & STS_FATAL) {
+		xhci_warn(xhci, "WARNING: Host System Error\n");
+		xhci_halt(xhci);
+		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+		return -ESHUTDOWN;
+	}
+
+	xhci_work(xhci);
+	spin_unlock(&xhci->lock);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+void event_ring_work(unsigned long arg)
+{
+	unsigned long flags;
+	int temp;
+	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
+	int i, j;
+
+	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = xhci_readl(xhci, &xhci->op_regs->status);
+	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
+	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
+	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
+	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
+	xhci->error_bitmask = 0;
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+	xhci_dbg(xhci, "Command ring:\n");
+	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	if (xhci->noops_submitted != NUM_TEST_NOOPS)
+		if (setup_one_noop(xhci))
+			ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	if (!xhci->zombie)
+		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
+	else
+		xhci_dbg(xhci, "Quit polling the event ring.\n");
+}
+#endif
+
+/*
  * Start the HC after it was halted.
  *
  * This function is called by the USB core when the HC driver is added.
@@ -233,8 +347,9 @@ int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	xhci_dbg(xhci, "xhci_run\n");
+	void (*doorbell)(struct xhci_hcd *) = NULL;
 
+	xhci_dbg(xhci, "xhci_run\n");
 #if 0	/* FIXME: MSI not setup yet */
 	/* Do this at the very last minute */
 	ret = xhci_setup_msix(xhci);
@@ -243,6 +358,17 @@ int xhci_run(struct usb_hcd *hcd)
 
 	return -ENOSYS;
 #endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	init_timer(&xhci->event_ring_timer);
+	xhci->event_ring_timer.data = (unsigned long) xhci;
+	xhci->event_ring_timer.function = event_ring_work;
+	/* Poll the event ring */
+	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
+	xhci->zombie = 0;
+	xhci_dbg(xhci, "Setting event ring polling timer\n");
+	add_timer(&xhci->event_ring_timer);
+#endif
+
 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	temp &= 0xffff;
@@ -266,10 +392,24 @@ int xhci_run(struct usb_hcd *hcd)
 			&xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
 
+	if (NUM_TEST_NOOPS > 0)
+		doorbell = setup_one_noop(xhci);
+
 	xhci_dbg(xhci, "Command ring memory map follows:\n");
 	xhci_debug_ring(xhci, xhci->cmd_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
 	xhci_dbg(xhci, "ERST memory map follows:\n");
 	xhci_dbg_erst(xhci, &xhci->erst);
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_ring(xhci, xhci->event_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
+	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
 
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
@@ -280,6 +420,8 @@ int xhci_run(struct usb_hcd *hcd)
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	xhci_dbg(xhci, "// @%x = 0x%x\n",
 			(unsigned int) &xhci->op_regs->command, temp);
+	if (doorbell)
+		(*doorbell)(xhci);
 
 	xhci_dbg(xhci, "Finished xhci_run\n");
 	return 0;
@@ -309,6 +451,12 @@ void xhci_stop(struct usb_hcd *hcd)
 #if 0	/* No MSI yet */
 	xhci_cleanup_msix(xhci);
 #endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	/* Tell the event ring poll function not to reschedule */
+	xhci->zombie = 1;
+	del_timer_sync(&xhci->event_ring_timer);
+#endif
+
 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -346,6 +494,8 @@ void xhci_shutdown(struct usb_hcd *hcd)
 			xhci_readl(xhci, &xhci->op_regs->status));
 }
 
+/*-------------------------------------------------------------------------*/
+
 int xhci_get_frame(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index be5a05b2021c..005d44641d81 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -172,7 +172,9 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	}
 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 	ring->enqueue = ring->first_seg->trbs;
+	ring->enq_seg = ring->first_seg;
 	ring->dequeue = ring->enqueue;
+	ring->deq_seg = ring->first_seg;
 	/* The ring is initialized to 0. The producer must write 1 to the cycle
 	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
 	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
@@ -374,14 +376,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
 
 	/* Set the event ring dequeue address */
-	xhci_dbg(xhci, "// Set ERST dequeue address for ir_set 0 = 0x%x%x\n",
-			xhci->erst.entries[0].seg_addr[1], xhci->erst.entries[0].seg_addr[0]);
-	val = xhci_readl(xhci, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
-	val &= ERST_PTR_MASK;
-	val |= (xhci->erst.entries[0].seg_addr[0] & ~ERST_PTR_MASK);
-	xhci_writel(xhci, val, &xhci->run_regs->ir_set[0].erst_dequeue[0]);
-	xhci_writel(xhci, xhci->erst.entries[0].seg_addr[1],
-			&xhci->run_regs->ir_set[0].erst_dequeue[1]);
+	set_hc_event_deq(xhci);
 	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4015082adf60..89614af80d20 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -96,6 +96,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	/*
 	 * generic hardware linkage
 	 */
+	.irq =			xhci_irq,
 	.flags =		HCD_MEMORY | HCD_USB3,
 
 	/*
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 000000000000..c7e3c7142b9d
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,367 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
+ *    Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
+ *    least one free TRB in the ring.  This is useful if you want to turn that
+ *    into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ *    link TRB, then load the pointer with the address in the link TRB.  If the
+ *    link TRB had its toggle bit set, you may need to update the ring cycle
+ *    state (see cycle bit rules).  You may have to do this multiple times
+ *    until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ *    equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ *    Update enqueue pointer between each write (which may update the ring
+ *    cycle state).
+ * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
+ *    and endpoint rings.  If HC is the producer for the event ring, it
+ *    generates an interrupt according to interrupt modulation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
+ *    the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ *    continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer.  SW is the consumer for the event ring, and it
+ *    updates the event ring dequeue pointer.  HC is the consumer for the command
+ *    and endpoint rings; it generates events on the event ring for these.
+ */
+
+#include "xhci.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
+		union xhci_trb *trb)
+{
+	unsigned int offset;
+
+	if (!seg || !trb || (void *) trb < (void *) seg->trbs)
+		return 0;
+	/* offset in bytes, since these are byte-addressable */
+	offset = (unsigned int) trb - (unsigned int) seg->trbs;
+	/* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */
+	if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
+		return 0;
+	return seg->dma + offset;
+}
+
+/* Does this link TRB point to the first segment in a ring,
+ * or was the previous TRB the last TRB on the last segment in the ERST?
+ */
+static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		struct xhci_segment *seg, union xhci_trb *trb)
+{
+	if (ring == xhci->event_ring)
+		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
+			(seg->next == xhci->event_ring->first_seg);
+	else
+		return trb->link.control & LINK_TOGGLE;
+}
+
+/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
+ * segment?  I.e. would the updated event TRB pointer step off the end of the
+ * event seg?
+ */
+static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		struct xhci_segment *seg, union xhci_trb *trb)
+{
+	if (ring == xhci->event_ring)
+		return trb == &seg->trbs[TRBS_PER_SEGMENT];
+	else
+		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+}
+
+/*
+ * See Cycle bit rules.  SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ */
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+	union xhci_trb *next = ++(ring->dequeue);
+
+	ring->deq_updates++;
+	/* Update the dequeue pointer further if that was a link TRB or we're at
+	 * the end of an event ring segment (which doesn't have link TRBs)
+	 */
+	while (last_trb(xhci, ring, ring->deq_seg, next)) {
+		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+			ring->cycle_state = (ring->cycle_state ? 0 : 1);
+			if (!in_interrupt())
+				xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
+						(unsigned int) ring,
+						(unsigned int) ring->cycle_state);
+		}
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+		next = ring->dequeue;
+	}
+}
+
+/*
+ * See Cycle bit rules.  SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set.
+ * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+	u32 chain;
+	union xhci_trb *next;
+
+	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+	next = ++(ring->enqueue);
+
+	ring->enq_updates++;
+	/* Update the enqueue pointer further if that was a link TRB or we're at
+	 * the end of an event ring segment (which doesn't have link TRBs)
+	 */
+	while (last_trb(xhci, ring, ring->enq_seg, next)) {
+		if (!consumer) {
+			if (ring != xhci->event_ring) {
+				/* Give this link TRB to the hardware */
+				if (next->link.control & TRB_CYCLE)
+					next->link.control &= (u32) ~TRB_CYCLE;
+				else
+					next->link.control |= (u32) TRB_CYCLE;
+				next->link.control &= TRB_CHAIN;
+				next->link.control |= chain;
+			}
+			/* Toggle the cycle bit after the last ring segment. */
+			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+				ring->cycle_state = (ring->cycle_state ? 0 : 1);
+				if (!in_interrupt())
+					xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
+							(unsigned int) ring,
+							(unsigned int) ring->cycle_state);
+			}
+		}
+		ring->enq_seg = ring->enq_seg->next;
+		ring->enqueue = ring->enq_seg->trbs;
+		next = ring->enqueue;
+	}
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring.  See rules
+ * above.
+ * FIXME: this would be simpler and faster if we just kept track of the number
+ * of free TRBs in a ring.
+ */
+static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		unsigned int num_trbs)
+{
+	int i;
+	union xhci_trb *enq = ring->enqueue;
+	struct xhci_segment *enq_seg = ring->enq_seg;
+
+	/* Check if ring is empty */
+	if (enq == ring->dequeue)
+		return 1;
+	/* Make sure there's an extra empty TRB available */
+	for (i = 0; i <= num_trbs; ++i) {
+		if (enq == ring->dequeue)
+			return 0;
+		enq++;
+		while (last_trb(xhci, ring, enq_seg, enq)) {
+			enq_seg = enq_seg->next;
+			enq = enq_seg->trbs;
+		}
+	}
+	return 1;
+}
+
+void set_hc_event_deq(struct xhci_hcd *xhci)
+{
+	u32 temp;
+	dma_addr_t deq;
+
+	deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
+			xhci->event_ring->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci, "WARN something wrong with SW event ring "
+				"dequeue ptr.\n");
+	/* Update HC event ring dequeue pointer */
+	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+	temp &= ERST_PTR_MASK;
+	if (!in_interrupt())
+		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
+	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
+			&xhci->ir_set->erst_dequeue[0]);
+}
+
+/* Ring the host controller doorbell after placing a command on the ring */
+void ring_cmd_db(struct xhci_hcd *xhci)
+{
+	u32 temp;
+
+	xhci_dbg(xhci, "// Ding dong!\n");
+	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
+	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+	/* Flush PCI posted writes */
+	xhci_readl(xhci, &xhci->dba->doorbell[0]);
+}
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event)
+{
+	u64 cmd_dma;
+	dma_addr_t cmd_dequeue_dma;
+
+	/* Check completion code */
+	if (GET_COMP_CODE(event->status) != COMP_SUCCESS)
+		xhci_dbg(xhci, "WARN: unsuccessful no-op command\n");
+
+	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+			xhci->cmd_ring->dequeue);
+	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
+	if (cmd_dequeue_dma == 0) {
+		xhci->error_bitmask |= 1 << 4;
+		return;
+	}
+	/* Does the DMA address match our internal dequeue pointer address? */
+	if (cmd_dma != (u64) cmd_dequeue_dma) {
+		xhci->error_bitmask |= 1 << 5;
+		return;
+	}
+	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+	case TRB_TYPE(TRB_CMD_NOOP):
+		++xhci->noops_handled;
+		break;
+	default:
+		/* Skip over unknown commands on the event ring */
+		xhci->error_bitmask |= 1 << 6;
+		break;
+	}
+	inc_deq(xhci, xhci->cmd_ring, false);
+}
+
+void handle_event(struct xhci_hcd *xhci)
+{
+	union xhci_trb *event;
+
+	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
+		xhci->error_bitmask |= 1 << 1;
+		return;
+	}
+
+	event = xhci->event_ring->dequeue;
+	/* Does the HC or OS own the TRB? */
+	if ((event->event_cmd.flags & TRB_CYCLE) !=
+			xhci->event_ring->cycle_state) {
+		xhci->error_bitmask |= 1 << 2;
+		return;
+	}
+
+	/* FIXME: Only handles command completion events. */
+	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+	case TRB_TYPE(TRB_COMPLETION):
+		handle_cmd_completion(xhci, &event->event_cmd);
+		break;
+	default:
+		xhci->error_bitmask |= 1 << 3;
+	}
+
+	/* Update SW and HC event ring dequeue pointer */
+	inc_deq(xhci, xhci->event_ring, true);
+	set_hc_event_deq(xhci);
+	/* Are there more items on the event ring? */
+	handle_event(xhci);
+}
+
+/*
+ * Generic function for queueing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ */
+static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+		bool consumer,
+		u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	struct xhci_generic_trb *trb;
+
+	trb = &ring->enqueue->generic;
+	trb->field[0] = field1;
+	trb->field[1] = field2;
+	trb->field[2] = field3;
+	trb->field[3] = field4;
+	inc_enq(xhci, ring, consumer);
+}
+
+/* Generic function for queueing a command TRB on the command ring */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+{
+	if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
+		if (!in_interrupt())
+			xhci_err(xhci, "ERR: No room for command on command ring\n");
+		return -ENOMEM;
+	}
+	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+			field4 | xhci->cmd_ring->cycle_state);
+	return 0;
+}
+
+/* Queue a no-op command on the command ring */
+static int queue_cmd_noop(struct xhci_hcd *xhci)
+{
+	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
+}
+
+/*
+ * Place a no-op command on the command ring to test the command and
+ * event ring.
+ */
+void *setup_one_noop(struct xhci_hcd *xhci)
+{
+	if (queue_cmd_noop(xhci) < 0)
+		return NULL;
+	xhci->noops_submitted++;
+	return ring_cmd_db;
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f168fcac5999..66be134b8921 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -24,6 +24,7 @@
 #define __LINUX_XHCI_HCD_H
 
 #include <linux/usb.h>
+#include <linux/timer.h>
 
 #include "../core/hcd.h"
 /* Code sharing between pci-quirks and xhci hcd */
@@ -377,6 +378,7 @@ struct intr_reg {
 /* irq_pending bitmasks */
 #define ER_IRQ_PENDING(p)	((p) & 0x1)
 /* bits 2:31 need to be preserved */
+/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
 #define ER_IRQ_CLEAR(p)		((p) & 0xfffffffe)
 #define ER_IRQ_ENABLE(p)	((ER_IRQ_CLEAR(p)) | 0x2)
 #define ER_IRQ_DISABLE(p)	((ER_IRQ_CLEAR(p)) & ~(0x2))
@@ -699,11 +701,14 @@ struct xhci_link_trb {
 /* control bitfields */
 #define LINK_TOGGLE	(0x1<<1)
 
+/* Command completion event TRB */
+struct xhci_event_cmd {
+	/* Pointer to command TRB, or the value passed by the event data trb */
+	u32 cmd_trb[2];
+	u32 status;
+	u32 flags;
+} __attribute__ ((packed));
 
-union xhci_trb {
-	struct xhci_link_trb link;
-	struct xhci_transfer_event trans_event;
-};
 
 /* Normal TRB fields */
 /* transfer_len bitmasks - bits 0:16 */
@@ -737,6 +742,17 @@ union xhci_trb {
 /* Control transfer TRB specific fields */
 #define TRB_DIR_IN	(1<<16)
 
+struct xhci_generic_trb {
+	u32 field[4];
+} __attribute__ ((packed));
+
+union xhci_trb {
+	struct xhci_link_trb		link;
+	struct xhci_transfer_event	trans_event;
+	struct xhci_event_cmd		event_cmd;
+	struct xhci_generic_trb		generic;
+};
+
 /* TRB bit mask */
 #define TRB_TYPE_BITMASK	(0xfc00)
 #define TRB_TYPE(p)		((p) << 10)
@@ -825,7 +841,11 @@ struct xhci_segment {
 struct xhci_ring {
 	struct xhci_segment	*first_seg;
 	union xhci_trb		*enqueue;
+	struct xhci_segment	*enq_seg;
+	unsigned int		enq_updates;
 	union xhci_trb		*dequeue;
+	struct xhci_segment	*deq_seg;
+	unsigned int		deq_updates;
 	/*
 	 * Write the cycle state into the TRB cycle field to give ownership of
 	 * the TRB to the host controller (if we are the producer), or to check
@@ -861,6 +881,8 @@ struct xhci_erst {
 #define ERST_SIZE	64
 /* Initial number of event segment rings allocated */
 #define ERST_ENTRIES	1
+/* Poll every 60 seconds */
+#define POLL_TIMEOUT	60
 /* XXX: Make these module parameters */
 
 
@@ -907,8 +929,21 @@ struct xhci_hcd {
 	/* DMA pools */
 	struct dma_pool		*device_pool;
 	struct dma_pool		*segment_pool;
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+	/* Poll the rings - for debugging */
+	struct timer_list	event_ring_timer;
+	int			zombie;
+#endif
+	/* Statistics */
+	int			noops_submitted;
+	int			noops_handled;
+	int			error_bitmask;
 };
 
+/* For testing purposes */
+#define NUM_TEST_NOOPS	0
+
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
 static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
 {
@@ -956,9 +991,11 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct intr_reg *ir_set, int set_n
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
 void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
 
 /* xHCI memory managment */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -978,5 +1015,13 @@ int xhci_run(struct usb_hcd *hcd);
 void xhci_stop(struct usb_hcd *hcd);
 void xhci_shutdown(struct usb_hcd *hcd);
 int xhci_get_frame(struct usb_hcd *hcd);
+irqreturn_t xhci_irq(struct usb_hcd *hcd);
+
+/* xHCI ring, segment, TRB, and TD functions */
+dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+void ring_cmd_db(struct xhci_hcd *xhci);
+void *setup_one_noop(struct xhci_hcd *xhci);
+void handle_event(struct xhci_hcd *xhci);
+void set_hc_event_deq(struct xhci_hcd *xhci);
 
 #endif /* __LINUX_XHCI_HCD_H */