Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r-- | drivers/usb/host/xhci-hcd.c | 139
1 file changed, 94 insertions, 45 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 932f99938481..5e92c72df642 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -67,22 +67,14 @@ static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
 }
 
 /*
- * Force HC into halt state.
- *
- * Disable any IRQs and clear the run/stop bit.
- * HC will complete any current and actively pipelined transactions, and
- * should halt within 16 microframes of the run/stop bit being cleared.
- * Read HC Halted bit in the status register to see when the HC is finished.
- * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ * Disable interrupts and begin the xHCI halting process.
  */
-int xhci_halt(struct xhci_hcd *xhci)
+void xhci_quiesce(struct xhci_hcd *xhci)
 {
         u32 halted;
         u32 cmd;
         u32 mask;
 
-        xhci_dbg(xhci, "// Halt the HC\n");
-        /* Disable all interrupts from the host controller */
         mask = ~(XHCI_IRQS);
         halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
         if (!halted)
@@ -91,6 +83,21 @@ int xhci_halt(struct xhci_hcd *xhci)
         cmd = xhci_readl(xhci, &xhci->op_regs->command);
         cmd &= mask;
         xhci_writel(xhci, cmd, &xhci->op_regs->command);
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 microframes of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+        xhci_dbg(xhci, "// Halt the HC\n");
+        xhci_quiesce(xhci);
 
         return handshake(xhci, &xhci->op_regs->status,
                         STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
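
Taken together, the two hunks above split the old xhci_halt() into a non-blocking xhci_quiesce() plus a wrapper that waits for the halt to latch. A rough consolidated view of the result (one unchanged line between the hunks is filled in from context and marked as an assumption):

        /* Mask host interrupts and clear the run/stop bit; do not wait. */
        void xhci_quiesce(struct xhci_hcd *xhci)
        {
                u32 halted;
                u32 cmd;
                u32 mask;

                mask = ~(XHCI_IRQS);
                halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
                if (!halted)
                        mask &= ~CMD_RUN;       /* assumed unchanged line, not shown in this diff */

                cmd = xhci_readl(xhci, &xhci->op_regs->command);
                cmd &= mask;
                xhci_writel(xhci, cmd, &xhci->op_regs->command);
        }

        /* Quiesce, then poll STS_HALT for up to XHCI_MAX_HALT_USEC. */
        int xhci_halt(struct xhci_hcd *xhci)
        {
                xhci_dbg(xhci, "// Halt the HC\n");
                xhci_quiesce(xhci);

                return handshake(xhci, &xhci->op_regs->status,
                                STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        }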
@@ -124,28 +131,6 @@ int xhci_reset(struct xhci_hcd *xhci)
         return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
 }
 
-/*
- * Stop the HC from processing the endpoint queues.
- */
-static void xhci_quiesce(struct xhci_hcd *xhci)
-{
-        /*
-         * Queues are per endpoint, so we need to disable an endpoint or slot.
-         *
-         * To disable a slot, we need to insert a disable slot command on the
-         * command ring and ring the doorbell. This will also free any internal
-         * resources associated with the slot (which might not be what we want).
-         *
-         * A Release Endpoint command sounds better - doesn't free internal HC
-         * memory, but removes the endpoints from the schedule and releases the
-         * bandwidth, disables the doorbells, and clears the endpoint enable
-         * flag. Usually used prior to a set interface command.
-         *
-         * TODO: Implement after command ring code is done.
-         */
-        BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
-        xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
-}
 
 #if 0
 /* Set up MSI-X table for entry 0 (may claim other entries later) */
@@ -261,8 +246,14 @@ static void xhci_work(struct xhci_hcd *xhci)
         /* Flush posted writes */
         xhci_readl(xhci, &xhci->ir_set->irq_pending);
 
-        /* FIXME this should be a delayed service routine that clears the EHB */
-        xhci_handle_event(xhci);
+        if (xhci->xhc_state & XHCI_STATE_DYING)
+                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
+                                "Shouldn't IRQs be disabled?\n");
+        else
+                /* FIXME this should be a delayed service routine
+                 * that clears the EHB.
+                 */
+                xhci_handle_event(xhci);
 
         /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
         temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
@@ -335,7 +326,7 @@ void xhci_event_ring_work(unsigned long arg)
         spin_lock_irqsave(&xhci->lock, flags);
         temp = xhci_readl(xhci, &xhci->op_regs->status);
         xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
-        if (temp == 0xffffffff) {
+        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
                 xhci_dbg(xhci, "HW died, polling stopped.\n");
                 spin_unlock_irqrestore(&xhci->lock, flags);
                 return;
@@ -490,8 +481,6 @@ void xhci_stop(struct usb_hcd *hcd)
         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
         spin_lock_irq(&xhci->lock);
-        if (HC_IS_RUNNING(hcd->state))
-                xhci_quiesce(xhci);
         xhci_halt(xhci);
         xhci_reset(xhci);
         spin_unlock_irq(&xhci->lock);
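
With quiescing folded into xhci_halt(), the stop path no longer needs its own HC_IS_RUNNING() guard and the old stub call; after this hunk the start of xhci_stop() reads roughly as follows (a sketch of the resulting lines, nothing beyond what the hunks show):

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);        /* masks IRQs, clears run/stop, waits for STS_HALT */
        xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);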
@@ -727,16 +716,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
                  * atomic context to this function, which may allocate memory.
                  */
                 spin_lock_irqsave(&xhci->lock, flags);
+                if (xhci->xhc_state & XHCI_STATE_DYING)
+                        goto dying;
                 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                 slot_id, ep_index);
                 spin_unlock_irqrestore(&xhci->lock, flags);
         } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
                 spin_lock_irqsave(&xhci->lock, flags);
+                if (xhci->xhc_state & XHCI_STATE_DYING)
+                        goto dying;
                 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                 slot_id, ep_index);
                 spin_unlock_irqrestore(&xhci->lock, flags);
         } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
                 spin_lock_irqsave(&xhci->lock, flags);
+                if (xhci->xhc_state & XHCI_STATE_DYING)
+                        goto dying;
                 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                 slot_id, ep_index);
                 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -745,6 +740,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
         }
 exit:
         return ret;
+dying:
+        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
+                        "non-responsive xHCI host.\n",
+                        urb->ep->desc.bEndpointAddress, urb);
+        spin_unlock_irqrestore(&xhci->lock, flags);
+        return -ESHUTDOWN;
 }
 
 /*
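
The enqueue side repeats one pattern for control, bulk, and interrupt transfers: once xhci->lock is held, refuse to queue anything on a host already marked XHCI_STATE_DYING and fail the URB instead. Condensed to the control-transfer branch plus the shared error path (a sketch of what the two hunks above add, with unrelated lines elided):

        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->xhc_state & XHCI_STATE_DYING)
                goto dying;
        ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
        spin_unlock_irqrestore(&xhci->lock, flags);
        ...
dying:
        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
                        urb->ep->desc.bEndpointAddress, urb);
        spin_unlock_irqrestore(&xhci->lock, flags);
        return -ESHUTDOWN;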
@@ -806,6 +807,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                 kfree(td);
                 return ret;
         }
+        if (xhci->xhc_state & XHCI_STATE_DYING) {
+                xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
+                                "non-responsive xHCI host.\n",
+                                urb->ep->desc.bEndpointAddress, urb);
+                /* Let the stop endpoint command watchdog timer (which set this
+                 * state) finish cleaning up the endpoint TD lists. We must
+                 * have caught it in the middle of dropping a lock and giving
+                 * back an URB.
+                 */
+                goto done;
+        }
 
         xhci_dbg(xhci, "Cancel URB %p\n", urb);
         xhci_dbg(xhci, "Event ring:\n");
@@ -817,12 +829,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
         xhci_debug_ring(xhci, ep_ring);
         td = (struct xhci_td *) urb->hcpriv;
 
-        ep->cancels_pending++;
         list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
         /* Queue a stop endpoint command, but only if this is
          * the first cancellation to be handled.
          */
-        if (ep->cancels_pending == 1) {
+        if (!(ep->ep_state & EP_HALT_PENDING)) {
+                ep->ep_state |= EP_HALT_PENDING;
+                ep->stop_cmds_pending++;
+                ep->stop_cmd_timer.expires = jiffies +
+                        XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+                add_timer(&ep->stop_cmd_timer);
                 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
                 xhci_ring_cmd_db(xhci);
         }
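
On the cancel side, the per-endpoint cancels_pending counter gives way to an EP_HALT_PENDING flag, and a watchdog timer is armed before the first Stop Endpoint command is queued, so a host that never completes the command can later be declared dying. The arming step from the hunk above, stripped to its core:

        if (!(ep->ep_state & EP_HALT_PENDING)) {
                ep->ep_state |= EP_HALT_PENDING;
                ep->stop_cmds_pending++;
                /* Give the command XHCI_STOP_EP_CMD_TIMEOUT seconds to complete. */
                ep->stop_cmd_timer.expires = jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ;
                add_timer(&ep->stop_cmd_timer);
                xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
                xhci_ring_cmd_db(xhci);
        }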
@@ -1246,13 +1262,35 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
                         LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
         xhci_zero_in_ctx(xhci, virt_dev);
-        /* Free any old rings */
+        /* Install new rings and free or cache any old rings */
         for (i = 1; i < 31; ++i) {
-                if (virt_dev->eps[i].new_ring) {
-                        xhci_ring_free(xhci, virt_dev->eps[i].ring);
-                        virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
-                        virt_dev->eps[i].new_ring = NULL;
+                int rings_cached;
+
+                if (!virt_dev->eps[i].new_ring)
+                        continue;
+                /* Only cache or free the old ring if it exists.
+                 * It may not if this is the first add of an endpoint.
+                 */
+                if (virt_dev->eps[i].ring) {
+                        rings_cached = virt_dev->num_rings_cached;
+                        if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+                                virt_dev->num_rings_cached++;
+                                rings_cached = virt_dev->num_rings_cached;
+                                virt_dev->ring_cache[rings_cached] =
+                                        virt_dev->eps[i].ring;
+                                xhci_dbg(xhci, "Cached old ring, "
+                                                "%d ring%s cached\n",
+                                                rings_cached,
+                                                (rings_cached > 1) ? "s" : "");
+                        } else {
+                                xhci_ring_free(xhci, virt_dev->eps[i].ring);
+                                xhci_dbg(xhci, "Ring cache full (%d rings), "
+                                                "freeing ring\n",
+                                                virt_dev->num_rings_cached);
+                        }
                 }
+                virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+                virt_dev->eps[i].new_ring = NULL;
         }
 
         return ret;
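
In xhci_check_bandwidth(), a replaced endpoint ring is no longer freed outright: up to XHCI_MAX_RINGS_CACHED retired rings are parked in the device's ring_cache for later reuse, and only the overflow is freed before the new ring is installed. The decision from the hunk above, with the debug messages dropped:

        if (virt_dev->eps[i].ring) {
                rings_cached = virt_dev->num_rings_cached;
                if (rings_cached < XHCI_MAX_RINGS_CACHED) {
                        virt_dev->num_rings_cached++;
                        virt_dev->ring_cache[virt_dev->num_rings_cached] =
                                virt_dev->eps[i].ring;
                } else {
                        xhci_ring_free(xhci, virt_dev->eps[i].ring);
                }
        }
        virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
        virt_dev->eps[i].new_ring = NULL;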
@@ -1427,16 +1465,27 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+        struct xhci_virt_device *virt_dev;
         unsigned long flags;
         u32 state;
+        int i;
 
         if (udev->slot_id == 0)
                 return;
+        virt_dev = xhci->devs[udev->slot_id];
+        if (!virt_dev)
+                return;
+
+        /* Stop any wayward timer functions (which may grab the lock) */
+        for (i = 0; i < 31; ++i) {
+                virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
+                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
+        }
 
         spin_lock_irqsave(&xhci->lock, flags);
         /* Don't disable the slot if the host controller is dead. */
         state = xhci_readl(xhci, &xhci->op_regs->status);
-        if (state == 0xffffffff) {
+        if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
                 xhci_free_virt_device(xhci, udev->slot_id);
                 spin_unlock_irqrestore(&xhci->lock, flags);
                 return;