about summary refs log tree commit diff stats
path: root/drivers/usb/host/xhci-hcd.c
diff options
context:
space:
mode:
authorSarah Sharp <sarah.a.sharp@linux.intel.com>2009-10-27 13:57:01 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2009-12-11 14:55:17 -0500
commit6f5165cf989387e84ef23122330b27cca1cbe831 (patch)
tree44ff1ea0590b00f2851f50ffa2cf9954eb70a767 /drivers/usb/host/xhci-hcd.c
parent4f0f0baef017dfd5d62b749716ab980a825e1071 (diff)
USB: xhci: Add watchdog timer for URB cancellation.
In order to giveback a canceled URB, we must ensure that the xHCI hardware will not access the buffer in an URB. We can't modify the buffer pointers on endpoint rings without issuing and waiting for a stop endpoint command. Since URBs can be canceled in interrupt context, we can't wait on that command. The old code trusted that the host controller would respond to the command, and would giveback the URBs in the event handler. If the hardware never responds to the stop endpoint command, the URBs will never be completed, and we might hang the USB subsystem. Implement a watchdog timer that is spawned whenever a stop endpoint command is queued. If a stop endpoint command event is found on the event ring during an interrupt, we need to stop the watchdog timer with del_timer(). Since del_timer() can fail if the timer is running and waiting on the xHCI lock, we need a way to signal to the timer that everything is fine and it should exit. If we simply clear EP_HALT_PENDING, a new stop endpoint command could sneak in and set it before the watchdog timer can grab the lock. Instead we use a combination of the EP_HALT_PENDING flag and a counter for the number of pending stop endpoint commands (xhci_virt_ep->stop_cmds_pending). If we need to cancel the watchdog timer and del_timer() succeeds, we decrement the number of pending stop endpoint commands. If del_timer() fails, we leave the number of pending stop endpoint commands alone. In either case, we clear the EP_HALT_PENDING flag. The timer will decrement the number of pending stop endpoint commands once it obtains the lock. If the timer is the tail end of the last stop endpoint command (xhci_virt_ep->stop_cmds_pending == 0), and the endpoint's command is still pending (EP_HALT_PENDING is set), we assume the host is dying. The watchdog timer will set XHCI_STATE_DYING, try to halt the xHCI host, and give back all pending URBs. Various other places in the driver need to check whether the xHCI host is dying. 
If the interrupt handler ever notices, it should immediately stop processing events. The URB enqueue function should also return -ESHUTDOWN. The URB dequeue function should simply return the value of usb_hcd_check_unlink_urb() and the watchdog timer will take care of giving the URB back. When a device is disconnected, the xHCI hardware structures should be freed without issuing a disable slot command (since the hardware probably won't respond to it anyway). The debugging polling loop should stop polling if the host is dying. When a device is disconnected, any pending watchdog timers are killed with del_timer_sync(). It must be synchronous so that the watchdog timer doesn't attempt to access the freed endpoint structures. Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r--  drivers/usb/host/xhci-hcd.c | 52
1 file changed, 48 insertions(+), 4 deletions(-)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5839453d342b..0d5a8564ed17 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -246,8 +246,14 @@ static void xhci_work(struct xhci_hcd *xhci)
246 /* Flush posted writes */ 246 /* Flush posted writes */
247 xhci_readl(xhci, &xhci->ir_set->irq_pending); 247 xhci_readl(xhci, &xhci->ir_set->irq_pending);
248 248
249 /* FIXME this should be a delayed service routine that clears the EHB */ 249 if (xhci->xhc_state & XHCI_STATE_DYING)
250 xhci_handle_event(xhci); 250 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
251 "Shouldn't IRQs be disabled?\n");
252 else
253 /* FIXME this should be a delayed service routine
254 * that clears the EHB.
255 */
256 xhci_handle_event(xhci);
251 257
252 /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ 258 /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
253 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 259 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
@@ -320,7 +326,7 @@ void xhci_event_ring_work(unsigned long arg)
320 spin_lock_irqsave(&xhci->lock, flags); 326 spin_lock_irqsave(&xhci->lock, flags);
321 temp = xhci_readl(xhci, &xhci->op_regs->status); 327 temp = xhci_readl(xhci, &xhci->op_regs->status);
322 xhci_dbg(xhci, "op reg status = 0x%x\n", temp); 328 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
323 if (temp == 0xffffffff) { 329 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
324 xhci_dbg(xhci, "HW died, polling stopped.\n"); 330 xhci_dbg(xhci, "HW died, polling stopped.\n");
325 spin_unlock_irqrestore(&xhci->lock, flags); 331 spin_unlock_irqrestore(&xhci->lock, flags);
326 return; 332 return;
@@ -710,16 +716,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
710 * atomic context to this function, which may allocate memory. 716 * atomic context to this function, which may allocate memory.
711 */ 717 */
712 spin_lock_irqsave(&xhci->lock, flags); 718 spin_lock_irqsave(&xhci->lock, flags);
719 if (xhci->xhc_state & XHCI_STATE_DYING)
720 goto dying;
713 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, 721 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
714 slot_id, ep_index); 722 slot_id, ep_index);
715 spin_unlock_irqrestore(&xhci->lock, flags); 723 spin_unlock_irqrestore(&xhci->lock, flags);
716 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { 724 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
717 spin_lock_irqsave(&xhci->lock, flags); 725 spin_lock_irqsave(&xhci->lock, flags);
726 if (xhci->xhc_state & XHCI_STATE_DYING)
727 goto dying;
718 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 728 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
719 slot_id, ep_index); 729 slot_id, ep_index);
720 spin_unlock_irqrestore(&xhci->lock, flags); 730 spin_unlock_irqrestore(&xhci->lock, flags);
721 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { 731 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
722 spin_lock_irqsave(&xhci->lock, flags); 732 spin_lock_irqsave(&xhci->lock, flags);
733 if (xhci->xhc_state & XHCI_STATE_DYING)
734 goto dying;
723 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, 735 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
724 slot_id, ep_index); 736 slot_id, ep_index);
725 spin_unlock_irqrestore(&xhci->lock, flags); 737 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -728,6 +740,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
728 } 740 }
729exit: 741exit:
730 return ret; 742 return ret;
743dying:
744 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
745 "non-responsive xHCI host.\n",
746 urb->ep->desc.bEndpointAddress, urb);
747 spin_unlock_irqrestore(&xhci->lock, flags);
748 return -ESHUTDOWN;
731} 749}
732 750
733/* 751/*
@@ -789,6 +807,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
789 kfree(td); 807 kfree(td);
790 return ret; 808 return ret;
791 } 809 }
810 if (xhci->xhc_state & XHCI_STATE_DYING) {
811 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
812 "non-responsive xHCI host.\n",
813 urb->ep->desc.bEndpointAddress, urb);
814 /* Let the stop endpoint command watchdog timer (which set this
815 * state) finish cleaning up the endpoint TD lists. We must
816 * have caught it in the middle of dropping a lock and giving
817 * back an URB.
818 */
819 goto done;
820 }
792 821
793 xhci_dbg(xhci, "Cancel URB %p\n", urb); 822 xhci_dbg(xhci, "Cancel URB %p\n", urb);
794 xhci_dbg(xhci, "Event ring:\n"); 823 xhci_dbg(xhci, "Event ring:\n");
@@ -806,6 +835,10 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
806 */ 835 */
807 if (!(ep->ep_state & EP_HALT_PENDING)) { 836 if (!(ep->ep_state & EP_HALT_PENDING)) {
808 ep->ep_state |= EP_HALT_PENDING; 837 ep->ep_state |= EP_HALT_PENDING;
838 ep->stop_cmds_pending++;
839 ep->stop_cmd_timer.expires = jiffies +
840 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
841 add_timer(&ep->stop_cmd_timer);
809 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); 842 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
810 xhci_ring_cmd_db(xhci); 843 xhci_ring_cmd_db(xhci);
811 } 844 }
@@ -1410,16 +1443,27 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1410void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 1443void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
1411{ 1444{
1412 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1445 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1446 struct xhci_virt_device *virt_dev;
1413 unsigned long flags; 1447 unsigned long flags;
1414 u32 state; 1448 u32 state;
1449 int i;
1415 1450
1416 if (udev->slot_id == 0) 1451 if (udev->slot_id == 0)
1417 return; 1452 return;
1453 virt_dev = xhci->devs[udev->slot_id];
1454 if (!virt_dev)
1455 return;
1456
1457 /* Stop any wayward timer functions (which may grab the lock) */
1458 for (i = 0; i < 31; ++i) {
1459 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
1460 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
1461 }
1418 1462
1419 spin_lock_irqsave(&xhci->lock, flags); 1463 spin_lock_irqsave(&xhci->lock, flags);
1420 /* Don't disable the slot if the host controller is dead. */ 1464 /* Don't disable the slot if the host controller is dead. */
1421 state = xhci_readl(xhci, &xhci->op_regs->status); 1465 state = xhci_readl(xhci, &xhci->op_regs->status);
1422 if (state == 0xffffffff) { 1466 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
1423 xhci_free_virt_device(xhci, udev->slot_id); 1467 xhci_free_virt_device(xhci, udev->slot_id);
1424 spin_unlock_irqrestore(&xhci->lock, flags); 1468 spin_unlock_irqrestore(&xhci->lock, flags);
1425 return; 1469 return;