diff options
Diffstat (limited to 'drivers/usb/host/xhci-hcd.c')
-rw-r--r-- | drivers/usb/host/xhci-hcd.c | 52 |
1 file changed, 48 insertions, 4 deletions
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index 5839453d342b..0d5a8564ed17 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c | |||
@@ -246,8 +246,14 @@ static void xhci_work(struct xhci_hcd *xhci) | |||
246 | /* Flush posted writes */ | 246 | /* Flush posted writes */ |
247 | xhci_readl(xhci, &xhci->ir_set->irq_pending); | 247 | xhci_readl(xhci, &xhci->ir_set->irq_pending); |
248 | 248 | ||
249 | /* FIXME this should be a delayed service routine that clears the EHB */ | 249 | if (xhci->xhc_state & XHCI_STATE_DYING) |
250 | xhci_handle_event(xhci); | 250 | xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " |
251 | "Shouldn't IRQs be disabled?\n"); | ||
252 | else | ||
253 | /* FIXME this should be a delayed service routine | ||
254 | * that clears the EHB. | ||
255 | */ | ||
256 | xhci_handle_event(xhci); | ||
251 | 257 | ||
252 | /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ | 258 | /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ |
253 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); | 259 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
@@ -320,7 +326,7 @@ void xhci_event_ring_work(unsigned long arg) | |||
320 | spin_lock_irqsave(&xhci->lock, flags); | 326 | spin_lock_irqsave(&xhci->lock, flags); |
321 | temp = xhci_readl(xhci, &xhci->op_regs->status); | 327 | temp = xhci_readl(xhci, &xhci->op_regs->status); |
322 | xhci_dbg(xhci, "op reg status = 0x%x\n", temp); | 328 | xhci_dbg(xhci, "op reg status = 0x%x\n", temp); |
323 | if (temp == 0xffffffff) { | 329 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { |
324 | xhci_dbg(xhci, "HW died, polling stopped.\n"); | 330 | xhci_dbg(xhci, "HW died, polling stopped.\n"); |
325 | spin_unlock_irqrestore(&xhci->lock, flags); | 331 | spin_unlock_irqrestore(&xhci->lock, flags); |
326 | return; | 332 | return; |
@@ -710,16 +716,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
710 | * atomic context to this function, which may allocate memory. | 716 | * atomic context to this function, which may allocate memory. |
711 | */ | 717 | */ |
712 | spin_lock_irqsave(&xhci->lock, flags); | 718 | spin_lock_irqsave(&xhci->lock, flags); |
719 | if (xhci->xhc_state & XHCI_STATE_DYING) | ||
720 | goto dying; | ||
713 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, | 721 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
714 | slot_id, ep_index); | 722 | slot_id, ep_index); |
715 | spin_unlock_irqrestore(&xhci->lock, flags); | 723 | spin_unlock_irqrestore(&xhci->lock, flags); |
716 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { | 724 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { |
717 | spin_lock_irqsave(&xhci->lock, flags); | 725 | spin_lock_irqsave(&xhci->lock, flags); |
726 | if (xhci->xhc_state & XHCI_STATE_DYING) | ||
727 | goto dying; | ||
718 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, | 728 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
719 | slot_id, ep_index); | 729 | slot_id, ep_index); |
720 | spin_unlock_irqrestore(&xhci->lock, flags); | 730 | spin_unlock_irqrestore(&xhci->lock, flags); |
721 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { | 731 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { |
722 | spin_lock_irqsave(&xhci->lock, flags); | 732 | spin_lock_irqsave(&xhci->lock, flags); |
733 | if (xhci->xhc_state & XHCI_STATE_DYING) | ||
734 | goto dying; | ||
723 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, | 735 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
724 | slot_id, ep_index); | 736 | slot_id, ep_index); |
725 | spin_unlock_irqrestore(&xhci->lock, flags); | 737 | spin_unlock_irqrestore(&xhci->lock, flags); |
@@ -728,6 +740,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
728 | } | 740 | } |
729 | exit: | 741 | exit: |
730 | return ret; | 742 | return ret; |
743 | dying: | ||
744 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " | ||
745 | "non-responsive xHCI host.\n", | ||
746 | urb->ep->desc.bEndpointAddress, urb); | ||
747 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
748 | return -ESHUTDOWN; | ||
731 | } | 749 | } |
732 | 750 | ||
733 | /* | 751 | /* |
@@ -789,6 +807,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
789 | kfree(td); | 807 | kfree(td); |
790 | return ret; | 808 | return ret; |
791 | } | 809 | } |
810 | if (xhci->xhc_state & XHCI_STATE_DYING) { | ||
811 | xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " | ||
812 | "non-responsive xHCI host.\n", | ||
813 | urb->ep->desc.bEndpointAddress, urb); | ||
814 | /* Let the stop endpoint command watchdog timer (which set this | ||
815 | * state) finish cleaning up the endpoint TD lists. We must | ||
816 | * have caught it in the middle of dropping a lock and giving | ||
817 | * back an URB. | ||
818 | */ | ||
819 | goto done; | ||
820 | } | ||
792 | 821 | ||
793 | xhci_dbg(xhci, "Cancel URB %p\n", urb); | 822 | xhci_dbg(xhci, "Cancel URB %p\n", urb); |
794 | xhci_dbg(xhci, "Event ring:\n"); | 823 | xhci_dbg(xhci, "Event ring:\n"); |
@@ -806,6 +835,10 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
806 | */ | 835 | */ |
807 | if (!(ep->ep_state & EP_HALT_PENDING)) { | 836 | if (!(ep->ep_state & EP_HALT_PENDING)) { |
808 | ep->ep_state |= EP_HALT_PENDING; | 837 | ep->ep_state |= EP_HALT_PENDING; |
838 | ep->stop_cmds_pending++; | ||
839 | ep->stop_cmd_timer.expires = jiffies + | ||
840 | XHCI_STOP_EP_CMD_TIMEOUT * HZ; | ||
841 | add_timer(&ep->stop_cmd_timer); | ||
809 | xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); | 842 | xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); |
810 | xhci_ring_cmd_db(xhci); | 843 | xhci_ring_cmd_db(xhci); |
811 | } | 844 | } |
@@ -1410,16 +1443,27 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, | |||
1410 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | 1443 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
1411 | { | 1444 | { |
1412 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 1445 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1446 | struct xhci_virt_device *virt_dev; | ||
1413 | unsigned long flags; | 1447 | unsigned long flags; |
1414 | u32 state; | 1448 | u32 state; |
1449 | int i; | ||
1415 | 1450 | ||
1416 | if (udev->slot_id == 0) | 1451 | if (udev->slot_id == 0) |
1417 | return; | 1452 | return; |
1453 | virt_dev = xhci->devs[udev->slot_id]; | ||
1454 | if (!virt_dev) | ||
1455 | return; | ||
1456 | |||
1457 | /* Stop any wayward timer functions (which may grab the lock) */ | ||
1458 | for (i = 0; i < 31; ++i) { | ||
1459 | virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; | ||
1460 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | ||
1461 | } | ||
1418 | 1462 | ||
1419 | spin_lock_irqsave(&xhci->lock, flags); | 1463 | spin_lock_irqsave(&xhci->lock, flags); |
1420 | /* Don't disable the slot if the host controller is dead. */ | 1464 | /* Don't disable the slot if the host controller is dead. */ |
1421 | state = xhci_readl(xhci, &xhci->op_regs->status); | 1465 | state = xhci_readl(xhci, &xhci->op_regs->status); |
1422 | if (state == 0xffffffff) { | 1466 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { |
1423 | xhci_free_virt_device(xhci, udev->slot_id); | 1467 | xhci_free_virt_device(xhci, udev->slot_id); |
1424 | spin_unlock_irqrestore(&xhci->lock, flags); | 1468 | spin_unlock_irqrestore(&xhci->lock, flags); |
1425 | return; | 1469 | return; |