aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/usb/host/xhci.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--  drivers/usb/host/xhci.c | 343
 1 file changed, 287 insertions(+), 56 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d5c550ea3e68..5d7d4e951ea4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -551,6 +551,218 @@ void xhci_shutdown(struct usb_hcd *hcd)
551 xhci_readl(xhci, &xhci->op_regs->status)); 551 xhci_readl(xhci, &xhci->op_regs->status));
552} 552}
553 553
554#ifdef CONFIG_PM
/*
 * Save the xHC operational and interrupter registers into xhci->s3 so
 * they can be written back by xhci_restore_registers() after the
 * controller loses register state across a system suspend.
 * Caller must ensure the controller is halted and serialized access.
 */
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	/* Operational registers */
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	/* Primary interrupter (event ring) registers */
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}
567
/*
 * Write back the register state captured by xhci_save_registers().
 * Must be called before the controller is restarted (CMD_RUN set).
 *
 * NOTE(review): s3.erst_dequeue is saved by xhci_save_registers() but is
 * not written back here — confirm the dequeue pointer is re-established
 * elsewhere on the resume path.
 */
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}
579
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode: halt the
 * controller, save its register state, and ask it to save its internal
 * state (CSS) so core power can be removed.
 *
 * Returns 0 on success, -ETIMEDOUT if the controller does not halt or
 * does not finish saving state in time.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	spin_lock_irq(&xhci->lock);
	/* Keep usbcore from touching the hardware while it is powered down.
	 * NOTE(review): on the timeout error paths below this bit stays
	 * cleared — confirm callers handle a failed suspend.
	 */
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	/* step 1: stop endpoint */
	/* skipped — ports are assumed to have been suspended already */

	/* step 2: clear Run/Stop bit and wait for the controller to halt */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag and wait for the save to complete (STS_SAVE
	 * clears when the controller has finished saving internal state) */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	/* step 5: remove core well power */
	/* NOTE(review): xhci_cleanup_msix() is called while holding
	 * xhci->lock with interrupts disabled — confirm it cannot sleep
	 * (free_irq()/synchronize_irq() would). */
	xhci_cleanup_msix(xhci);
	spin_unlock_irq(&xhci->lock);

	return rc;
}
626
627/*
628 * start xHC (not bus-specific)
629 *
630 * This is called when the machine transition from S3/S4 mode.
631 *
632 */
633int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
634{
635 u32 command, temp = 0;
636 struct usb_hcd *hcd = xhci_to_hcd(xhci);
637 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
638 u64 val_64;
639 int old_state, retval;
640
641 old_state = hcd->state;
642 if (time_before(jiffies, xhci->next_statechange))
643 msleep(100);
644
645 spin_lock_irq(&xhci->lock);
646
647 if (!hibernated) {
648 /* step 1: restore register */
649 xhci_restore_registers(xhci);
650 /* step 2: initialize command ring buffer */
651 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
652 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
653 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
654 xhci->cmd_ring->dequeue) &
655 (u64) ~CMD_RING_RSVD_BITS) |
656 xhci->cmd_ring->cycle_state;
657 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
658 (long unsigned long) val_64);
659 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
660 /* step 3: restore state and start state*/
661 /* step 3: set CRS flag */
662 command = xhci_readl(xhci, &xhci->op_regs->command);
663 command |= CMD_CRS;
664 xhci_writel(xhci, command, &xhci->op_regs->command);
665 if (handshake(xhci, &xhci->op_regs->status,
666 STS_RESTORE, 0, 10*100)) {
667 xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
668 spin_unlock_irq(&xhci->lock);
669 return -ETIMEDOUT;
670 }
671 temp = xhci_readl(xhci, &xhci->op_regs->status);
672 }
673
674 /* If restore operation fails, re-initialize the HC during resume */
675 if ((temp & STS_SRE) || hibernated) {
676 usb_root_hub_lost_power(hcd->self.root_hub);
677
678 xhci_dbg(xhci, "Stop HCD\n");
679 xhci_halt(xhci);
680 xhci_reset(xhci);
681 if (hibernated)
682 xhci_cleanup_msix(xhci);
683 spin_unlock_irq(&xhci->lock);
684
685#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
686 /* Tell the event ring poll function not to reschedule */
687 xhci->zombie = 1;
688 del_timer_sync(&xhci->event_ring_timer);
689#endif
690
691 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
692 temp = xhci_readl(xhci, &xhci->op_regs->status);
693 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
694 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
695 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
696 &xhci->ir_set->irq_pending);
697 xhci_print_ir_set(xhci, xhci->ir_set, 0);
698
699 xhci_dbg(xhci, "cleaning up memory\n");
700 xhci_mem_cleanup(xhci);
701 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
702 xhci_readl(xhci, &xhci->op_regs->status));
703
704 xhci_dbg(xhci, "Initialize the HCD\n");
705 retval = xhci_init(hcd);
706 if (retval)
707 return retval;
708
709 xhci_dbg(xhci, "Start the HCD\n");
710 retval = xhci_run(hcd);
711 if (!retval)
712 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
713 hcd->state = HC_STATE_SUSPENDED;
714 return retval;
715 }
716
717 /* Re-setup MSI-X */
718 if (hcd->irq)
719 free_irq(hcd->irq, hcd);
720 hcd->irq = -1;
721
722 retval = xhci_setup_msix(xhci);
723 if (retval)
724 /* fall back to msi*/
725 retval = xhci_setup_msi(xhci);
726
727 if (retval) {
728 /* fall back to legacy interrupt*/
729 retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
730 hcd->irq_descr, hcd);
731 if (retval) {
732 xhci_err(xhci, "request interrupt %d failed\n",
733 pdev->irq);
734 return retval;
735 }
736 hcd->irq = pdev->irq;
737 }
738
739 /* step 4: set Run/Stop bit */
740 command = xhci_readl(xhci, &xhci->op_regs->command);
741 command |= CMD_RUN;
742 xhci_writel(xhci, command, &xhci->op_regs->command);
743 handshake(xhci, &xhci->op_regs->status, STS_HALT,
744 0, 250 * 1000);
745
746 /* step 5: walk topology and initialize portsc,
747 * portpmsc and portli
748 */
749 /* this is done in bus_resume */
750
751 /* step 6: restart each of the previously
752 * Running endpoints by ringing their doorbells
753 */
754
755 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
756 if (!hibernated)
757 hcd->state = old_state;
758 else
759 hcd->state = HC_STATE_SUSPENDED;
760
761 spin_unlock_irq(&xhci->lock);
762 return 0;
763}
764#endif /* CONFIG_PM */
765
554/*-------------------------------------------------------------------------*/ 766/*-------------------------------------------------------------------------*/
555 767
556/** 768/**
@@ -607,7 +819,11 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
607 * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 819 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
608 */ 820 */
609int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, 821int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
610 struct usb_host_endpoint *ep, int check_ep, const char *func) { 822 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
823 const char *func) {
824 struct xhci_hcd *xhci;
825 struct xhci_virt_device *virt_dev;
826
611 if (!hcd || (check_ep && !ep) || !udev) { 827 if (!hcd || (check_ep && !ep) || !udev) {
612 printk(KERN_DEBUG "xHCI %s called with invalid args\n", 828 printk(KERN_DEBUG "xHCI %s called with invalid args\n",
613 func); 829 func);
@@ -618,11 +834,24 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
618 func); 834 func);
619 return 0; 835 return 0;
620 } 836 }
621 if (!udev->slot_id) { 837
622 printk(KERN_DEBUG "xHCI %s called with unaddressed device\n", 838 if (check_virt_dev) {
623 func); 839 xhci = hcd_to_xhci(hcd);
624 return -EINVAL; 840 if (!udev->slot_id || !xhci->devs
841 || !xhci->devs[udev->slot_id]) {
842 printk(KERN_DEBUG "xHCI %s called with unaddressed "
843 "device\n", func);
844 return -EINVAL;
845 }
846
847 virt_dev = xhci->devs[udev->slot_id];
848 if (virt_dev->udev != udev) {
849 printk(KERN_DEBUG "xHCI %s called with udev and "
850 "virt_dev does not match\n", func);
851 return -EINVAL;
852 }
625 } 853 }
854
626 return 1; 855 return 1;
627} 856}
628 857
@@ -704,18 +933,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
704 struct urb_priv *urb_priv; 933 struct urb_priv *urb_priv;
705 int size, i; 934 int size, i;
706 935
707 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0) 936 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
937 true, true, __func__) <= 0)
708 return -EINVAL; 938 return -EINVAL;
709 939
710 slot_id = urb->dev->slot_id; 940 slot_id = urb->dev->slot_id;
711 ep_index = xhci_get_endpoint_index(&urb->ep->desc); 941 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
712 942
713 if (!xhci->devs || !xhci->devs[slot_id]) {
714 if (!in_interrupt())
715 dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
716 ret = -EINVAL;
717 goto exit;
718 }
719 if (!HCD_HW_ACCESSIBLE(hcd)) { 943 if (!HCD_HW_ACCESSIBLE(hcd)) {
720 if (!in_interrupt()) 944 if (!in_interrupt())
721 xhci_dbg(xhci, "urb submitted during PCI suspend\n"); 945 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
@@ -956,7 +1180,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
956 ep->stop_cmd_timer.expires = jiffies + 1180 ep->stop_cmd_timer.expires = jiffies +
957 XHCI_STOP_EP_CMD_TIMEOUT * HZ; 1181 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
958 add_timer(&ep->stop_cmd_timer); 1182 add_timer(&ep->stop_cmd_timer);
959 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); 1183 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
960 xhci_ring_cmd_db(xhci); 1184 xhci_ring_cmd_db(xhci);
961 } 1185 }
962done: 1186done:
@@ -991,7 +1215,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
991 u32 new_add_flags, new_drop_flags, new_slot_info; 1215 u32 new_add_flags, new_drop_flags, new_slot_info;
992 int ret; 1216 int ret;
993 1217
994 ret = xhci_check_args(hcd, udev, ep, 1, __func__); 1218 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
995 if (ret <= 0) 1219 if (ret <= 0)
996 return ret; 1220 return ret;
997 xhci = hcd_to_xhci(hcd); 1221 xhci = hcd_to_xhci(hcd);
@@ -1004,12 +1228,6 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1004 return 0; 1228 return 0;
1005 } 1229 }
1006 1230
1007 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1008 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1009 __func__);
1010 return -EINVAL;
1011 }
1012
1013 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 1231 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1014 out_ctx = xhci->devs[udev->slot_id]->out_ctx; 1232 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1015 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1233 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
@@ -1078,7 +1296,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1078 u32 new_add_flags, new_drop_flags, new_slot_info; 1296 u32 new_add_flags, new_drop_flags, new_slot_info;
1079 int ret = 0; 1297 int ret = 0;
1080 1298
1081 ret = xhci_check_args(hcd, udev, ep, 1, __func__); 1299 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1082 if (ret <= 0) { 1300 if (ret <= 0) {
1083 /* So we won't queue a reset ep command for a root hub */ 1301 /* So we won't queue a reset ep command for a root hub */
1084 ep->hcpriv = NULL; 1302 ep->hcpriv = NULL;
@@ -1098,12 +1316,6 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1098 return 0; 1316 return 0;
1099 } 1317 }
1100 1318
1101 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1102 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1103 __func__);
1104 return -EINVAL;
1105 }
1106
1107 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 1319 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1108 out_ctx = xhci->devs[udev->slot_id]->out_ctx; 1320 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1109 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1321 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
@@ -1346,16 +1558,11 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1346 struct xhci_input_control_ctx *ctrl_ctx; 1558 struct xhci_input_control_ctx *ctrl_ctx;
1347 struct xhci_slot_ctx *slot_ctx; 1559 struct xhci_slot_ctx *slot_ctx;
1348 1560
1349 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 1561 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
1350 if (ret <= 0) 1562 if (ret <= 0)
1351 return ret; 1563 return ret;
1352 xhci = hcd_to_xhci(hcd); 1564 xhci = hcd_to_xhci(hcd);
1353 1565
1354 if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
1355 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1356 __func__);
1357 return -EINVAL;
1358 }
1359 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 1566 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1360 virt_dev = xhci->devs[udev->slot_id]; 1567 virt_dev = xhci->devs[udev->slot_id];
1361 1568
@@ -1405,16 +1612,11 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1405 struct xhci_virt_device *virt_dev; 1612 struct xhci_virt_device *virt_dev;
1406 int i, ret; 1613 int i, ret;
1407 1614
1408 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 1615 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
1409 if (ret <= 0) 1616 if (ret <= 0)
1410 return; 1617 return;
1411 xhci = hcd_to_xhci(hcd); 1618 xhci = hcd_to_xhci(hcd);
1412 1619
1413 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1414 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1415 __func__);
1416 return;
1417 }
1418 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 1620 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1419 virt_dev = xhci->devs[udev->slot_id]; 1621 virt_dev = xhci->devs[udev->slot_id];
1420 /* Free any rings allocated for added endpoints */ 1622 /* Free any rings allocated for added endpoints */
@@ -1575,7 +1777,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
1575 1777
1576 if (!ep) 1778 if (!ep)
1577 return -EINVAL; 1779 return -EINVAL;
1578 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__); 1780 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
1579 if (ret <= 0) 1781 if (ret <= 0)
1580 return -EINVAL; 1782 return -EINVAL;
1581 if (ep->ss_ep_comp.bmAttributes == 0) { 1783 if (ep->ss_ep_comp.bmAttributes == 0) {
@@ -1953,8 +2155,13 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
1953 * Wait for the Reset Device command to finish. Remove all structures 2155 * Wait for the Reset Device command to finish. Remove all structures
1954 * associated with the endpoints that were disabled. Clear the input device 2156 * associated with the endpoints that were disabled. Clear the input device
1955 * structure? Cache the rings? Reset the control endpoint 0 max packet size? 2157 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
2158 *
2159 * If the virt_dev to be reset does not exist or does not match the udev,
2160 * it means the device is lost, possibly due to the xHC restore error and
2161 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
2162 * re-allocate the device.
1956 */ 2163 */
1957int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) 2164int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
1958{ 2165{
1959 int ret, i; 2166 int ret, i;
1960 unsigned long flags; 2167 unsigned long flags;
@@ -1965,16 +2172,35 @@ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
1965 int timeleft; 2172 int timeleft;
1966 int last_freed_endpoint; 2173 int last_freed_endpoint;
1967 2174
1968 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 2175 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
1969 if (ret <= 0) 2176 if (ret <= 0)
1970 return ret; 2177 return ret;
1971 xhci = hcd_to_xhci(hcd); 2178 xhci = hcd_to_xhci(hcd);
1972 slot_id = udev->slot_id; 2179 slot_id = udev->slot_id;
1973 virt_dev = xhci->devs[slot_id]; 2180 virt_dev = xhci->devs[slot_id];
1974 if (!virt_dev) { 2181 if (!virt_dev) {
1975 xhci_dbg(xhci, "%s called with invalid slot ID %u\n", 2182 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
1976 __func__, slot_id); 2183 "not exist. Re-allocate the device\n", slot_id);
1977 return -EINVAL; 2184 ret = xhci_alloc_dev(hcd, udev);
2185 if (ret == 1)
2186 return 0;
2187 else
2188 return -EINVAL;
2189 }
2190
2191 if (virt_dev->udev != udev) {
2192 /* If the virt_dev and the udev does not match, this virt_dev
2193 * may belong to another udev.
2194 * Re-allocate the device.
2195 */
2196 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
2197 "not match the udev. Re-allocate the device\n",
2198 slot_id);
2199 ret = xhci_alloc_dev(hcd, udev);
2200 if (ret == 1)
2201 return 0;
2202 else
2203 return -EINVAL;
1978 } 2204 }
1979 2205
1980 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 2206 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
@@ -2077,13 +2303,13 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2077 struct xhci_virt_device *virt_dev; 2303 struct xhci_virt_device *virt_dev;
2078 unsigned long flags; 2304 unsigned long flags;
2079 u32 state; 2305 u32 state;
2080 int i; 2306 int i, ret;
2081 2307
2082 if (udev->slot_id == 0) 2308 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2309 if (ret <= 0)
2083 return; 2310 return;
2311
2084 virt_dev = xhci->devs[udev->slot_id]; 2312 virt_dev = xhci->devs[udev->slot_id];
2085 if (!virt_dev)
2086 return;
2087 2313
2088 /* Stop any wayward timer functions (which may grab the lock) */ 2314 /* Stop any wayward timer functions (which may grab the lock) */
2089 for (i = 0; i < 31; ++i) { 2315 for (i = 0; i < 31; ++i) {
@@ -2191,12 +2417,17 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2191 2417
2192 virt_dev = xhci->devs[udev->slot_id]; 2418 virt_dev = xhci->devs[udev->slot_id];
2193 2419
2194 /* If this is a Set Address to an unconfigured device, setup ep 0 */ 2420 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2195 if (!udev->config) 2421 /*
2422 * If this is the first Set Address since device plug-in or
2423 * virt_device realloaction after a resume with an xHCI power loss,
2424 * then set up the slot context.
2425 */
2426 if (!slot_ctx->dev_info)
2196 xhci_setup_addressable_virt_dev(xhci, udev); 2427 xhci_setup_addressable_virt_dev(xhci, udev);
2428 /* Otherwise, update the control endpoint ring enqueue pointer. */
2197 else 2429 else
2198 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 2430 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
2199 /* Otherwise, assume the core has the device configured how it wants */
2200 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 2431 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2201 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 2432 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2202 2433
@@ -2268,15 +2499,15 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2268 * address given back to us by the HC. 2499 * address given back to us by the HC.
2269 */ 2500 */
2270 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 2501 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2271 udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; 2502 /* Use kernel assigned address for devices; store xHC assigned
2503 * address locally. */
2504 virt_dev->address = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
2272 /* Zero the input context control for later use */ 2505 /* Zero the input context control for later use */
2273 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 2506 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2274 ctrl_ctx->add_flags = 0; 2507 ctrl_ctx->add_flags = 0;
2275 ctrl_ctx->drop_flags = 0; 2508 ctrl_ctx->drop_flags = 0;
2276 2509
2277 xhci_dbg(xhci, "Device address = %d\n", udev->devnum); 2510 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
2278 /* XXX Meh, not sure if anyone else but choose_address uses this. */
2279 set_bit(udev->devnum, udev->bus->devmap.devicemap);
2280 2511
2281 return 0; 2512 return 0;
2282} 2513}