author		Andiry Xu <andiry.xu@amd.com>	2010-10-14 10:23:06 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-10-22 13:22:13 -0400
commit		5535b1d5f8885695c6ded783c692e3c0d0eda8ca (patch)
tree		f5454493a1c50e4a6254d904578dc3ecfd1d6e63 /drivers/usb/host/xhci.c
parent		9777e3ce907d4cb5a513902a87ecd03b52499569 (diff)
USB: xHCI: PCI power management implementation
This patch implements PCI suspend/resume. Please refer to the xHCI spec
for the suspend/resume operation. For S3, CSS/SRS in USBCMD is used to
save/restore the internal state. However, an error may occur while
restoring the internal state; in that case the HC's internal state is
invalid and the HC will be re-initialized.

Signed-off-by: Libin Yang <libin.yang@amd.com>
Signed-off-by: Dong Nguyen <dong.nguyen@amd.com>
Signed-off-by: Andiry Xu <andiry.xu@amd.com>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/usb/host/xhci.c')
-rw-r--r--	drivers/usb/host/xhci.c	210
1 file changed, 210 insertions(+), 0 deletions(-)
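Note: the xhci_suspend()/xhci_resume() routines added below are not invoked directly by the PCI core; the PCI bus glue in xhci-pci.c (outside this diffstat) is expected to wrap them via the hc_driver power-management hooks. A minimal sketch of such glue, assuming the existing hcd_to_xhci() helper and simplified callback signatures (the real hc_driver hooks may carry extra arguments), could look like this:

/* Hypothetical bus-glue sketch -- illustration only, not part of this diff. */
#include "xhci.h"

static int xhci_pci_suspend(struct usb_hcd *hcd)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);

	/* Halts the HC, saves its register state, and sets CMD_CSS. */
	return xhci_suspend(xhci);
}

static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);

	/* Restores the saved state via CMD_CRS, or re-initializes the HC
	 * when the restore reports an error (STS_SRE) or after hibernation.
	 */
	return xhci_resume(xhci, hibernated);
}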
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3d2af688157a..33d0034d8a6f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -551,6 +551,216 @@ void xhci_shutdown(struct usb_hcd *hcd)
 		    xhci_readl(xhci, &xhci->op_regs->status));
 }
 
+static void xhci_save_registers(struct xhci_hcd *xhci)
+{
+	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
+	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
+	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
+	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
+	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
+	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+}
+
+static void xhci_restore_registers(struct xhci_hcd *xhci)
+{
+	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
+	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
+	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
+	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
+	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
+	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+}
+
+/*
+ * Stop HC (not bus-specific)
+ *
+ * This is called when the machine transitions into S3/S4 mode.
+ */
+int xhci_suspend(struct xhci_hcd *xhci)
+{
+	int rc = 0;
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	u32 command;
+
+	spin_lock_irq(&xhci->lock);
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	/* step 1: stop endpoint */
+	/* skipped, assuming port suspend has already been done */
+
+	/* step 2: clear Run/Stop bit */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command &= ~CMD_RUN;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	if (handshake(xhci, &xhci->op_regs->status,
+		      STS_HALT, STS_HALT, 100*100)) {
+		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+		spin_unlock_irq(&xhci->lock);
+		return -ETIMEDOUT;
+	}
+
+	/* step 3: save registers */
+	xhci_save_registers(xhci);
+
+	/* step 4: set CSS flag */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command |= CMD_CSS;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
+		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+		spin_unlock_irq(&xhci->lock);
+		return -ETIMEDOUT;
+	}
+	/* step 5: remove core well power */
+	xhci_cleanup_msix(xhci);
+	spin_unlock_irq(&xhci->lock);
+
+	return rc;
+}
+
+
+/*
+ * Start xHC (not bus-specific)
+ *
+ * This is called when the machine transitions from S3/S4 mode.
+ */
+int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+{
+	u32 command, temp = 0;
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+	u64 val_64;
+	int old_state, retval;
+
+	old_state = hcd->state;
+	if (time_before(jiffies, xhci->next_statechange))
+		msleep(100);
+
+	spin_lock_irq(&xhci->lock);
+
+	if (!hibernated) {
+		/* step 1: restore registers */
+		xhci_restore_registers(xhci);
+		/* step 2: initialize command ring buffer */
+		val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+		val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+			 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+					       xhci->cmd_ring->dequeue) &
+			  (u64) ~CMD_RING_RSVD_BITS) |
+			 xhci->cmd_ring->cycle_state;
+		xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+			 (unsigned long long) val_64);
+		xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+		/* step 3: restore the saved state by setting the CRS flag */
+		command = xhci_readl(xhci, &xhci->op_regs->command);
+		command |= CMD_CRS;
+		xhci_writel(xhci, command, &xhci->op_regs->command);
+		if (handshake(xhci, &xhci->op_regs->status,
+			      STS_RESTORE, 0, 10*100)) {
+			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
+			spin_unlock_irq(&xhci->lock);
+			return -ETIMEDOUT;
+		}
+		temp = xhci_readl(xhci, &xhci->op_regs->status);
+	}
+
+	/* If restore operation fails, re-initialize the HC during resume */
+	if ((temp & STS_SRE) || hibernated) {
+		usb_root_hub_lost_power(hcd->self.root_hub);
+
+		xhci_dbg(xhci, "Stop HCD\n");
+		xhci_halt(xhci);
+		xhci_reset(xhci);
+		if (hibernated)
+			xhci_cleanup_msix(xhci);
+		spin_unlock_irq(&xhci->lock);
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+		/* Tell the event ring poll function not to reschedule */
+		xhci->zombie = 1;
+		del_timer_sync(&xhci->event_ring_timer);
+#endif
+
+		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+		temp = xhci_readl(xhci, &xhci->op_regs->status);
+		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
+		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
+			    &xhci->ir_set->irq_pending);
+		xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+		xhci_dbg(xhci, "cleaning up memory\n");
+		xhci_mem_cleanup(xhci);
+		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+			 xhci_readl(xhci, &xhci->op_regs->status));
+
+		xhci_dbg(xhci, "Initialize the HCD\n");
+		retval = xhci_init(hcd);
+		if (retval)
+			return retval;
+
+		xhci_dbg(xhci, "Start the HCD\n");
+		retval = xhci_run(hcd);
+		if (!retval)
+			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+		hcd->state = HC_STATE_SUSPENDED;
+		return retval;
+	}
+
+	/* Re-setup MSI-X */
+	if (hcd->irq)
+		free_irq(hcd->irq, hcd);
+	hcd->irq = -1;
+
+	retval = xhci_setup_msix(xhci);
+	if (retval)
+		/* fall back to MSI */
+		retval = xhci_setup_msi(xhci);
+
+	if (retval) {
+		/* fall back to legacy interrupt */
+		retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+				     hcd->irq_descr, hcd);
+		if (retval) {
+			xhci_err(xhci, "request interrupt %d failed\n",
+				 pdev->irq);
+			return retval;
+		}
+		hcd->irq = pdev->irq;
+	}
+
+	/* step 4: set Run/Stop bit */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command |= CMD_RUN;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	handshake(xhci, &xhci->op_regs->status, STS_HALT,
+		  0, 250 * 1000);
+
+	/* step 5: walk topology and initialize portsc,
+	 * portpmsc and portli
+	 */
+	/* this is done in bus_resume */
+
+	/* step 6: restart each of the previously
+	 * running endpoints by ringing their doorbells
+	 */
+
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	if (!hibernated)
+		hcd->state = old_state;
+	else
+		hcd->state = HC_STATE_SUSPENDED;
+
+	spin_unlock_irq(&xhci->lock);
+	return 0;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /**