about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/pci
diff options
context:
space:
mode:
author	Fenghua Yu <fenghua.yu@intel.com>	2009-03-27 17:22:43 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-04-03 16:45:57 -0400
commit	eb4a52bc660ea835482c582eaaf4893742cbd160 (patch)
tree	c405de01851eb0a2cdd9aa4f8c2b98d3b1eb7bba /drivers/pci
parent	f59c7b69bcba31cd355ababe067202b9895d6102 (diff)
Intel IOMMU Suspend/Resume Support - Queued Invalidation
This patch supports queued invalidation suspend/resume.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/dmar.c	70
1 file changed, 55 insertions(+), 15 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index d313039e2fdf..3fbe6af7ad71 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -790,14 +790,41 @@ end:
 }
 
 /*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+	u32 cmd, sts;
+	unsigned long flags;
+	struct q_inval *qi = iommu->qi;
+
+	qi->free_head = qi->free_tail = 0;
+	qi->free_cnt = QI_LENGTH;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	/* write zero to the tail reg */
+	writel(0, iommu->reg + DMAR_IQT_REG);
+
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+	cmd = iommu->gcmd | DMA_GCMD_QIE;
+	iommu->gcmd |= DMA_GCMD_QIE;
+	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+	/* Make sure hardware complete it */
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
  */
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
-	u32 cmd, sts;
-	unsigned long flags;
 	struct q_inval *qi;
 
 	if (!ecap_qis(iommu->ecap))
@@ -835,19 +862,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	spin_lock_init(&qi->q_lock);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
-	/* write zero to the tail reg */
-	writel(0, iommu->reg + DMAR_IQT_REG);
-
-	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
-	cmd = iommu->gcmd | DMA_GCMD_QIE;
-	iommu->gcmd |= DMA_GCMD_QIE;
-	writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure hardware complete it */
-	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	__dmar_enable_qi(iommu);
 
 	return 0;
 }
@@ -1102,3 +1117,28 @@ int __init enable_drhd_fault_handling(void)
 
 	return 0;
 }
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+	if (!ecap_qis(iommu->ecap))
+		return -ENOENT;
+
+	if (!iommu->qi)
+		return -ENOENT;
+
+	/*
+	 * First disable queued invalidation.
+	 */
+	dmar_disable_qi(iommu);
+	/*
+	 * Then enable queued invalidation again. Since there is no pending
+	 * invalidation requests now, it's safe to re-enable queued
+	 * invalidation.
+	 */
+	__dmar_enable_qi(iommu);
+
+	return 0;
+}