Diffstat (limited to 'drivers/pci/dmar.c')
-rw-r--r--  drivers/pci/dmar.c | 71
1 file changed, 56 insertions(+), 15 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index d313039e2fdf..25a00ce4f24d 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -180,6 +180,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	dmaru->hdr = header;
 	drhd = (struct acpi_dmar_hardware_unit *)header;
 	dmaru->reg_base_addr = drhd->address;
+	dmaru->segment = drhd->segment;
 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
 
 	ret = alloc_iommu(dmaru);
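
This hunk caches the PCI segment (domain) number next to the register base
so later code can use it without re-walking the ACPI table. For orientation,
a sketch of the DRHD entry being parsed, assuming the ACPICA layout of this
era (field order follows the VT-d spec; the struct below is illustrative and
not part of this patch):

	struct acpi_dmar_hardware_unit {
		struct acpi_dmar_header header;	/* type, length */
		u8	flags;			/* BIT0: INCLUDE_ALL */
		u8	reserved;
		u16	segment;		/* PCI segment number, now cached */
		u64	address;		/* register base, cached above */
	};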
@@ -790,14 +791,41 @@ end:
 }
 
 /*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+	u32 cmd, sts;
+	unsigned long flags;
+	struct q_inval *qi = iommu->qi;
+
+	qi->free_head = qi->free_tail = 0;
+	qi->free_cnt = QI_LENGTH;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	/* write zero to the tail reg */
+	writel(0, iommu->reg + DMAR_IQT_REG);
+
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+	cmd = iommu->gcmd | DMA_GCMD_QIE;
+	iommu->gcmd |= DMA_GCMD_QIE;
+	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+	/* Make sure hardware completes it */
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
  */
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
-	u32 cmd, sts;
-	unsigned long flags;
 	struct q_inval *qi;
 
 	if (!ecap_qis(iommu->ecap))
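
The new helper resets the software queue state (free_head, free_tail,
free_cnt), zeroes the hardware tail register, points DMAR_IQA_REG at the
descriptor page, then sets the QIE bit and waits for the status bit to
latch. IOMMU_WAIT_OP hides the polling; conceptually it behaves like the
sketch below (the real macro lives in the Intel IOMMU headers and panics on
timeout; the local variable start is illustrative):

	/* Roughly what IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
	 * (sts & DMA_GSTS_QIES), sts) expands to -- a bounded poll:
	 */
	cycles_t start = get_cycles();
	for (;;) {
		sts = readl(iommu->reg + DMAR_GSTS_REG);
		if (sts & DMA_GSTS_QIES)
			break;			/* hardware latched QIE */
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start))
			panic("DMAR hardware is malfunctioning\n");
		cpu_relax();
	}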
@@ -835,19 +863,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	spin_lock_init(&qi->q_lock);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
-	/* write zero to the tail reg */
-	writel(0, iommu->reg + DMAR_IQT_REG);
-
-	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
-	cmd = iommu->gcmd | DMA_GCMD_QIE;
-	iommu->gcmd |= DMA_GCMD_QIE;
-	writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure hardware complete it */
-	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	__dmar_enable_qi(iommu);
 
 	return 0;
 }
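
After this hunk, dmar_enable_qi() only allocates the queue and delegates
the register programming to the helper. A condensed view of the post-patch
function (the allocation steps in the middle are untouched by this patch
and summarized here as a comment):

	int dmar_enable_qi(struct intel_iommu *iommu)
	{
		struct q_inval *qi;

		if (!ecap_qis(iommu->ecap))
			return -ENOENT;

		/* ... allocate qi, the descriptor page and the status
		 * array (unchanged, not shown in this hunk) ...
		 */
		spin_lock_init(&qi->q_lock);

		__dmar_enable_qi(iommu);	/* shared with dmar_reenable_qi */
		return 0;
	}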
@@ -1102,3 +1118,28 @@ int __init enable_drhd_fault_handling(void)
 
 	return 0;
 }
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+	if (!ecap_qis(iommu->ecap))
+		return -ENOENT;
+
+	if (!iommu->qi)
+		return -ENOENT;
+
+	/*
+	 * First disable queued invalidation.
+	 */
+	dmar_disable_qi(iommu);
+	/*
+	 * Then enable queued invalidation again. Since there are no pending
+	 * invalidation requests now, it's safe to re-enable queued
+	 * invalidation.
+	 */
+	__dmar_enable_qi(iommu);
+
+	return 0;
+}
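
Nothing in this file calls dmar_reenable_qi() yet; the intended consumer is
a path that has torn down and must restore IOMMU state, such as resume or
re-enabling interrupt remapping. A hypothetical caller, purely illustrative
(iommu_restore_one() is not a function from this patch):

	static int iommu_restore_one(struct intel_iommu *iommu)
	{
		/* Re-arm queued invalidation before reprogramming the
		 * translation tables; bail out if QI cannot come back.
		 */
		if (dmar_reenable_qi(iommu))
			return -EIO;
		return 0;
	}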