Diffstat (limited to 'drivers/pci/dmar.c')
-rw-r--r--  drivers/pci/dmar.c | 82
1 file changed, 66 insertions(+), 16 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index d313039e2fdf..fa3a11365ec3 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -173,13 +173,23 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	struct dmar_drhd_unit *dmaru;
 	int ret = 0;
 
+	drhd = (struct acpi_dmar_hardware_unit *)header;
+	if (!drhd->address) {
+		/* Promote an attitude of violence to a BIOS engineer today */
+		WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		return -ENODEV;
+	}
 	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
 	if (!dmaru)
 		return -ENOMEM;
 
 	dmaru->hdr = header;
-	drhd = (struct acpi_dmar_hardware_unit *)header;
 	dmaru->reg_base_addr = drhd->address;
+	dmaru->segment = drhd->segment;
 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
 
 	ret = alloc_iommu(dmaru);
@@ -790,14 +800,41 @@ end:
 }
 
 /*
+ * Enable queued invalidation.
+ */
+static void __dmar_enable_qi(struct intel_iommu *iommu)
+{
+	u32 cmd, sts;
+	unsigned long flags;
+	struct q_inval *qi = iommu->qi;
+
+	qi->free_head = qi->free_tail = 0;
+	qi->free_cnt = QI_LENGTH;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	/* write zero to the tail reg */
+	writel(0, iommu->reg + DMAR_IQT_REG);
+
+	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+
+	cmd = iommu->gcmd | DMA_GCMD_QIE;
+	iommu->gcmd |= DMA_GCMD_QIE;
+	writel(cmd, iommu->reg + DMAR_GCMD_REG);
+
+	/* Make sure hardware complete it */
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
+/*
  * Enable Queued Invalidation interface. This is a must to support
  * interrupt-remapping. Also used by DMA-remapping, which replaces
  * register based IOTLB invalidation.
  */
 int dmar_enable_qi(struct intel_iommu *iommu)
 {
-	u32 cmd, sts;
-	unsigned long flags;
 	struct q_inval *qi;
 
 	if (!ecap_qis(iommu->ecap))
@@ -835,19 +872,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	spin_lock_init(&qi->q_lock);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
-	/* write zero to the tail reg */
-	writel(0, iommu->reg + DMAR_IQT_REG);
-
-	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
-
-	cmd = iommu->gcmd | DMA_GCMD_QIE;
-	iommu->gcmd |= DMA_GCMD_QIE;
-	writel(cmd, iommu->reg + DMAR_GCMD_REG);
-
-	/* Make sure hardware complete it */
-	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	__dmar_enable_qi(iommu);
 
 	return 0;
 }
@@ -1102,3 +1127,28 @@ int __init enable_drhd_fault_handling(void)
 
 	return 0;
 }
+
+/*
+ * Re-enable Queued Invalidation interface.
+ */
+int dmar_reenable_qi(struct intel_iommu *iommu)
+{
+	if (!ecap_qis(iommu->ecap))
+		return -ENOENT;
+
+	if (!iommu->qi)
+		return -ENOENT;
+
+	/*
+	 * First disable queued invalidation.
+	 */
+	dmar_disable_qi(iommu);
+	/*
+	 * Then enable queued invalidation again. Since there is no pending
+	 * invalidation requests now, it's safe to re-enable queued
+	 * invalidation.
+	 */
+	__dmar_enable_qi(iommu);
+
+	return 0;
+}
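
Note (not part of the commit above): the split into __dmar_enable_qi() and dmar_reenable_qi() lets a caller reprogram the invalidation-queue registers on an IOMMU whose queue was already allocated, instead of going through the full allocation path again. Below is a minimal caller sketch, assuming kernel context. Only dmar_enable_qi(), dmar_reenable_qi(), ecap_qis() and the iommu->qi field come from the code shown here; the qi_restore() wrapper and its warning message are hypothetical.

/*
 * Illustrative sketch only: (re)enable queued invalidation on one IOMMU,
 * choosing between first-time setup and re-enable based on whether the
 * queue has already been allocated.
 */
#include <linux/kernel.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>

static int qi_restore(struct intel_iommu *iommu)
{
	int ret;

	/* Hardware without queued-invalidation support: nothing to do. */
	if (!ecap_qis(iommu->ecap))
		return 0;

	if (iommu->qi) {
		/* Queue already allocated: just reprogram the registers. */
		ret = dmar_reenable_qi(iommu);
	} else {
		/* First-time setup: allocate the queue, then enable it. */
		ret = dmar_enable_qi(iommu);
	}

	if (ret)
		printk(KERN_WARNING
		       "IOMMU: failed to (re)enable queued invalidation: %d\n", ret);

	return ret;
}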