author		Thomas Gleixner <tglx@linutronix.de>	2011-07-19 10:19:51 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-09-13 05:12:17 -0400
commit		1f5b3c3fd2d73d6b30e9ef6dcbf131a791d5cbbd (patch)
tree		1d24f2510bd8c57f5e026bf9a7ff93999ed39577
parent		289b4e7a48d91fbef7af819020d826ad9f49f568 (diff)
locking, x86, iommu: Annotate iommu->register_lock as raw
The iommu->register_lock can be taken in atomic context and therefore
must not be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
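For reference, the idiom being applied here is the standard -rt raw-lock
annotation: on PREEMPT_RT a plain spinlock_t is converted into a sleeping
lock, while raw_spinlock_t remains a true spinning lock and is therefore
safe to take with interrupts disabled. A minimal sketch of the pattern
follows; the demo_iommu structure and functions are hypothetical
illustrations of the idiom, not code from this patch:

	#include <linux/io.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	/*
	 * Hypothetical example. On PREEMPT_RT, spinlock_t becomes a
	 * sleeping lock; raw_spinlock_t keeps spinning, so it may be
	 * taken in hard-irq and other atomic contexts.
	 */
	struct demo_iommu {
		void __iomem	*reg;		/* MMIO register base */
		raw_spinlock_t	register_lock;	/* protects register access */
	};

	static void demo_init(struct demo_iommu *d)
	{
		raw_spin_lock_init(&d->register_lock);
	}

	static u32 demo_read_reg(struct demo_iommu *d, unsigned long off)
	{
		unsigned long flags;
		u32 val;

		/* Callable from interrupt context, hence the raw variant */
		raw_spin_lock_irqsave(&d->register_lock, flags);
		val = readl(d->reg + off);
		raw_spin_unlock_irqrestore(&d->register_lock, flags);

		return val;
	}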
 drivers/iommu/dmar.c           | 34 +++++++++++++++++-----------------
 drivers/iommu/intel-iommu.c    | 36 ++++++++++++++++++------------------
 drivers/iommu/intr_remapping.c | 12 ++++++------
 include/linux/intel-iommu.h    |  2 +-
 4 files changed, 42 insertions(+), 42 deletions(-)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5aec..be4164b3ebe8 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -800,7 +800,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 		(unsigned long long)iommu->cap,
 		(unsigned long long)iommu->ecap);
 
-	spin_lock_init(&iommu->register_lock);
+	raw_spin_lock_init(&iommu->register_lock);
 
 	drhd->iommu = iommu;
 	return 0;
@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 	if (!ecap_qis(iommu->ecap))
 		return;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_QIES))
@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 		      !(sts & DMA_GSTS_QIES), sts);
 end:
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -1097,7 +1097,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	qi->free_head = qi->free_tail = 0;
 	qi->free_cnt = QI_LENGTH;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* write zero to the tail reg */
 	writel(0, iommu->reg + DMAR_IQT_REG);
@@ -1110,7 +1110,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /*
@@ -1225,11 +1225,11 @@ void dmar_msi_unmask(struct irq_data *data)
 	unsigned long flag;
 
 	/* unmask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(0, iommu->reg + DMAR_FECTL_REG);
 	/* Read a reg to force flush the post write */
 	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_mask(struct irq_data *data)
@@ -1238,11 +1238,11 @@ void dmar_msi_mask(struct irq_data *data)
 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
 
 	/* mask it */
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
 	/* Read a reg to force flush the post write */
 	readl(iommu->reg + DMAR_FECTL_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_write(int irq, struct msi_msg *msg)
@@ -1250,11 +1250,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
 	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
 	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
 	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 void dmar_msi_read(int irq, struct msi_msg *msg)
@@ -1262,11 +1262,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 	struct intel_iommu *iommu = irq_get_handler_data(irq);
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
 	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
 	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
@@ -1303,7 +1303,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 	u32 fault_status;
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	if (fault_status)
 		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
@@ -1342,7 +1342,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		writel(DMA_FRCD_F, iommu->reg + reg +
 			fault_index * PRIMARY_FAULT_REG_LEN + 12);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
 		dmar_fault_do_one(iommu, type, fault_reason,
 				source_id, guest_addr);
@@ -1350,14 +1350,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		fault_index++;
 		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	}
 clear_rest:
 	/* clear all the other faults */
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
 	writel(fault_status, iommu->reg + DMAR_FSTS_REG);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c621c98c99da..bf4a63c2477c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -932,7 +932,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 
 	addr = iommu->root_entry;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
 
 	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -941,7 +941,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_RTPS), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -952,14 +952,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
 	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
 		return;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
 
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (!(val & DMA_GSTS_WBFS)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -986,14 +986,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
 	}
 	val |= DMA_CCMD_ICC;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
 
 	/* Make sure hardware complete it */
 	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
 		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -1032,7 +1032,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	if (cap_write_drain(iommu->cap))
 		val |= DMA_TLB_WRITE_DRAIN;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	/* Note: Only uses first TLB reg currently */
 	if (val_iva)
 		dmar_writeq(iommu->reg + tlb_offset, val_iva);
@@ -1042,7 +1042,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
 		      dmar_readq, (!(val & DMA_TLB_IVT)), val);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
 	/* check IOTLB invalidation granularity */
 	if (DMA_TLB_IAIG(val) == 0)
@@ -1158,7 +1158,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	u32 pmen;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
 	pmen &= ~DMA_PMEN_EPM;
 	writel(pmen, iommu->reg + DMAR_PMEN_REG);
@@ -1167,7 +1167,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
 		readl, !(pmen & DMA_PMEN_PRS), pmen);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 static int iommu_enable_translation(struct intel_iommu *iommu)
@@ -1175,7 +1175,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
 	u32 sts;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 	iommu->gcmd |= DMA_GCMD_TE;
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
@@ -1183,7 +1183,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_TES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 	return 0;
 }
 
@@ -1192,7 +1192,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	u32 sts;
 	unsigned long flag;
 
-	spin_lock_irqsave(&iommu->register_lock, flag);
+	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	iommu->gcmd &= ~DMA_GCMD_TE;
 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
@@ -1200,7 +1200,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (!(sts & DMA_GSTS_TES)), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flag);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	return 0;
 }
 
@@ -3320,7 +3320,7 @@ static int iommu_suspend(void)
 	for_each_active_iommu(iommu, drhd) {
 		iommu_disable_translation(iommu);
 
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 
 		iommu->iommu_state[SR_DMAR_FECTL_REG] =
 			readl(iommu->reg + DMAR_FECTL_REG);
@@ -3331,7 +3331,7 @@ static int iommu_suspend(void)
 		iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
 			readl(iommu->reg + DMAR_FEUADDR_REG);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
 	return 0;
 
@@ -3358,7 +3358,7 @@ static void iommu_resume(void)
 
 	for_each_active_iommu(iommu, drhd) {
 
-		spin_lock_irqsave(&iommu->register_lock, flag);
+		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 
 		writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
 			iommu->reg + DMAR_FECTL_REG);
@@ -3369,7 +3369,7 @@ static void iommu_resume(void)
 		writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
 			iommu->reg + DMAR_FEUADDR_REG);
 
-		spin_unlock_irqrestore(&iommu->register_lock, flag);
+		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
 
 	for_each_active_iommu(iommu, drhd)
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 1a89d4a2cadf..b2443f1a13ef 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -409,7 +409,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	addr = virt_to_phys((void *)iommu->ir_table->base);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
 		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
@@ -420,7 +420,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRTPS), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 
 	/*
 	 * global invalidation of interrupt entry cache before enabling
@@ -428,7 +428,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 	 */
 	qi_global_iec(iommu);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* Enable interrupt-remapping */
 	iommu->gcmd |= DMA_GCMD_IRE;
@@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 
@@ -485,7 +485,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 	 */
 	qi_global_iec(iommu);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_IRES))
@@ -498,7 +498,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 		      readl, !(sts & DMA_GSTS_IRES), sts);
 
 end:
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 int __init intr_remapping_supported(void)
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 9310c699a37d..19728c462399 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -311,7 +311,7 @@ struct intel_iommu {
 	u64		cap;
 	u64		ecap;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
-	spinlock_t	register_lock; /* protect register handling */
+	raw_spinlock_t	register_lock; /* protect register handling */
 	int		seq_id;	/* sequence id of the iommu */
 	int		agaw; /* agaw of this iommu */
 	int		msagaw; /* max sagaw of this iommu */