about summary refs log tree commit diff stats
path: root/drivers/iommu/dmar.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2011-07-19 10:19:51 -0400
committerIngo Molnar <mingo@elte.hu>2011-09-13 05:12:17 -0400
commit1f5b3c3fd2d73d6b30e9ef6dcbf131a791d5cbbd (patch)
tree1d24f2510bd8c57f5e026bf9a7ff93999ed39577 /drivers/iommu/dmar.c
parent289b4e7a48d91fbef7af819020d826ad9f49f568 (diff)
locking, x86, iommu: Annotate iommu->register_lock as raw
The iommu->register_lock can be taken in atomic context and therefore must not be preempted on -rt - annotate it. In mainline this change documents the low level nature of the lock - otherwise there's no functional difference. Lockdep and Sparse checking will work as usual. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/iommu/dmar.c')
-rw-r--r--drivers/iommu/dmar.c34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5aec..be4164b3ebe8 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -800,7 +800,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
800 (unsigned long long)iommu->cap, 800 (unsigned long long)iommu->cap,
801 (unsigned long long)iommu->ecap); 801 (unsigned long long)iommu->ecap);
802 802
803 spin_lock_init(&iommu->register_lock); 803 raw_spin_lock_init(&iommu->register_lock);
804 804
805 drhd->iommu = iommu; 805 drhd->iommu = iommu;
806 return 0; 806 return 0;
@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1062 if (!ecap_qis(iommu->ecap)) 1062 if (!ecap_qis(iommu->ecap))
1063 return; 1063 return;
1064 1064
1065 spin_lock_irqsave(&iommu->register_lock, flags); 1065 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1066 1066
1067 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 1067 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1068 if (!(sts & DMA_GSTS_QIES)) 1068 if (!(sts & DMA_GSTS_QIES))
@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1082 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, 1082 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1083 !(sts & DMA_GSTS_QIES), sts); 1083 !(sts & DMA_GSTS_QIES), sts);
1084end: 1084end:
1085 spin_unlock_irqrestore(&iommu->register_lock, flags); 1085 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1086} 1086}
1087 1087
1088/* 1088/*
@@ -1097,7 +1097,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
1097 qi->free_head = qi->free_tail = 0; 1097 qi->free_head = qi->free_tail = 0;
1098 qi->free_cnt = QI_LENGTH; 1098 qi->free_cnt = QI_LENGTH;
1099 1099
1100 spin_lock_irqsave(&iommu->register_lock, flags); 1100 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1101 1101
1102 /* write zero to the tail reg */ 1102 /* write zero to the tail reg */
1103 writel(0, iommu->reg + DMAR_IQT_REG); 1103 writel(0, iommu->reg + DMAR_IQT_REG);
@@ -1110,7 +1110,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
1110 /* Make sure hardware complete it */ 1110 /* Make sure hardware complete it */
1111 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); 1111 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1112 1112
1113 spin_unlock_irqrestore(&iommu->register_lock, flags); 1113 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1114} 1114}
1115 1115
1116/* 1116/*
@@ -1225,11 +1225,11 @@ void dmar_msi_unmask(struct irq_data *data)
1225 unsigned long flag; 1225 unsigned long flag;
1226 1226
1227 /* unmask it */ 1227 /* unmask it */
1228 spin_lock_irqsave(&iommu->register_lock, flag); 1228 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1229 writel(0, iommu->reg + DMAR_FECTL_REG); 1229 writel(0, iommu->reg + DMAR_FECTL_REG);
1230 /* Read a reg to force flush the post write */ 1230 /* Read a reg to force flush the post write */
1231 readl(iommu->reg + DMAR_FECTL_REG); 1231 readl(iommu->reg + DMAR_FECTL_REG);
1232 spin_unlock_irqrestore(&iommu->register_lock, flag); 1232 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1233} 1233}
1234 1234
1235void dmar_msi_mask(struct irq_data *data) 1235void dmar_msi_mask(struct irq_data *data)
@@ -1238,11 +1238,11 @@ void dmar_msi_mask(struct irq_data *data)
1238 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); 1238 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1239 1239
1240 /* mask it */ 1240 /* mask it */
1241 spin_lock_irqsave(&iommu->register_lock, flag); 1241 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1242 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); 1242 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1243 /* Read a reg to force flush the post write */ 1243 /* Read a reg to force flush the post write */
1244 readl(iommu->reg + DMAR_FECTL_REG); 1244 readl(iommu->reg + DMAR_FECTL_REG);
1245 spin_unlock_irqrestore(&iommu->register_lock, flag); 1245 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1246} 1246}
1247 1247
1248void dmar_msi_write(int irq, struct msi_msg *msg) 1248void dmar_msi_write(int irq, struct msi_msg *msg)
@@ -1250,11 +1250,11 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
1250 struct intel_iommu *iommu = irq_get_handler_data(irq); 1250 struct intel_iommu *iommu = irq_get_handler_data(irq);
1251 unsigned long flag; 1251 unsigned long flag;
1252 1252
1253 spin_lock_irqsave(&iommu->register_lock, flag); 1253 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1254 writel(msg->data, iommu->reg + DMAR_FEDATA_REG); 1254 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1255 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); 1255 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1256 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); 1256 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1257 spin_unlock_irqrestore(&iommu->register_lock, flag); 1257 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1258} 1258}
1259 1259
1260void dmar_msi_read(int irq, struct msi_msg *msg) 1260void dmar_msi_read(int irq, struct msi_msg *msg)
@@ -1262,11 +1262,11 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
1262 struct intel_iommu *iommu = irq_get_handler_data(irq); 1262 struct intel_iommu *iommu = irq_get_handler_data(irq);
1263 unsigned long flag; 1263 unsigned long flag;
1264 1264
1265 spin_lock_irqsave(&iommu->register_lock, flag); 1265 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1266 msg->data = readl(iommu->reg + DMAR_FEDATA_REG); 1266 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1267 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); 1267 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1268 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); 1268 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1269 spin_unlock_irqrestore(&iommu->register_lock, flag); 1269 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1270} 1270}
1271 1271
1272static int dmar_fault_do_one(struct intel_iommu *iommu, int type, 1272static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
@@ -1303,7 +1303,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
1303 u32 fault_status; 1303 u32 fault_status;
1304 unsigned long flag; 1304 unsigned long flag;
1305 1305
1306 spin_lock_irqsave(&iommu->register_lock, flag); 1306 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1307 fault_status = readl(iommu->reg + DMAR_FSTS_REG); 1307 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1308 if (fault_status) 1308 if (fault_status)
1309 printk(KERN_ERR "DRHD: handling fault status reg %x\n", 1309 printk(KERN_ERR "DRHD: handling fault status reg %x\n",
@@ -1342,7 +1342,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
1342 writel(DMA_FRCD_F, iommu->reg + reg + 1342 writel(DMA_FRCD_F, iommu->reg + reg +
1343 fault_index * PRIMARY_FAULT_REG_LEN + 12); 1343 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1344 1344
1345 spin_unlock_irqrestore(&iommu->register_lock, flag); 1345 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1346 1346
1347 dmar_fault_do_one(iommu, type, fault_reason, 1347 dmar_fault_do_one(iommu, type, fault_reason,
1348 source_id, guest_addr); 1348 source_id, guest_addr);
@@ -1350,14 +1350,14 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
1350 fault_index++; 1350 fault_index++;
1351 if (fault_index >= cap_num_fault_regs(iommu->cap)) 1351 if (fault_index >= cap_num_fault_regs(iommu->cap))
1352 fault_index = 0; 1352 fault_index = 0;
1353 spin_lock_irqsave(&iommu->register_lock, flag); 1353 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1354 } 1354 }
1355clear_rest: 1355clear_rest:
1356 /* clear all the other faults */ 1356 /* clear all the other faults */
1357 fault_status = readl(iommu->reg + DMAR_FSTS_REG); 1357 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1358 writel(fault_status, iommu->reg + DMAR_FSTS_REG); 1358 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1359 1359
1360 spin_unlock_irqrestore(&iommu->register_lock, flag); 1360 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1361 return IRQ_HANDLED; 1361 return IRQ_HANDLED;
1362} 1362}
1363 1363