about summary refs log tree commit diff stats
path: root/drivers/iommu/intel-iommu.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2011-07-19 10:19:51 -0400
committerIngo Molnar <mingo@elte.hu>2011-09-13 05:12:17 -0400
commit1f5b3c3fd2d73d6b30e9ef6dcbf131a791d5cbbd (patch)
tree1d24f2510bd8c57f5e026bf9a7ff93999ed39577 /drivers/iommu/intel-iommu.c
parent289b4e7a48d91fbef7af819020d826ad9f49f568 (diff)
locking, x86, iommu: Annotate iommu->register_lock as raw
The iommu->register_lock can be taken in atomic context and therefore must not be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock - otherwise there's no functional difference. Lockdep and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--drivers/iommu/intel-iommu.c36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c621c98c99da..bf4a63c2477c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -932,7 +932,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
932 932
933 addr = iommu->root_entry; 933 addr = iommu->root_entry;
934 934
935 spin_lock_irqsave(&iommu->register_lock, flag); 935 raw_spin_lock_irqsave(&iommu->register_lock, flag);
936 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); 936 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
937 937
938 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); 938 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -941,7 +941,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
941 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 941 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
942 readl, (sts & DMA_GSTS_RTPS), sts); 942 readl, (sts & DMA_GSTS_RTPS), sts);
943 943
944 spin_unlock_irqrestore(&iommu->register_lock, flag); 944 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
945} 945}
946 946
947static void iommu_flush_write_buffer(struct intel_iommu *iommu) 947static void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -952,14 +952,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
952 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) 952 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
953 return; 953 return;
954 954
955 spin_lock_irqsave(&iommu->register_lock, flag); 955 raw_spin_lock_irqsave(&iommu->register_lock, flag);
956 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); 956 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
957 957
958 /* Make sure hardware complete it */ 958 /* Make sure hardware complete it */
959 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 959 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
960 readl, (!(val & DMA_GSTS_WBFS)), val); 960 readl, (!(val & DMA_GSTS_WBFS)), val);
961 961
962 spin_unlock_irqrestore(&iommu->register_lock, flag); 962 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
963} 963}
964 964
965/* return value determine if we need a write buffer flush */ 965/* return value determine if we need a write buffer flush */
@@ -986,14 +986,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
986 } 986 }
987 val |= DMA_CCMD_ICC; 987 val |= DMA_CCMD_ICC;
988 988
989 spin_lock_irqsave(&iommu->register_lock, flag); 989 raw_spin_lock_irqsave(&iommu->register_lock, flag);
990 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); 990 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
991 991
992 /* Make sure hardware complete it */ 992 /* Make sure hardware complete it */
993 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, 993 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
994 dmar_readq, (!(val & DMA_CCMD_ICC)), val); 994 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
995 995
996 spin_unlock_irqrestore(&iommu->register_lock, flag); 996 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
997} 997}
998 998
999/* return value determine if we need a write buffer flush */ 999/* return value determine if we need a write buffer flush */
@@ -1032,7 +1032,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1032 if (cap_write_drain(iommu->cap)) 1032 if (cap_write_drain(iommu->cap))
1033 val |= DMA_TLB_WRITE_DRAIN; 1033 val |= DMA_TLB_WRITE_DRAIN;
1034 1034
1035 spin_lock_irqsave(&iommu->register_lock, flag); 1035 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1036 /* Note: Only uses first TLB reg currently */ 1036 /* Note: Only uses first TLB reg currently */
1037 if (val_iva) 1037 if (val_iva)
1038 dmar_writeq(iommu->reg + tlb_offset, val_iva); 1038 dmar_writeq(iommu->reg + tlb_offset, val_iva);
@@ -1042,7 +1042,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1042 IOMMU_WAIT_OP(iommu, tlb_offset + 8, 1042 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1043 dmar_readq, (!(val & DMA_TLB_IVT)), val); 1043 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1044 1044
1045 spin_unlock_irqrestore(&iommu->register_lock, flag); 1045 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1046 1046
1047 /* check IOTLB invalidation granularity */ 1047 /* check IOTLB invalidation granularity */
1048 if (DMA_TLB_IAIG(val) == 0) 1048 if (DMA_TLB_IAIG(val) == 0)
@@ -1158,7 +1158,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1158 u32 pmen; 1158 u32 pmen;
1159 unsigned long flags; 1159 unsigned long flags;
1160 1160
1161 spin_lock_irqsave(&iommu->register_lock, flags); 1161 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1162 pmen = readl(iommu->reg + DMAR_PMEN_REG); 1162 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1163 pmen &= ~DMA_PMEN_EPM; 1163 pmen &= ~DMA_PMEN_EPM;
1164 writel(pmen, iommu->reg + DMAR_PMEN_REG); 1164 writel(pmen, iommu->reg + DMAR_PMEN_REG);
@@ -1167,7 +1167,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1167 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, 1167 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1168 readl, !(pmen & DMA_PMEN_PRS), pmen); 1168 readl, !(pmen & DMA_PMEN_PRS), pmen);
1169 1169
1170 spin_unlock_irqrestore(&iommu->register_lock, flags); 1170 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1171} 1171}
1172 1172
1173static int iommu_enable_translation(struct intel_iommu *iommu) 1173static int iommu_enable_translation(struct intel_iommu *iommu)
@@ -1175,7 +1175,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
1175 u32 sts; 1175 u32 sts;
1176 unsigned long flags; 1176 unsigned long flags;
1177 1177
1178 spin_lock_irqsave(&iommu->register_lock, flags); 1178 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1179 iommu->gcmd |= DMA_GCMD_TE; 1179 iommu->gcmd |= DMA_GCMD_TE;
1180 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); 1180 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1181 1181
@@ -1183,7 +1183,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
1183 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 1183 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1184 readl, (sts & DMA_GSTS_TES), sts); 1184 readl, (sts & DMA_GSTS_TES), sts);
1185 1185
1186 spin_unlock_irqrestore(&iommu->register_lock, flags); 1186 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1187 return 0; 1187 return 0;
1188} 1188}
1189 1189
@@ -1192,7 +1192,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
1192 u32 sts; 1192 u32 sts;
1193 unsigned long flag; 1193 unsigned long flag;
1194 1194
1195 spin_lock_irqsave(&iommu->register_lock, flag); 1195 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1196 iommu->gcmd &= ~DMA_GCMD_TE; 1196 iommu->gcmd &= ~DMA_GCMD_TE;
1197 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); 1197 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1198 1198
@@ -1200,7 +1200,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
1200 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 1200 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1201 readl, (!(sts & DMA_GSTS_TES)), sts); 1201 readl, (!(sts & DMA_GSTS_TES)), sts);
1202 1202
1203 spin_unlock_irqrestore(&iommu->register_lock, flag); 1203 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1204 return 0; 1204 return 0;
1205} 1205}
1206 1206
@@ -3320,7 +3320,7 @@ static int iommu_suspend(void)
3320 for_each_active_iommu(iommu, drhd) { 3320 for_each_active_iommu(iommu, drhd) {
3321 iommu_disable_translation(iommu); 3321 iommu_disable_translation(iommu);
3322 3322
3323 spin_lock_irqsave(&iommu->register_lock, flag); 3323 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3324 3324
3325 iommu->iommu_state[SR_DMAR_FECTL_REG] = 3325 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3326 readl(iommu->reg + DMAR_FECTL_REG); 3326 readl(iommu->reg + DMAR_FECTL_REG);
@@ -3331,7 +3331,7 @@ static int iommu_suspend(void)
3331 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = 3331 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3332 readl(iommu->reg + DMAR_FEUADDR_REG); 3332 readl(iommu->reg + DMAR_FEUADDR_REG);
3333 3333
3334 spin_unlock_irqrestore(&iommu->register_lock, flag); 3334 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3335 } 3335 }
3336 return 0; 3336 return 0;
3337 3337
@@ -3358,7 +3358,7 @@ static void iommu_resume(void)
3358 3358
3359 for_each_active_iommu(iommu, drhd) { 3359 for_each_active_iommu(iommu, drhd) {
3360 3360
3361 spin_lock_irqsave(&iommu->register_lock, flag); 3361 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3362 3362
3363 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], 3363 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3364 iommu->reg + DMAR_FECTL_REG); 3364 iommu->reg + DMAR_FECTL_REG);
@@ -3369,7 +3369,7 @@ static void iommu_resume(void)
3369 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], 3369 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3370 iommu->reg + DMAR_FEUADDR_REG); 3370 iommu->reg + DMAR_FEUADDR_REG);
3371 3371
3372 spin_unlock_irqrestore(&iommu->register_lock, flag); 3372 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3373 } 3373 }
3374 3374
3375 for_each_active_iommu(iommu, drhd) 3375 for_each_active_iommu(iommu, drhd)