author     Linus Torvalds <torvalds@linux-foundation.org>   2011-10-26 10:17:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-10-26 10:17:32 -0400
commit     3cfef9524677a4ecb392d6fbffe6ebce6302f1d4 (patch)
tree       88647d9dc50d634dee9cfeb7f354d620977a2f33 /drivers/iommu/intel-iommu.c
parent     982653009b883ef1529089e3e6f1ae2fee41cbe2 (diff)
parent     68cc3990a545dc0da221b4844dd8b9c06623a6c5 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
  lockdep: Comment all warnings
  lib: atomic64: Change the type of local lock to raw_spinlock_t
  locking, lib/atomic64: Annotate atomic64_lock::lock as raw
  locking, x86, iommu: Annotate qi->q_lock as raw
  locking, x86, iommu: Annotate irq_2_ir_lock as raw
  locking, x86, iommu: Annotate iommu->register_lock as raw
  locking, dma, ipu: Annotate bank_lock as raw
  locking, ARM: Annotate low level hw locks as raw
  locking, drivers/dca: Annotate dca_lock as raw
  locking, powerpc: Annotate uic->lock as raw
  locking, x86: mce: Annotate cmci_discover_lock as raw
  locking, ACPI: Annotate c3_lock as raw
  locking, oprofile: Annotate oprofilefs lock as raw
  locking, video: Annotate vga console lock as raw
  locking, latencytop: Annotate latency_lock as raw
  locking, timer_stats: Annotate table_lock as raw
  locking, rwsem: Annotate inner lock as raw
  locking, semaphores: Annotate inner lock as raw
  locking, sched: Annotate thread_group_cputimer as raw
  ...

Fix up conflicts in kernel/posix-cpu-timers.c manually: making cputimer->cputime a raw lock conflicted with the ABBA fix in commit bcd5cff7216f ("cputimer: Cure lock inversion").
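Every hunk below swaps spin_lock_irqsave()/spin_unlock_irqrestore() on iommu->register_lock for the raw_spin_* variants. On PREEMPT_RT a plain spinlock_t becomes a sleeping lock, so a lock that guards short, must-not-sleep MMIO register sequences is annotated as raw_spinlock_t instead. The matching declaration and initialisation changes are outside this diffstat (it is limited to intel-iommu.c). The following is only a minimal sketch of the pattern, using a hypothetical demo_iommu struct and helper names rather than the real struct intel_iommu:

    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /*
     * Sketch only: hypothetical struct and helpers, not the kernel's
     * struct intel_iommu.  The lock protecting short MMIO register
     * sequences is a raw_spinlock_t so it stays a spinning lock even
     * on PREEMPT_RT, where spinlock_t may sleep.
     */
    struct demo_iommu {
            void __iomem    *reg;           /* MMIO register base */
            raw_spinlock_t  register_lock;  /* was: spinlock_t register_lock; */
    };

    static void demo_iommu_lock_init(struct demo_iommu *iommu)
    {
            raw_spin_lock_init(&iommu->register_lock);
    }

    static void demo_iommu_write_gcmd(struct demo_iommu *iommu, u32 cmd)
    {
            unsigned long flags;

            /* Same lock/write/unlock pattern as the hunks below. */
            raw_spin_lock_irqsave(&iommu->register_lock, flags);
            writel(cmd, iommu->reg);
            raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
    }

raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() have the same calling convention as their non-raw counterparts, which is why each hunk in the diff is a one-line substitution.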
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c  36
1 files changed, 18 insertions, 18 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f28d933c7927..be1953c239b0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -939,7 +939,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 
         addr = iommu->root_entry;
 
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
 
         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -948,7 +948,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                 readl, (sts & DMA_GSTS_RTPS), sts);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
@@ -959,14 +959,14 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                 return;
 
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
 
         /* Make sure hardware complete it */
         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                 readl, (!(val & DMA_GSTS_WBFS)), val);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -993,14 +993,14 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
         }
         val |= DMA_CCMD_ICC;
 
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
 
         /* Make sure hardware complete it */
         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -1039,7 +1039,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
         if (cap_write_drain(iommu->cap))
                 val |= DMA_TLB_WRITE_DRAIN;
 
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         /* Note: Only uses first TLB reg currently */
         if (val_iva)
                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
@@ -1049,7 +1049,7 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
         /* check IOTLB invalidation granularity */
         if (DMA_TLB_IAIG(val) == 0)
@@ -1165,7 +1165,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
         u32 pmen;
         unsigned long flags;
 
-        spin_lock_irqsave(&iommu->register_lock, flags);
+        raw_spin_lock_irqsave(&iommu->register_lock, flags);
         pmen = readl(iommu->reg + DMAR_PMEN_REG);
         pmen &= ~DMA_PMEN_EPM;
         writel(pmen, iommu->reg + DMAR_PMEN_REG);
@@ -1174,7 +1174,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                 readl, !(pmen & DMA_PMEN_PRS), pmen);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 static int iommu_enable_translation(struct intel_iommu *iommu)
@@ -1182,7 +1182,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
         u32 sts;
         unsigned long flags;
 
-        spin_lock_irqsave(&iommu->register_lock, flags);
+        raw_spin_lock_irqsave(&iommu->register_lock, flags);
         iommu->gcmd |= DMA_GCMD_TE;
         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
@@ -1190,7 +1190,7 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                 readl, (sts & DMA_GSTS_TES), sts);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flags);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
         return 0;
 }
 
@@ -1199,7 +1199,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
         u32 sts;
         unsigned long flag;
 
-        spin_lock_irqsave(&iommu->register_lock, flag);
+        raw_spin_lock_irqsave(&iommu->register_lock, flag);
         iommu->gcmd &= ~DMA_GCMD_TE;
         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
@@ -1207,7 +1207,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                 readl, (!(sts & DMA_GSTS_TES)), sts);
 
-        spin_unlock_irqrestore(&iommu->register_lock, flag);
+        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
         return 0;
 }
 
@@ -3329,7 +3329,7 @@ static int iommu_suspend(void)
         for_each_active_iommu(iommu, drhd) {
                 iommu_disable_translation(iommu);
 
-                spin_lock_irqsave(&iommu->register_lock, flag);
+                raw_spin_lock_irqsave(&iommu->register_lock, flag);
 
                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
                         readl(iommu->reg + DMAR_FECTL_REG);
@@ -3340,7 +3340,7 @@ static int iommu_suspend(void)
                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
                         readl(iommu->reg + DMAR_FEUADDR_REG);
 
-                spin_unlock_irqrestore(&iommu->register_lock, flag);
+                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
         }
         return 0;
 
@@ -3367,7 +3367,7 @@ static void iommu_resume(void)
 
         for_each_active_iommu(iommu, drhd) {
 
-                spin_lock_irqsave(&iommu->register_lock, flag);
+                raw_spin_lock_irqsave(&iommu->register_lock, flag);
 
                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
                         iommu->reg + DMAR_FECTL_REG);
@@ -3378,7 +3378,7 @@ static void iommu_resume(void)
                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
                         iommu->reg + DMAR_FEUADDR_REG);
 
-                spin_unlock_irqrestore(&iommu->register_lock, flag);
+                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
         }
 
         for_each_active_iommu(iommu, drhd)
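Several of the converted sections above keep the raw lock held across IOMMU_WAIT_OP(), a busy-wait on a status register. That remains acceptable because the wait is bounded by a timeout. As a rough illustration only, here is a hypothetical bounded poll in the same spirit; it is not the kernel's actual IOMMU_WAIT_OP macro, which differs in detail:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/timex.h>        /* get_cycles(), cycles_t */
    #include <linux/types.h>
    #include <asm/processor.h>      /* cpu_relax() */

    /*
     * Sketch only: poll a status register until a bit is set or a cycle
     * budget is exhausted.  Because the loop terminates within a bounded
     * time, it can run with a raw spinlock held and interrupts disabled.
     */
    static int demo_wait_for_status(void __iomem *reg, u32 mask,
                                    cycles_t timeout_cycles)
    {
            cycles_t start = get_cycles();
            u32 sts;

            for (;;) {
                    sts = readl(reg);
                    if (sts & mask)
                            return 0;               /* condition met */
                    if (get_cycles() - start > timeout_cycles)
                            return -ETIMEDOUT;      /* bounded wait */
                    cpu_relax();
            }
    }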