author    Thomas Gleixner <tglx@linutronix.de>  2011-07-19 10:19:51 -0400
committer Ingo Molnar <mingo@elte.hu>           2011-09-13 05:12:17 -0400
commit    1f5b3c3fd2d73d6b30e9ef6dcbf131a791d5cbbd (patch)
tree      1d24f2510bd8c57f5e026bf9a7ff93999ed39577 /drivers/iommu/intr_remapping.c
parent    289b4e7a48d91fbef7af819020d826ad9f49f568 (diff)
locking, x86, iommu: Annotate iommu->register_lock as raw
The iommu->register_lock can be taken in atomic context and therefore
must not be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/iommu/intr_remapping.c')
 drivers/iommu/intr_remapping.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 1a89d4a2cadf..b2443f1a13ef 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -409,7 +409,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	addr = virt_to_phys((void *)iommu->ir_table->base);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
 		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
@@ -420,7 +420,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRTPS), sts);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 
 	/*
 	 * global invalidation of interrupt entry cache before enabling
@@ -428,7 +428,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 	 */
 	qi_global_iec(iommu);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	/* Enable interrupt-remapping */
 	iommu->gcmd |= DMA_GCMD_IRE;
@@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 		      readl, (sts & DMA_GSTS_IRES), sts);
 
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 
@@ -485,7 +485,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 	 */
 	qi_global_iec(iommu);
 
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
 	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
 	if (!(sts & DMA_GSTS_IRES))
@@ -498,7 +498,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
 		      readl, !(sts & DMA_GSTS_IRES), sts);
 
 end:
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 int __init intr_remapping_supported(void)
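
For readers unfamiliar with the pattern, here is a minimal, self-contained
sketch of the annotation this commit applies: a lock that protects low-level
device-register access is declared and taken as a raw spinlock, so that
PREEMPT_RT does not convert it into a sleeping lock. The struct and function
names (demo_iommu, demo_register_op) are hypothetical stand-ins, not kernel
identifiers; the declaration change for iommu->register_lock itself
(spinlock_t to raw_spinlock_t) is part of the full commit and lies outside
this file's diff.

/*
 * Illustrative sketch only - not kernel source. Shows the raw-spinlock
 * annotation pattern: declare a raw_spinlock_t, initialize it, and use
 * the raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_iommu {
	raw_spinlock_t	register_lock;	/* was: spinlock_t register_lock */
	u32		gcmd;
};

static void demo_iommu_init(struct demo_iommu *iommu)
{
	/* an embedded raw_spinlock_t must be initialized before first use */
	raw_spin_lock_init(&iommu->register_lock);
}

static void demo_register_op(struct demo_iommu *iommu)
{
	unsigned long flags;

	/*
	 * In mainline, raw_spin_lock_irqsave() behaves the same as
	 * spin_lock_irqsave(). On PREEMPT_RT, ordinary spinlocks become
	 * sleeping locks; a raw spinlock keeps spinning with interrupts
	 * disabled, which is required when the lock is taken in atomic
	 * context around hardware register writes.
	 */
	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= 0x1;	/* stand-in for programming a device register */
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

As the diff above shows, the conversion is mechanical at each call site: only
the lock type and the lock/unlock function names change, while the critical
sections themselves are untouched.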