about summary refs log tree commit diff stats
path: root/drivers/iommu/intr_remapping.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/iommu/intr_remapping.c')
-rw-r--r--  drivers/iommu/intr_remapping.c  |  40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index cfb0dd4bf0b6..07c9f189f314 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -54,7 +54,7 @@ static __init int setup_intremap(char *str)
54} 54}
55early_param("intremap", setup_intremap); 55early_param("intremap", setup_intremap);
56 56
57static DEFINE_SPINLOCK(irq_2_ir_lock); 57static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
58 58
59static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 59static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
60{ 60{
@@ -71,12 +71,12 @@ int get_irte(int irq, struct irte *entry)
71 if (!entry || !irq_iommu) 71 if (!entry || !irq_iommu)
72 return -1; 72 return -1;
73 73
74 spin_lock_irqsave(&irq_2_ir_lock, flags); 74 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
75 75
76 index = irq_iommu->irte_index + irq_iommu->sub_handle; 76 index = irq_iommu->irte_index + irq_iommu->sub_handle;
77 *entry = *(irq_iommu->iommu->ir_table->base + index); 77 *entry = *(irq_iommu->iommu->ir_table->base + index);
78 78
79 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 79 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
80 return 0; 80 return 0;
81} 81}
82 82
@@ -110,7 +110,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
110 return -1; 110 return -1;
111 } 111 }
112 112
113 spin_lock_irqsave(&irq_2_ir_lock, flags); 113 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
114 do { 114 do {
115 for (i = index; i < index + count; i++) 115 for (i = index; i < index + count; i++)
116 if (table->base[i].present) 116 if (table->base[i].present)
@@ -122,7 +122,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
122 index = (index + count) % INTR_REMAP_TABLE_ENTRIES; 122 index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
123 123
124 if (index == start_index) { 124 if (index == start_index) {
125 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 125 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
126 printk(KERN_ERR "can't allocate an IRTE\n"); 126 printk(KERN_ERR "can't allocate an IRTE\n");
127 return -1; 127 return -1;
128 } 128 }
@@ -136,7 +136,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
136 irq_iommu->sub_handle = 0; 136 irq_iommu->sub_handle = 0;
137 irq_iommu->irte_mask = mask; 137 irq_iommu->irte_mask = mask;
138 138
139 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 139 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
140 140
141 return index; 141 return index;
142} 142}
@@ -161,10 +161,10 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
161 if (!irq_iommu) 161 if (!irq_iommu)
162 return -1; 162 return -1;
163 163
164 spin_lock_irqsave(&irq_2_ir_lock, flags); 164 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
165 *sub_handle = irq_iommu->sub_handle; 165 *sub_handle = irq_iommu->sub_handle;
166 index = irq_iommu->irte_index; 166 index = irq_iommu->irte_index;
167 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 167 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
168 return index; 168 return index;
169} 169}
170 170
@@ -176,14 +176,14 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
176 if (!irq_iommu) 176 if (!irq_iommu)
177 return -1; 177 return -1;
178 178
179 spin_lock_irqsave(&irq_2_ir_lock, flags); 179 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
180 180
181 irq_iommu->iommu = iommu; 181 irq_iommu->iommu = iommu;
182 irq_iommu->irte_index = index; 182 irq_iommu->irte_index = index;
183 irq_iommu->sub_handle = subhandle; 183 irq_iommu->sub_handle = subhandle;
184 irq_iommu->irte_mask = 0; 184 irq_iommu->irte_mask = 0;
185 185
186 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 186 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
187 187
188 return 0; 188 return 0;
189} 189}
@@ -199,7 +199,7 @@ int modify_irte(int irq, struct irte *irte_modified)
199 if (!irq_iommu) 199 if (!irq_iommu)
200 return -1; 200 return -1;
201 201
202 spin_lock_irqsave(&irq_2_ir_lock, flags); 202 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
203 203
204 iommu = irq_iommu->iommu; 204 iommu = irq_iommu->iommu;
205 205
@@ -211,7 +211,7 @@ int modify_irte(int irq, struct irte *irte_modified)
211 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 211 __iommu_flush_cache(iommu, irte, sizeof(*irte));
212 212
213 rc = qi_flush_iec(iommu, index, 0); 213 rc = qi_flush_iec(iommu, index, 0);
214 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 214 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
215 215
216 return rc; 216 return rc;
217} 217}
@@ -279,7 +279,7 @@ int free_irte(int irq)
279 if (!irq_iommu) 279 if (!irq_iommu)
280 return -1; 280 return -1;
281 281
282 spin_lock_irqsave(&irq_2_ir_lock, flags); 282 raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
283 283
284 rc = clear_entries(irq_iommu); 284 rc = clear_entries(irq_iommu);
285 285
@@ -288,7 +288,7 @@ int free_irte(int irq)
288 irq_iommu->sub_handle = 0; 288 irq_iommu->sub_handle = 0;
289 irq_iommu->irte_mask = 0; 289 irq_iommu->irte_mask = 0;
290 290
291 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 291 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
292 292
293 return rc; 293 return rc;
294} 294}
@@ -418,7 +418,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
418 418
419 addr = virt_to_phys((void *)iommu->ir_table->base); 419 addr = virt_to_phys((void *)iommu->ir_table->base);
420 420
421 spin_lock_irqsave(&iommu->register_lock, flags); 421 raw_spin_lock_irqsave(&iommu->register_lock, flags);
422 422
423 dmar_writeq(iommu->reg + DMAR_IRTA_REG, 423 dmar_writeq(iommu->reg + DMAR_IRTA_REG,
424 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); 424 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
@@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
429 429
430 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 430 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
431 readl, (sts & DMA_GSTS_IRTPS), sts); 431 readl, (sts & DMA_GSTS_IRTPS), sts);
432 spin_unlock_irqrestore(&iommu->register_lock, flags); 432 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
433 433
434 /* 434 /*
435 * global invalidation of interrupt entry cache before enabling 435 * global invalidation of interrupt entry cache before enabling
@@ -437,7 +437,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
437 */ 437 */
438 qi_global_iec(iommu); 438 qi_global_iec(iommu);
439 439
440 spin_lock_irqsave(&iommu->register_lock, flags); 440 raw_spin_lock_irqsave(&iommu->register_lock, flags);
441 441
442 /* Enable interrupt-remapping */ 442 /* Enable interrupt-remapping */
443 iommu->gcmd |= DMA_GCMD_IRE; 443 iommu->gcmd |= DMA_GCMD_IRE;
@@ -446,7 +446,7 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
446 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 446 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
447 readl, (sts & DMA_GSTS_IRES), sts); 447 readl, (sts & DMA_GSTS_IRES), sts);
448 448
449 spin_unlock_irqrestore(&iommu->register_lock, flags); 449 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
450} 450}
451 451
452 452
@@ -494,7 +494,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
494 */ 494 */
495 qi_global_iec(iommu); 495 qi_global_iec(iommu);
496 496
497 spin_lock_irqsave(&iommu->register_lock, flags); 497 raw_spin_lock_irqsave(&iommu->register_lock, flags);
498 498
499 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 499 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
500 if (!(sts & DMA_GSTS_IRES)) 500 if (!(sts & DMA_GSTS_IRES))
@@ -507,7 +507,7 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
507 readl, !(sts & DMA_GSTS_IRES), sts); 507 readl, !(sts & DMA_GSTS_IRES), sts);
508 508
509end: 509end:
510 spin_unlock_irqrestore(&iommu->register_lock, flags); 510 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
511} 511}
512 512
513static int __init dmar_x2apic_optout(void) 513static int __init dmar_x2apic_optout(void)