Diffstat (limited to 'drivers/pci/intr_remapping.c')
-rw-r--r--	drivers/pci/intr_remapping.c	113
1 file changed, 83 insertions(+), 30 deletions(-)
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 45effc5726c0..b041a409f4a7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <linux/intel-iommu.h>
 #include "intr_remapping.h"
 
@@ -20,7 +21,7 @@ struct irq_2_iommu {
20 u8 irte_mask; 21 u8 irte_mask;
21}; 22};
22 23
23#ifdef CONFIG_SPARSE_IRQ 24#ifdef CONFIG_GENERIC_HARDIRQS
24static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) 25static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
25{ 26{
26 struct irq_2_iommu *iommu; 27 struct irq_2_iommu *iommu;
@@ -116,21 +117,22 @@ int get_irte(int irq, struct irte *entry)
 {
 	int index;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
 	if (!entry)
 		return -1;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return 0;
 }
 
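The spin_lock() to spin_lock_irqsave() conversion in this hunk repeats through the rest of the patch: irq_2_ir_lock is now taken from paths that may already run with interrupts disabled, so each critical section has to save and restore the caller's interrupt state instead of assuming interrupts are on. Below is a minimal sketch of the pattern, with made-up names (example_lock, example_state are not from the patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_state;

int example_read_state(void)
{
	unsigned long flags;
	int val;

	/* disable local interrupts and remember whether they were on */
	spin_lock_irqsave(&example_lock, flags);
	val = example_state;
	/* put the caller's interrupt state back, whatever it was */
	spin_unlock_irqrestore(&example_lock, flags);

	return val;
}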
@@ -140,6 +142,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	struct irq_2_iommu *irq_iommu;
 	u16 index, start_index;
 	unsigned int mask = 0;
+	unsigned long flags;
 	int i;
 
 	if (!count)
@@ -169,7 +172,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		return -1;
 	}
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	do {
 		for (i = index; i < index + count; i++)
 			if (table->base[i].present)
@@ -181,7 +184,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
 
 		if (index == start_index) {
-			spin_unlock(&irq_2_ir_lock);
+			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 			printk(KERN_ERR "can't allocate an IRTE\n");
 			return -1;
 		}
@@ -192,7 +195,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		printk(KERN_ERR "can't allocate irq_2_iommu\n");
 		return -1;
 	}
@@ -202,7 +205,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = mask;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
 }
@@ -222,30 +225,32 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	int index;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		printk(KERN_ERR "can't allocate irq_2_iommu\n");
 		return -1;
 	}
@@ -263,11 +268,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -276,7 +282,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 	irq_iommu->sub_handle = 0;
 	irq_2_iommu(irq)->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -288,11 +294,12 @@ int modify_irte(int irq, struct irte *irte_modified)
 	struct irte *irte;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -301,11 +308,11 @@ int modify_irte(int irq, struct irte *irte_modified)
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
-	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
+	set_64bit((unsigned long *)irte, irte_modified->low);
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
 	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
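The set_64bit() change above stops OR-ing (1 << 1) into the entry. In the VT-d IRTE the low qword carries the control bits: bit 0 is Present and bit 1 is FPD (fault processing disable), so the old code forced fault reporting off for every entry it touched. Illustrative masks below, not identifiers from the patch:

/* Illustrative VT-d IRTE low-qword flag bits, not from this file. */
#define EXAMPLE_IRTE_PRESENT	(1ULL << 0)	/* entry is valid */
#define EXAMPLE_IRTE_FPD	(1ULL << 1)	/* suppress remapping faults */

/* old: set_64bit(irte, irte_modified->low | EXAMPLE_IRTE_FPD);	*/
/* new: set_64bit(irte, irte_modified->low);  caller's bits verbatim	*/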
@@ -316,11 +323,12 @@ int flush_irte(int irq)
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -329,7 +337,7 @@ int flush_irte(int irq)
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
 	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -362,11 +370,12 @@ int free_irte(int irq)
 	struct irte *irte;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -377,7 +386,7 @@ int free_irte(int irq)
 
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
-			set_64bit((unsigned long *)irte, 0);
+			set_64bit((unsigned long *)(irte + i), 0);
 		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
@@ -386,7 +395,7 @@ int free_irte(int irq)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
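The middle hunk above fixes an indexing bug in free_irte(): for a multi-entry allocation (irte_mask encodes a power-of-two block size), the old loop zeroed the first IRTE (1 << irte_mask) times and left the rest present. A simplified sketch of the corrected loop, with a hypothetical example_irte standing in for the real 128-bit IRTE:

#include <linux/types.h>

struct example_irte {
	u64 low;	/* bit 0 is the present bit; zeroing it retires the entry */
	u64 high;
};

static void clear_irte_block(struct example_irte *irte, u8 irte_mask)
{
	unsigned int i;

	for (i = 0; i < (1U << irte_mask); i++)
		irte[i].low = 0;	/* entry i, not entry 0, each pass */
}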
@@ -438,12 +447,12 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
 	struct page *pages;
 
 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
-					     GFP_KERNEL);
+					     GFP_ATOMIC);
 
 	if (!iommu->ir_table)
 		return -ENOMEM;
 
-	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
+	pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
 
 	if (!pages) {
 		printk(KERN_ERR "failed to allocate pages of order %d\n",
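The two GFP_KERNEL to GFP_ATOMIC switches above follow from the same locking change: this setup path can now be reached where sleeping is illegal, and GFP_KERNEL allocations may block for memory reclaim. A hypothetical helper (alloc_table is not from the patch) showing the trade-off:

#include <linux/slab.h>

static void *alloc_table(size_t size, bool atomic_ctx)
{
	/*
	 * GFP_KERNEL may sleep to reclaim memory and is only legal in
	 * process context; GFP_ATOMIC never sleeps, at the price of
	 * failing more readily under memory pressure.
	 */
	return kzalloc(size, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}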
@@ -458,11 +467,55 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
 	return 0;
 }
 
+/*
+ * Disable Interrupt Remapping.
+ */
+static void disable_intr_remapping(struct intel_iommu *iommu)
+{
+	unsigned long flags;
+	u32 sts;
+
+	if (!ecap_ir_support(iommu->ecap))
+		return;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	if (!(sts & DMA_GSTS_IRES))
+		goto end;
+
+	iommu->gcmd &= ~DMA_GCMD_IRE;
+	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+		      readl, !(sts & DMA_GSTS_IRES), sts);
+
+end:
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
 int __init enable_intr_remapping(int eim)
 {
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
 
+	for_each_drhd_unit(drhd) {
+		struct intel_iommu *iommu = drhd->iommu;
+
+		/*
+		 * Clear previous faults.
+		 */
+		dmar_fault(-1, iommu);
+
+		/*
+		 * Disable intr remapping and queued invalidation, if already
+		 * enabled prior to OS handover.
+		 */
+		disable_intr_remapping(iommu);
+
+		dmar_disable_qi(iommu);
+	}
+
 	/*
 	 * check for the Interrupt-remapping support
 	 */
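The new disable_intr_remapping() uses the usual VT-d register handshake: clear the IRE bit in the global command register, then wait until the global status register shows interrupt remapping off. IOMMU_WAIT_OP() wraps that polling together with a timeout; ignoring the timeout, it behaves roughly like this sketch (wait_ir_disabled is a made-up name; the register identifiers are the ones used in the hunk above):

#include <linux/intel-iommu.h>

static void wait_ir_disabled(struct intel_iommu *iommu)
{
	u32 sts;

	/* spin until the IRES status bit confirms remapping is off */
	do {
		cpu_relax();
		sts = readl(iommu->reg + DMAR_GSTS_REG);
	} while (sts & DMA_GSTS_IRES);
}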