Diffstat (limited to 'drivers/pci/intr_remapping.c')
-rw-r--r-- | drivers/pci/intr_remapping.c | 112
1 file changed, 82 insertions, 30 deletions
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 8e44db040db7..b041a409f4a7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -21,7 +21,7 @@ struct irq_2_iommu {
 	u8 irte_mask;
 };
 
-#ifdef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_GENERIC_HARDIRQS
 static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
 {
 	struct irq_2_iommu *iommu;
@@ -117,21 +117,22 @@ int get_irte(int irq, struct irte *entry)
 {
 	int index;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
 	if (!entry)
 		return -1;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return 0;
 }
 
@@ -141,6 +142,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	struct irq_2_iommu *irq_iommu;
 	u16 index, start_index;
 	unsigned int mask = 0;
+	unsigned long flags;
 	int i;
 
 	if (!count)
@@ -170,7 +172,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		return -1;
 	}
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	do {
 		for (i = index; i < index + count; i++)
 			if (table->base[i].present)
@@ -182,7 +184,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
 
 		if (index == start_index) {
-			spin_unlock(&irq_2_ir_lock);
+			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 			printk(KERN_ERR "can't allocate an IRTE\n");
 			return -1;
 		}
@@ -193,7 +195,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		printk(KERN_ERR "can't allocate irq_2_iommu\n");
 		return -1;
 	}
@@ -203,7 +205,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = mask;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
 }
@@ -223,30 +225,32 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	int index;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		printk(KERN_ERR "can't allocate irq_2_iommu\n");
 		return -1;
 	}
@@ -256,7 +260,7 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 	irq_iommu->sub_handle = subhandle;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -264,11 +268,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -277,7 +282,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 	irq_iommu->sub_handle = 0;
 	irq_2_iommu(irq)->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -289,11 +294,12 @@ int modify_irte(int irq, struct irte *irte_modified)
 	struct irte *irte;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -302,11 +308,11 @@ int modify_irte(int irq, struct irte *irte_modified)
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
-	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
+	set_64bit((unsigned long *)irte, irte_modified->low);
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
 	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -317,11 +323,12 @@ int flush_irte(int irq)
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -330,7 +337,7 @@ int flush_irte(int irq)
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
 	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -363,11 +370,12 @@ int free_irte(int irq)
 	struct irte *irte;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -378,7 +386,7 @@ int free_irte(int irq)
 
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
-			set_64bit((unsigned long *)irte, 0);
+			set_64bit((unsigned long *)(irte + i), 0);
 		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
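Note: the free_irte() hunk above also fixes an indexing bug. The old loop passed irte to set_64bit() on every iteration, so only the first IRTE of a multi-entry allocation was ever cleared; the patched code clears irte + i. A small self-contained userspace C illustration of the difference (array contents and names are invented for illustration, this is not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

#define NR_ENTRIES 4	/* e.g. an irte_mask of 2 covers 1 << 2 = 4 entries */

static int count_nonzero(const uint64_t *base, int n)
{
	int i, left = 0;

	for (i = 0; i < n; i++)
		if (base[i])
			left++;
	return left;
}

int main(void)
{
	uint64_t old_way[NR_ENTRIES] = { 1, 2, 3, 4 };
	uint64_t new_way[NR_ENTRIES] = { 1, 2, 3, 4 };
	int i;

	for (i = 0; i < NR_ENTRIES; i++)
		old_way[0] = 0;		/* old code: always clears entry 0 */

	for (i = 0; i < NR_ENTRIES; i++)
		new_way[i] = 0;		/* patched code: clears entry i */

	printf("old loop leaves %d entries set, new loop leaves %d\n",
	       count_nonzero(old_way, NR_ENTRIES),
	       count_nonzero(new_way, NR_ENTRIES));
	return 0;
}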
@@ -387,7 +395,7 @@ int free_irte(int irq)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -439,12 +447,12 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
 	struct page *pages;
 
 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
-					     GFP_KERNEL);
+					     GFP_ATOMIC);
 
 	if (!iommu->ir_table)
 		return -ENOMEM;
 
-	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
+	pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
 
 	if (!pages) {
 		printk(KERN_ERR "failed to allocate pages of order %d\n",
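Note: setup_intr_remapping() switches its allocations from GFP_KERNEL to GFP_ATOMIC, presumably because this path can now be reached with interrupts disabled (see the irqsave conversion above), where a sleeping allocation is not allowed. A hypothetical kernel-style sketch of that constraint follows; the cfg_* identifiers are invented for illustration and a kernel build environment is assumed.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cfg_lock);
static void *cfg_buf;

/* Allocating while a spinlock is held with interrupts off: GFP_KERNEL
 * may sleep and is therefore forbidden here; GFP_ATOMIC never sleeps,
 * at the cost of a higher chance of failure under memory pressure. */
static int cfg_install(size_t len)
{
	unsigned long flags;
	void *buf;

	spin_lock_irqsave(&cfg_lock, flags);
	buf = kzalloc(len, GFP_ATOMIC);
	if (buf && !cfg_buf)
		cfg_buf = buf;
	else
		kfree(buf);	/* kfree(NULL) is a no-op */
	spin_unlock_irqrestore(&cfg_lock, flags);

	return cfg_buf ? 0 : -ENOMEM;
}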
@@ -459,11 +467,55 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
 	return 0;
 }
 
+/*
+ * Disable Interrupt Remapping.
+ */
+static void disable_intr_remapping(struct intel_iommu *iommu)
+{
+	unsigned long flags;
+	u32 sts;
+
+	if (!ecap_ir_support(iommu->ecap))
+		return;
+
+	spin_lock_irqsave(&iommu->register_lock, flags);
+
+	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+	if (!(sts & DMA_GSTS_IRES))
+		goto end;
+
+	iommu->gcmd &= ~DMA_GCMD_IRE;
+	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+		      readl, !(sts & DMA_GSTS_IRES), sts);
+
+end:
+	spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
+
 int __init enable_intr_remapping(int eim)
 {
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
 
+	for_each_drhd_unit(drhd) {
+		struct intel_iommu *iommu = drhd->iommu;
+
+		/*
+		 * Clear previous faults.
+		 */
+		dmar_fault(-1, iommu);
+
+		/*
+		 * Disable intr remapping and queued invalidation, if already
+		 * enabled prior to OS handover.
+		 */
+		disable_intr_remapping(iommu);
+
+		dmar_disable_qi(iommu);
+	}
+
 	/*
 	 * check for the Interrupt-remapping support
 	 */
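Note: disable_intr_remapping() clears the IRE bit in the Global Command register and then spins, via IOMMU_WAIT_OP, until the Global Status register reports that interrupt remapping is actually off. The same "issue command, poll status until the bit drops" idiom, reduced to a self-contained userspace C simulation with a fake status word (all names and the bit position are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define FAKE_IRES (1u << 25)	/* stand-in for the status bit being polled */
#define MAX_POLLS 1000000

/* Simulated device: the "hardware" drops the bit after a few reads. */
static uint32_t fake_status = FAKE_IRES;
static int reads_until_clear = 3;

static uint32_t read_status(void)
{
	if (reads_until_clear > 0 && --reads_until_clear == 0)
		fake_status &= ~FAKE_IRES;
	return fake_status;
}

int main(void)
{
	int polls;

	/* The "command" was already issued; wait for the status bit to
	 * clear, giving up after a bounded number of polls. */
	for (polls = 0; polls < MAX_POLLS; polls++) {
		if (!(read_status() & FAKE_IRES)) {
			printf("disabled after %d polls\n", polls + 1);
			return 0;
		}
	}

	fprintf(stderr, "timed out waiting for the bit to clear\n");
	return 1;
}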