author    Jiang Liu <jiang.liu@linux.intel.com>	2014-01-06 01:18:08 -0500
committer Joerg Roedel <joro@8bytes.org>	2014-01-07 11:16:19 -0500
commit    360eb3c5687e2df23e29e97878238765bfe6a756
tree      6c97eb6de7e35cf4e3f552de8f20d7c3b1926787
parent    dbad086433af81513c84678070522455fefebe2a
iommu/vt-d: use dedicated bitmap to track remapping entry allocation status
Currently the Intel interrupt remapping driver uses the "present" flag bit in a remapping entry to track whether that entry is allocated. It works as follows:

1) allocate a remapping entry and set its "present" flag bit to 1
2) compose the other fields for the entry
3) update the remapping entry with the composed value

The remapping hardware may access the entry between step 1 and step 3, and would then observe an entry with the "present" flag set but random values in all other fields.

This patch introduces a dedicated bitmap to track remapping entry allocation status instead of sharing the "present" flag with hardware, thus eliminating the race window. It also simplifies the implementation.

Tested-and-reviewed-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
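For readers less familiar with the kernel bitmap helpers the patch switches to: bitmap_find_free_region() claims a naturally aligned run of 2^order free bits and bitmap_release_region() frees it again. The following is a minimal, standalone userspace sketch of that allocate/release pattern over a per-table allocation bitmap; the helper names (find_free_region, release_region), the linear scan, and the table size used here are illustrative assumptions, not the kernel implementation shown in the diff below.

/*
 * Userspace sketch of the allocation pattern the patch adopts: a dedicated
 * bitmap records which remapping entries are in use, and a naturally
 * aligned run of 2^order entries is claimed or released in one step.
 * These helpers only mimic the semantics of the kernel's
 * bitmap_find_free_region()/bitmap_release_region(); they are not the
 * kernel implementation.
 */
#include <stdio.h>
#include <limits.h>

#define TABLE_ENTRIES  65536                       /* illustrative table size */
#define BITS_PER_ULONG (CHAR_BIT * sizeof(unsigned long))
#define LONGS_NEEDED   (TABLE_ENTRIES / BITS_PER_ULONG)

static unsigned long alloc_bitmap[LONGS_NEEDED];   /* zero = entry is free */

static int bit_is_set(unsigned int nr)
{
	return (alloc_bitmap[nr / BITS_PER_ULONG] >> (nr % BITS_PER_ULONG)) & 1UL;
}

static void bit_set(unsigned int nr)
{
	alloc_bitmap[nr / BITS_PER_ULONG] |= 1UL << (nr % BITS_PER_ULONG);
}

static void bit_clear(unsigned int nr)
{
	alloc_bitmap[nr / BITS_PER_ULONG] &= ~(1UL << (nr % BITS_PER_ULONG));
}

/* Claim a naturally aligned free run of 2^order bits; return its start or -1. */
static int find_free_region(unsigned int order)
{
	unsigned int size = 1U << order, pos, i;

	for (pos = 0; pos + size <= TABLE_ENTRIES; pos += size) {
		for (i = 0; i < size; i++)
			if (bit_is_set(pos + i))
				break;
		if (i == size) {           /* the whole run is free: claim it */
			for (i = 0; i < size; i++)
				bit_set(pos + i);
			return (int)pos;
		}
	}
	return -1;
}

/* Release a previously claimed run of 2^order bits starting at pos. */
static void release_region(unsigned int pos, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++)
		bit_clear(pos + i);
}

int main(void)
{
	int index = find_free_region(2);   /* e.g. four contiguous entries */

	if (index < 0) {
		fprintf(stderr, "no free region\n");
		return 1;
	}
	printf("allocated entries starting at index %d\n", index);
	release_region((unsigned int)index, 2);
	return 0;
}

The kernel helpers additionally take the bitmap and its length as arguments, as seen in the alloc_irte() hunk below, where the requested order is the existing "mask" value.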
 drivers/iommu/intel_irq_remapping.c | 55 ++++++++++++++++------------------
 include/linux/intel-iommu.h         |  1 +
 2 files changed, 27 insertions(+), 29 deletions(-)
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c988b8d85df8..3aa9b5c347e4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -72,7 +72,6 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
-	int i;
 
 	if (!count || !irq_iommu)
 		return -1;
@@ -96,32 +95,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	}
 
 	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-	do {
-		for (i = index; i < index + count; i++)
-			if (table->base[i].present)
-				break;
-		/* empty index found */
-		if (i == index + count)
-			break;
-
-		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
-		if (index == start_index) {
-			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-			printk(KERN_ERR "can't allocate an IRTE\n");
-			return -1;
-		}
-	} while (1);
-
-	for (i = index; i < index + count; i++)
-		table->base[i].present = 1;
-
-	cfg->remapped = 1;
-	irq_iommu->iommu = iommu;
-	irq_iommu->irte_index = index;
-	irq_iommu->sub_handle = 0;
-	irq_iommu->irte_mask = mask;
-
+	index = bitmap_find_free_region(table->bitmap,
+					INTR_REMAP_TABLE_ENTRIES, mask);
+	if (index < 0) {
+		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+	} else {
+		cfg->remapped = 1;
+		irq_iommu->iommu = iommu;
+		irq_iommu->irte_index = index;
+		irq_iommu->sub_handle = 0;
+		irq_iommu->irte_mask = mask;
+	}
 	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
@@ -254,6 +238,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 		set_64bit(&entry->low, 0);
 		set_64bit(&entry->high, 0);
 	}
+	bitmap_release_region(iommu->ir_table->bitmap, index,
+			      irq_iommu->irte_mask);
 
 	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
@@ -453,6 +439,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 {
 	struct ir_table *ir_table;
 	struct page *pages;
+	unsigned long *bitmap;
 
 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
 					     GFP_ATOMIC);
@@ -464,13 +451,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 					 INTR_REMAP_PAGE_ORDER);
 
 	if (!pages) {
-		printk(KERN_ERR "failed to allocate pages of order %d\n",
-		       INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate pages of order %d\n",
+		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
 		kfree(iommu->ir_table);
 		return -ENOMEM;
 	}
 
+	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+			 sizeof(long), GFP_ATOMIC);
+	if (bitmap == NULL) {
+		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
+		kfree(ir_table);
+		return -ENOMEM;
+	}
+
 	ir_table->base = page_address(pages);
+	ir_table->bitmap = bitmap;
 
 	iommu_set_irq_remapping(iommu, mode);
 	return 0;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d380c5e68008..de1e5e936420 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -288,6 +288,7 @@ struct q_inval {
 
 struct ir_table {
 	struct irte *base;
+	unsigned long *bitmap;
 };
 #endif
 