path: root/drivers/pci/intr_remapping.c
author     Thomas Gleixner <tglx@linutronix.de>  2010-10-10 06:34:27 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2010-10-12 10:53:42 -0400
commit     d585d060b42bd36f6f0b23ff327d3b91f80c7139 (patch)
tree       0fc66dae7c0c9dd8b9c4498c03a158fd3ce71e9b  /drivers/pci/intr_remapping.c
parent     349d67673c08cbc299a69086b0f5447cf1094e9b (diff)
intr_remap: Simplify the code further
Having irq_2_iommu in struct irq_cfg allows further simplifications.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
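The simplification rests on one fact visible in the diff: struct irq_2_iommu is embedded in struct irq_cfg, so irq_2_iommu() can derive it straight from the irq chip data, and every caller only needs a NULL check before taking irq_2_ir_lock; valid_irq_2_iommu(), irq_2_iommu_alloc() and irq_2_iommu_free() become unnecessary. The sketch below is a minimal, self-contained illustration of that call pattern; the stand-in types and the stubbed get_irq_chip_data() are simplifications for illustration, not the real kernel definitions.

/* sketch.c - illustrative only; stand-in types, not the kernel's */
#include <stddef.h>
#include <stdio.h>

struct intel_iommu;			/* opaque in this sketch */

struct irq_2_iommu {
	struct intel_iommu *iommu;
	unsigned short irte_index;
	unsigned short sub_handle;
};

struct irq_cfg {
	struct irq_2_iommu irq_2_iommu;	/* embedded: no separate allocation */
};

/* Stand-in for get_irq_chip_data(); the kernel looks this up from the
 * irq descriptor.  Here a single fake entry exists for irq 0. */
static struct irq_cfg *get_irq_chip_data(unsigned int irq)
{
	static struct irq_cfg cfg;

	return irq == 0 ? &cfg : NULL;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = get_irq_chip_data(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}

/* Caller pattern after the patch: fetch once, check before locking. */
static int map_irq_to_irte_handle(int irq, unsigned short *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return -1;

	/* spin_lock_irqsave(&irq_2_ir_lock, flags) would go here */
	*sub_handle = irq_iommu->sub_handle;
	return irq_iommu->irte_index;
}

int main(void)
{
	unsigned short sh = 0;
	int index = map_irq_to_irte_handle(0, &sh);

	printf("index=%d sub_handle=%u\n", index, sh);
	return 0;
}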
Diffstat (limited to 'drivers/pci/intr_remapping.c')
-rw-r--r--  drivers/pci/intr_remapping.c | 113
1 file changed, 29 insertions, 84 deletions
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index ea46935422ff..a620b8bd8f4b 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -46,58 +46,31 @@ static __init int setup_intremap(char *str)
 }
 early_param("intremap", setup_intremap);
 
+static DEFINE_SPINLOCK(irq_2_ir_lock);
+
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
 	struct irq_cfg *cfg = get_irq_chip_data(irq);
 	return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
-	return irq_2_iommu(irq);
-}
-
-static void irq_2_iommu_free(unsigned int irq)
-{
-}
-
-static DEFINE_SPINLOCK(irq_2_ir_lock);
-
-static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
-{
-	struct irq_2_iommu *irq_iommu;
-
-	irq_iommu = irq_2_iommu(irq);
-
-	if (!irq_iommu)
-		return NULL;
-
-	if (!irq_iommu->iommu)
-		return NULL;
-
-	return irq_iommu;
-}
-
 int irq_remapped(int irq)
 {
-	return valid_irq_2_iommu(irq) != NULL;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+
+	return irq_iommu ? irq_iommu->iommu != NULL : 0;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
-	int index;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int index;
 
-	if (!entry)
+	if (!entry || !irq_iommu)
 		return -1;
 
 	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
@@ -109,20 +82,14 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
 	int i;
 
-	if (!count)
-		return -1;
-
-#ifndef CONFIG_SPARSE_IRQ
-	/* protect irq_2_iommu_alloc later */
-	if (irq >= nr_irqs)
+	if (!count || !irq_iommu)
 		return -1;
-#endif
 
 	/*
 	 * start the IRTE search from index 0.
@@ -163,13 +130,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	for (i = index; i < index + count; i++)
 		table->base[i].present = 1;
 
-	irq_iommu = irq_2_iommu_alloc(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		printk(KERN_ERR "can't allocate irq_2_iommu\n");
-		return -1;
-	}
-
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = 0;
@@ -193,17 +153,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
-	int index;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int index;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
 
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
 	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
@@ -212,18 +169,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-	irq_iommu = irq_2_iommu_alloc(irq);
-
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		printk(KERN_ERR "can't allocate irq_2_iommu\n");
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
@@ -237,19 +189,16 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
-	int rc;
-	int index;
-	struct irte *irte;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct intel_iommu *iommu;
-	struct irq_2_iommu *irq_iommu;
 	unsigned long flags;
+	struct irte *irte;
+	int rc, index;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	iommu = irq_iommu->iommu;
 
@@ -322,16 +271,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 
 int free_irte(int irq)
 {
-	int rc = 0;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int rc;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	rc = clear_entries(irq_iommu);
 
@@ -342,8 +289,6 @@ int free_irte(int irq)
 
 	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
-	irq_2_iommu_free(irq);
-
 	return rc;
 }
 