author		Suresh Siddha <suresh.b.siddha@intel.com>	2009-03-16 20:04:53 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2009-03-17 18:36:40 -0400
commit		4c5502b1c5744b2090414e1b80ca6388d5c46e06 (patch)
tree		6447f553238a2522719fcf3c28b33a19e516f71c /drivers/pci
parent		0ca0f16fd17c5d880dd0abbe03595b0c7c5b3c95 (diff)
x86, x2apic: fix lock ordering during IRQ migration
Impact: fix potential deadlock on x2apic

Fix "hard-safe -> hard-unsafe lock order detected" with irq_2_ir_lock.

On an x2apic enabled system:

[ INFO: hard-safe -> hard-unsafe lock order detected ]
2.6.27-03151-g4480f15b #1
------------------------------------------------------
swapper/1 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire:
 (irq_2_ir_lock){--..}, at: [<ffffffff8038ebc0>] get_irte+0x2f/0x95

and this task is already holding:
 (&irq_desc_lock_class){+...}, at: [<ffffffff802649ed>] setup_irq+0x67/0x281

which would create a new lock dependency:
 (&irq_desc_lock_class){+...} -> (irq_2_ir_lock){--..}

but this new dependency connects a hard-irq-safe lock:
 (&irq_desc_lock_class){+...}
... which became hard-irq-safe at:
 [<ffffffffffffffff>] 0xffffffffffffffff

to a hard-irq-unsafe lock:
 (irq_2_ir_lock){--..}
... which became hard-irq-unsafe at:
...
 [<ffffffff802547b5>] __lock_acquire+0x571/0x706
 [<ffffffff8025499f>] lock_acquire+0x55/0x71
 [<ffffffff8062f2c4>] _spin_lock+0x2c/0x38
 [<ffffffff8038ee50>] alloc_irte+0x8a/0x14b
 [<ffffffff8021f733>] setup_IO_APIC_irq+0x119/0x30e
 [<ffffffff8090860e>] setup_IO_APIC+0x146/0x6e5
 [<ffffffff809058fc>] native_smp_prepare_cpus+0x24e/0x2e9
 [<ffffffff808f982c>] kernel_init+0x5a/0x176
 [<ffffffff8020c289>] child_rip+0xa/0x11
 [<ffffffffffffffff>] 0xffffffffffffffff

Fix this theoretical lock order issue by using spin_lock_irqsave()
instead of spin_lock().

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
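For reference, the locking pattern the patch switches to is sketched below in a minimal, self-contained example (the lock and function names are hypothetical, not from this patch). spin_lock_irqsave() disables local interrupts and saves the previous interrupt state in flags, making the lock hard-irq-safe, so nesting it under a hard-irq-safe lock such as the irq_desc lock no longer creates an unsafe ordering; spin_unlock_irqrestore() restores the saved state:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
	static int example_state;

	/*
	 * Interrupts are disabled while example_lock is held, so the
	 * lock can safely be taken both from process context and while
	 * a hard-irq-safe lock is already held: lockdep no longer sees
	 * a hard-irq-safe -> hard-irq-unsafe dependency.
	 */
	static int example_read_state(void)
	{
		unsigned long flags;
		int val;

		spin_lock_irqsave(&example_lock, flags);
		val = example_state;
		spin_unlock_irqrestore(&example_lock, flags);

		return val;
	}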
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intr_remapping.c | 58
1 file changed, 33 insertions(+), 25 deletions(-)
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 8e44db040db7..5ffa65fffb6a 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -117,21 +117,22 @@ int get_irte(int irq, struct irte *entry)
 {
 	int index;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
 	if (!entry)
 		return -1;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return 0;
 }
 
@@ -141,6 +142,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	struct irq_2_iommu *irq_iommu;
 	u16 index, start_index;
 	unsigned int mask = 0;
+	unsigned long flags;
 	int i;
 
 	if (!count)
@@ -170,7 +172,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		return -1;
 	}
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	do {
 		for (i = index; i < index + count; i++)
 			if (table->base[i].present)
@@ -182,7 +184,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
 
 		if (index == start_index) {
-			spin_unlock(&irq_2_ir_lock);
+			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 			printk(KERN_ERR "can't allocate an IRTE\n");
 			return -1;
 		}
@@ -193,7 +195,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		printk(KERN_ERR "can't allocate irq_2_iommu\n");
 		return -1;
 	}
@@ -203,7 +205,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = mask;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
 }
@@ -223,30 +225,32 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	int index;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 	return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu = irq_2_iommu_alloc(irq);
 
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		printk(KERN_ERR "can't allocate irq_2_iommu\n");
 		return -1;
 	}
@@ -256,7 +260,7 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 	irq_iommu->sub_handle = subhandle;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -264,11 +268,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -277,7 +282,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 	irq_iommu->sub_handle = 0;
 	irq_2_iommu(irq)->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return 0;
 }
@@ -289,11 +294,12 @@ int modify_irte(int irq, struct irte *irte_modified)
 	struct irte *irte;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -306,7 +312,7 @@ int modify_irte(int irq, struct irte *irte_modified)
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
 	rc = qi_flush_iec(iommu, index, 0);
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -317,11 +323,12 @@ int flush_irte(int irq)
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -330,7 +337,7 @@ int flush_irte(int irq)
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
 	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }
@@ -363,11 +370,12 @@ int free_irte(int irq)
 	struct irte *irte;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
 
-	spin_lock(&irq_2_ir_lock);
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	irq_iommu = valid_irq_2_iommu(irq);
 	if (!irq_iommu) {
-		spin_unlock(&irq_2_ir_lock);
+		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 		return -1;
 	}
 
@@ -387,7 +395,7 @@ int free_irte(int irq)
 	irq_iommu->sub_handle = 0;
 	irq_iommu->irte_mask = 0;
 
-	spin_unlock(&irq_2_ir_lock);
+	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return rc;
 }