Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c | 653 ++++++++++++++++++++++++++++++-----
1 file changed, 516 insertions(+), 137 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index e3774f6b57cc..8cf987809c66 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -29,6 +29,8 @@
  * to reduce code space and undefined function references.
  */
 
+#undef DEBUG
+
 #include <linux/module.h>
 #include <linux/threads.h>
 #include <linux/kernel_stat.h>
@@ -46,7 +48,10 @@
 #include <linux/cpumask.h>
 #include <linux/profile.h>
 #include <linux/bitops.h>
-#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/mutex.h>
+#include <linux/bootmem.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -57,39 +62,38 @@
 #include <asm/prom.h>
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
+#include <asm/udbg.h>
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/paca.h>
 #endif
 
 int __irq_offset_value;
-#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(__irq_offset_value);
-#endif
-
 static int ppc_spurious_interrupts;
 
 #ifdef CONFIG_PPC32
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
+EXPORT_SYMBOL(__irq_offset_value);
+atomic_t ppc_n_lost_interrupts;
 
+#ifndef CONFIG_PPC_MERGE
+#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-atomic_t ppc_n_lost_interrupts;
+#endif
 
 #ifdef CONFIG_TAU_INT
 extern int tau_initialized;
 extern int tau_interrupts(int);
 #endif
+#endif /* CONFIG_PPC32 */
 
 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
 extern atomic_t ipi_recv;
 extern atomic_t ipi_sent;
 #endif
-#endif /* CONFIG_PPC32 */
 
 #ifdef CONFIG_PPC64
 EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
-u64 ppc64_interrupt_controller;
 #endif /* CONFIG_PPC64 */
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -182,7 +186,7 @@ void fixup_irqs(cpumask_t map)
 
 void do_IRQ(struct pt_regs *regs)
 {
-	int irq;
+	unsigned int irq;
 #ifdef CONFIG_IRQSTACKS
 	struct thread_info *curtp, *irqtp;
 #endif
@@ -213,22 +217,26 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq(regs);
 
-	if (irq >= 0) {
+	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
 #ifdef CONFIG_IRQSTACKS
 		/* Switch to the irq stack to handle this */
 		curtp = current_thread_info();
 		irqtp = hardirq_ctx[smp_processor_id()];
 		if (curtp != irqtp) {
+			struct irq_desc *desc = irq_desc + irq;
+			void *handler = desc->handle_irq;
+			if (handler == NULL)
+				handler = &__do_IRQ;
 			irqtp->task = curtp->task;
 			irqtp->flags = 0;
-			call___do_IRQ(irq, regs, irqtp);
+			call_handle_irq(irq, desc, regs, irqtp, handler);
 			irqtp->task = NULL;
 			if (irqtp->flags)
 				set_bits(irqtp->flags, &curtp->flags);
 		} else
 #endif
-			__do_IRQ(irq, regs);
-	} else if (irq != -2)
+			generic_handle_irq(irq, regs);
+	} else if (irq != NO_IRQ_IGNORE)
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
 
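Note the convention change in this hunk: get_irq() now hands back an unsigned virtual interrupt number, with NO_IRQ meaning "nothing pending" and NO_IRQ_IGNORE replacing the old magic -2 for "spurious, don't count it". A minimal sketch of a board-level hook under the new scheme; the mypic_* names and the pending-register read are assumptions for illustration, not part of this patch:

static struct irq_host *mypic_host;	/* set up at boot, see the next sketch */

static unsigned int myboard_get_irq(struct pt_regs *regs)
{
	int hw = mypic_read_pending();	/* assumed hardware register read */

	if (hw < 0)
		return NO_IRQ;		/* nothing pending */
	/* translate the hardware number into the virq do_IRQ() expects */
	return irq_linear_revmap(mypic_host, hw);
}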
@@ -245,191 +253,562 @@ void do_IRQ(struct pt_regs *regs)
 
 void __init init_IRQ(void)
 {
+	ppc_md.init_IRQ();
 #ifdef CONFIG_PPC64
-	static int once = 0;
+	irq_ctx_init();
+#endif
+}
+
+
+#ifdef CONFIG_IRQSTACKS
+struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
+struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
+
+void irq_ctx_init(void)
+{
+	struct thread_info *tp;
+	int i;
+
+	for_each_possible_cpu(i) {
+		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
+		tp = softirq_ctx[i];
+		tp->cpu = i;
+		tp->preempt_count = SOFTIRQ_OFFSET;
+
+		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
+		tp = hardirq_ctx[i];
+		tp->cpu = i;
+		tp->preempt_count = HARDIRQ_OFFSET;
+	}
+}
+
+static inline void do_softirq_onstack(void)
+{
+	struct thread_info *curtp, *irqtp;
+
+	curtp = current_thread_info();
+	irqtp = softirq_ctx[smp_processor_id()];
+	irqtp->task = curtp->task;
+	call_do_softirq(irqtp);
+	irqtp->task = NULL;
+}
 
-	if (once)
+#else
+#define do_softirq_onstack()	__do_softirq()
+#endif /* CONFIG_IRQSTACKS */
+
+void do_softirq(void)
+{
+	unsigned long flags;
+
+	if (in_interrupt())
 		return;
 
-	once++;
+	local_irq_save(flags);
 
-#endif
-	ppc_md.init_IRQ();
-#ifdef CONFIG_PPC64
-	irq_ctx_init();
-#endif
+	if (local_softirq_pending())
+		do_softirq_onstack();
+
+	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(do_softirq);
+
 
-#ifdef CONFIG_PPC64
 /*
- * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
+ * IRQ controller and virtual interrupts
  */
 
-#define UNDEFINED_IRQ	0xffffffff
-unsigned int virt_irq_to_real_map[NR_IRQS];
+#ifdef CONFIG_PPC_MERGE
 
-/*
- * Don't use virtual irqs 0, 1, 2 for devices.
- * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
- * and 2 is the XICS IPI interrupt.
- * We limit virtual irqs to __irq_offset_value less than virt_irq_max so
- * that when we offset them we don't end up with an interrupt
- * number >= virt_irq_max.
- */
-#define MIN_VIRT_IRQ	3
+static LIST_HEAD(irq_hosts);
+static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
 
-unsigned int virt_irq_max;
-static unsigned int max_virt_irq;
-static unsigned int nr_virt_irqs;
+struct irq_map_entry irq_map[NR_IRQS];
+static unsigned int irq_virq_count = NR_IRQS;
+static struct irq_host *irq_default_host;
 
-void
-virt_irq_init(void)
+struct irq_host *irq_alloc_host(unsigned int revmap_type,
+				unsigned int revmap_arg,
+				struct irq_host_ops *ops,
+				irq_hw_number_t inval_irq)
 {
-	int i;
+	struct irq_host *host;
+	unsigned int size = sizeof(struct irq_host);
+	unsigned int i;
+	unsigned int *rmap;
+	unsigned long flags;
+
+	/* Allocate structure and revmap table if using linear mapping */
+	if (revmap_type == IRQ_HOST_MAP_LINEAR)
+		size += revmap_arg * sizeof(unsigned int);
+	if (mem_init_done)
+		host = kzalloc(size, GFP_KERNEL);
+	else {
+		host = alloc_bootmem(size);
+		if (host)
+			memset(host, 0, size);
+	}
+	if (host == NULL)
+		return NULL;
 
-	if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
-		virt_irq_max = NR_IRQS - 1;
-	max_virt_irq = virt_irq_max - __irq_offset_value;
-	nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;
+	/* Fill structure */
+	host->revmap_type = revmap_type;
+	host->inval_irq = inval_irq;
+	host->ops = ops;
 
-	for (i = 0; i < NR_IRQS; i++)
-		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
+	spin_lock_irqsave(&irq_big_lock, flags);
+
+	/* If it's a legacy controller, check for duplicates and
+	 * mark it as allocated (we use irq 0 host pointer for that)
+	 */
+	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
+		if (irq_map[0].host != NULL) {
+			spin_unlock_irqrestore(&irq_big_lock, flags);
+			/* If we are early boot, we can't free the structure,
+			 * too bad...
+			 * this will be fixed once slab is made available early
+			 * instead of the current cruft
+			 */
+			if (mem_init_done)
+				kfree(host);
+			return NULL;
+		}
+		irq_map[0].host = host;
+	}
+
+	list_add(&host->link, &irq_hosts);
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+
+	/* Additional setups per revmap type */
+	switch(revmap_type) {
+	case IRQ_HOST_MAP_LEGACY:
+		/* 0 is always the invalid number for legacy */
+		host->inval_irq = 0;
+		/* setup us as the host for all legacy interrupts */
+		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
+			irq_map[i].hwirq = 0;
+			smp_wmb();
+			irq_map[i].host = host;
+			smp_wmb();
+
+			/* Clear some flags */
+			get_irq_desc(i)->status
+				&= ~(IRQ_NOREQUEST | IRQ_LEVEL);
+
+			/* Legacy flags are left to default at this point,
+			 * one can then use irq_create_mapping() to
+			 * explicitly change them
+			 */
+			ops->map(host, i, i, 0);
+		}
+		break;
+	case IRQ_HOST_MAP_LINEAR:
+		rmap = (unsigned int *)(host + 1);
+		for (i = 0; i < revmap_arg; i++)
+			rmap[i] = IRQ_NONE;
+		host->revmap_data.linear.size = revmap_arg;
+		smp_wmb();
+		host->revmap_data.linear.revmap = rmap;
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
+
+	return host;
 }
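irq_alloc_host() above is the registration point for a PIC driver. A hedged boot-time sketch for a controller with 64 hardware sources using the linear reverse map; all mypic_* names are hypothetical, only irq_alloc_host(), irq_set_default_host(), set_irq_chip_and_handler() and the ops signatures come from this patch and the generic irq layer:

static struct irq_chip mypic_chip;	/* .mask/.unmask/.eoi omitted */

static int mypic_host_map(struct irq_host *h, unsigned int virq,
			  irq_hw_number_t hw, unsigned int flags)
{
	/* attach the chip and a default flow handler to the new virq */
	set_irq_chip_and_handler(virq, &mypic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops mypic_host_ops = {
	.map	= mypic_host_map,
};

static void __init mypic_init(void)
{
	/* hwirq 0 doubles as this host's "invalid" marker */
	mypic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 64,
				    &mypic_host_ops, 0);
	BUG_ON(mypic_host == NULL);
	irq_set_default_host(mypic_host);
}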
 
-/* Create a mapping for a real_irq if it doesn't already exist.
- * Return the virtual irq as a convenience.
- */
-int virt_irq_create_mapping(unsigned int real_irq)
+struct irq_host *irq_find_host(struct device_node *node)
 {
-	unsigned int virq, first_virq;
-	static int warned;
+	struct irq_host *h, *found = NULL;
+	unsigned long flags;
+
+	/* We might want to match the legacy controller last since
+	 * it might potentially be set to match all interrupts in
+	 * the absence of a device node. This isn't a problem so far
+	 * yet though...
+	 */
+	spin_lock_irqsave(&irq_big_lock, flags);
+	list_for_each_entry(h, &irq_hosts, link)
+		if (h->ops->match == NULL || h->ops->match(h, node)) {
+			found = h;
+			break;
+		}
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+	return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+void irq_set_default_host(struct irq_host *host)
+{
+	pr_debug("irq: Default host set to @0x%p\n", host);
 
-	if (ppc64_interrupt_controller == IC_OPEN_PIC)
-		return real_irq;	/* no mapping for openpic (for now) */
+	irq_default_host = host;
+}
 
-	if (ppc64_interrupt_controller == IC_CELL_PIC)
-		return real_irq;	/* no mapping for iic either */
+void irq_set_virq_count(unsigned int count)
+{
+	pr_debug("irq: Trying to set virq count to %d\n", count);
 
-	/* don't map interrupts < MIN_VIRT_IRQ */
-	if (real_irq < MIN_VIRT_IRQ) {
-		virt_irq_to_real_map[real_irq] = real_irq;
-		return real_irq;
+	BUG_ON(count < NUM_ISA_INTERRUPTS);
+	if (count < NR_IRQS)
+		irq_virq_count = count;
+}
+
+unsigned int irq_create_mapping(struct irq_host *host,
+				irq_hw_number_t hwirq,
+				unsigned int flags)
+{
+	unsigned int virq, hint;
+
+	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx, 0x%x)\n",
+		 host, hwirq, flags);
+
+	/* Look for default host if necessary */
+	if (host == NULL)
+		host = irq_default_host;
+	if (host == NULL) {
+		printk(KERN_WARNING "irq_create_mapping called for"
+		       " NULL host, hwirq=%lx\n", hwirq);
+		WARN_ON(1);
+		return NO_IRQ;
 	}
+	pr_debug("irq: -> using host @%p\n", host);
 
-	/* map to a number between MIN_VIRT_IRQ and max_virt_irq */
-	virq = real_irq;
-	if (virq > max_virt_irq)
-		virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
-
-	/* search for this number or a free slot */
-	first_virq = virq;
-	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
-		if (virt_irq_to_real_map[virq] == real_irq)
-			return virq;
-		if (++virq > max_virt_irq)
-			virq = MIN_VIRT_IRQ;
-		if (virq == first_virq)
-			goto nospace;	/* oops, no free slots */
+	/* Check if mapping already exists, if it does, call
+	 * host->ops->map() to update the flags
+	 */
+	virq = irq_find_mapping(host, hwirq);
+	if (virq != IRQ_NONE) {
+		pr_debug("irq: -> existing mapping on virq %d\n", virq);
+		host->ops->map(host, virq, hwirq, flags);
+		return virq;
 	}
 
-	virt_irq_to_real_map[virq] = real_irq;
+	/* Get a virtual interrupt number */
+	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
+		/* Handle legacy */
+		virq = (unsigned int)hwirq;
+		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
+			return NO_IRQ;
+		return virq;
+	} else {
+		/* Allocate a virtual interrupt number */
+		hint = hwirq % irq_virq_count;
+		virq = irq_alloc_virt(host, 1, hint);
+		if (virq == NO_IRQ) {
+			pr_debug("irq: -> virq allocation failed\n");
+			return NO_IRQ;
+		}
+	}
+	pr_debug("irq: -> obtained virq %d\n", virq);
+
+	/* Clear some flags */
+	get_irq_desc(virq)->status &= ~(IRQ_NOREQUEST | IRQ_LEVEL);
+
+	/* map it */
+	if (host->ops->map(host, virq, hwirq, flags)) {
+		pr_debug("irq: -> mapping failed, freeing\n");
+		irq_free_virt(virq, 1);
+		return NO_IRQ;
+	}
+	smp_wmb();
+	irq_map[virq].hwirq = hwirq;
+	smp_mb();
 	return virq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
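A hedged usage sketch of the function above: bind hardware source 9 on the hypothetical host from the previous sketch and check the result.

static int __init mydev_setup(void)
{
	unsigned int virq;

	virq = irq_create_mapping(mypic_host, 9, IRQ_TYPE_NONE);
	if (virq == NO_IRQ)
		return -ENODEV;
	/* virq is now an ordinary Linux interrupt number */
	return 0;
}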
+
+extern unsigned int irq_create_of_mapping(struct device_node *controller,
+					  u32 *intspec, unsigned int intsize)
+{
+	struct irq_host *host;
+	irq_hw_number_t hwirq;
+	unsigned int flags = IRQ_TYPE_NONE;
 
-nospace:
-	if (!warned) {
-		printk(KERN_CRIT "Interrupt table is full\n");
-		printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
-		       "in your kernel sources and rebuild.\n", virt_irq_max);
-		warned = 1;
+	if (controller == NULL)
+		host = irq_default_host;
+	else
+		host = irq_find_host(controller);
+	if (host == NULL)
+		return NO_IRQ;
+
+	/* If host has no translation, then we assume interrupt line */
+	if (host->ops->xlate == NULL)
+		hwirq = intspec[0];
+	else {
+		if (host->ops->xlate(host, controller, intspec, intsize,
+				     &hwirq, &flags))
+			return NO_IRQ;
 	}
-	return NO_IRQ;
+
+	return irq_create_mapping(host, hwirq, flags);
 }
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 
-/*
- * In most cases will get a hit on the very first slot checked in the
- * virt_irq_to_real_map. Only when there are a large number of
- * IRQs will this be expensive.
- */
-unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
+unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
 {
-	unsigned int virq;
-	unsigned int first_virq;
+	struct of_irq oirq;
 
-	virq = real_irq;
+	if (of_irq_map_one(dev, index, &oirq))
+		return NO_IRQ;
 
-	if (virq > max_virt_irq)
-		virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
+	return irq_create_of_mapping(oirq.controller, oirq.specifier,
+				     oirq.size);
+}
+EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
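For most device drivers the whole machinery collapses into the one call above. A sketch of the expected driver-side pattern; my_isr is an assumed irqreturn_t handler and the error values are illustrative:

static int mydev_probe(struct device_node *np)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (virq == NO_IRQ)
		return -ENODEV;
	if (request_irq(virq, my_isr, 0, "mydev", NULL)) {
		irq_dispose_mapping(virq);
		return -EBUSY;
	}
	return 0;
}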
 
-	first_virq = virq;
+void irq_dispose_mapping(unsigned int virq)
+{
+	struct irq_host *host = irq_map[virq].host;
+	irq_hw_number_t hwirq;
+	unsigned long flags;
 
-	do {
-		if (virt_irq_to_real_map[virq] == real_irq)
-			return virq;
+	WARN_ON (host == NULL);
+	if (host == NULL)
+		return;
 
-		virq++;
+	/* Never unmap legacy interrupts */
+	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
+		return;
 
-		if (virq >= max_virt_irq)
-			virq = 0;
+	/* remove chip and handler */
+	set_irq_chip_and_handler(virq, NULL, NULL);
+
+	/* Make sure it's completed */
+	synchronize_irq(virq);
+
+	/* Tell the PIC about it */
+	if (host->ops->unmap)
+		host->ops->unmap(host, virq);
+	smp_mb();
+
+	/* Clear reverse map */
+	hwirq = irq_map[virq].hwirq;
+	switch(host->revmap_type) {
+	case IRQ_HOST_MAP_LINEAR:
+		if (hwirq < host->revmap_data.linear.size)
+			host->revmap_data.linear.revmap[hwirq] = IRQ_NONE;
+		break;
+	case IRQ_HOST_MAP_TREE:
+		/* Check if radix tree allocated yet */
+		if (host->revmap_data.tree.gfp_mask == 0)
+			break;
+		/* XXX radix tree not safe ! remove lock when it becomes safe
+		 * and use some RCU sync to make sure everything is ok before we
+		 * can re-use that map entry
+		 */
+		spin_lock_irqsave(&irq_big_lock, flags);
+		radix_tree_delete(&host->revmap_data.tree, hwirq);
+		spin_unlock_irqrestore(&irq_big_lock, flags);
+		break;
+	}
 
-	} while (first_virq != virq);
+	/* Destroy map */
+	smp_mb();
+	irq_map[virq].hwirq = host->inval_irq;
 
-	return NO_IRQ;
+	/* Set some flags */
+	get_irq_desc(virq)->status |= IRQ_NOREQUEST;
 
+	/* Free it */
+	irq_free_virt(virq, 1);
 }
-#endif /* CONFIG_PPC64 */
+EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 
-#ifdef CONFIG_IRQSTACKS
-struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
+unsigned int irq_find_mapping(struct irq_host *host,
+			      irq_hw_number_t hwirq)
+{
+	unsigned int i;
+	unsigned int hint = hwirq % irq_virq_count;
+
+	/* Look for default host if necessary */
+	if (host == NULL)
+		host = irq_default_host;
+	if (host == NULL)
+		return NO_IRQ;
+
+	/* legacy -> bail early */
+	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
+		return hwirq;
+
+	/* Slow path does a linear search of the map */
+	if (hint < NUM_ISA_INTERRUPTS)
+		hint = NUM_ISA_INTERRUPTS;
+	i = hint;
+	do {
+		if (irq_map[i].host == host &&
+		    irq_map[i].hwirq == hwirq)
+			return i;
+		i++;
+		if (i >= irq_virq_count)
+			i = NUM_ISA_INTERRUPTS;
+	} while(i != hint);
+	return NO_IRQ;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
 
-void irq_ctx_init(void)
+
+unsigned int irq_radix_revmap(struct irq_host *host,
+			      irq_hw_number_t hwirq)
 {
-	struct thread_info *tp;
-	int i;
+	struct radix_tree_root *tree;
+	struct irq_map_entry *ptr;
+	unsigned int virq;
+	unsigned long flags;
 
-	for_each_possible_cpu(i) {
-		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
-		tp = softirq_ctx[i];
-		tp->cpu = i;
-		tp->preempt_count = SOFTIRQ_OFFSET;
+	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
 
-		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
-		tp = hardirq_ctx[i];
-		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
+	/* Check if the radix tree exists yet. We test the value of
+	 * the gfp_mask for that. Sneaky but saves another int in the
+	 * structure. If not, we fall back to slow mode
+	 */
+	tree = &host->revmap_data.tree;
+	if (tree->gfp_mask == 0)
+		return irq_find_mapping(host, hwirq);
+
+	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
+	 * when that is fixed (when Nick's patch gets in)
+	 */
+	spin_lock_irqsave(&irq_big_lock, flags);
+
+	/* Now try to resolve */
+	ptr = radix_tree_lookup(tree, hwirq);
+	/* Found it, return */
+	if (ptr) {
+		virq = ptr - irq_map;
+		goto bail;
 	}
+
+	/* If not there, try to insert it */
+	virq = irq_find_mapping(host, hwirq);
+	if (virq != NO_IRQ)
+		radix_tree_insert(tree, virq, &irq_map[virq]);
+ bail:
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+	return virq;
 }
 
-static inline void do_softirq_onstack(void)
+unsigned int irq_linear_revmap(struct irq_host *host,
+			       irq_hw_number_t hwirq)
 {
-	struct thread_info *curtp, *irqtp;
+	unsigned int *revmap;
 
-	curtp = current_thread_info();
-	irqtp = softirq_ctx[smp_processor_id()];
-	irqtp->task = curtp->task;
-	call_do_softirq(irqtp);
-	irqtp->task = NULL;
+	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
+
+	/* Check revmap bounds */
+	if (unlikely(hwirq >= host->revmap_data.linear.size))
+		return irq_find_mapping(host, hwirq);
+
+	/* Check if revmap was allocated */
+	revmap = host->revmap_data.linear.revmap;
+	if (unlikely(revmap == NULL))
+		return irq_find_mapping(host, hwirq);
+
+	/* Fill up revmap with slow path if no mapping found */
+	if (unlikely(revmap[hwirq] == NO_IRQ))
+		revmap[hwirq] = irq_find_mapping(host, hwirq);
+
+	return revmap[hwirq];
 }
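Since irq_linear_revmap() degenerates to a single array load once a mapping has been filled in, it is the lookup a cascaded PIC would use on its hot path. A hedged sketch reusing the hypothetical mypic names; mypic_ack() is an assumed read-and-acknowledge of the controller:

static void mypic_cascade(unsigned int irq, struct irq_desc *desc,
			  struct pt_regs *regs)
{
	irq_hw_number_t hw = mypic_ack();	/* assumed register access */
	unsigned int virq = irq_linear_revmap(mypic_host, hw);

	if (virq != NO_IRQ)
		generic_handle_irq(virq, regs);
	desc->chip->eoi(irq);
}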
 
-#else
-#define do_softirq_onstack()	__do_softirq()
-#endif /* CONFIG_IRQSTACKS */
+unsigned int irq_alloc_virt(struct irq_host *host,
+			    unsigned int count,
+			    unsigned int hint)
+{
+	unsigned long flags;
+	unsigned int i, j, found = NO_IRQ;
+	unsigned int limit = irq_virq_count - count;
 
-void do_softirq(void)
+	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
+		return NO_IRQ;
+
+	spin_lock_irqsave(&irq_big_lock, flags);
+
+	/* Use hint for 1 interrupt if any */
+	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
+	    hint < irq_virq_count && irq_map[hint].host == NULL) {
+		found = hint;
+		goto hint_found;
+	}
+
+	/* Look for count consecutive numbers in the allocatable
+	 * (non-legacy) space
+	 */
+	for (i = NUM_ISA_INTERRUPTS; i <= limit; ) {
+		for (j = i; j < (i + count); j++)
+			if (irq_map[j].host != NULL) {
+				i = j + 1;
+				continue;
+			}
+		found = i;
+		break;
+	}
+	if (found == NO_IRQ) {
+		spin_unlock_irqrestore(&irq_big_lock, flags);
+		return NO_IRQ;
+	}
+ hint_found:
+	for (i = found; i < (found + count); i++) {
+		irq_map[i].hwirq = host->inval_irq;
+		smp_wmb();
+		irq_map[i].host = host;
+	}
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+	return found;
+}
+
+void irq_free_virt(unsigned int virq, unsigned int count)
 {
 	unsigned long flags;
+	unsigned int i;
 
-	if (in_interrupt())
-		return;
+	WARN_ON (virq < NUM_ISA_INTERRUPTS);
+	WARN_ON (count == 0 || (virq + count) > irq_virq_count);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&irq_big_lock, flags);
+	for (i = virq; i < (virq + count); i++) {
+		struct irq_host *host;
 
-	if (local_softirq_pending())
-		do_softirq_onstack();
+		if (i < NUM_ISA_INTERRUPTS ||
+		    (virq + count) > irq_virq_count)
+			continue;
 
-	local_irq_restore(flags);
+		host = irq_map[i].host;
+		irq_map[i].hwirq = host->inval_irq;
+		smp_wmb();
+		irq_map[i].host = NULL;
+	}
+	spin_unlock_irqrestore(&irq_big_lock, flags);
 }
-EXPORT_SYMBOL(do_softirq);
+
+void irq_early_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < NR_IRQS; i++)
+		get_irq_desc(i)->status |= IRQ_NOREQUEST;
+}
+
+/* We need to create the radix trees late */
+static int irq_late_init(void)
+{
+	struct irq_host *h;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_big_lock, flags);
+	list_for_each_entry(h, &irq_hosts, link) {
+		if (h->revmap_type == IRQ_HOST_MAP_TREE)
+			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
+	}
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+
+	return 0;
+}
+arch_initcall(irq_late_init);
+
+#endif /* CONFIG_PPC_MERGE */
 
 #ifdef CONFIG_PCI_MSI
 int pci_enable_msi(struct pci_dev * pdev)
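One closing note on the reverse maps: a host with a sparse hardware number space, where a linear array would waste memory, can pass IRQ_HOST_MAP_TREE and resolve through irq_radix_revmap(); until irq_late_init() has initialized the radix tree, that call quietly falls back to the linear scan in irq_find_mapping(). A minimal hypothetical sketch, reusing the assumed mypic_host_ops from the earlier example:

static struct irq_host *mypic_tree_host;

static void __init mypic_tree_init(void)
{
	/* revmap_arg is only used by the linear type, hence 0 */
	mypic_tree_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0,
					 &mypic_host_ops, 0);
	BUG_ON(mypic_tree_host == NULL);
}

static unsigned int mypic_tree_resolve(irq_hw_number_t hwirq)
{
	/* falls back to the slow linear search until the tree exists */
	return irq_radix_revmap(mypic_tree_host, hwirq);
}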