57 files changed, 2829 insertions, 2126 deletions
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index e47d40ac6f39..97ddc02a3d42 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -323,13 +323,11 @@ int ibmebus_request_irq(struct ibmebus_dev *dev, | |||
323 | unsigned long irq_flags, const char * devname, | 323 | unsigned long irq_flags, const char * devname, |
324 | void *dev_id) | 324 | void *dev_id) |
325 | { | 325 | { |
326 | unsigned int irq = virt_irq_create_mapping(ist); | 326 | unsigned int irq = irq_create_mapping(NULL, ist, 0); |
327 | 327 | ||
328 | if (irq == NO_IRQ) | 328 | if (irq == NO_IRQ) |
329 | return -EINVAL; | 329 | return -EINVAL; |
330 | 330 | ||
331 | irq = irq_offset_up(irq); | ||
332 | |||
333 | return request_irq(irq, handler, | 331 | return request_irq(irq, handler, |
334 | irq_flags, devname, dev_id); | 332 | irq_flags, devname, dev_id); |
335 | } | 333 | } |
@@ -337,12 +335,9 @@ EXPORT_SYMBOL(ibmebus_request_irq); | |||
337 | 335 | ||
338 | void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id) | 336 | void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id) |
339 | { | 337 | { |
340 | unsigned int irq = virt_irq_create_mapping(ist); | 338 | unsigned int irq = irq_find_mapping(NULL, ist); |
341 | 339 | ||
342 | irq = irq_offset_up(irq); | ||
343 | free_irq(irq, dev_id); | 340 | free_irq(irq, dev_id); |
344 | |||
345 | return; | ||
346 | } | 341 | } |
347 | EXPORT_SYMBOL(ibmebus_free_irq); | 342 | EXPORT_SYMBOL(ibmebus_free_irq); |
348 | 343 | ||
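The two ibmebus hunks above show the driver-facing side of the conversion: instead of building a virtual number by hand with virt_irq_create_mapping() and then shifting it through irq_offset_up(), a driver now asks the new remapping core for a virq and uses it directly. Below is a minimal sketch of that pattern under the era's handler prototype; my_isr(), my_attach(), my_detach(), the hw_source argument and the "mydev" name are placeholders, and passing a NULL host selects the default irq_host exactly as the ibmebus code does.

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq.h>

/* Hedged sketch of the new driver-side pattern; all names are invented. */
static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int my_attach(u32 hw_source, void *dev_id)
{
	/* NULL host = default host; no irq_offset_up() step any more */
	unsigned int virq = irq_create_mapping(NULL, hw_source, 0);

	if (virq == NO_IRQ)
		return -EINVAL;
	return request_irq(virq, my_isr, 0, "mydev", dev_id);
}

static void my_detach(u32 hw_source, void *dev_id)
{
	unsigned int virq = irq_find_mapping(NULL, hw_source);

	if (virq != NO_IRQ)
		free_irq(virq, dev_id);
}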
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 91248559099a..05a700940f67 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -29,6 +29,8 @@ | |||
29 | * to reduce code space and undefined function references. | 29 | * to reduce code space and undefined function references. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #undef DEBUG | ||
33 | |||
32 | #include <linux/module.h> | 34 | #include <linux/module.h> |
33 | #include <linux/threads.h> | 35 | #include <linux/threads.h> |
34 | #include <linux/kernel_stat.h> | 36 | #include <linux/kernel_stat.h> |
@@ -46,7 +48,10 @@ | |||
46 | #include <linux/cpumask.h> | 48 | #include <linux/cpumask.h> |
47 | #include <linux/profile.h> | 49 | #include <linux/profile.h> |
48 | #include <linux/bitops.h> | 50 | #include <linux/bitops.h> |
49 | #include <linux/pci.h> | 51 | #include <linux/list.h> |
52 | #include <linux/radix-tree.h> | ||
53 | #include <linux/mutex.h> | ||
54 | #include <linux/bootmem.h> | ||
50 | 55 | ||
51 | #include <asm/uaccess.h> | 56 | #include <asm/uaccess.h> |
52 | #include <asm/system.h> | 57 | #include <asm/system.h> |
@@ -57,6 +62,7 @@ | |||
57 | #include <asm/prom.h> | 62 | #include <asm/prom.h> |
58 | #include <asm/ptrace.h> | 63 | #include <asm/ptrace.h> |
59 | #include <asm/machdep.h> | 64 | #include <asm/machdep.h> |
65 | #include <asm/udbg.h> | ||
60 | #ifdef CONFIG_PPC_ISERIES | 66 | #ifdef CONFIG_PPC_ISERIES |
61 | #include <asm/paca.h> | 67 | #include <asm/paca.h> |
62 | #endif | 68 | #endif |
@@ -88,7 +94,6 @@ extern atomic_t ipi_sent; | |||
88 | EXPORT_SYMBOL(irq_desc); | 94 | EXPORT_SYMBOL(irq_desc); |
89 | 95 | ||
90 | int distribute_irqs = 1; | 96 | int distribute_irqs = 1; |
91 | u64 ppc64_interrupt_controller; | ||
92 | #endif /* CONFIG_PPC64 */ | 97 | #endif /* CONFIG_PPC64 */ |
93 | 98 | ||
94 | int show_interrupts(struct seq_file *p, void *v) | 99 | int show_interrupts(struct seq_file *p, void *v) |
@@ -181,7 +186,7 @@ void fixup_irqs(cpumask_t map) | |||
181 | 186 | ||
182 | void do_IRQ(struct pt_regs *regs) | 187 | void do_IRQ(struct pt_regs *regs) |
183 | { | 188 | { |
184 | int irq; | 189 | unsigned int irq; |
185 | #ifdef CONFIG_IRQSTACKS | 190 | #ifdef CONFIG_IRQSTACKS |
186 | struct thread_info *curtp, *irqtp; | 191 | struct thread_info *curtp, *irqtp; |
187 | #endif | 192 | #endif |
@@ -212,7 +217,7 @@ void do_IRQ(struct pt_regs *regs) | |||
212 | */ | 217 | */ |
213 | irq = ppc_md.get_irq(regs); | 218 | irq = ppc_md.get_irq(regs); |
214 | 219 | ||
215 | if (irq >= 0) { | 220 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { |
216 | #ifdef CONFIG_IRQSTACKS | 221 | #ifdef CONFIG_IRQSTACKS |
217 | /* Switch to the irq stack to handle this */ | 222 | /* Switch to the irq stack to handle this */ |
218 | curtp = current_thread_info(); | 223 | curtp = current_thread_info(); |
@@ -231,7 +236,7 @@ void do_IRQ(struct pt_regs *regs) | |||
231 | } else | 236 | } else |
232 | #endif | 237 | #endif |
233 | generic_handle_irq(irq, regs); | 238 | generic_handle_irq(irq, regs); |
234 | } else if (irq != -2) | 239 | } else if (irq != NO_IRQ_IGNORE) |
235 | /* That's not SMP safe ... but who cares ? */ | 240 | /* That's not SMP safe ... but who cares ? */ |
236 | ppc_spurious_interrupts++; | 241 | ppc_spurious_interrupts++; |
237 | 242 | ||
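Since do_IRQ() now tests against NO_IRQ and NO_IRQ_IGNORE instead of signed values, a platform's ppc_md.get_irq() hook is expected to return either a virtual interrupt number or one of those two sentinels: NO_IRQ bumps the spurious counter, NO_IRQ_IGNORE is dropped silently. The sketch below only illustrates that contract; my_pic_read_vector(), MY_PIC_SPURIOUS_VEC and my_pic_host are invented names, and the unsigned return type is inferred from the do_IRQ() change above.

#include <linux/irq.h>
#include <asm/irq.h>

#define MY_PIC_SPURIOUS_VEC	0xff	/* hypothetical spurious vector */

static struct irq_host *my_pic_host;	/* set up elsewhere by the PIC driver */
extern int my_pic_read_vector(void);	/* hypothetical hardware accessor */

static unsigned int my_platform_get_irq(struct pt_regs *regs)
{
	int vec = my_pic_read_vector();

	if (vec < 0)
		return NO_IRQ;		/* nothing pending, counted as spurious */
	if (vec == MY_PIC_SPURIOUS_VEC)
		return NO_IRQ_IGNORE;	/* discard without bumping the counter */
	return irq_linear_revmap(my_pic_host, vec);	/* hwirq -> virq */
}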
@@ -254,123 +259,6 @@ void __init init_IRQ(void) | |||
254 | #endif | 259 | #endif |
255 | } | 260 | } |
256 | 261 | ||
257 | #ifdef CONFIG_PPC64 | ||
258 | /* | ||
259 | * Virtual IRQ mapping code, used on systems with XICS interrupt controllers. | ||
260 | */ | ||
261 | |||
262 | #define UNDEFINED_IRQ 0xffffffff | ||
263 | unsigned int virt_irq_to_real_map[NR_IRQS]; | ||
264 | |||
265 | /* | ||
266 | * Don't use virtual irqs 0, 1, 2 for devices. | ||
267 | * The pcnet32 driver considers interrupt numbers < 2 to be invalid, | ||
268 | * and 2 is the XICS IPI interrupt. | ||
269 | * We limit virtual irqs to __irq_offet_value less than virt_irq_max so | ||
270 | * that when we offset them we don't end up with an interrupt | ||
271 | * number >= virt_irq_max. | ||
272 | */ | ||
273 | #define MIN_VIRT_IRQ 3 | ||
274 | |||
275 | unsigned int virt_irq_max; | ||
276 | static unsigned int max_virt_irq; | ||
277 | static unsigned int nr_virt_irqs; | ||
278 | |||
279 | void | ||
280 | virt_irq_init(void) | ||
281 | { | ||
282 | int i; | ||
283 | |||
284 | if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1))) | ||
285 | virt_irq_max = NR_IRQS - 1; | ||
286 | max_virt_irq = virt_irq_max - __irq_offset_value; | ||
287 | nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1; | ||
288 | |||
289 | for (i = 0; i < NR_IRQS; i++) | ||
290 | virt_irq_to_real_map[i] = UNDEFINED_IRQ; | ||
291 | } | ||
292 | |||
293 | /* Create a mapping for a real_irq if it doesn't already exist. | ||
294 | * Return the virtual irq as a convenience. | ||
295 | */ | ||
296 | int virt_irq_create_mapping(unsigned int real_irq) | ||
297 | { | ||
298 | unsigned int virq, first_virq; | ||
299 | static int warned; | ||
300 | |||
301 | if (ppc64_interrupt_controller == IC_OPEN_PIC) | ||
302 | return real_irq; /* no mapping for openpic (for now) */ | ||
303 | |||
304 | if (ppc64_interrupt_controller == IC_CELL_PIC) | ||
305 | return real_irq; /* no mapping for iic either */ | ||
306 | |||
307 | /* don't map interrupts < MIN_VIRT_IRQ */ | ||
308 | if (real_irq < MIN_VIRT_IRQ) { | ||
309 | virt_irq_to_real_map[real_irq] = real_irq; | ||
310 | return real_irq; | ||
311 | } | ||
312 | |||
313 | /* map to a number between MIN_VIRT_IRQ and max_virt_irq */ | ||
314 | virq = real_irq; | ||
315 | if (virq > max_virt_irq) | ||
316 | virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; | ||
317 | |||
318 | /* search for this number or a free slot */ | ||
319 | first_virq = virq; | ||
320 | while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) { | ||
321 | if (virt_irq_to_real_map[virq] == real_irq) | ||
322 | return virq; | ||
323 | if (++virq > max_virt_irq) | ||
324 | virq = MIN_VIRT_IRQ; | ||
325 | if (virq == first_virq) | ||
326 | goto nospace; /* oops, no free slots */ | ||
327 | } | ||
328 | |||
329 | virt_irq_to_real_map[virq] = real_irq; | ||
330 | return virq; | ||
331 | |||
332 | nospace: | ||
333 | if (!warned) { | ||
334 | printk(KERN_CRIT "Interrupt table is full\n"); | ||
335 | printk(KERN_CRIT "Increase virt_irq_max (currently %d) " | ||
336 | "in your kernel sources and rebuild.\n", virt_irq_max); | ||
337 | warned = 1; | ||
338 | } | ||
339 | return NO_IRQ; | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * In most cases will get a hit on the very first slot checked in the | ||
344 | * virt_irq_to_real_map. Only when there are a large number of | ||
345 | * IRQs will this be expensive. | ||
346 | */ | ||
347 | unsigned int real_irq_to_virt_slowpath(unsigned int real_irq) | ||
348 | { | ||
349 | unsigned int virq; | ||
350 | unsigned int first_virq; | ||
351 | |||
352 | virq = real_irq; | ||
353 | |||
354 | if (virq > max_virt_irq) | ||
355 | virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ; | ||
356 | |||
357 | first_virq = virq; | ||
358 | |||
359 | do { | ||
360 | if (virt_irq_to_real_map[virq] == real_irq) | ||
361 | return virq; | ||
362 | |||
363 | virq++; | ||
364 | |||
365 | if (virq >= max_virt_irq) | ||
366 | virq = 0; | ||
367 | |||
368 | } while (first_virq != virq); | ||
369 | |||
370 | return NO_IRQ; | ||
371 | |||
372 | } | ||
373 | #endif /* CONFIG_PPC64 */ | ||
374 | 262 | ||
375 | #ifdef CONFIG_IRQSTACKS | 263 | #ifdef CONFIG_IRQSTACKS |
376 | struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; | 264 | struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; |
@@ -430,6 +318,503 @@ void do_softirq(void) | |||
430 | } | 318 | } |
431 | EXPORT_SYMBOL(do_softirq); | 319 | EXPORT_SYMBOL(do_softirq); |
432 | 320 | ||
321 | |||
322 | /* | ||
323 | * IRQ controller and virtual interrupts | ||
324 | */ | ||
325 | |||
326 | #ifdef CONFIG_PPC_MERGE | ||
327 | |||
328 | static LIST_HEAD(irq_hosts); | ||
329 | static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED; | ||
330 | |||
331 | struct irq_map_entry irq_map[NR_IRQS]; | ||
332 | static unsigned int irq_virq_count = NR_IRQS; | ||
333 | static struct irq_host *irq_default_host; | ||
334 | |||
335 | struct irq_host *irq_alloc_host(unsigned int revmap_type, | ||
336 | unsigned int revmap_arg, | ||
337 | struct irq_host_ops *ops, | ||
338 | irq_hw_number_t inval_irq) | ||
339 | { | ||
340 | struct irq_host *host; | ||
341 | unsigned int size = sizeof(struct irq_host); | ||
342 | unsigned int i; | ||
343 | unsigned int *rmap; | ||
344 | unsigned long flags; | ||
345 | |||
346 | /* Allocate structure and revmap table if using linear mapping */ | ||
347 | if (revmap_type == IRQ_HOST_MAP_LINEAR) | ||
348 | size += revmap_arg * sizeof(unsigned int); | ||
349 | if (mem_init_done) | ||
350 | host = kzalloc(size, GFP_KERNEL); | ||
351 | else { | ||
352 | host = alloc_bootmem(size); | ||
353 | if (host) | ||
354 | memset(host, 0, size); | ||
355 | } | ||
356 | if (host == NULL) | ||
357 | return NULL; | ||
358 | |||
359 | /* Fill structure */ | ||
360 | host->revmap_type = revmap_type; | ||
361 | host->inval_irq = inval_irq; | ||
362 | host->ops = ops; | ||
363 | |||
364 | spin_lock_irqsave(&irq_big_lock, flags); | ||
365 | |||
366 | /* If it's a legacy controller, check for duplicates and | ||
367 | * mark it as allocated (we use the irq 0 host pointer for that) | ||
368 | */ | ||
369 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { | ||
370 | if (irq_map[0].host != NULL) { | ||
371 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
372 | /* If we are early boot, we can't free the structure, | ||
373 | * too bad... | ||
374 | * this will be fixed once slab is made available early | ||
375 | * instead of the current cruft | ||
376 | */ | ||
377 | if (mem_init_done) | ||
378 | kfree(host); | ||
379 | return NULL; | ||
380 | } | ||
381 | irq_map[0].host = host; | ||
382 | } | ||
383 | |||
384 | list_add(&host->link, &irq_hosts); | ||
385 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
386 | |||
387 | /* Additional setups per revmap type */ | ||
388 | switch(revmap_type) { | ||
389 | case IRQ_HOST_MAP_LEGACY: | ||
390 | /* 0 is always the invalid number for legacy */ | ||
391 | host->inval_irq = 0; | ||
392 | /* setup us as the host for all legacy interrupts */ | ||
393 | for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { | ||
394 | irq_map[i].hwirq = 0; | ||
395 | smp_wmb(); | ||
396 | irq_map[i].host = host; | ||
397 | smp_wmb(); | ||
398 | |||
399 | /* Clear some flags */ | ||
400 | get_irq_desc(i)->status | ||
401 | &= ~(IRQ_NOREQUEST | IRQ_LEVEL); | ||
402 | |||
403 | /* Legacy flags are left to default at this point, | ||
404 | * one can then use irq_create_mapping() to | ||
405 | * explicitly change them | ||
406 | */ | ||
407 | ops->map(host, i, i, 0); | ||
408 | } | ||
409 | break; | ||
410 | case IRQ_HOST_MAP_LINEAR: | ||
411 | rmap = (unsigned int *)(host + 1); | ||
412 | for (i = 0; i < revmap_arg; i++) | ||
413 | rmap[i] = IRQ_NONE; | ||
414 | host->revmap_data.linear.size = revmap_arg; | ||
415 | smp_wmb(); | ||
416 | host->revmap_data.linear.revmap = rmap; | ||
417 | break; | ||
418 | default: | ||
419 | break; | ||
420 | } | ||
421 | |||
422 | pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); | ||
423 | |||
424 | return host; | ||
425 | } | ||
426 | |||
427 | struct irq_host *irq_find_host(struct device_node *node) | ||
428 | { | ||
429 | struct irq_host *h, *found = NULL; | ||
430 | unsigned long flags; | ||
431 | |||
432 | /* We might want to match the legacy controller last since | ||
433 | * it might potentially be set to match all interrupts in | ||
434 | * the absence of a device node. This isn't a problem so far | ||
435 | * yet though... | ||
436 | */ | ||
437 | spin_lock_irqsave(&irq_big_lock, flags); | ||
438 | list_for_each_entry(h, &irq_hosts, link) | ||
439 | if (h->ops->match == NULL || h->ops->match(h, node)) { | ||
440 | found = h; | ||
441 | break; | ||
442 | } | ||
443 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
444 | return found; | ||
445 | } | ||
446 | EXPORT_SYMBOL_GPL(irq_find_host); | ||
447 | |||
448 | void irq_set_default_host(struct irq_host *host) | ||
449 | { | ||
450 | pr_debug("irq: Default host set to @0x%p\n", host); | ||
451 | |||
452 | irq_default_host = host; | ||
453 | } | ||
454 | |||
455 | void irq_set_virq_count(unsigned int count) | ||
456 | { | ||
457 | pr_debug("irq: Trying to set virq count to %d\n", count); | ||
458 | |||
459 | BUG_ON(count < NUM_ISA_INTERRUPTS); | ||
460 | if (count < NR_IRQS) | ||
461 | irq_virq_count = count; | ||
462 | } | ||
463 | |||
464 | unsigned int irq_create_mapping(struct irq_host *host, | ||
465 | irq_hw_number_t hwirq, | ||
466 | unsigned int flags) | ||
467 | { | ||
468 | unsigned int virq, hint; | ||
469 | |||
470 | pr_debug("irq: irq_create_mapping(0x%p, 0x%lx, 0x%x)\n", | ||
471 | host, hwirq, flags); | ||
472 | |||
473 | /* Look for default host if necessary */ | ||
474 | if (host == NULL) | ||
475 | host = irq_default_host; | ||
476 | if (host == NULL) { | ||
477 | printk(KERN_WARNING "irq_create_mapping called for" | ||
478 | " NULL host, hwirq=%lx\n", hwirq); | ||
479 | WARN_ON(1); | ||
480 | return NO_IRQ; | ||
481 | } | ||
482 | pr_debug("irq: -> using host @%p\n", host); | ||
483 | |||
484 | /* Check if mapping already exist, if it does, call | ||
485 | * host->ops->map() to update the flags | ||
486 | */ | ||
487 | virq = irq_find_mapping(host, hwirq); | ||
488 | if (virq != IRQ_NONE) { | ||
489 | pr_debug("irq: -> existing mapping on virq %d\n", virq); | ||
490 | host->ops->map(host, virq, hwirq, flags); | ||
491 | return virq; | ||
492 | } | ||
493 | |||
494 | /* Get a virtual interrupt number */ | ||
495 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { | ||
496 | /* Handle legacy */ | ||
497 | virq = (unsigned int)hwirq; | ||
498 | if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) | ||
499 | return NO_IRQ; | ||
500 | return virq; | ||
501 | } else { | ||
502 | /* Allocate a virtual interrupt number */ | ||
503 | hint = hwirq % irq_virq_count; | ||
504 | virq = irq_alloc_virt(host, 1, hint); | ||
505 | if (virq == NO_IRQ) { | ||
506 | pr_debug("irq: -> virq allocation failed\n"); | ||
507 | return NO_IRQ; | ||
508 | } | ||
509 | } | ||
510 | pr_debug("irq: -> obtained virq %d\n", virq); | ||
511 | |||
512 | /* Clear some flags */ | ||
513 | get_irq_desc(virq)->status &= ~(IRQ_NOREQUEST | IRQ_LEVEL); | ||
514 | |||
515 | /* map it */ | ||
516 | if (host->ops->map(host, virq, hwirq, flags)) { | ||
517 | pr_debug("irq: -> mapping failed, freeing\n"); | ||
518 | irq_free_virt(virq, 1); | ||
519 | return NO_IRQ; | ||
520 | } | ||
521 | smp_wmb(); | ||
522 | irq_map[virq].hwirq = hwirq; | ||
523 | smp_mb(); | ||
524 | return virq; | ||
525 | } | ||
526 | EXPORT_SYMBOL_GPL(irq_create_mapping); | ||
527 | |||
528 | extern unsigned int irq_create_of_mapping(struct device_node *controller, | ||
529 | u32 *intspec, unsigned int intsize) | ||
530 | { | ||
531 | struct irq_host *host; | ||
532 | irq_hw_number_t hwirq; | ||
533 | unsigned int flags = IRQ_TYPE_NONE; | ||
534 | |||
535 | if (controller == NULL) | ||
536 | host = irq_default_host; | ||
537 | else | ||
538 | host = irq_find_host(controller); | ||
539 | if (host == NULL) | ||
540 | return NO_IRQ; | ||
541 | |||
542 | /* If host has no translation, then we assume interrupt line */ | ||
543 | if (host->ops->xlate == NULL) | ||
544 | hwirq = intspec[0]; | ||
545 | else { | ||
546 | if (host->ops->xlate(host, controller, intspec, intsize, | ||
547 | &hwirq, &flags)) | ||
548 | return NO_IRQ; | ||
549 | } | ||
550 | |||
551 | return irq_create_mapping(host, hwirq, flags); | ||
552 | } | ||
553 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
554 | |||
555 | unsigned int irq_of_parse_and_map(struct device_node *dev, int index) | ||
556 | { | ||
557 | struct of_irq oirq; | ||
558 | |||
559 | if (of_irq_map_one(dev, index, &oirq)) | ||
560 | return NO_IRQ; | ||
561 | |||
562 | return irq_create_of_mapping(oirq.controller, oirq.specifier, | ||
563 | oirq.size); | ||
564 | } | ||
565 | EXPORT_SYMBOL_GPL(irq_of_parse_and_map); | ||
566 | |||
567 | void irq_dispose_mapping(unsigned int virq) | ||
568 | { | ||
569 | struct irq_host *host = irq_map[virq].host; | ||
570 | irq_hw_number_t hwirq; | ||
571 | unsigned long flags; | ||
572 | |||
573 | WARN_ON (host == NULL); | ||
574 | if (host == NULL) | ||
575 | return; | ||
576 | |||
577 | /* Never unmap legacy interrupts */ | ||
578 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) | ||
579 | return; | ||
580 | |||
581 | /* remove chip and handler */ | ||
582 | set_irq_chip_and_handler(virq, NULL, NULL); | ||
583 | |||
584 | /* Make sure it's completed */ | ||
585 | synchronize_irq(virq); | ||
586 | |||
587 | /* Tell the PIC about it */ | ||
588 | if (host->ops->unmap) | ||
589 | host->ops->unmap(host, virq); | ||
590 | smp_mb(); | ||
591 | |||
592 | /* Clear reverse map */ | ||
593 | hwirq = irq_map[virq].hwirq; | ||
594 | switch(host->revmap_type) { | ||
595 | case IRQ_HOST_MAP_LINEAR: | ||
596 | if (hwirq < host->revmap_data.linear.size) | ||
597 | host->revmap_data.linear.revmap[hwirq] = IRQ_NONE; | ||
598 | break; | ||
599 | case IRQ_HOST_MAP_TREE: | ||
600 | /* Check if radix tree allocated yet */ | ||
601 | if (host->revmap_data.tree.gfp_mask == 0) | ||
602 | break; | ||
603 | /* XXX radix tree not safe ! remove lock when it becomes safe | ||
604 | * and use some RCU sync to make sure everything is ok before we | ||
605 | * can re-use that map entry | ||
606 | */ | ||
607 | spin_lock_irqsave(&irq_big_lock, flags); | ||
608 | radix_tree_delete(&host->revmap_data.tree, hwirq); | ||
609 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
610 | break; | ||
611 | } | ||
612 | |||
613 | /* Destroy map */ | ||
614 | smp_mb(); | ||
615 | irq_map[virq].hwirq = host->inval_irq; | ||
616 | |||
617 | /* Set some flags */ | ||
618 | get_irq_desc(virq)->status |= IRQ_NOREQUEST; | ||
619 | |||
620 | /* Free it */ | ||
621 | irq_free_virt(virq, 1); | ||
622 | } | ||
623 | EXPORT_SYMBOL_GPL(irq_dispose_mapping); | ||
624 | |||
625 | unsigned int irq_find_mapping(struct irq_host *host, | ||
626 | irq_hw_number_t hwirq) | ||
627 | { | ||
628 | unsigned int i; | ||
629 | unsigned int hint = hwirq % irq_virq_count; | ||
630 | |||
631 | /* Look for default host if necessary */ | ||
632 | if (host == NULL) | ||
633 | host = irq_default_host; | ||
634 | if (host == NULL) | ||
635 | return NO_IRQ; | ||
636 | |||
637 | /* legacy -> bail early */ | ||
638 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) | ||
639 | return hwirq; | ||
640 | |||
641 | /* Slow path does a linear search of the map */ | ||
642 | if (hint < NUM_ISA_INTERRUPTS) | ||
643 | hint = NUM_ISA_INTERRUPTS; | ||
644 | i = hint; | ||
645 | do { | ||
646 | if (irq_map[i].host == host && | ||
647 | irq_map[i].hwirq == hwirq) | ||
648 | return i; | ||
649 | i++; | ||
650 | if (i >= irq_virq_count) | ||
651 | i = NUM_ISA_INTERRUPTS; | ||
652 | } while(i != hint); | ||
653 | return NO_IRQ; | ||
654 | } | ||
655 | EXPORT_SYMBOL_GPL(irq_find_mapping); | ||
656 | |||
657 | |||
658 | unsigned int irq_radix_revmap(struct irq_host *host, | ||
659 | irq_hw_number_t hwirq) | ||
660 | { | ||
661 | struct radix_tree_root *tree; | ||
662 | struct irq_map_entry *ptr; | ||
663 | unsigned int virq; | ||
664 | unsigned long flags; | ||
665 | |||
666 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | ||
667 | |||
668 | /* Check if the radix tree exists yet. We test the value of | ||
669 | * the gfp_mask for that. Sneaky but saves another int in the | ||
670 | * structure. If not, we fall back to slow mode | ||
671 | */ | ||
672 | tree = &host->revmap_data.tree; | ||
673 | if (tree->gfp_mask == 0) | ||
674 | return irq_find_mapping(host, hwirq); | ||
675 | |||
676 | /* XXX Current radix trees are NOT SMP safe !!! Remove that lock | ||
677 | * when that is fixed (when Nick's patch gets in | ||
678 | */ | ||
679 | spin_lock_irqsave(&irq_big_lock, flags); | ||
680 | |||
681 | /* Now try to resolve */ | ||
682 | ptr = radix_tree_lookup(tree, hwirq); | ||
683 | /* Found it, return */ | ||
684 | if (ptr) { | ||
685 | virq = ptr - irq_map; | ||
686 | goto bail; | ||
687 | } | ||
688 | |||
689 | /* If not there, try to insert it */ | ||
690 | virq = irq_find_mapping(host, hwirq); | ||
691 | if (virq != NO_IRQ) | ||
692 | radix_tree_insert(tree, virq, &irq_map[virq]); | ||
693 | bail: | ||
694 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
695 | return virq; | ||
696 | } | ||
697 | |||
698 | unsigned int irq_linear_revmap(struct irq_host *host, | ||
699 | irq_hw_number_t hwirq) | ||
700 | { | ||
701 | unsigned int *revmap; | ||
702 | |||
703 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); | ||
704 | |||
705 | /* Check revmap bounds */ | ||
706 | if (unlikely(hwirq >= host->revmap_data.linear.size)) | ||
707 | return irq_find_mapping(host, hwirq); | ||
708 | |||
709 | /* Check if revmap was allocated */ | ||
710 | revmap = host->revmap_data.linear.revmap; | ||
711 | if (unlikely(revmap == NULL)) | ||
712 | return irq_find_mapping(host, hwirq); | ||
713 | |||
714 | /* Fill up revmap with slow path if no mapping found */ | ||
715 | if (unlikely(revmap[hwirq] == NO_IRQ)) | ||
716 | revmap[hwirq] = irq_find_mapping(host, hwirq); | ||
717 | |||
718 | return revmap[hwirq]; | ||
719 | } | ||
720 | |||
721 | unsigned int irq_alloc_virt(struct irq_host *host, | ||
722 | unsigned int count, | ||
723 | unsigned int hint) | ||
724 | { | ||
725 | unsigned long flags; | ||
726 | unsigned int i, j, found = NO_IRQ; | ||
727 | unsigned int limit = irq_virq_count - count; | ||
728 | |||
729 | if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) | ||
730 | return NO_IRQ; | ||
731 | |||
732 | spin_lock_irqsave(&irq_big_lock, flags); | ||
733 | |||
734 | /* Use hint for 1 interrupt if any */ | ||
735 | if (count == 1 && hint >= NUM_ISA_INTERRUPTS && | ||
736 | hint < irq_virq_count && irq_map[hint].host == NULL) { | ||
737 | found = hint; | ||
738 | goto hint_found; | ||
739 | } | ||
740 | |||
741 | /* Look for count consecutive numbers in the allocatable | ||
742 | * (non-legacy) space | ||
743 | */ | ||
744 | for (i = NUM_ISA_INTERRUPTS; i <= limit; ) { | ||
745 | for (j = i; j < (i + count); j++) | ||
746 | if (irq_map[j].host != NULL) { | ||
747 | i = j + 1; | ||
748 | continue; | ||
749 | } | ||
750 | found = i; | ||
751 | break; | ||
752 | } | ||
753 | if (found == NO_IRQ) { | ||
754 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
755 | return NO_IRQ; | ||
756 | } | ||
757 | hint_found: | ||
758 | for (i = found; i < (found + count); i++) { | ||
759 | irq_map[i].hwirq = host->inval_irq; | ||
760 | smp_wmb(); | ||
761 | irq_map[i].host = host; | ||
762 | } | ||
763 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
764 | return found; | ||
765 | } | ||
766 | |||
767 | void irq_free_virt(unsigned int virq, unsigned int count) | ||
768 | { | ||
769 | unsigned long flags; | ||
770 | unsigned int i; | ||
771 | |||
772 | WARN_ON (virq < NUM_ISA_INTERRUPTS); | ||
773 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); | ||
774 | |||
775 | spin_lock_irqsave(&irq_big_lock, flags); | ||
776 | for (i = virq; i < (virq + count); i++) { | ||
777 | struct irq_host *host; | ||
778 | |||
779 | if (i < NUM_ISA_INTERRUPTS || | ||
780 | (virq + count) > irq_virq_count) | ||
781 | continue; | ||
782 | |||
783 | host = irq_map[i].host; | ||
784 | irq_map[i].hwirq = host->inval_irq; | ||
785 | smp_wmb(); | ||
786 | irq_map[i].host = NULL; | ||
787 | } | ||
788 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
789 | } | ||
790 | |||
791 | void irq_early_init(void) | ||
792 | { | ||
793 | unsigned int i; | ||
794 | |||
795 | for (i = 0; i < NR_IRQS; i++) | ||
796 | get_irq_desc(i)->status |= IRQ_NOREQUEST; | ||
797 | } | ||
798 | |||
799 | /* We need to create the radix trees late */ | ||
800 | static int irq_late_init(void) | ||
801 | { | ||
802 | struct irq_host *h; | ||
803 | unsigned long flags; | ||
804 | |||
805 | spin_lock_irqsave(&irq_big_lock, flags); | ||
806 | list_for_each_entry(h, &irq_hosts, link) { | ||
807 | if (h->revmap_type == IRQ_HOST_MAP_TREE) | ||
808 | INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC); | ||
809 | } | ||
810 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
811 | |||
812 | return 0; | ||
813 | } | ||
814 | arch_initcall(irq_late_init); | ||
815 | |||
816 | #endif /* CONFIG_PPC_MERGE */ | ||
817 | |||
433 | #ifdef CONFIG_PCI_MSI | 818 | #ifdef CONFIG_PCI_MSI |
434 | int pci_enable_msi(struct pci_dev * pdev) | 819 | int pci_enable_msi(struct pci_dev * pdev) |
435 | { | 820 | { |
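The core added above is driven by interrupt-controller code: a PIC driver allocates an irq_host describing its reverse-map strategy and supplies an irq_host_ops whose match, map, unmap and xlate hooks are the ones invoked throughout the functions above. The sketch below shows the general shape for a linearly remapped controller; the hook prototypes are inferred from the call sites in this hunk rather than quoted from asm/irq.h, and every my_pic_* symbol is a placeholder.

#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/prom.h>

#define MY_PIC_NR_SOURCES	64	/* hypothetical number of hardware sources */

static struct irq_host *my_pic_host;
static struct device_node *my_pic_node;	/* found in the device tree elsewhere */
static struct irq_chip my_pic_chip;	/* mask/unmask/eoi hooks omitted in this sketch */

static int my_pic_host_match(struct irq_host *h, struct device_node *node)
{
	return my_pic_node == node;	/* claim only our controller node */
}

static int my_pic_host_map(struct irq_host *h, unsigned int virq,
			   irq_hw_number_t hw, unsigned int flags)
{
	/* attach the chip and a flow handler for this source */
	set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
	return 0;
}

static int my_pic_host_xlate(struct irq_host *h, struct device_node *ct,
			     u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];	/* one-cell interrupt specifier assumed */
	*out_flags = IRQ_TYPE_NONE;
	return 0;
}

static struct irq_host_ops my_pic_host_ops = {
	.match	= my_pic_host_match,
	.map	= my_pic_host_map,
	.xlate	= my_pic_host_xlate,
};

static void __init my_pic_init(void)
{
	my_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, MY_PIC_NR_SOURCES,
				     &my_pic_host_ops, 0 /* invalid hwirq */);
	if (my_pic_host != NULL)
		irq_set_default_host(my_pic_host);
}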
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index a55056676ca4..7e98e778b52f 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -28,6 +28,7 @@ static struct legacy_serial_info { | |||
28 | struct device_node *np; | 28 | struct device_node *np; |
29 | unsigned int speed; | 29 | unsigned int speed; |
30 | unsigned int clock; | 30 | unsigned int clock; |
31 | int irq_check_parent; | ||
31 | phys_addr_t taddr; | 32 | phys_addr_t taddr; |
32 | } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; | 33 | } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; |
33 | static unsigned int legacy_serial_count; | 34 | static unsigned int legacy_serial_count; |
@@ -36,7 +37,7 @@ static int legacy_serial_console = -1; | |||
36 | static int __init add_legacy_port(struct device_node *np, int want_index, | 37 | static int __init add_legacy_port(struct device_node *np, int want_index, |
37 | int iotype, phys_addr_t base, | 38 | int iotype, phys_addr_t base, |
38 | phys_addr_t taddr, unsigned long irq, | 39 | phys_addr_t taddr, unsigned long irq, |
39 | upf_t flags) | 40 | upf_t flags, int irq_check_parent) |
40 | { | 41 | { |
41 | u32 *clk, *spd, clock = BASE_BAUD * 16; | 42 | u32 *clk, *spd, clock = BASE_BAUD * 16; |
42 | int index; | 43 | int index; |
@@ -68,7 +69,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
68 | if (legacy_serial_infos[index].np != 0) { | 69 | if (legacy_serial_infos[index].np != 0) { |
69 | /* if we still have some room, move it, else override */ | 70 | /* if we still have some room, move it, else override */ |
70 | if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) { | 71 | if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) { |
71 | printk(KERN_INFO "Moved legacy port %d -> %d\n", | 72 | printk(KERN_DEBUG "Moved legacy port %d -> %d\n", |
72 | index, legacy_serial_count); | 73 | index, legacy_serial_count); |
73 | legacy_serial_ports[legacy_serial_count] = | 74 | legacy_serial_ports[legacy_serial_count] = |
74 | legacy_serial_ports[index]; | 75 | legacy_serial_ports[index]; |
@@ -76,7 +77,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
76 | legacy_serial_infos[index]; | 77 | legacy_serial_infos[index]; |
77 | legacy_serial_count++; | 78 | legacy_serial_count++; |
78 | } else { | 79 | } else { |
79 | printk(KERN_INFO "Replacing legacy port %d\n", index); | 80 | printk(KERN_DEBUG "Replacing legacy port %d\n", index); |
80 | } | 81 | } |
81 | } | 82 | } |
82 | 83 | ||
@@ -95,10 +96,11 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
95 | legacy_serial_infos[index].np = of_node_get(np); | 96 | legacy_serial_infos[index].np = of_node_get(np); |
96 | legacy_serial_infos[index].clock = clock; | 97 | legacy_serial_infos[index].clock = clock; |
97 | legacy_serial_infos[index].speed = spd ? *spd : 0; | 98 | legacy_serial_infos[index].speed = spd ? *spd : 0; |
99 | legacy_serial_infos[index].irq_check_parent = irq_check_parent; | ||
98 | 100 | ||
99 | printk(KERN_INFO "Found legacy serial port %d for %s\n", | 101 | printk(KERN_DEBUG "Found legacy serial port %d for %s\n", |
100 | index, np->full_name); | 102 | index, np->full_name); |
101 | printk(KERN_INFO " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n", | 103 | printk(KERN_DEBUG " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n", |
102 | (iotype == UPIO_PORT) ? "port" : "mem", | 104 | (iotype == UPIO_PORT) ? "port" : "mem", |
103 | (unsigned long long)base, (unsigned long long)taddr, irq, | 105 | (unsigned long long)base, (unsigned long long)taddr, irq, |
104 | legacy_serial_ports[index].uartclk, | 106 | legacy_serial_ports[index].uartclk, |
@@ -132,7 +134,7 @@ static int __init add_legacy_soc_port(struct device_node *np, | |||
132 | /* Add port, irq will be dealt with later. We passed a translated | 134 | /* Add port, irq will be dealt with later. We passed a translated |
133 | * IO port value. It will be fixed up later along with the irq | 135 | * IO port value. It will be fixed up later along with the irq |
134 | */ | 136 | */ |
135 | return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags); | 137 | return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0); |
136 | } | 138 | } |
137 | 139 | ||
138 | static int __init add_legacy_isa_port(struct device_node *np, | 140 | static int __init add_legacy_isa_port(struct device_node *np, |
@@ -170,7 +172,7 @@ static int __init add_legacy_isa_port(struct device_node *np, | |||
170 | 172 | ||
171 | /* Add port, irq will be dealt with later */ | 173 | /* Add port, irq will be dealt with later */ |
172 | return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, | 174 | return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, |
173 | NO_IRQ, UPF_BOOT_AUTOCONF); | 175 | NO_IRQ, UPF_BOOT_AUTOCONF, 0); |
174 | 176 | ||
175 | } | 177 | } |
176 | 178 | ||
@@ -242,7 +244,8 @@ static int __init add_legacy_pci_port(struct device_node *np, | |||
242 | /* Add port, irq will be dealt with later. We passed a translated | 244 | /* Add port, irq will be dealt with later. We passed a translated |
243 | * IO port value. It will be fixed up later along with the irq | 245 | * IO port value. It will be fixed up later along with the irq |
244 | */ | 246 | */ |
245 | return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, UPF_BOOT_AUTOCONF); | 247 | return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, |
248 | UPF_BOOT_AUTOCONF, np != pci_dev); | ||
246 | } | 249 | } |
247 | #endif | 250 | #endif |
248 | 251 | ||
@@ -373,27 +376,22 @@ static void __init fixup_port_irq(int index, | |||
373 | struct device_node *np, | 376 | struct device_node *np, |
374 | struct plat_serial8250_port *port) | 377 | struct plat_serial8250_port *port) |
375 | { | 378 | { |
379 | unsigned int virq; | ||
380 | |||
376 | DBG("fixup_port_irq(%d)\n", index); | 381 | DBG("fixup_port_irq(%d)\n", index); |
377 | 382 | ||
378 | /* Check for interrupts in that node */ | 383 | virq = irq_of_parse_and_map(np, 0); |
379 | if (np->n_intrs > 0) { | 384 | if (virq == NO_IRQ && legacy_serial_infos[index].irq_check_parent) { |
380 | port->irq = np->intrs[0].line; | 385 | np = of_get_parent(np); |
381 | DBG(" port %d (%s), irq=%d\n", | 386 | if (np == NULL) |
382 | index, np->full_name, port->irq); | 387 | return; |
383 | return; | 388 | virq = irq_of_parse_and_map(np, 0); |
389 | of_node_put(np); | ||
384 | } | 390 | } |
385 | 391 | if (virq == NO_IRQ) | |
386 | /* Check for interrupts in the parent */ | ||
387 | np = of_get_parent(np); | ||
388 | if (np == NULL) | ||
389 | return; | 392 | return; |
390 | 393 | ||
391 | if (np->n_intrs > 0) { | 394 | port->irq = virq; |
392 | port->irq = np->intrs[0].line; | ||
393 | DBG(" port %d (%s), irq=%d\n", | ||
394 | index, np->full_name, port->irq); | ||
395 | } | ||
396 | of_node_put(np); | ||
397 | } | 395 | } |
398 | 396 | ||
399 | static void __init fixup_port_pio(int index, | 397 | static void __init fixup_port_pio(int index, |
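The reworked fixup_port_irq() above is the canonical consumer after this change: device-tree interrupts are no longer pre-parsed into np->intrs[], so code resolves them on demand with irq_of_parse_and_map() and requests the returned virq. A short, hedged sketch of that pattern; the wrapper and its handler parameter are illustrative only.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/prom.h>

/* Sketch: resolve and claim the first interrupt of a device node. */
static int my_claim_first_irq(struct device_node *np, void *dev_id,
			      irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (virq == NO_IRQ)
		return -ENODEV;
	return request_irq(virq, handler, 0, np->name, dev_id);
}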
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 1333335c474e..898dae8ab6d9 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -1404,6 +1404,43 @@ pcibios_update_irq(struct pci_dev *dev, int irq) | |||
1404 | /* XXX FIXME - update OF device tree node interrupt property */ | 1404 | /* XXX FIXME - update OF device tree node interrupt property */ |
1405 | } | 1405 | } |
1406 | 1406 | ||
1407 | #ifdef CONFIG_PPC_MERGE | ||
1408 | /* XXX This is a copy of the ppc64 version. This is temporary until we start | ||
1409 | * merging the 2 PCI layers | ||
1410 | */ | ||
1411 | /* | ||
1412 | * Reads the interrupt pin to determine if interrupt is use by card. | ||
1413 | * If the interrupt is used, then gets the interrupt line from the | ||
1414 | * openfirmware and sets it in the pci_dev and pci_config line. | ||
1415 | */ | ||
1416 | int pci_read_irq_line(struct pci_dev *pci_dev) | ||
1417 | { | ||
1418 | struct of_irq oirq; | ||
1419 | unsigned int virq; | ||
1420 | |||
1421 | DBG("Try to map irq for %s...\n", pci_name(pci_dev)); | ||
1422 | |||
1423 | if (of_irq_map_pci(pci_dev, &oirq)) { | ||
1424 | DBG(" -> failed !\n"); | ||
1425 | return -1; | ||
1426 | } | ||
1427 | |||
1428 | DBG(" -> got one, spec %d cells (0x%08x...) on %s\n", | ||
1429 | oirq.size, oirq.specifier[0], oirq.controller->full_name); | ||
1430 | |||
1431 | virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); | ||
1432 | if(virq == NO_IRQ) { | ||
1433 | DBG(" -> failed to map !\n"); | ||
1434 | return -1; | ||
1435 | } | ||
1436 | pci_dev->irq = virq; | ||
1437 | pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq); | ||
1438 | |||
1439 | return 0; | ||
1440 | } | ||
1441 | EXPORT_SYMBOL(pci_read_irq_line); | ||
1442 | #endif /* CONFIG_PPC_MERGE */ | ||
1443 | |||
1407 | int pcibios_enable_device(struct pci_dev *dev, int mask) | 1444 | int pcibios_enable_device(struct pci_dev *dev, int mask) |
1408 | { | 1445 | { |
1409 | u16 cmd, old_cmd; | 1446 | u16 cmd, old_cmd; |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index bea8451fb57b..efc0b5559ee0 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -398,12 +398,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, | |||
398 | } else { | 398 | } else { |
399 | dev->hdr_type = PCI_HEADER_TYPE_NORMAL; | 399 | dev->hdr_type = PCI_HEADER_TYPE_NORMAL; |
400 | dev->rom_base_reg = PCI_ROM_ADDRESS; | 400 | dev->rom_base_reg = PCI_ROM_ADDRESS; |
401 | /* Maybe do a default OF mapping here */ | ||
401 | dev->irq = NO_IRQ; | 402 | dev->irq = NO_IRQ; |
402 | if (node->n_intrs > 0) { | ||
403 | dev->irq = node->intrs[0].line; | ||
404 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, | ||
405 | dev->irq); | ||
406 | } | ||
407 | } | 403 | } |
408 | 404 | ||
409 | pci_parse_of_addrs(node, dev); | 405 | pci_parse_of_addrs(node, dev); |
@@ -1288,23 +1284,26 @@ EXPORT_SYMBOL(pcibios_fixup_bus); | |||
1288 | */ | 1284 | */ |
1289 | int pci_read_irq_line(struct pci_dev *pci_dev) | 1285 | int pci_read_irq_line(struct pci_dev *pci_dev) |
1290 | { | 1286 | { |
1291 | u8 intpin; | 1287 | struct of_irq oirq; |
1292 | struct device_node *node; | 1288 | unsigned int virq; |
1293 | |||
1294 | pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin); | ||
1295 | if (intpin == 0) | ||
1296 | return 0; | ||
1297 | 1289 | ||
1298 | node = pci_device_to_OF_node(pci_dev); | 1290 | DBG("Try to map irq for %s...\n", pci_name(pci_dev)); |
1299 | if (node == NULL) | ||
1300 | return -1; | ||
1301 | 1291 | ||
1302 | if (node->n_intrs == 0) | 1292 | if (of_irq_map_pci(pci_dev, &oirq)) { |
1293 | DBG(" -> failed !\n"); | ||
1303 | return -1; | 1294 | return -1; |
1295 | } | ||
1304 | 1296 | ||
1305 | pci_dev->irq = node->intrs[0].line; | 1297 | DBG(" -> got one, spec %d cells (0x%08x...) on %s\n", |
1298 | oirq.size, oirq.specifier[0], oirq.controller->full_name); | ||
1306 | 1299 | ||
1307 | pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq); | 1300 | virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); |
1301 | if(virq == NO_IRQ) { | ||
1302 | DBG(" -> failed to map !\n"); | ||
1303 | return -1; | ||
1304 | } | ||
1305 | pci_dev->irq = virq; | ||
1306 | pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq); | ||
1308 | 1307 | ||
1309 | return 0; | 1308 | return 0; |
1310 | } | 1309 | } |
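Both copies of pci_read_irq_line() now derive the interrupt from the device tree: of_irq_map_pci() walks the interrupt-map properties up to a controller and specifier, and irq_create_of_mapping() turns that pair into a virq stored in pci_dev->irq. From a PCI driver's point of view nothing changes, as the brief sketch below is meant to show; the probe function, handler and name string are invented.

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t my_pci_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;	/* placeholder handler */
}

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	/* pdev->irq is already the virq set up by pci_read_irq_line() */
	return request_irq(pdev->irq, my_pci_isr, 0, "my_pci_dev", pdev);
}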
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ef3619c28702..a1787ffb6319 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/kexec.h> | 31 | #include <linux/kexec.h> |
32 | #include <linux/debugfs.h> | 32 | #include <linux/debugfs.h> |
33 | #include <linux/irq.h> | ||
33 | 34 | ||
34 | #include <asm/prom.h> | 35 | #include <asm/prom.h> |
35 | #include <asm/rtas.h> | 36 | #include <asm/rtas.h> |
@@ -86,424 +87,6 @@ static DEFINE_RWLOCK(devtree_lock); | |||
86 | /* export that to outside world */ | 87 | /* export that to outside world */ |
87 | struct device_node *of_chosen; | 88 | struct device_node *of_chosen; |
88 | 89 | ||
89 | struct device_node *dflt_interrupt_controller; | ||
90 | int num_interrupt_controllers; | ||
91 | |||
92 | /* | ||
93 | * Wrapper for allocating memory for various data that needs to be | ||
94 | * attached to device nodes as they are processed at boot or when | ||
95 | * added to the device tree later (e.g. DLPAR). At boot there is | ||
96 | * already a region reserved so we just increment *mem_start by size; | ||
97 | * otherwise we call kmalloc. | ||
98 | */ | ||
99 | static void * prom_alloc(unsigned long size, unsigned long *mem_start) | ||
100 | { | ||
101 | unsigned long tmp; | ||
102 | |||
103 | if (!mem_start) | ||
104 | return kmalloc(size, GFP_KERNEL); | ||
105 | |||
106 | tmp = *mem_start; | ||
107 | *mem_start += size; | ||
108 | return (void *)tmp; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Find the device_node with a given phandle. | ||
113 | */ | ||
114 | static struct device_node * find_phandle(phandle ph) | ||
115 | { | ||
116 | struct device_node *np; | ||
117 | |||
118 | for (np = allnodes; np != 0; np = np->allnext) | ||
119 | if (np->linux_phandle == ph) | ||
120 | return np; | ||
121 | return NULL; | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Find the interrupt parent of a node. | ||
126 | */ | ||
127 | static struct device_node * __devinit intr_parent(struct device_node *p) | ||
128 | { | ||
129 | phandle *parp; | ||
130 | |||
131 | parp = (phandle *) get_property(p, "interrupt-parent", NULL); | ||
132 | if (parp == NULL) | ||
133 | return p->parent; | ||
134 | p = find_phandle(*parp); | ||
135 | if (p != NULL) | ||
136 | return p; | ||
137 | /* | ||
138 | * On a powermac booted with BootX, we don't get to know the | ||
139 | * phandles for any nodes, so find_phandle will return NULL. | ||
140 | * Fortunately these machines only have one interrupt controller | ||
141 | * so there isn't in fact any ambiguity. -- paulus | ||
142 | */ | ||
143 | if (num_interrupt_controllers == 1) | ||
144 | p = dflt_interrupt_controller; | ||
145 | return p; | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Find out the size of each entry of the interrupts property | ||
150 | * for a node. | ||
151 | */ | ||
152 | int __devinit prom_n_intr_cells(struct device_node *np) | ||
153 | { | ||
154 | struct device_node *p; | ||
155 | unsigned int *icp; | ||
156 | |||
157 | for (p = np; (p = intr_parent(p)) != NULL; ) { | ||
158 | icp = (unsigned int *) | ||
159 | get_property(p, "#interrupt-cells", NULL); | ||
160 | if (icp != NULL) | ||
161 | return *icp; | ||
162 | if (get_property(p, "interrupt-controller", NULL) != NULL | ||
163 | || get_property(p, "interrupt-map", NULL) != NULL) { | ||
164 | printk("oops, node %s doesn't have #interrupt-cells\n", | ||
165 | p->full_name); | ||
166 | return 1; | ||
167 | } | ||
168 | } | ||
169 | #ifdef DEBUG_IRQ | ||
170 | printk("prom_n_intr_cells failed for %s\n", np->full_name); | ||
171 | #endif | ||
172 | return 1; | ||
173 | } | ||
174 | |||
175 | /* | ||
176 | * Map an interrupt from a device up to the platform interrupt | ||
177 | * descriptor. | ||
178 | */ | ||
179 | static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler, | ||
180 | struct device_node *np, unsigned int *ints, | ||
181 | int nintrc) | ||
182 | { | ||
183 | struct device_node *p, *ipar; | ||
184 | unsigned int *imap, *imask, *ip; | ||
185 | int i, imaplen, match; | ||
186 | int newintrc = 0, newaddrc = 0; | ||
187 | unsigned int *reg; | ||
188 | int naddrc; | ||
189 | |||
190 | reg = (unsigned int *) get_property(np, "reg", NULL); | ||
191 | naddrc = prom_n_addr_cells(np); | ||
192 | p = intr_parent(np); | ||
193 | while (p != NULL) { | ||
194 | if (get_property(p, "interrupt-controller", NULL) != NULL) | ||
195 | /* this node is an interrupt controller, stop here */ | ||
196 | break; | ||
197 | imap = (unsigned int *) | ||
198 | get_property(p, "interrupt-map", &imaplen); | ||
199 | if (imap == NULL) { | ||
200 | p = intr_parent(p); | ||
201 | continue; | ||
202 | } | ||
203 | imask = (unsigned int *) | ||
204 | get_property(p, "interrupt-map-mask", NULL); | ||
205 | if (imask == NULL) { | ||
206 | printk("oops, %s has interrupt-map but no mask\n", | ||
207 | p->full_name); | ||
208 | return 0; | ||
209 | } | ||
210 | imaplen /= sizeof(unsigned int); | ||
211 | match = 0; | ||
212 | ipar = NULL; | ||
213 | while (imaplen > 0 && !match) { | ||
214 | /* check the child-interrupt field */ | ||
215 | match = 1; | ||
216 | for (i = 0; i < naddrc && match; ++i) | ||
217 | match = ((reg[i] ^ imap[i]) & imask[i]) == 0; | ||
218 | for (; i < naddrc + nintrc && match; ++i) | ||
219 | match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0; | ||
220 | imap += naddrc + nintrc; | ||
221 | imaplen -= naddrc + nintrc; | ||
222 | /* grab the interrupt parent */ | ||
223 | ipar = find_phandle((phandle) *imap++); | ||
224 | --imaplen; | ||
225 | if (ipar == NULL && num_interrupt_controllers == 1) | ||
226 | /* cope with BootX not giving us phandles */ | ||
227 | ipar = dflt_interrupt_controller; | ||
228 | if (ipar == NULL) { | ||
229 | printk("oops, no int parent %x in map of %s\n", | ||
230 | imap[-1], p->full_name); | ||
231 | return 0; | ||
232 | } | ||
233 | /* find the parent's # addr and intr cells */ | ||
234 | ip = (unsigned int *) | ||
235 | get_property(ipar, "#interrupt-cells", NULL); | ||
236 | if (ip == NULL) { | ||
237 | printk("oops, no #interrupt-cells on %s\n", | ||
238 | ipar->full_name); | ||
239 | return 0; | ||
240 | } | ||
241 | newintrc = *ip; | ||
242 | ip = (unsigned int *) | ||
243 | get_property(ipar, "#address-cells", NULL); | ||
244 | newaddrc = (ip == NULL)? 0: *ip; | ||
245 | imap += newaddrc + newintrc; | ||
246 | imaplen -= newaddrc + newintrc; | ||
247 | } | ||
248 | if (imaplen < 0) { | ||
249 | printk("oops, error decoding int-map on %s, len=%d\n", | ||
250 | p->full_name, imaplen); | ||
251 | return 0; | ||
252 | } | ||
253 | if (!match) { | ||
254 | #ifdef DEBUG_IRQ | ||
255 | printk("oops, no match in %s int-map for %s\n", | ||
256 | p->full_name, np->full_name); | ||
257 | #endif | ||
258 | return 0; | ||
259 | } | ||
260 | p = ipar; | ||
261 | naddrc = newaddrc; | ||
262 | nintrc = newintrc; | ||
263 | ints = imap - nintrc; | ||
264 | reg = ints - naddrc; | ||
265 | } | ||
266 | if (p == NULL) { | ||
267 | #ifdef DEBUG_IRQ | ||
268 | printk("hmmm, int tree for %s doesn't have ctrler\n", | ||
269 | np->full_name); | ||
270 | #endif | ||
271 | return 0; | ||
272 | } | ||
273 | *irq = ints; | ||
274 | *ictrler = p; | ||
275 | return nintrc; | ||
276 | } | ||
277 | |||
278 | static unsigned char map_isa_senses[4] = { | ||
279 | IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, | ||
280 | IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE, | ||
281 | IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE, | ||
282 | IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE | ||
283 | }; | ||
284 | |||
285 | static unsigned char map_mpic_senses[4] = { | ||
286 | IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE, | ||
287 | IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, | ||
288 | /* 2 seems to be used for the 8259 cascade... */ | ||
289 | IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE, | ||
290 | IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE, | ||
291 | }; | ||
292 | |||
293 | static int __devinit finish_node_interrupts(struct device_node *np, | ||
294 | unsigned long *mem_start, | ||
295 | int measure_only) | ||
296 | { | ||
297 | unsigned int *ints; | ||
298 | int intlen, intrcells, intrcount; | ||
299 | int i, j, n, sense; | ||
300 | unsigned int *irq, virq; | ||
301 | struct device_node *ic; | ||
302 | int trace = 0; | ||
303 | |||
304 | //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0) | ||
305 | #define TRACE(fmt...) | ||
306 | |||
307 | if (!strcmp(np->name, "smu-doorbell")) | ||
308 | trace = 1; | ||
309 | |||
310 | TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n", | ||
311 | num_interrupt_controllers); | ||
312 | |||
313 | if (num_interrupt_controllers == 0) { | ||
314 | /* | ||
315 | * Old machines just have a list of interrupt numbers | ||
316 | * and no interrupt-controller nodes. | ||
317 | */ | ||
318 | ints = (unsigned int *) get_property(np, "AAPL,interrupts", | ||
319 | &intlen); | ||
320 | /* XXX old interpret_pci_props looked in parent too */ | ||
321 | /* XXX old interpret_macio_props looked for interrupts | ||
322 | before AAPL,interrupts */ | ||
323 | if (ints == NULL) | ||
324 | ints = (unsigned int *) get_property(np, "interrupts", | ||
325 | &intlen); | ||
326 | if (ints == NULL) | ||
327 | return 0; | ||
328 | |||
329 | np->n_intrs = intlen / sizeof(unsigned int); | ||
330 | np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]), | ||
331 | mem_start); | ||
332 | if (!np->intrs) | ||
333 | return -ENOMEM; | ||
334 | if (measure_only) | ||
335 | return 0; | ||
336 | |||
337 | for (i = 0; i < np->n_intrs; ++i) { | ||
338 | np->intrs[i].line = *ints++; | ||
339 | np->intrs[i].sense = IRQ_SENSE_LEVEL | ||
340 | | IRQ_POLARITY_NEGATIVE; | ||
341 | } | ||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | ints = (unsigned int *) get_property(np, "interrupts", &intlen); | ||
346 | TRACE("ints=%p, intlen=%d\n", ints, intlen); | ||
347 | if (ints == NULL) | ||
348 | return 0; | ||
349 | intrcells = prom_n_intr_cells(np); | ||
350 | intlen /= intrcells * sizeof(unsigned int); | ||
351 | TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen); | ||
352 | np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start); | ||
353 | if (!np->intrs) | ||
354 | return -ENOMEM; | ||
355 | |||
356 | if (measure_only) | ||
357 | return 0; | ||
358 | |||
359 | intrcount = 0; | ||
360 | for (i = 0; i < intlen; ++i, ints += intrcells) { | ||
361 | n = map_interrupt(&irq, &ic, np, ints, intrcells); | ||
362 | TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n); | ||
363 | if (n <= 0) | ||
364 | continue; | ||
365 | |||
366 | /* don't map IRQ numbers under a cascaded 8259 controller */ | ||
367 | if (ic && device_is_compatible(ic, "chrp,iic")) { | ||
368 | np->intrs[intrcount].line = irq[0]; | ||
369 | sense = (n > 1)? (irq[1] & 3): 3; | ||
370 | np->intrs[intrcount].sense = map_isa_senses[sense]; | ||
371 | } else { | ||
372 | virq = virt_irq_create_mapping(irq[0]); | ||
373 | TRACE("virq=%d\n", virq); | ||
374 | #ifdef CONFIG_PPC64 | ||
375 | if (virq == NO_IRQ) { | ||
376 | printk(KERN_CRIT "Could not allocate interrupt" | ||
377 | " number for %s\n", np->full_name); | ||
378 | continue; | ||
379 | } | ||
380 | #endif | ||
381 | np->intrs[intrcount].line = irq_offset_up(virq); | ||
382 | sense = (n > 1)? (irq[1] & 3): 1; | ||
383 | |||
384 | /* Apple uses bits in there in a different way, let's | ||
385 | * only keep the real sense bit on macs | ||
386 | */ | ||
387 | if (machine_is(powermac)) | ||
388 | sense &= 0x1; | ||
389 | np->intrs[intrcount].sense = map_mpic_senses[sense]; | ||
390 | } | ||
391 | |||
392 | #ifdef CONFIG_PPC64 | ||
393 | /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */ | ||
394 | if (machine_is(powermac) && ic && ic->parent) { | ||
395 | char *name = get_property(ic->parent, "name", NULL); | ||
396 | if (name && !strcmp(name, "u3")) | ||
397 | np->intrs[intrcount].line += 128; | ||
398 | else if (!(name && (!strcmp(name, "mac-io") || | ||
399 | !strcmp(name, "u4")))) | ||
400 | /* ignore other cascaded controllers, such as | ||
401 | the k2-sata-root */ | ||
402 | break; | ||
403 | } | ||
404 | #endif /* CONFIG_PPC64 */ | ||
405 | if (n > 2) { | ||
406 | printk("hmmm, got %d intr cells for %s:", n, | ||
407 | np->full_name); | ||
408 | for (j = 0; j < n; ++j) | ||
409 | printk(" %d", irq[j]); | ||
410 | printk("\n"); | ||
411 | } | ||
412 | ++intrcount; | ||
413 | } | ||
414 | np->n_intrs = intrcount; | ||
415 | |||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | static int __devinit finish_node(struct device_node *np, | ||
420 | unsigned long *mem_start, | ||
421 | int measure_only) | ||
422 | { | ||
423 | struct device_node *child; | ||
424 | int rc = 0; | ||
425 | |||
426 | rc = finish_node_interrupts(np, mem_start, measure_only); | ||
427 | if (rc) | ||
428 | goto out; | ||
429 | |||
430 | for (child = np->child; child != NULL; child = child->sibling) { | ||
431 | rc = finish_node(child, mem_start, measure_only); | ||
432 | if (rc) | ||
433 | goto out; | ||
434 | } | ||
435 | out: | ||
436 | return rc; | ||
437 | } | ||
438 | |||
439 | static void __init scan_interrupt_controllers(void) | ||
440 | { | ||
441 | struct device_node *np; | ||
442 | int n = 0; | ||
443 | char *name, *ic; | ||
444 | int iclen; | ||
445 | |||
446 | for (np = allnodes; np != NULL; np = np->allnext) { | ||
447 | ic = get_property(np, "interrupt-controller", &iclen); | ||
448 | name = get_property(np, "name", NULL); | ||
449 | /* checking iclen makes sure we don't get a false | ||
450 | match on /chosen.interrupt_controller */ | ||
451 | if ((name != NULL | ||
452 | && strcmp(name, "interrupt-controller") == 0) | ||
453 | || (ic != NULL && iclen == 0 | ||
454 | && strcmp(name, "AppleKiwi"))) { | ||
455 | if (n == 0) | ||
456 | dflt_interrupt_controller = np; | ||
457 | ++n; | ||
458 | } | ||
459 | } | ||
460 | num_interrupt_controllers = n; | ||
461 | } | ||
462 | |||
463 | /** | ||
464 | * finish_device_tree is called once things are running normally | ||
465 | * (i.e. with text and data mapped to the address they were linked at). | ||
466 | * It traverses the device tree and fills in some of the additional, | ||
467 | * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt | ||
468 | * mapping is also initialized at this point. | ||
469 | */ | ||
470 | void __init finish_device_tree(void) | ||
471 | { | ||
472 | unsigned long start, end, size = 0; | ||
473 | |||
474 | DBG(" -> finish_device_tree\n"); | ||
475 | |||
476 | #ifdef CONFIG_PPC64 | ||
477 | /* Initialize virtual IRQ map */ | ||
478 | virt_irq_init(); | ||
479 | #endif | ||
480 | scan_interrupt_controllers(); | ||
481 | |||
482 | /* | ||
483 | * Finish device-tree (pre-parsing some properties etc...) | ||
484 | * We do this in 2 passes. One with "measure_only" set, which | ||
485 | * will only measure the amount of memory needed, then we can | ||
486 | * allocate that memory, and call finish_node again. However, | ||
487 | * we must be careful as most routines will fail nowadays when | ||
488 | * prom_alloc() returns 0, so we must make sure our first pass | ||
489 | * doesn't start at 0. We pre-initialize size to 16 for that | ||
490 | * reason and then remove those additional 16 bytes | ||
491 | */ | ||
492 | size = 16; | ||
493 | finish_node(allnodes, &size, 1); | ||
494 | size -= 16; | ||
495 | |||
496 | if (0 == size) | ||
497 | end = start = 0; | ||
498 | else | ||
499 | end = start = (unsigned long)__va(lmb_alloc(size, 128)); | ||
500 | |||
501 | finish_node(allnodes, &end, 0); | ||
502 | BUG_ON(end != start + size); | ||
503 | |||
504 | DBG(" <- finish_device_tree\n"); | ||
505 | } | ||
506 | |||
507 | static inline char *find_flat_dt_string(u32 offset) | 90 | static inline char *find_flat_dt_string(u32 offset) |
508 | { | 91 | { |
509 | return ((char *)initial_boot_params) + | 92 | return ((char *)initial_boot_params) + |
@@ -1389,27 +972,6 @@ prom_n_size_cells(struct device_node* np) | |||
1389 | EXPORT_SYMBOL(prom_n_size_cells); | 972 | EXPORT_SYMBOL(prom_n_size_cells); |
1390 | 973 | ||
1391 | /** | 974 | /** |
1392 | * Work out the sense (active-low level / active-high edge) | ||
1393 | * of each interrupt from the device tree. | ||
1394 | */ | ||
1395 | void __init prom_get_irq_senses(unsigned char *senses, int off, int max) | ||
1396 | { | ||
1397 | struct device_node *np; | ||
1398 | int i, j; | ||
1399 | |||
1400 | /* default to level-triggered */ | ||
1401 | memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off); | ||
1402 | |||
1403 | for (np = allnodes; np != 0; np = np->allnext) { | ||
1404 | for (j = 0; j < np->n_intrs; j++) { | ||
1405 | i = np->intrs[j].line; | ||
1406 | if (i >= off && i < max) | ||
1407 | senses[i-off] = np->intrs[j].sense; | ||
1408 | } | ||
1409 | } | ||
1410 | } | ||
1411 | |||
1412 | /** | ||
1413 | * Construct and return a list of the device_nodes with a given name. | 975 | * Construct and return a list of the device_nodes with a given name. |
1414 | */ | 976 | */ |
1415 | struct device_node *find_devices(const char *name) | 977 | struct device_node *find_devices(const char *name) |
@@ -1808,7 +1370,6 @@ static void of_node_release(struct kref *kref) | |||
1808 | node->deadprops = NULL; | 1370 | node->deadprops = NULL; |
1809 | } | 1371 | } |
1810 | } | 1372 | } |
1811 | kfree(node->intrs); | ||
1812 | kfree(node->full_name); | 1373 | kfree(node->full_name); |
1813 | kfree(node->data); | 1374 | kfree(node->data); |
1814 | kfree(node); | 1375 | kfree(node); |
@@ -1881,13 +1442,7 @@ void of_detach_node(const struct device_node *np) | |||
1881 | #ifdef CONFIG_PPC_PSERIES | 1442 | #ifdef CONFIG_PPC_PSERIES |
1882 | /* | 1443 | /* |
1883 | * Fix up the uninitialized fields in a new device node: | 1444 | * Fix up the uninitialized fields in a new device node: |
1884 | * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields | 1445 | * name, type and pci-specific fields |
1885 | * | ||
1886 | * A lot of boot-time code is duplicated here, because functions such | ||
1887 | * as finish_node_interrupts, interpret_pci_props, etc. cannot use the | ||
1888 | * slab allocator. | ||
1889 | * | ||
1890 | * This should probably be split up into smaller chunks. | ||
1891 | */ | 1446 | */ |
1892 | 1447 | ||
1893 | static int of_finish_dynamic_node(struct device_node *node) | 1448 | static int of_finish_dynamic_node(struct device_node *node) |
@@ -1928,8 +1483,6 @@ static int prom_reconfig_notifier(struct notifier_block *nb, | |||
1928 | switch (action) { | 1483 | switch (action) { |
1929 | case PSERIES_RECONFIG_ADD: | 1484 | case PSERIES_RECONFIG_ADD: |
1930 | err = of_finish_dynamic_node(node); | 1485 | err = of_finish_dynamic_node(node); |
1931 | if (!err) | ||
1932 | finish_node(node, NULL, 0); | ||
1933 | if (err < 0) { | 1486 | if (err < 0) { |
1934 | printk(KERN_ERR "finish_node returned %d\n", err); | 1487 | printk(KERN_ERR "finish_node returned %d\n", err); |
1935 | err = NOTIFY_BAD; | 1488 | err = NOTIFY_BAD; |
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 6eb7e49b394a..cda022657324 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -297,19 +297,9 @@ unsigned long __init find_and_init_phbs(void) | |||
297 | struct device_node *node; | 297 | struct device_node *node; |
298 | struct pci_controller *phb; | 298 | struct pci_controller *phb; |
299 | unsigned int index; | 299 | unsigned int index; |
300 | unsigned int root_size_cells = 0; | ||
301 | unsigned int *opprop = NULL; | ||
302 | struct device_node *root = of_find_node_by_path("/"); | 300 | struct device_node *root = of_find_node_by_path("/"); |
303 | 301 | ||
304 | if (ppc64_interrupt_controller == IC_OPEN_PIC) { | ||
305 | opprop = (unsigned int *)get_property(root, | ||
306 | "platform-open-pic", NULL); | ||
307 | } | ||
308 | |||
309 | root_size_cells = prom_n_size_cells(root); | ||
310 | |||
311 | index = 0; | 302 | index = 0; |
312 | |||
313 | for (node = of_get_next_child(root, NULL); | 303 | for (node = of_get_next_child(root, NULL); |
314 | node != NULL; | 304 | node != NULL; |
315 | node = of_get_next_child(root, node)) { | 305 | node = of_get_next_child(root, node)) { |
@@ -324,13 +314,6 @@ unsigned long __init find_and_init_phbs(void) | |||
324 | setup_phb(node, phb); | 314 | setup_phb(node, phb); |
325 | pci_process_bridge_OF_ranges(phb, node, 0); | 315 | pci_process_bridge_OF_ranges(phb, node, 0); |
326 | pci_setup_phb_io(phb, index == 0); | 316 | pci_setup_phb_io(phb, index == 0); |
327 | #ifdef CONFIG_PPC_PSERIES | ||
328 | /* XXX This code need serious fixing ... --BenH */ | ||
329 | if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { | ||
330 | int addr = root_size_cells * (index + 2) - 1; | ||
331 | mpic_assign_isu(pSeries_mpic, index, opprop[addr]); | ||
332 | } | ||
333 | #endif | ||
334 | index++; | 317 | index++; |
335 | } | 318 | } |
336 | 319 | ||
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 0c21de39c161..e0df2ba1ab9f 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -239,7 +239,6 @@ void __init setup_arch(char **cmdline_p) | |||
239 | ppc_md.init_early(); | 239 | ppc_md.init_early(); |
240 | 240 | ||
241 | find_legacy_serial_ports(); | 241 | find_legacy_serial_ports(); |
242 | finish_device_tree(); | ||
243 | 242 | ||
244 | smp_setup_cpu_maps(); | 243 | smp_setup_cpu_maps(); |
245 | 244 | ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index ac7276c40685..fd1785e4c9bb 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -361,12 +361,15 @@ void __init setup_system(void) | |||
361 | 361 | ||
362 | /* | 362 | /* |
363 | * Fill the ppc64_caches & systemcfg structures with information | 363 | * Fill the ppc64_caches & systemcfg structures with information |
364 | * retrieved from the device-tree. Need to be called before | 364 | * retrieved from the device-tree. |
365 | * finish_device_tree() since the later requires some of the | ||
366 | * informations filled up here to properly parse the interrupt tree. | ||
367 | */ | 365 | */ |
368 | initialize_cache_info(); | 366 | initialize_cache_info(); |
369 | 367 | ||
368 | /* | ||
369 | * Initialize irq remapping subsystem | ||
370 | */ | ||
371 | irq_early_init(); | ||
372 | |||
370 | #ifdef CONFIG_PPC_RTAS | 373 | #ifdef CONFIG_PPC_RTAS |
371 | /* | 374 | /* |
372 | * Initialize RTAS if available | 375 | * Initialize RTAS if available |
@@ -394,12 +397,6 @@ void __init setup_system(void) | |||
394 | find_legacy_serial_ports(); | 397 | find_legacy_serial_ports(); |
395 | 398 | ||
396 | /* | 399 | /* |
397 | * "Finish" the device-tree, that is do the actual parsing of | ||
398 | * some of the properties like the interrupt map | ||
399 | */ | ||
400 | finish_device_tree(); | ||
401 | |||
402 | /* | ||
403 | * Initialize xmon | 400 | * Initialize xmon |
404 | */ | 401 | */ |
405 | #ifdef CONFIG_XMON_DEFAULT | 402 | #ifdef CONFIG_XMON_DEFAULT |
@@ -427,8 +424,6 @@ void __init setup_system(void) | |||
427 | 424 | ||
428 | printk("-----------------------------------------------------\n"); | 425 | printk("-----------------------------------------------------\n"); |
429 | printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); | 426 | printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); |
430 | printk("ppc64_interrupt_controller = 0x%ld\n", | ||
431 | ppc64_interrupt_controller); | ||
432 | printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); | 427 | printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); |
433 | printk("ppc64_caches.dcache_line_size = 0x%x\n", | 428 | printk("ppc64_caches.dcache_line_size = 0x%x\n", |
434 | ppc64_caches.dline_size); | 429 | ppc64_caches.dline_size); |
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index cdf5867838a6..fad8580f9081 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -218,7 +218,6 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) | |||
218 | { | 218 | { |
219 | struct vio_dev *viodev; | 219 | struct vio_dev *viodev; |
220 | unsigned int *unit_address; | 220 | unsigned int *unit_address; |
221 | unsigned int *irq_p; | ||
222 | 221 | ||
223 | /* we need the 'device_type' property, in order to match with drivers */ | 222 | /* we need the 'device_type' property, in order to match with drivers */ |
224 | if (of_node->type == NULL) { | 223 | if (of_node->type == NULL) { |
@@ -243,16 +242,7 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) | |||
243 | 242 | ||
244 | viodev->dev.platform_data = of_node_get(of_node); | 243 | viodev->dev.platform_data = of_node_get(of_node); |
245 | 244 | ||
246 | viodev->irq = NO_IRQ; | 245 | viodev->irq = irq_of_parse_and_map(of_node, 0); |
247 | irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL); | ||
248 | if (irq_p) { | ||
249 | int virq = virt_irq_create_mapping(*irq_p); | ||
250 | if (virq == NO_IRQ) { | ||
251 | printk(KERN_ERR "Unable to allocate interrupt " | ||
252 | "number for %s\n", of_node->full_name); | ||
253 | } else | ||
254 | viodev->irq = irq_offset_up(virq); | ||
255 | } | ||
256 | 246 | ||
257 | snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); | 247 | snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); |
258 | viodev->name = of_node->name; | 248 | viodev->name = of_node->name; |
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 97936f547f19..9d5da7896892 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -1,6 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * Cell Internal Interrupt Controller | 2 | * Cell Internal Interrupt Controller |
3 | * | 3 | * |
4 | * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
5 | * IBM, Corp. | ||
6 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | 7 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 |
5 | * | 8 | * |
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | 9 | * Author: Arnd Bergmann <arndb@de.ibm.com> |
@@ -25,11 +28,13 @@ | |||
25 | #include <linux/module.h> | 28 | #include <linux/module.h> |
26 | #include <linux/percpu.h> | 29 | #include <linux/percpu.h> |
27 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/ioport.h> | ||
28 | 32 | ||
29 | #include <asm/io.h> | 33 | #include <asm/io.h> |
30 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
31 | #include <asm/prom.h> | 35 | #include <asm/prom.h> |
32 | #include <asm/ptrace.h> | 36 | #include <asm/ptrace.h> |
37 | #include <asm/machdep.h> | ||
33 | 38 | ||
34 | #include "interrupt.h" | 39 | #include "interrupt.h" |
35 | #include "cbe_regs.h" | 40 | #include "cbe_regs.h" |
@@ -39,9 +44,25 @@ struct iic { | |||
39 | u8 target_id; | 44 | u8 target_id; |
40 | u8 eoi_stack[16]; | 45 | u8 eoi_stack[16]; |
41 | int eoi_ptr; | 46 | int eoi_ptr; |
47 | struct irq_host *host; | ||
42 | }; | 48 | }; |
43 | 49 | ||
44 | static DEFINE_PER_CPU(struct iic, iic); | 50 | static DEFINE_PER_CPU(struct iic, iic); |
51 | #define IIC_NODE_COUNT 2 | ||
52 | static struct irq_host *iic_hosts[IIC_NODE_COUNT]; | ||
53 | |||
54 | /* Convert between "pending" bits and hw irq number */ | ||
55 | static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) | ||
56 | { | ||
57 | unsigned char unit = bits.source & 0xf; | ||
58 | |||
59 | if (bits.flags & CBE_IIC_IRQ_IPI) | ||
60 | return IIC_IRQ_IPI0 | (bits.prio >> 4); | ||
61 | else if (bits.class <= 3) | ||
62 | return (bits.class << 4) | unit; | ||
63 | else | ||
64 | return IIC_IRQ_INVALID; | ||
65 | } | ||
45 | 66 | ||
46 | static void iic_mask(unsigned int irq) | 67 | static void iic_mask(unsigned int irq) |
47 | { | 68 | { |
@@ -65,197 +86,21 @@ static struct irq_chip iic_chip = { | |||
65 | .eoi = iic_eoi, | 86 | .eoi = iic_eoi, |
66 | }; | 87 | }; |
67 | 88 | ||
68 | /* XXX All of this has to be reworked completely. We need to assign real | ||
69 | * interrupt numbers to the external interrupts and remove all the hard coded | ||
70 | * interrupt maps (rely on the device-tree whenever possible). | ||
71 | * | ||
72 | * Basically, my scheme is to define the "pendings" bits to be the HW interrupt | ||
73 | * number (ignoring the data and flags here). That means we can sort-of split | ||
74 | * external sources based on priority, and we can use request_irq() on pretty | ||
75 | * much anything. | ||
76 | * | ||
77 | * For spider or axon, they have their own interrupt space. spider will just have | ||
79 | * local "hardware" interrupts 0...xx * node stride. The node stride is not | ||
79 | * necessary (separate interrupt chips will have separate HW number space), but | ||
80 | * will allow to be compatible with existing device-trees. | ||
81 | * | ||
82 | * All of this little world will get a standard remapping scheme to map those HW | ||
83 | * numbers into the linux flat irq number space. | ||
84 | */ | ||
85 | static int iic_external_get_irq(struct cbe_iic_pending_bits pending) | ||
86 | { | ||
87 | int irq; | ||
88 | unsigned char node, unit; | ||
89 | |||
90 | node = pending.source >> 4; | ||
91 | unit = pending.source & 0xf; | ||
92 | irq = -1; | ||
93 | |||
94 | /* | ||
95 | * This mapping is specific to the Cell Broadband | ||
96 | * Engine. We might need to get the numbers | ||
97 | * from the device tree to support future CPUs. | ||
98 | */ | ||
99 | switch (unit) { | ||
100 | case 0x00: | ||
101 | case 0x0b: | ||
102 | /* | ||
103 | * One of these units can be connected | ||
104 | * to an external interrupt controller. | ||
105 | */ | ||
106 | if (pending.class != 2) | ||
107 | break; | ||
108 | /* TODO: We might want to silently ignore cascade interrupts | ||
109 | * when no cascade handler exist yet | ||
110 | */ | ||
111 | irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE; | ||
112 | break; | ||
113 | case 0x01 ... 0x04: | ||
114 | case 0x07 ... 0x0a: | ||
115 | /* | ||
116 | * These units are connected to the SPEs | ||
117 | */ | ||
118 | if (pending.class > 2) | ||
119 | break; | ||
120 | irq = IIC_SPE_OFFSET | ||
121 | + pending.class * IIC_CLASS_STRIDE | ||
122 | + node * IIC_NODE_STRIDE | ||
123 | + unit; | ||
124 | break; | ||
125 | } | ||
126 | if (irq == -1) | ||
127 | printk(KERN_WARNING "Unexpected interrupt class %02x, " | ||
128 | "source %02x, prio %02x, cpu %02x\n", pending.class, | ||
129 | pending.source, pending.prio, smp_processor_id()); | ||
130 | return irq; | ||
131 | } | ||
132 | |||
133 | /* Get an IRQ number from the pending state register of the IIC */ | 89 | /* Get an IRQ number from the pending state register of the IIC */ |
134 | int iic_get_irq(struct pt_regs *regs) | 90 | static unsigned int iic_get_irq(struct pt_regs *regs) |
135 | { | ||
136 | struct iic *iic; | ||
137 | int irq; | ||
138 | struct cbe_iic_pending_bits pending; | ||
139 | |||
140 | iic = &__get_cpu_var(iic); | ||
141 | *(unsigned long *) &pending = | ||
142 | in_be64((unsigned long __iomem *) &iic->regs->pending_destr); | ||
143 | iic->eoi_stack[++iic->eoi_ptr] = pending.prio; | ||
144 | BUG_ON(iic->eoi_ptr > 15); | ||
145 | |||
146 | irq = -1; | ||
147 | if (pending.flags & CBE_IIC_IRQ_VALID) { | ||
148 | if (pending.flags & CBE_IIC_IRQ_IPI) { | ||
149 | irq = IIC_IPI_OFFSET + (pending.prio >> 4); | ||
150 | /* | ||
151 | if (irq > 0x80) | ||
152 | printk(KERN_WARNING "Unexpected IPI prio %02x" | ||
153 | "on CPU %02x\n", pending.prio, | ||
154 | smp_processor_id()); | ||
155 | */ | ||
156 | } else { | ||
157 | irq = iic_external_get_irq(pending); | ||
158 | } | ||
159 | } | ||
160 | return irq; | ||
161 | } | ||
162 | |||
163 | /* hardcoded part to be compatible with older firmware */ | ||
164 | |||
165 | static int __init setup_iic_hardcoded(void) | ||
166 | { | ||
167 | struct device_node *np; | ||
168 | int nodeid, cpu; | ||
169 | unsigned long regs; | ||
170 | struct iic *iic; | ||
171 | |||
172 | for_each_possible_cpu(cpu) { | ||
173 | iic = &per_cpu(iic, cpu); | ||
174 | nodeid = cpu/2; | ||
175 | |||
176 | for (np = of_find_node_by_type(NULL, "cpu"); | ||
177 | np; | ||
178 | np = of_find_node_by_type(np, "cpu")) { | ||
179 | if (nodeid == *(int *)get_property(np, "node-id", NULL)) | ||
180 | break; | ||
181 | } | ||
182 | |||
183 | if (!np) { | ||
184 | printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); | ||
185 | iic->regs = NULL; | ||
186 | iic->target_id = 0xff; | ||
187 | return -ENODEV; | ||
188 | } | ||
189 | |||
190 | regs = *(long *)get_property(np, "iic", NULL); | ||
191 | |||
192 | /* hack until we have decided on the devtree info */ | ||
193 | regs += 0x400; | ||
194 | if (cpu & 1) | ||
195 | regs += 0x20; | ||
196 | |||
197 | printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs); | ||
198 | iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs)); | ||
199 | iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe); | ||
200 | iic->eoi_stack[0] = 0xff; | ||
201 | } | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int __init setup_iic(void) | ||
207 | { | 91 | { |
208 | struct device_node *dn; | 92 | struct cbe_iic_pending_bits pending; |
209 | unsigned long *regs; | 93 | struct iic *iic; |
210 | char *compatible; | 94 | |
211 | unsigned *np, found = 0; | 95 | iic = &__get_cpu_var(iic); |
212 | struct iic *iic = NULL; | 96 | *(unsigned long *) &pending = |
213 | 97 | in_be64((unsigned long __iomem *) &iic->regs->pending_destr); | |
214 | for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { | 98 | iic->eoi_stack[++iic->eoi_ptr] = pending.prio; |
215 | compatible = (char *)get_property(dn, "compatible", NULL); | 99 | BUG_ON(iic->eoi_ptr > 15); |
216 | 100 | if (pending.flags & CBE_IIC_IRQ_VALID) | |
217 | if (!compatible) { | 101 | return irq_linear_revmap(iic->host, |
218 | printk(KERN_WARNING "no compatible property found !\n"); | 102 | iic_pending_to_hwnum(pending)); |
219 | continue; | 103 | return NO_IRQ; |
220 | } | ||
221 | |||
222 | if (strstr(compatible, "IBM,CBEA-Internal-Interrupt-Controller")) | ||
223 | regs = (unsigned long *)get_property(dn,"reg", NULL); | ||
224 | else | ||
225 | continue; | ||
226 | |||
227 | if (!regs) | ||
228 | printk(KERN_WARNING "IIC: no reg property\n"); | ||
229 | |||
230 | np = (unsigned int *)get_property(dn, "ibm,interrupt-server-ranges", NULL); | ||
231 | |||
232 | if (!np) { | ||
233 | printk(KERN_WARNING "IIC: CPU association not found\n"); | ||
234 | iic->regs = NULL; | ||
235 | iic->target_id = 0xff; | ||
236 | return -ENODEV; | ||
237 | } | ||
238 | |||
239 | iic = &per_cpu(iic, np[0]); | ||
240 | iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs)); | ||
241 | iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe); | ||
242 | iic->eoi_stack[0] = 0xff; | ||
243 | printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs); | ||
244 | |||
245 | iic = &per_cpu(iic, np[1]); | ||
246 | iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs)); | ||
247 | iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe); | ||
248 | iic->eoi_stack[0] = 0xff; | ||
249 | |||
250 | printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs); | ||
251 | |||
252 | found++; | ||
253 | } | ||
254 | |||
255 | if (found) | ||
256 | return 0; | ||
257 | else | ||
258 | return -ENODEV; | ||
259 | } | 104 | } |
260 | 105 | ||
261 | #ifdef CONFIG_SMP | 106 | #ifdef CONFIG_SMP |
@@ -263,12 +108,12 @@ static int __init setup_iic(void) | |||
263 | /* Use the highest interrupt priorities for IPI */ | 108 | /* Use the highest interrupt priorities for IPI */ |
264 | static inline int iic_ipi_to_irq(int ipi) | 109 | static inline int iic_ipi_to_irq(int ipi) |
265 | { | 110 | { |
266 | return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi; | 111 | return IIC_IRQ_IPI0 + IIC_NUM_IPIS - 1 - ipi; |
267 | } | 112 | } |
268 | 113 | ||
269 | static inline int iic_irq_to_ipi(int irq) | 114 | static inline int iic_irq_to_ipi(int irq) |
270 | { | 115 | { |
271 | return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET); | 116 | return IIC_NUM_IPIS - 1 - (irq - IIC_IRQ_IPI0); |
272 | } | 117 | } |
273 | 118 | ||
274 | void iic_setup_cpu(void) | 119 | void iic_setup_cpu(void) |
@@ -287,22 +132,51 @@ u8 iic_get_target_id(int cpu) | |||
287 | } | 132 | } |
288 | EXPORT_SYMBOL_GPL(iic_get_target_id); | 133 | EXPORT_SYMBOL_GPL(iic_get_target_id); |
289 | 134 | ||
135 | struct irq_host *iic_get_irq_host(int node) | ||
136 | { | ||
137 | if (node < 0 || node >= IIC_NODE_COUNT) | ||
138 | return NULL; | ||
139 | return iic_hosts[node]; | ||
140 | } | ||
141 | EXPORT_SYMBOL_GPL(iic_get_irq_host); | ||
142 | |||
143 | |||
290 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | 144 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) |
291 | { | 145 | { |
292 | smp_message_recv(iic_irq_to_ipi(irq), regs); | 146 | int ipi = (int)(long)dev_id; |
147 | |||
148 | smp_message_recv(ipi, regs); | ||
149 | |||
293 | return IRQ_HANDLED; | 150 | return IRQ_HANDLED; |
294 | } | 151 | } |
295 | 152 | ||
296 | static void iic_request_ipi(int ipi, const char *name) | 153 | static void iic_request_ipi(int ipi, const char *name) |
297 | { | 154 | { |
298 | int irq; | 155 | int node, virq; |
299 | 156 | ||
300 | irq = iic_ipi_to_irq(ipi); | 157 | for (node = 0; node < IIC_NODE_COUNT; node++) { |
301 | 158 | char *rname; | |
302 | /* IPIs are marked IRQF_DISABLED as they must run with irqs | 159 | if (iic_hosts[node] == NULL) |
303 | * disabled */ | 160 | continue; |
304 | set_irq_chip_and_handler(irq, &iic_chip, handle_percpu_irq); | 161 | virq = irq_create_mapping(iic_hosts[node], |
305 | request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL); | 162 | iic_ipi_to_irq(ipi), 0); |
163 | if (virq == NO_IRQ) { | ||
164 | printk(KERN_ERR | ||
165 | "iic: failed to map IPI %s on node %d\n", | ||
166 | name, node); | ||
167 | continue; | ||
168 | } | ||
169 | rname = kzalloc(strlen(name) + 16, GFP_KERNEL); | ||
170 | if (rname) | ||
171 | sprintf(rname, "%s node %d", name, node); | ||
172 | else | ||
173 | rname = (char *)name; | ||
174 | if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, | ||
175 | rname, (void *)(long)ipi)) | ||
176 | printk(KERN_ERR | ||
177 | "iic: failed to request IPI %s on node %d\n", | ||
178 | name, node); | ||
179 | } | ||
306 | } | 180 | } |
307 | 181 | ||
308 | void iic_request_IPIs(void) | 182 | void iic_request_IPIs(void) |
@@ -313,41 +187,119 @@ void iic_request_IPIs(void) | |||
313 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | 187 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); |
314 | #endif /* CONFIG_DEBUGGER */ | 188 | #endif /* CONFIG_DEBUGGER */ |
315 | } | 189 | } |
190 | |||
316 | #endif /* CONFIG_SMP */ | 191 | #endif /* CONFIG_SMP */ |
317 | 192 | ||
318 | static void __init iic_setup_builtin_handlers(void) | 193 | |
194 | static int iic_host_match(struct irq_host *h, struct device_node *node) | ||
195 | { | ||
196 | return h->host_data != NULL && node == h->host_data; | ||
197 | } | ||
198 | |||
199 | static int iic_host_map(struct irq_host *h, unsigned int virq, | ||
200 | irq_hw_number_t hw, unsigned int flags) | ||
201 | { | ||
202 | if (hw < IIC_IRQ_IPI0) | ||
203 | set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq); | ||
204 | else | ||
205 | set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int iic_host_xlate(struct irq_host *h, struct device_node *ct, | ||
210 | u32 *intspec, unsigned int intsize, | ||
211 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
212 | |||
319 | { | 213 | { |
320 | int be, isrc; | 214 | /* Currently, we don't translate anything. That needs to be fixed as |
215 | * we get better defined device-trees. iic interrupts have to be | ||
216 | * explicitly mapped by whoever needs them | ||
217 | */ | ||
218 | return -ENODEV; | ||
219 | } | ||
220 | |||
221 | static struct irq_host_ops iic_host_ops = { | ||
222 | .match = iic_host_match, | ||
223 | .map = iic_host_map, | ||
224 | .xlate = iic_host_xlate, | ||
225 | }; | ||
226 | |||
227 | static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, | ||
228 | struct irq_host *host) | ||
229 | { | ||
230 | /* XXX FIXME: should locate the linux CPU number from the HW cpu | ||
231 | * number properly. We are lucky for now | ||
232 | */ | ||
233 | struct iic *iic = &per_cpu(iic, hw_cpu); | ||
321 | 234 | ||
322 | /* XXX FIXME: Assume two threads per BE are present */ | 235 | iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); |
323 | for (be=0; be < num_present_cpus() / 2; be++) { | 236 | BUG_ON(iic->regs == NULL); |
324 | int irq; | ||
325 | 237 | ||
326 | /* setup SPE chip and handlers */ | 238 | iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe); |
327 | for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { | 239 | iic->eoi_stack[0] = 0xff; |
328 | irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; | 240 | iic->host = host; |
329 | set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq); | 241 | out_be64(&iic->regs->prio, 0); |
242 | |||
243 | printk(KERN_INFO "IIC for CPU %d at %lx mapped to %p, target id 0x%x\n", | ||
244 | hw_cpu, addr, iic->regs, iic->target_id); | ||
245 | } | ||
246 | |||
247 | static int __init setup_iic(void) | ||
248 | { | ||
249 | struct device_node *dn; | ||
250 | struct resource r0, r1; | ||
251 | struct irq_host *host; | ||
252 | int found = 0; | ||
253 | u32 *np; | ||
254 | |||
255 | for (dn = NULL; | ||
256 | (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) { | ||
257 | if (!device_is_compatible(dn, | ||
258 | "IBM,CBEA-Internal-Interrupt-Controller")) | ||
259 | continue; | ||
260 | np = (u32 *)get_property(dn, "ibm,interrupt-server-ranges", | ||
261 | NULL); | ||
262 | if (np == NULL) { | ||
263 | printk(KERN_WARNING "IIC: CPU association not found\n"); | ||
264 | of_node_put(dn); | ||
265 | return -ENODEV; | ||
330 | } | 266 | } |
331 | /* setup cascade chip */ | 267 | if (of_address_to_resource(dn, 0, &r0) || |
332 | irq = IIC_EXT_CASCADE + be * IIC_NODE_STRIDE; | 268 | of_address_to_resource(dn, 1, &r1)) { |
333 | set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq); | 269 | printk(KERN_WARNING "IIC: Can't resolve addresses\n"); |
270 | of_node_put(dn); | ||
271 | return -ENODEV; | ||
272 | } | ||
273 | host = NULL; | ||
274 | if (found < IIC_NODE_COUNT) { | ||
275 | host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, | ||
276 | IIC_SOURCE_COUNT, | ||
277 | &iic_host_ops, | ||
278 | IIC_IRQ_INVALID); | ||
279 | iic_hosts[found] = host; | ||
280 | BUG_ON(iic_hosts[found] == NULL); | ||
281 | iic_hosts[found]->host_data = of_node_get(dn); | ||
282 | found++; | ||
283 | } | ||
284 | init_one_iic(np[0], r0.start, host); | ||
285 | init_one_iic(np[1], r1.start, host); | ||
334 | } | 286 | } |
287 | |||
288 | if (found) | ||
289 | return 0; | ||
290 | else | ||
291 | return -ENODEV; | ||
335 | } | 292 | } |
336 | 293 | ||
337 | void __init iic_init_IRQ(void) | 294 | void __init iic_init_IRQ(void) |
338 | { | 295 | { |
339 | int cpu, irq_offset; | 296 | /* Discover and initialize iics */ |
340 | struct iic *iic; | ||
341 | |||
342 | if (setup_iic() < 0) | 297 | if (setup_iic() < 0) |
343 | setup_iic_hardcoded(); | 298 | panic("IIC: Failed to initialize !\n"); |
344 | 299 | ||
345 | irq_offset = 0; | 300 | /* Set master interrupt handling function */ |
346 | for_each_possible_cpu(cpu) { | 301 | ppc_md.get_irq = iic_get_irq; |
347 | iic = &per_cpu(iic, cpu); | ||
348 | if (iic->regs) | ||
349 | out_be64(&iic->regs->prio, 0xff); | ||
350 | } | ||
351 | iic_setup_builtin_handlers(); | ||
352 | 302 | ||
303 | /* Enable on current CPU */ | ||
304 | iic_setup_cpu(); | ||
353 | } | 305 | } |
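The interrupt.c conversion above is the basic recipe for the new irq_host scheme: allocate a linear host, attach the chip and flow handler from the ->map() callback the first time a source is mapped, and turn hardware source numbers back into linux numbers with irq_linear_revmap() when decoding. A stripped-down sketch of that shape follows; it is illustrative only, the foo_* names are hypothetical, and the ops are reduced to ->map() (the iic code above also supplies ->match() and ->xlate()):

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/irq.h>
    #include <asm/irq.h>

    /* Illustrative only: hypothetical PIC, not from this patch. */
    #define FOO_NR_SOURCES    16
    #define FOO_HWIRQ_INVALID 0xff

    static struct irq_host *foo_host;
    static struct irq_chip foo_chip;  /* .mask/.unmask/.eoi would be filled in */

    /* Called when a hw source is first mapped to a linux irq number */
    static int foo_host_map(struct irq_host *h, unsigned int virq,
                            irq_hw_number_t hw, unsigned int flags)
    {
            set_irq_chip_and_handler(virq, &foo_chip, handle_level_irq);
            return 0;
    }

    static struct irq_host_ops foo_host_ops = {
            .map = foo_host_map,
            /* .match and .xlate omitted here; the iic code supplies both */
    };

    static void __init foo_pic_init(void)
    {
            foo_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, FOO_NR_SOURCES,
                                      &foo_host_ops, FOO_HWIRQ_INVALID);
            BUG_ON(foo_host == NULL);
    }

    /* In the ppc_md.get_irq path: hw source -> linux irq via the reverse map */
    static unsigned int foo_decode(unsigned int hw_src)
    {
            return irq_linear_revmap(foo_host, hw_src);
    }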
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h index c74515aeb630..5560a92ec3ab 100644 --- a/arch/powerpc/platforms/cell/interrupt.h +++ b/arch/powerpc/platforms/cell/interrupt.h | |||
@@ -37,23 +37,22 @@ | |||
37 | */ | 37 | */ |
38 | 38 | ||
39 | enum { | 39 | enum { |
40 | IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */ | 40 | IIC_IRQ_INVALID = 0xff, |
41 | IIC_EXT_CASCADE = 0x20, /* There is no interrupt 32 on spider */ | 41 | IIC_IRQ_MAX = 0x3f, |
42 | IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */ | 42 | IIC_IRQ_EXT_IOIF0 = 0x20, |
43 | IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */ | 43 | IIC_IRQ_EXT_IOIF1 = 0x2b, |
44 | IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */ | 44 | IIC_IRQ_IPI0 = 0x40, |
45 | IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */ | 45 | IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ |
46 | IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ | 46 | IIC_SOURCE_COUNT = 0x50, |
47 | IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */ | ||
48 | }; | 47 | }; |
49 | 48 | ||
50 | extern void iic_init_IRQ(void); | 49 | extern void iic_init_IRQ(void); |
51 | extern int iic_get_irq(struct pt_regs *regs); | ||
52 | extern void iic_cause_IPI(int cpu, int mesg); | 50 | extern void iic_cause_IPI(int cpu, int mesg); |
53 | extern void iic_request_IPIs(void); | 51 | extern void iic_request_IPIs(void); |
54 | extern void iic_setup_cpu(void); | 52 | extern void iic_setup_cpu(void); |
55 | 53 | ||
56 | extern u8 iic_get_target_id(int cpu); | 54 | extern u8 iic_get_target_id(int cpu); |
55 | extern struct irq_host *iic_get_irq_host(int node); | ||
57 | 56 | ||
58 | extern void spider_init_IRQ(void); | 57 | extern void spider_init_IRQ(void); |
59 | 58 | ||
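The enum above flattens the per-node iic hardware number space: classes 0 to 3 sit in bits 5:4 with the unit in bits 3:0, which puts external and SPE sources at 0x00 to 0x3f (IIC_IRQ_EXT_IOIF0 is class 2, unit 0 = 0x20; IIC_IRQ_EXT_IOIF1 is class 2, unit 0xb = 0x2b), the 16 IPIs start at IIC_IRQ_IPI0 = 0x40, and IIC_SOURCE_COUNT = 0x50 spans the whole range. A hypothetical helper spelling out that arithmetic, illustrative only and mirroring iic_pending_to_hwnum() in interrupt.c:

    /* Compose an iic hardware number from an interrupt class (0..3) and
     * a unit/source nibble, e.g. class 2, unit 0xb -> IIC_IRQ_EXT_IOIF1. */
    static inline unsigned int iic_compose_hwnum(unsigned int class,
                                                 unsigned int unit)
    {
            return (class << 4) | (unit & 0xf);
    }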
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 70a4e903f7e2..282987d6d4a2 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c | |||
@@ -80,6 +80,14 @@ static void cell_progress(char *s, unsigned short hex) | |||
80 | printk("*** %04x : %s\n", hex, s ? s : ""); | 80 | printk("*** %04x : %s\n", hex, s ? s : ""); |
81 | } | 81 | } |
82 | 82 | ||
83 | static void __init cell_pcibios_fixup(void) | ||
84 | { | ||
85 | struct pci_dev *dev = NULL; | ||
86 | |||
87 | for_each_pci_dev(dev) | ||
88 | pci_read_irq_line(dev); | ||
89 | } | ||
90 | |||
83 | static void __init cell_init_irq(void) | 91 | static void __init cell_init_irq(void) |
84 | { | 92 | { |
85 | iic_init_IRQ(); | 93 | iic_init_IRQ(); |
@@ -130,8 +138,6 @@ static void __init cell_init_early(void) | |||
130 | 138 | ||
131 | cell_init_iommu(); | 139 | cell_init_iommu(); |
132 | 140 | ||
133 | ppc64_interrupt_controller = IC_CELL_PIC; | ||
134 | |||
135 | DBG(" <- cell_init_early()\n"); | 141 | DBG(" <- cell_init_early()\n"); |
136 | } | 142 | } |
137 | 143 | ||
@@ -178,8 +184,7 @@ define_machine(cell) { | |||
178 | .check_legacy_ioport = cell_check_legacy_ioport, | 184 | .check_legacy_ioport = cell_check_legacy_ioport, |
179 | .progress = cell_progress, | 185 | .progress = cell_progress, |
180 | .init_IRQ = cell_init_irq, | 186 | .init_IRQ = cell_init_irq, |
181 | .get_irq = iic_get_irq, | 187 | .pcibios_fixup = cell_pcibios_fixup, |
182 | |||
183 | #ifdef CONFIG_KEXEC | 188 | #ifdef CONFIG_KEXEC |
184 | .machine_kexec = default_machine_kexec, | 189 | .machine_kexec = default_machine_kexec, |
185 | .machine_kexec_prepare = default_machine_kexec_prepare, | 190 | .machine_kexec_prepare = default_machine_kexec_prepare, |
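Two pieces of platform glue change in this setup.c hunk: the interrupt decode entry point is now installed at runtime (iic_init_IRQ() assigns ppc_md.get_irq) rather than through a static .get_irq member, and PCI devices get their virtual irq numbers from pci_read_irq_line() in a pcibios_fixup hook. A rough sketch of that glue for a hypothetical platform (foo_* names invented, illustrative only):

    #include <linux/init.h>
    #include <linux/pci.h>
    #include <asm/machdep.h>
    #include <asm/irq.h>
    #include <asm/ptrace.h>

    /* Illustrative only: hypothetical platform glue, not from this patch. */
    static unsigned int foo_get_irq(struct pt_regs *regs)
    {
            return NO_IRQ;  /* a real PIC would decode the pending source here */
    }

    static void __init foo_init_irq(void)
    {
            /* ... set up the interrupt controller and its irq_host ... */
            ppc_md.get_irq = foo_get_irq;  /* installed at runtime now */
    }

    static void __init foo_pcibios_fixup(void)
    {
            struct pci_dev *dev = NULL;

            /* Let the generic code resolve each device's interrupt line
             * through the new irq mapping layer. */
            for_each_pci_dev(dev)
                    pci_read_irq_line(dev);
    }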
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 98425acb6cda..ae7ef88f1a37 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
25 | #include <linux/ioport.h> | ||
25 | 26 | ||
26 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
27 | #include <asm/prom.h> | 28 | #include <asm/prom.h> |
@@ -56,58 +57,67 @@ enum { | |||
56 | REISWAITEN = 0x508, /* Reissue Wait Control*/ | 57 | REISWAITEN = 0x508, /* Reissue Wait Control*/ |
57 | }; | 58 | }; |
58 | 59 | ||
59 | static void __iomem *spider_pics[4]; | 60 | #define SPIDER_CHIP_COUNT 4 |
61 | #define SPIDER_SRC_COUNT 64 | ||
62 | #define SPIDER_IRQ_INVALID 63 | ||
60 | 63 | ||
61 | static void __iomem *spider_get_pic(int irq) | 64 | struct spider_pic { |
62 | { | 65 | struct irq_host *host; |
63 | int node = irq / IIC_NODE_STRIDE; | 66 | struct device_node *of_node; |
64 | irq %= IIC_NODE_STRIDE; | 67 | void __iomem *regs; |
65 | 68 | unsigned int node_id; | |
66 | if (irq >= IIC_EXT_OFFSET && | 69 | }; |
67 | irq < IIC_EXT_OFFSET + IIC_NUM_EXT && | 70 | static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; |
68 | spider_pics) | ||
69 | return spider_pics[node]; | ||
70 | return NULL; | ||
71 | } | ||
72 | 71 | ||
73 | static int spider_get_nr(unsigned int irq) | 72 | static struct spider_pic *spider_virq_to_pic(unsigned int virq) |
74 | { | 73 | { |
75 | return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; | 74 | return irq_map[virq].host->host_data; |
76 | } | 75 | } |
77 | 76 | ||
78 | static void __iomem *spider_get_irq_config(int irq) | 77 | static void __iomem *spider_get_irq_config(struct spider_pic *pic, |
78 | unsigned int src) | ||
79 | { | 79 | { |
80 | void __iomem *pic; | 80 | return pic->regs + TIR_CFGA + 8 * src; |
81 | pic = spider_get_pic(irq); | ||
82 | return pic + TIR_CFGA + 8 * spider_get_nr(irq); | ||
83 | } | 81 | } |
84 | 82 | ||
85 | static void spider_unmask_irq(unsigned int irq) | 83 | static void spider_unmask_irq(unsigned int virq) |
86 | { | 84 | { |
87 | int nodeid = (irq / IIC_NODE_STRIDE) * 0x10; | 85 | struct spider_pic *pic = spider_virq_to_pic(virq); |
88 | void __iomem *cfg = spider_get_irq_config(irq); | 86 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); |
89 | irq = spider_get_nr(irq); | ||
90 | 87 | ||
91 | /* FIXME: Most of that is configuration and has nothing to do with enabling/disable, | 88 | /* We use no locking as we should be covered by the descriptor lock |
92 | * besides, it's also partially bogus. | 89 | * for access to individual source configuration registers |
93 | */ | 90 | */ |
94 | out_be32(cfg, (in_be32(cfg) & ~0xf0)| 0x3107000eu | nodeid); | 91 | out_be32(cfg, in_be32(cfg) | 0x30000000u); |
95 | out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq); | ||
96 | } | 92 | } |
97 | 93 | ||
98 | static void spider_mask_irq(unsigned int irq) | 94 | static void spider_mask_irq(unsigned int virq) |
99 | { | 95 | { |
100 | void __iomem *cfg = spider_get_irq_config(irq); | 96 | struct spider_pic *pic = spider_virq_to_pic(virq); |
101 | irq = spider_get_nr(irq); | 97 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); |
102 | 98 | ||
99 | /* We use no locking as we should be covered by the descriptor lock | ||
100 | * for access to individual source configuration registers | ||
101 | */ | ||
103 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | 102 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); |
104 | } | 103 | } |
105 | 104 | ||
106 | static void spider_ack_irq(unsigned int irq) | 105 | static void spider_ack_irq(unsigned int virq) |
107 | { | 106 | { |
108 | /* Should reset edge detection logic but we don't configure any edge interrupt | 107 | struct spider_pic *pic = spider_virq_to_pic(virq); |
109 | * at the moment. | 108 | unsigned int src = irq_map[virq].hwirq; |
109 | |||
110 | /* Reset edge detection logic if necessary | ||
110 | */ | 111 | */ |
112 | if (get_irq_desc(virq)->status & IRQ_LEVEL) | ||
113 | return; | ||
114 | |||
115 | /* Only interrupts 47 to 50 can be set to edge */ | ||
116 | if (src < 47 || src > 50) | ||
117 | return; | ||
118 | |||
119 | /* Perform the clear of the edge logic */ | ||
120 | out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); | ||
111 | } | 121 | } |
112 | 122 | ||
113 | static struct irq_chip spider_pic = { | 123 | static struct irq_chip spider_pic = { |
@@ -117,102 +127,243 @@ static struct irq_chip spider_pic = { | |||
117 | .ack = spider_ack_irq, | 127 | .ack = spider_ack_irq, |
118 | }; | 128 | }; |
119 | 129 | ||
120 | static int spider_get_irq(int node) | 130 | static int spider_host_match(struct irq_host *h, struct device_node *node) |
131 | { | ||
132 | struct spider_pic *pic = h->host_data; | ||
133 | return node == pic->of_node; | ||
134 | } | ||
135 | |||
136 | static int spider_host_map(struct irq_host *h, unsigned int virq, | ||
137 | irq_hw_number_t hw, unsigned int flags) | ||
121 | { | 138 | { |
122 | unsigned long cs; | 139 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; |
123 | void __iomem *regs = spider_pics[node]; | 140 | struct spider_pic *pic = h->host_data; |
141 | void __iomem *cfg = spider_get_irq_config(pic, hw); | ||
142 | int level = 0; | ||
143 | u32 ic; | ||
144 | |||
145 | /* Note that only level high is supported for most interrupts */ | ||
146 | if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH && | ||
147 | (hw < 47 || hw > 50)) | ||
148 | return -EINVAL; | ||
149 | |||
150 | /* Decode sense type */ | ||
151 | switch(sense) { | ||
152 | case IRQ_TYPE_EDGE_RISING: | ||
153 | ic = 0x3; | ||
154 | break; | ||
155 | case IRQ_TYPE_EDGE_FALLING: | ||
156 | ic = 0x2; | ||
157 | break; | ||
158 | case IRQ_TYPE_LEVEL_LOW: | ||
159 | ic = 0x0; | ||
160 | level = 1; | ||
161 | break; | ||
162 | case IRQ_TYPE_LEVEL_HIGH: | ||
163 | case IRQ_TYPE_NONE: | ||
164 | ic = 0x1; | ||
165 | level = 1; | ||
166 | break; | ||
167 | default: | ||
168 | return -EINVAL; | ||
169 | } | ||
124 | 170 | ||
125 | cs = in_be32(regs + TIR_CS) >> 24; | 171 | /* Configure the source. One gross hack that was there before and |
172 | * that I've kept around is the priority to the BE which I set to | ||
173 | * be the same as the interrupt source number. I don't know whether | ||
174 | * that's supposed to make any kind of sense; however, we'll have to | ||
175 | * decide that, but for now, I'm not changing the behaviour. | ||
176 | */ | ||
177 | out_be32(cfg, (ic << 24) | (0x7 << 16) | (pic->node_id << 4) | 0xe); | ||
178 | out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff)); | ||
126 | 179 | ||
127 | if (cs == 63) | 180 | if (level) |
128 | return -1; | 181 | get_irq_desc(virq)->status |= IRQ_LEVEL; |
129 | else | 182 | set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); |
130 | return cs; | 183 | return 0; |
184 | } | ||
185 | |||
186 | static int spider_host_xlate(struct irq_host *h, struct device_node *ct, | ||
187 | u32 *intspec, unsigned int intsize, | ||
188 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
189 | |||
190 | { | ||
191 | /* Spider interrupts have 2 cells, first is the interrupt source, | ||
192 | * second, well, I don't know for sure yet ... We mask the top bits | ||
193 | * because old device-trees encode a node number in there | ||
194 | */ | ||
195 | *out_hwirq = intspec[0] & 0x3f; | ||
196 | *out_flags = IRQ_TYPE_LEVEL_HIGH; | ||
197 | return 0; | ||
131 | } | 198 | } |
132 | 199 | ||
200 | static struct irq_host_ops spider_host_ops = { | ||
201 | .match = spider_host_match, | ||
202 | .map = spider_host_map, | ||
203 | .xlate = spider_host_xlate, | ||
204 | }; | ||
205 | |||
133 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc, | 206 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc, |
134 | struct pt_regs *regs) | 207 | struct pt_regs *regs) |
135 | { | 208 | { |
136 | int node = (int)(long)desc->handler_data; | 209 | struct spider_pic *pic = desc->handler_data; |
137 | int cascade_irq; | 210 | unsigned int cs, virq; |
138 | 211 | ||
139 | cascade_irq = spider_get_irq(node); | 212 | cs = in_be32(pic->regs + TIR_CS) >> 24; |
140 | generic_handle_irq(cascade_irq, regs); | 213 | if (cs == SPIDER_IRQ_INVALID) |
214 | virq = NO_IRQ; | ||
215 | else | ||
216 | virq = irq_linear_revmap(pic->host, cs); | ||
217 | if (virq != NO_IRQ) | ||
218 | generic_handle_irq(virq, regs); | ||
141 | desc->chip->eoi(irq); | 219 | desc->chip->eoi(irq); |
142 | } | 220 | } |
143 | 221 | ||
144 | /* hardcoded part to be compatible with older firmware */ | 222 | /* For hooking up the cascade we have a problem. Our device-tree is |
223 | * crap and we don't know on which BE iic interrupt we are hooked on, at | ||
224 | * least not the "standard" way. We can reconstitute it based on two | ||
225 | * pieces of information though: which BE node we are connected to and whether | ||
226 | * we are connected to IOIF0 or IOIF1. Right now, we really only care | ||
227 | * about the IBM cell blade and we know that its firmware gives us an | ||
228 | * interrupt-map property which is pretty strange. | ||
229 | */ | ||
230 | static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) | ||
231 | { | ||
232 | unsigned int virq; | ||
233 | u32 *imap, *tmp; | ||
234 | int imaplen, intsize, unit; | ||
235 | struct device_node *iic; | ||
236 | struct irq_host *iic_host; | ||
237 | |||
238 | #if 0 /* Enable that when we have a way to retrieve the node as well */ | ||
239 | /* First, we check whether we have a real "interrupts" in the device | ||
240 | * tree in case the device-tree is ever fixed | ||
241 | */ | ||
242 | struct of_irq oirq; | ||
243 | if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) { | ||
244 | virq = irq_create_of_mapping(oirq.controller, oirq.specifier, | ||
245 | oirq.size); | ||
246 | goto bail; | ||
247 | } | ||
248 | #endif | ||
249 | |||
250 | /* Now do the horrible hacks */ | ||
251 | tmp = (u32 *)get_property(pic->of_node, "#interrupt-cells", NULL); | ||
252 | if (tmp == NULL) | ||
253 | return NO_IRQ; | ||
254 | intsize = *tmp; | ||
255 | imap = (u32 *)get_property(pic->of_node, "interrupt-map", &imaplen); | ||
256 | if (imap == NULL || imaplen < (intsize + 1)) | ||
257 | return NO_IRQ; | ||
258 | iic = of_find_node_by_phandle(imap[intsize]); | ||
259 | if (iic == NULL) | ||
260 | return NO_IRQ; | ||
261 | imap += intsize + 1; | ||
262 | tmp = (u32 *)get_property(iic, "#interrupt-cells", NULL); | ||
263 | if (tmp == NULL) | ||
264 | return NO_IRQ; | ||
265 | intsize = *tmp; | ||
266 | /* Assume unit is last entry of interrupt specifier */ | ||
267 | unit = imap[intsize - 1]; | ||
268 | /* Ok, we have a unit, now let's try to get the node */ | ||
269 | tmp = (u32 *)get_property(iic, "ibm,interrupt-server-ranges", NULL); | ||
270 | if (tmp == NULL) { | ||
271 | of_node_put(iic); | ||
272 | return NO_IRQ; | ||
273 | } | ||
274 | /* ugly as hell but works for now */ | ||
275 | pic->node_id = (*tmp) >> 1; | ||
276 | of_node_put(iic); | ||
277 | |||
278 | /* Ok, now let's get cracking. You may ask me why I just didn't match | ||
279 | * the iic host from the iic OF node, but that way I'm still compatible | ||
280 | * with really really old old firmwares for which we don't have a node | ||
281 | */ | ||
282 | iic_host = iic_get_irq_host(pic->node_id); | ||
283 | if (iic_host == NULL) | ||
284 | return NO_IRQ; | ||
285 | /* Manufacture an IIC interrupt number of class 2 */ | ||
286 | virq = irq_create_mapping(iic_host, 0x20 | unit, 0); | ||
287 | if (virq == NO_IRQ) | ||
288 | printk(KERN_ERR "spider_pic: failed to map cascade !"); | ||
289 | return virq; | ||
290 | } | ||
291 | |||
145 | 292 | ||
146 | static void __init spider_init_one(int node, unsigned long addr) | 293 | static void __init spider_init_one(struct device_node *of_node, int chip, |
294 | unsigned long addr) | ||
147 | { | 295 | { |
148 | int n, irq; | 296 | struct spider_pic *pic = &spider_pics[chip]; |
297 | int i, virq; | ||
149 | 298 | ||
150 | spider_pics[node] = ioremap(addr, 0x800); | 299 | /* Map registers */ |
151 | if (spider_pics[node] == NULL) | 300 | pic->regs = ioremap(addr, 0x1000); |
301 | if (pic->regs == NULL) | ||
152 | panic("spider_pic: can't map registers !"); | 302 | panic("spider_pic: can't map registers !"); |
153 | 303 | ||
154 | printk(KERN_INFO "spider_pic: mapped for node %d, addr: 0x%lx mapped to %p\n", | 304 | /* Allocate a host */ |
155 | node, addr, spider_pics[node]); | 305 | pic->host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, SPIDER_SRC_COUNT, |
306 | &spider_host_ops, SPIDER_IRQ_INVALID); | ||
307 | if (pic->host == NULL) | ||
308 | panic("spider_pic: can't allocate irq host !"); | ||
309 | pic->host->host_data = pic; | ||
156 | 310 | ||
157 | for (n = 0; n < IIC_NUM_EXT; n++) { | 311 | /* Fill out other bits */ |
158 | if (n == IIC_EXT_CASCADE) | 312 | pic->of_node = of_node_get(of_node); |
159 | continue; | 313 | |
160 | irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; | 314 | /* Go through all sources and disable them */ |
161 | set_irq_chip_and_handler(irq, &spider_pic, handle_level_irq); | 315 | for (i = 0; i < SPIDER_SRC_COUNT; i++) { |
162 | get_irq_desc(irq)->status |= IRQ_LEVEL; | 316 | void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; |
317 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | ||
163 | } | 318 | } |
164 | 319 | ||
165 | /* do not mask any interrupts because of level */ | 320 | /* do not mask any interrupts because of level */ |
166 | out_be32(spider_pics[node] + TIR_MSK, 0x0); | 321 | out_be32(pic->regs + TIR_MSK, 0x0); |
167 | |||
168 | /* disable edge detection clear */ | ||
169 | /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ | ||
170 | 322 | ||
171 | /* enable interrupt packets to be output */ | 323 | /* enable interrupt packets to be output */ |
172 | out_be32(spider_pics[node] + TIR_PIEN, | 324 | out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1); |
173 | in_be32(spider_pics[node] + TIR_PIEN) | 0x1); | ||
174 | 325 | ||
175 | /* Hook up cascade */ | 326 | /* Hook up the cascade interrupt to the iic and nodeid */ |
176 | irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE; | 327 | virq = spider_find_cascade_and_node(pic); |
177 | set_irq_data(irq, (void *)(long)node); | 328 | if (virq == NO_IRQ) |
178 | set_irq_chained_handler(irq, spider_irq_cascade); | 329 | return; |
330 | set_irq_data(virq, pic); | ||
331 | set_irq_chained_handler(virq, spider_irq_cascade); | ||
332 | |||
333 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", | ||
334 | pic->node_id, addr, of_node->full_name); | ||
179 | 335 | ||
180 | /* Enable the interrupt detection enable bit. Do this last! */ | 336 | /* Enable the interrupt detection enable bit. Do this last! */ |
181 | out_be32(spider_pics[node] + TIR_DEN, | 337 | out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); |
182 | in_be32(spider_pics[node] + TIR_DEN) | 0x1); | ||
183 | } | 338 | } |
184 | 339 | ||
185 | void __init spider_init_IRQ(void) | 340 | void __init spider_init_IRQ(void) |
186 | { | 341 | { |
187 | unsigned long *spider_reg; | 342 | struct resource r; |
188 | struct device_node *dn; | 343 | struct device_node *dn; |
189 | char *compatible; | 344 | int chip = 0; |
190 | int node = 0; | 345 | |
191 | 346 | /* XXX node numbers are totally bogus. We _hope_ we get the device | |
192 | /* XXX node numbers are totally bogus. We _hope_ we get the device nodes in the right | 347 | * nodes in the right order here but that's definitely not guaranteed, |
193 | * order here but that's definitely not guaranteed, we need to get the node from the | 348 | * we need to get the node from the device tree instead. |
194 | * device tree instead. There is currently no proper property for it (but our whole | 349 | * There is currently no proper property for it (but our whole |
195 | * device-tree is bogus anyway) so all we can do is pray or maybe test the address | 350 | * device-tree is bogus anyway) so all we can do is pray or maybe test |
196 | * and deduce the node-id | 351 | * the address and deduce the node-id |
197 | */ | 352 | */ |
198 | for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) { | 353 | for (dn = NULL; |
199 | compatible = (char *)get_property(dn, "compatible", NULL); | 354 | (dn = of_find_node_by_name(dn, "interrupt-controller"));) { |
200 | 355 | if (device_is_compatible(dn, "CBEA,platform-spider-pic")) { | |
201 | if (!compatible) | 356 | if (of_address_to_resource(dn, 0, &r)) { |
202 | continue; | 357 | printk(KERN_WARNING "spider-pic: Failed\n"); |
203 | 358 | continue; | |
204 | if (strstr(compatible, "CBEA,platform-spider-pic")) | 359 | } |
205 | spider_reg = (unsigned long *)get_property(dn, "reg", NULL); | 360 | } else if (device_is_compatible(dn, "sti,platform-spider-pic") |
206 | else if (strstr(compatible, "sti,platform-spider-pic") && (node < 2)) { | 361 | && (chip < 2)) { |
207 | static long hard_coded_pics[] = { 0x24000008000, 0x34000008000 }; | 362 | static long hard_coded_pics[] = |
208 | spider_reg = &hard_coded_pics[node]; | 363 | { 0x24000008000, 0x34000008000 }; |
364 | r.start = hard_coded_pics[chip]; | ||
209 | } else | 365 | } else |
210 | continue; | 366 | continue; |
211 | 367 | spider_init_one(dn, chip++, r.start); | |
212 | if (spider_reg == NULL) | ||
213 | printk(KERN_ERR "spider_pic: No address for node %d\n", node); | ||
214 | |||
215 | spider_init_one(node, *spider_reg); | ||
216 | node++; | ||
217 | } | 368 | } |
218 | } | 369 | } |
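The spider rework replaces the hardcoded cascade numbers with the generic chained-handler pattern: the handler data carries the PIC instance, the pending source is read from the hardware, reverse-mapped through the PIC's linear host, passed to generic_handle_irq(), and the cascade interrupt itself is EOI'd on the parent controller. In isolation the pattern looks roughly like the sketch below (foo_* names are hypothetical and the register read is a placeholder; not part of the patch):

    #include <linux/irq.h>
    #include <asm/io.h>
    #include <asm/irq.h>
    #include <asm/ptrace.h>

    /* Illustrative only: hypothetical cascaded PIC, not from this patch. */
    struct foo_pic {
            struct irq_host *host;
            void __iomem    *regs;
    };

    /* Placeholder for a hardware-specific "current source" read,
     * e.g. spider's TIR_CS register. */
    static unsigned int foo_read_current_source(struct foo_pic *pic)
    {
            return in_be32(pic->regs) >> 24;
    }

    static void foo_cascade(unsigned int irq, struct irq_desc *desc,
                            struct pt_regs *regs)
    {
            struct foo_pic *pic = desc->handler_data;  /* set via set_irq_data() */
            unsigned int src = foo_read_current_source(pic);
            unsigned int virq = irq_linear_revmap(pic->host, src);

            if (virq != NO_IRQ)
                    generic_handle_irq(virq, regs);

            /* Let the parent controller finish (EOI) the cascade interrupt */
            desc->chip->eoi(irq);
    }

    /* Hook-up, as spider_init_one() does for its iic cascade:
     *
     *      set_irq_data(cascade_virq, pic);
     *      set_irq_chained_handler(cascade_virq, foo_cascade);
     */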
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 656c1ef5f4ad..5d2313a6c82b 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -264,51 +264,57 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs) | |||
264 | return stat ? IRQ_HANDLED : IRQ_NONE; | 264 | return stat ? IRQ_HANDLED : IRQ_NONE; |
265 | } | 265 | } |
266 | 266 | ||
267 | static int | 267 | static int spu_request_irqs(struct spu *spu) |
268 | spu_request_irqs(struct spu *spu) | ||
269 | { | 268 | { |
270 | int ret; | 269 | int ret = 0; |
271 | int irq_base; | ||
272 | |||
273 | irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; | ||
274 | |||
275 | snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number); | ||
276 | ret = request_irq(irq_base + spu->isrc, | ||
277 | spu_irq_class_0, IRQF_DISABLED, spu->irq_c0, spu); | ||
278 | if (ret) | ||
279 | goto out; | ||
280 | |||
281 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); | ||
282 | ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, | ||
283 | spu_irq_class_1, IRQF_DISABLED, spu->irq_c1, spu); | ||
284 | if (ret) | ||
285 | goto out1; | ||
286 | 270 | ||
287 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); | 271 | if (spu->irqs[0] != NO_IRQ) { |
288 | ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, | 272 | snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", |
289 | spu_irq_class_2, IRQF_DISABLED, spu->irq_c2, spu); | 273 | spu->number); |
290 | if (ret) | 274 | ret = request_irq(spu->irqs[0], spu_irq_class_0, |
291 | goto out2; | 275 | IRQF_DISABLED, |
292 | goto out; | 276 | spu->irq_c0, spu); |
277 | if (ret) | ||
278 | goto bail0; | ||
279 | } | ||
280 | if (spu->irqs[1] != NO_IRQ) { | ||
281 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", | ||
282 | spu->number); | ||
283 | ret = request_irq(spu->irqs[1], spu_irq_class_1, | ||
284 | IRQF_DISABLED, | ||
285 | spu->irq_c1, spu); | ||
286 | if (ret) | ||
287 | goto bail1; | ||
288 | } | ||
289 | if (spu->irqs[2] != NO_IRQ) { | ||
290 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", | ||
291 | spu->number); | ||
292 | ret = request_irq(spu->irqs[2], spu_irq_class_2, | ||
293 | IRQF_DISABLED, | ||
294 | spu->irq_c2, spu); | ||
295 | if (ret) | ||
296 | goto bail2; | ||
297 | } | ||
298 | return 0; | ||
293 | 299 | ||
294 | out2: | 300 | bail2: |
295 | free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); | 301 | if (spu->irqs[1] != NO_IRQ) |
296 | out1: | 302 | free_irq(spu->irqs[1], spu); |
297 | free_irq(irq_base + spu->isrc, spu); | 303 | bail1: |
298 | out: | 304 | if (spu->irqs[0] != NO_IRQ) |
305 | free_irq(spu->irqs[0], spu); | ||
306 | bail0: | ||
299 | return ret; | 307 | return ret; |
300 | } | 308 | } |
301 | 309 | ||
302 | static void | 310 | static void spu_free_irqs(struct spu *spu) |
303 | spu_free_irqs(struct spu *spu) | ||
304 | { | 311 | { |
305 | int irq_base; | 312 | if (spu->irqs[0] != NO_IRQ) |
306 | 313 | free_irq(spu->irqs[0], spu); | |
307 | irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; | 314 | if (spu->irqs[1] != NO_IRQ) |
308 | 315 | free_irq(spu->irqs[1], spu); | |
309 | free_irq(irq_base + spu->isrc, spu); | 316 | if (spu->irqs[2] != NO_IRQ) |
310 | free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); | 317 | free_irq(spu->irqs[2], spu); |
311 | free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu); | ||
312 | } | 318 | } |
313 | 319 | ||
314 | static LIST_HEAD(spu_list); | 320 | static LIST_HEAD(spu_list); |
@@ -559,17 +565,38 @@ static void spu_unmap(struct spu *spu) | |||
559 | iounmap((u8 __iomem *)spu->local_store); | 565 | iounmap((u8 __iomem *)spu->local_store); |
560 | } | 566 | } |
561 | 567 | ||
568 | /* This function shall be abstracted for HV platforms */ | ||
569 | static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) | ||
570 | { | ||
571 | struct irq_host *host; | ||
572 | unsigned int isrc; | ||
573 | u32 *tmp; | ||
574 | |||
575 | host = iic_get_irq_host(spu->node); | ||
576 | if (host == NULL) | ||
577 | return -ENODEV; | ||
578 | |||
579 | /* Get the interrupt source from the device-tree */ | ||
580 | tmp = (u32 *)get_property(np, "isrc", NULL); | ||
581 | if (!tmp) | ||
582 | return -ENODEV; | ||
583 | spu->isrc = isrc = tmp[0]; | ||
584 | |||
585 | /* Now map interrupts of all 3 classes */ | ||
586 | spu->irqs[0] = irq_create_mapping(host, 0x00 | isrc, 0); | ||
587 | spu->irqs[1] = irq_create_mapping(host, 0x10 | isrc, 0); | ||
588 | spu->irqs[2] = irq_create_mapping(host, 0x20 | isrc, 0); | ||
589 | |||
590 | /* Right now, we only fail if class 2 failed */ | ||
591 | return spu->irqs[2] == NO_IRQ ? -EINVAL : 0; | ||
592 | } | ||
593 | |||
562 | static int __init spu_map_device(struct spu *spu, struct device_node *node) | 594 | static int __init spu_map_device(struct spu *spu, struct device_node *node) |
563 | { | 595 | { |
564 | char *prop; | 596 | char *prop; |
565 | int ret; | 597 | int ret; |
566 | 598 | ||
567 | ret = -ENODEV; | 599 | ret = -ENODEV; |
568 | prop = get_property(node, "isrc", NULL); | ||
569 | if (!prop) | ||
570 | goto out; | ||
571 | spu->isrc = *(unsigned int *)prop; | ||
572 | |||
573 | spu->name = get_property(node, "name", NULL); | 600 | spu->name = get_property(node, "name", NULL); |
574 | if (!spu->name) | 601 | if (!spu->name) |
575 | goto out; | 602 | goto out; |
@@ -636,7 +663,8 @@ static int spu_create_sysdev(struct spu *spu) | |||
636 | return ret; | 663 | return ret; |
637 | } | 664 | } |
638 | 665 | ||
639 | sysdev_create_file(&spu->sysdev, &attr_isrc); | 666 | if (spu->isrc != 0) |
667 | sysdev_create_file(&spu->sysdev, &attr_isrc); | ||
640 | sysfs_add_device_to_node(&spu->sysdev, spu->nid); | 668 | sysfs_add_device_to_node(&spu->sysdev, spu->nid); |
641 | 669 | ||
642 | return 0; | 670 | return 0; |
@@ -668,6 +696,9 @@ static int __init create_spu(struct device_node *spe) | |||
668 | spu->nid = of_node_to_nid(spe); | 696 | spu->nid = of_node_to_nid(spe); |
669 | if (spu->nid == -1) | 697 | if (spu->nid == -1) |
670 | spu->nid = 0; | 698 | spu->nid = 0; |
699 | ret = spu_map_interrupts(spu, spe); | ||
700 | if (ret) | ||
701 | goto out_unmap; | ||
671 | spin_lock_init(&spu->register_lock); | 702 | spin_lock_init(&spu->register_lock); |
672 | spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); | 703 | spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); |
673 | spu_mfc_sr1_set(spu, 0x33); | 704 | spu_mfc_sr1_set(spu, 0x33); |
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 66c253498803..6802cdc3168a 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
19 | #include <asm/sections.h> | 19 | #include <asm/sections.h> |
20 | #include <asm/pci-bridge.h> | 20 | #include <asm/pci-bridge.h> |
21 | #include <asm/open_pic.h> | ||
22 | #include <asm/grackle.h> | 21 | #include <asm/grackle.h> |
23 | #include <asm/rtas.h> | 22 | #include <asm/rtas.h> |
24 | 23 | ||
@@ -161,15 +160,9 @@ void __init | |||
161 | chrp_pcibios_fixup(void) | 160 | chrp_pcibios_fixup(void) |
162 | { | 161 | { |
163 | struct pci_dev *dev = NULL; | 162 | struct pci_dev *dev = NULL; |
164 | struct device_node *np; | ||
165 | 163 | ||
166 | /* PCI interrupts are controlled by the OpenPIC */ | 164 | for_each_pci_dev(dev) |
167 | for_each_pci_dev(dev) { | 165 | pci_read_irq_line(dev); |
168 | np = pci_device_to_OF_node(dev); | ||
169 | if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0)) | ||
170 | dev->irq = np->intrs[0].line; | ||
171 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); | ||
172 | } | ||
173 | } | 166 | } |
174 | 167 | ||
175 | #define PRG_CL_RESET_VALID 0x00010000 | 168 | #define PRG_CL_RESET_VALID 0x00010000 |
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index a5dffc8d93a9..bb10171132fa 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -59,7 +59,7 @@ void rtas_indicator_progress(char *, unsigned short); | |||
59 | int _chrp_type; | 59 | int _chrp_type; |
60 | EXPORT_SYMBOL(_chrp_type); | 60 | EXPORT_SYMBOL(_chrp_type); |
61 | 61 | ||
62 | struct mpic *chrp_mpic; | 62 | static struct mpic *chrp_mpic; |
63 | 63 | ||
64 | /* Used for doing CHRP event-scans */ | 64 | /* Used for doing CHRP event-scans */ |
65 | DEFINE_PER_CPU(struct timer_list, heartbeat_timer); | 65 | DEFINE_PER_CPU(struct timer_list, heartbeat_timer); |
@@ -315,19 +315,13 @@ chrp_event_scan(unsigned long unused) | |||
315 | jiffies + event_scan_interval); | 315 | jiffies + event_scan_interval); |
316 | } | 316 | } |
317 | 317 | ||
318 | void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, | 318 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, |
319 | struct pt_regs *regs) | 319 | struct pt_regs *regs) |
320 | { | 320 | { |
321 | unsigned int max = 100; | 321 | unsigned int cascade_irq = i8259_irq(regs); |
322 | 322 | if (cascade_irq != NO_IRQ) | |
323 | while(max--) { | 323 | generic_handle_irq(cascade_irq, regs); |
324 | int irq = i8259_irq(regs); | 324 | desc->chip->eoi(irq); |
325 | if (max == 99) | ||
326 | desc->chip->eoi(irq); | ||
327 | if (irq < 0) | ||
328 | break; | ||
329 | generic_handle_irq(irq, regs); | ||
330 | }; | ||
331 | } | 325 | } |
332 | 326 | ||
333 | /* | 327 | /* |
@@ -336,18 +330,17 @@ void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc, | |||
336 | static void __init chrp_find_openpic(void) | 330 | static void __init chrp_find_openpic(void) |
337 | { | 331 | { |
338 | struct device_node *np, *root; | 332 | struct device_node *np, *root; |
339 | int len, i, j, irq_count; | 333 | int len, i, j; |
340 | int isu_size, idu_size; | 334 | int isu_size, idu_size; |
341 | unsigned int *iranges, *opprop = NULL; | 335 | unsigned int *iranges, *opprop = NULL; |
342 | int oplen = 0; | 336 | int oplen = 0; |
343 | unsigned long opaddr; | 337 | unsigned long opaddr; |
344 | int na = 1; | 338 | int na = 1; |
345 | unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS]; | ||
346 | 339 | ||
347 | np = find_type_devices("open-pic"); | 340 | np = of_find_node_by_type(NULL, "open-pic"); |
348 | if (np == NULL) | 341 | if (np == NULL) |
349 | return; | 342 | return; |
350 | root = find_path_device("/"); | 343 | root = of_find_node_by_path("/"); |
351 | if (root) { | 344 | if (root) { |
352 | opprop = (unsigned int *) get_property | 345 | opprop = (unsigned int *) get_property |
353 | (root, "platform-open-pic", &oplen); | 346 | (root, "platform-open-pic", &oplen); |
@@ -358,19 +351,15 @@ static void __init chrp_find_openpic(void) | |||
358 | oplen /= na * sizeof(unsigned int); | 351 | oplen /= na * sizeof(unsigned int); |
359 | } else { | 352 | } else { |
360 | struct resource r; | 353 | struct resource r; |
361 | if (of_address_to_resource(np, 0, &r)) | 354 | if (of_address_to_resource(np, 0, &r)) { |
362 | return; | 355 | goto bail; |
356 | } | ||
363 | opaddr = r.start; | 357 | opaddr = r.start; |
364 | oplen = 0; | 358 | oplen = 0; |
365 | } | 359 | } |
366 | 360 | ||
367 | printk(KERN_INFO "OpenPIC at %lx\n", opaddr); | 361 | printk(KERN_INFO "OpenPIC at %lx\n", opaddr); |
368 | 362 | ||
369 | irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ | ||
370 | prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4); | ||
371 | /* i8259 cascade is always positive level */ | ||
372 | init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE; | ||
373 | |||
374 | iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); | 363 | iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); |
375 | if (iranges == NULL) | 364 | if (iranges == NULL) |
376 | len = 0; /* non-distributed mpic */ | 365 | len = 0; /* non-distributed mpic */ |
@@ -397,15 +386,12 @@ static void __init chrp_find_openpic(void) | |||
397 | if (len > 1) | 386 | if (len > 1) |
398 | isu_size = iranges[3]; | 387 | isu_size = iranges[3]; |
399 | 388 | ||
400 | chrp_mpic = mpic_alloc(opaddr, MPIC_PRIMARY, | 389 | chrp_mpic = mpic_alloc(np, opaddr, MPIC_PRIMARY, |
401 | isu_size, NUM_ISA_INTERRUPTS, irq_count, | 390 | isu_size, 0, " MPIC "); |
402 | NR_IRQS - 4, init_senses, irq_count, | ||
403 | " MPIC "); | ||
404 | if (chrp_mpic == NULL) { | 391 | if (chrp_mpic == NULL) { |
405 | printk(KERN_ERR "Failed to allocate MPIC structure\n"); | 392 | printk(KERN_ERR "Failed to allocate MPIC structure\n"); |
406 | return; | 393 | goto bail; |
407 | } | 394 | } |
408 | |||
409 | j = na - 1; | 395 | j = na - 1; |
410 | for (i = 1; i < len; ++i) { | 396 | for (i = 1; i < len; ++i) { |
411 | iranges += 2; | 397 | iranges += 2; |
@@ -417,7 +403,10 @@ static void __init chrp_find_openpic(void) | |||
417 | } | 403 | } |
418 | 404 | ||
419 | mpic_init(chrp_mpic); | 405 | mpic_init(chrp_mpic); |
420 | set_irq_chained_handler(NUM_ISA_INTERRUPTS, chrp_8259_cascade); | 406 | ppc_md.get_irq = mpic_get_irq; |
407 | bail: | ||
408 | of_node_put(root); | ||
409 | of_node_put(np); | ||
421 | } | 410 | } |
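Editorial note: chrp_find_openpic now hands the MPIC's device node straight to mpic_alloc() and drops the locally built sense table, and the new bail: label ensures both node references taken at the top are released on every exit path. A condensed sketch of that probe sequence; the mpic_alloc() argument order (node, physical address, flags, ISU size, source count, name) is inferred from the calls in this patch and the helper name is hypothetical:

#include <linux/ioport.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/prom.h>

static void __init example_find_mpic(void)
{
        struct device_node *np;
        struct resource r;
        struct mpic *mpic;

        np = of_find_node_by_type(NULL, "open-pic");
        if (np == NULL)
                return;
        if (of_address_to_resource(np, 0, &r))
                goto out;

        mpic = mpic_alloc(np, r.start, MPIC_PRIMARY, 0, 0, " MPIC ");
        if (mpic == NULL)
                goto out;
        mpic_init(mpic);
        ppc_md.get_irq = mpic_get_irq;  /* installed at runtime now */
out:
        of_node_put(np);                /* balances of_find_node_by_type() */
}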
422 | 411 | ||
423 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | 412 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) |
@@ -428,14 +417,34 @@ static struct irqaction xmon_irqaction = { | |||
428 | }; | 417 | }; |
429 | #endif | 418 | #endif |
430 | 419 | ||
431 | void __init chrp_init_IRQ(void) | 420 | static void __init chrp_find_8259(void) |
432 | { | 421 | { |
433 | struct device_node *np; | 422 | struct device_node *np, *pic = NULL; |
434 | unsigned long chrp_int_ack = 0; | 423 | unsigned long chrp_int_ack = 0; |
435 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | 424 | unsigned int cascade_irq; |
436 | struct device_node *kbd; | ||
437 | #endif | ||
438 | 425 | ||
426 | /* Look for cascade */ | ||
427 | for_each_node_by_type(np, "interrupt-controller") | ||
428 | if (device_is_compatible(np, "chrp,iic")) { | ||
429 | pic = np; | ||
430 | break; | ||
431 | } | ||
432 | /* Ok, 8259 wasn't found. We need to handle the case where | ||
433 | * we have a pegasos that claims to be chrp but doesn't have | ||
434 | * a proper interrupt tree | ||
435 | */ | ||
436 | if (pic == NULL && chrp_mpic != NULL) { | ||
437 | printk(KERN_ERR "i8259: Not found in device-tree" | ||
438 | " assuming no legacy interrupts\n"); | ||
439 | return; | ||
440 | } | ||
441 | |||
442 | /* Look for intack. In a perfect world, we would look for it on | ||
443 | * the ISA bus that holds the 8259 but heh... Works that way. If | ||
444 | * we ever see a problem, we can try to re-use the pSeries code here. | ||
445 | * Also, Pegasos-type platforms don't have a proper node to start | ||
446 | * from anyway | ||
447 | */ | ||
439 | for (np = find_devices("pci"); np != NULL; np = np->next) { | 448 | for (np = find_devices("pci"); np != NULL; np = np->next) { |
440 | unsigned int *addrp = (unsigned int *) | 449 | unsigned int *addrp = (unsigned int *) |
441 | get_property(np, "8259-interrupt-acknowledge", NULL); | 450 | get_property(np, "8259-interrupt-acknowledge", NULL); |
@@ -446,11 +455,29 @@ void __init chrp_init_IRQ(void) | |||
446 | break; | 455 | break; |
447 | } | 456 | } |
448 | if (np == NULL) | 457 | if (np == NULL) |
449 | printk(KERN_ERR "Cannot find PCI interrupt acknowledge address\n"); | 458 | printk(KERN_WARNING "Cannot find PCI interrupt acknowledge" |
459 | " address, polling\n"); | ||
460 | |||
461 | i8259_init(pic, chrp_int_ack); | ||
462 | if (ppc_md.get_irq == NULL) | ||
463 | ppc_md.get_irq = i8259_irq; | ||
464 | if (chrp_mpic != NULL) { | ||
465 | cascade_irq = irq_of_parse_and_map(pic, 0); | ||
466 | if (cascade_irq == NO_IRQ) | ||
467 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); | ||
468 | else | ||
469 | set_irq_chained_handler(cascade_irq, | ||
470 | chrp_8259_cascade); | ||
471 | } | ||
472 | } | ||
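Editorial note: chrp_find_8259 condenses several decisions: the 8259 is initialised from its device node, becomes the primary controller only when no MPIC claimed ppc_md.get_irq, and otherwise is cascaded off the MPIC input named by its own interrupt specifier. Restated as a standalone sketch (the helper name, the have_mpic flag and the argument names are illustrative; the calls themselves all appear in the hunk above):

static void __init example_wire_8259(struct device_node *pic_node,
                                     unsigned long intack, int have_mpic)
{
        unsigned int cascade;

        i8259_init(pic_node, intack);

        if (!have_mpic) {
                /* No MPIC: the 8259 answers external interrupts itself */
                ppc_md.get_irq = i8259_irq;
                return;
        }

        /* MPIC present: the 8259 hangs off one of its inputs */
        cascade = irq_of_parse_and_map(pic_node, 0);
        if (cascade == NO_IRQ)
                printk(KERN_ERR "i8259: failed to map cascade irq\n");
        else
                set_irq_chained_handler(cascade, chrp_8259_cascade);
}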
450 | 473 | ||
474 | void __init chrp_init_IRQ(void) | ||
475 | { | ||
476 | #if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) | ||
477 | struct device_node *kbd; | ||
478 | #endif | ||
451 | chrp_find_openpic(); | 479 | chrp_find_openpic(); |
452 | 480 | chrp_find_8259(); | |
453 | i8259_init(chrp_int_ack, 0); | ||
454 | 481 | ||
455 | if (_chrp_type == _CHRP_Pegasos) | 482 | if (_chrp_type == _CHRP_Pegasos) |
456 | ppc_md.get_irq = i8259_irq; | 483 | ppc_md.get_irq = i8259_irq; |
@@ -535,10 +562,6 @@ static int __init chrp_probe(void) | |||
535 | DMA_MODE_READ = 0x44; | 562 | DMA_MODE_READ = 0x44; |
536 | DMA_MODE_WRITE = 0x48; | 563 | DMA_MODE_WRITE = 0x48; |
537 | isa_io_base = CHRP_ISA_IO_BASE; /* default value */ | 564 | isa_io_base = CHRP_ISA_IO_BASE; /* default value */ |
538 | ppc_do_canonicalize_irqs = 1; | ||
539 | |||
540 | /* Assume we have an 8259... */ | ||
541 | __irq_offset_value = NUM_ISA_INTERRUPTS; | ||
542 | 565 | ||
543 | return 1; | 566 | return 1; |
544 | } | 567 | } |
@@ -550,7 +573,6 @@ define_machine(chrp) { | |||
550 | .init = chrp_init2, | 573 | .init = chrp_init2, |
551 | .show_cpuinfo = chrp_show_cpuinfo, | 574 | .show_cpuinfo = chrp_show_cpuinfo, |
552 | .init_IRQ = chrp_init_IRQ, | 575 | .init_IRQ = chrp_init_IRQ, |
553 | .get_irq = mpic_get_irq, | ||
554 | .pcibios_fixup = chrp_pcibios_fixup, | 576 | .pcibios_fixup = chrp_pcibios_fixup, |
555 | .restart = rtas_restart, | 577 | .restart = rtas_restart, |
556 | .power_off = rtas_power_off, | 578 | .power_off = rtas_power_off, |
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index c298ca1ea680..1d2307e87c30 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <asm/smp.h> | 29 | #include <asm/smp.h> |
30 | #include <asm/residual.h> | 30 | #include <asm/residual.h> |
31 | #include <asm/time.h> | 31 | #include <asm/time.h> |
32 | #include <asm/open_pic.h> | ||
33 | #include <asm/machdep.h> | 32 | #include <asm/machdep.h> |
34 | #include <asm/smp.h> | 33 | #include <asm/smp.h> |
35 | #include <asm/mpic.h> | 34 | #include <asm/mpic.h> |
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 7fb6a08786c0..2275e64f3152 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -162,27 +162,6 @@ static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs) | |||
162 | printk(KERN_ERR "pci_event_handler: NULL event received\n"); | 162 | printk(KERN_ERR "pci_event_handler: NULL event received\n"); |
163 | } | 163 | } |
164 | 164 | ||
165 | /* | ||
166 | * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c | ||
167 | * It must be called before the bus walk. | ||
168 | */ | ||
169 | void __init iSeries_init_IRQ(void) | ||
170 | { | ||
171 | /* Register PCI event handler and open an event path */ | ||
172 | int ret; | ||
173 | |||
174 | ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | ||
175 | &pci_event_handler); | ||
176 | if (ret == 0) { | ||
177 | ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | ||
178 | if (ret != 0) | ||
179 | printk(KERN_ERR "iseries_init_IRQ: open event path " | ||
180 | "failed with rc 0x%x\n", ret); | ||
181 | } else | ||
182 | printk(KERN_ERR "iseries_init_IRQ: register handler " | ||
183 | "failed with rc 0x%x\n", ret); | ||
184 | } | ||
185 | |||
186 | #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) | 165 | #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) |
187 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) | 166 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) |
188 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) | 167 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) |
@@ -196,7 +175,7 @@ static void iseries_enable_IRQ(unsigned int irq) | |||
196 | { | 175 | { |
197 | u32 bus, dev_id, function, mask; | 176 | u32 bus, dev_id, function, mask; |
198 | const u32 sub_bus = 0; | 177 | const u32 sub_bus = 0; |
199 | unsigned int rirq = virt_irq_to_real_map[irq]; | 178 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
200 | 179 | ||
201 | /* The IRQ has already been locked by the caller */ | 180 | /* The IRQ has already been locked by the caller */ |
202 | bus = REAL_IRQ_TO_BUS(rirq); | 181 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -213,7 +192,7 @@ static unsigned int iseries_startup_IRQ(unsigned int irq) | |||
213 | { | 192 | { |
214 | u32 bus, dev_id, function, mask; | 193 | u32 bus, dev_id, function, mask; |
215 | const u32 sub_bus = 0; | 194 | const u32 sub_bus = 0; |
216 | unsigned int rirq = virt_irq_to_real_map[irq]; | 195 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
217 | 196 | ||
218 | bus = REAL_IRQ_TO_BUS(rirq); | 197 | bus = REAL_IRQ_TO_BUS(rirq); |
219 | function = REAL_IRQ_TO_FUNC(rirq); | 198 | function = REAL_IRQ_TO_FUNC(rirq); |
@@ -254,7 +233,7 @@ static void iseries_shutdown_IRQ(unsigned int irq) | |||
254 | { | 233 | { |
255 | u32 bus, dev_id, function, mask; | 234 | u32 bus, dev_id, function, mask; |
256 | const u32 sub_bus = 0; | 235 | const u32 sub_bus = 0; |
257 | unsigned int rirq = virt_irq_to_real_map[irq]; | 236 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
258 | 237 | ||
259 | /* irq should be locked by the caller */ | 238 | /* irq should be locked by the caller */ |
260 | bus = REAL_IRQ_TO_BUS(rirq); | 239 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -277,7 +256,7 @@ static void iseries_disable_IRQ(unsigned int irq) | |||
277 | { | 256 | { |
278 | u32 bus, dev_id, function, mask; | 257 | u32 bus, dev_id, function, mask; |
279 | const u32 sub_bus = 0; | 258 | const u32 sub_bus = 0; |
280 | unsigned int rirq = virt_irq_to_real_map[irq]; | 259 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
281 | 260 | ||
282 | /* The IRQ has already been locked by the caller */ | 261 | /* The IRQ has already been locked by the caller */ |
283 | bus = REAL_IRQ_TO_BUS(rirq); | 262 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -291,7 +270,7 @@ static void iseries_disable_IRQ(unsigned int irq) | |||
291 | 270 | ||
292 | static void iseries_end_IRQ(unsigned int irq) | 271 | static void iseries_end_IRQ(unsigned int irq) |
293 | { | 272 | { |
294 | unsigned int rirq = virt_irq_to_real_map[irq]; | 273 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; |
295 | 274 | ||
296 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), | 275 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), |
297 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); | 276 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); |
@@ -314,16 +293,14 @@ static struct irq_chip iseries_pic = { | |||
314 | int __init iSeries_allocate_IRQ(HvBusNumber bus, | 293 | int __init iSeries_allocate_IRQ(HvBusNumber bus, |
315 | HvSubBusNumber sub_bus, u32 bsubbus) | 294 | HvSubBusNumber sub_bus, u32 bsubbus) |
316 | { | 295 | { |
317 | int virtirq; | ||
318 | unsigned int realirq; | 296 | unsigned int realirq; |
319 | u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus); | 297 | u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus); |
320 | u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus); | 298 | u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus); |
321 | 299 | ||
322 | realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) | 300 | realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) |
323 | + function; | 301 | + function; |
324 | virtirq = virt_irq_create_mapping(realirq); | 302 | |
325 | set_irq_chip_and_handler(virtirq, &iseries_pic, handle_fasteoi_irq); | 303 | return irq_create_mapping(NULL, realirq, IRQ_TYPE_NONE); |
326 | return virtirq; | ||
327 | } | 304 | } |
328 | 305 | ||
329 | #endif /* CONFIG_PCI */ | 306 | #endif /* CONFIG_PCI */ |
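Editorial note: iSeries_allocate_IRQ is reduced to packing the firmware's bus/sub-bus/IDSEL/function coordinates into a hardware interrupt number and asking the default host for a virtual one; attaching the chip and flow handler moves into the host's .map() hook further down. A sketch of just that step, assuming the three-argument irq_create_mapping() used throughout this patch (the helper name is hypothetical):

static unsigned int example_alloc_virq(u32 bus, u32 sub_bus,
                                       u32 idsel, u32 function)
{
        /* Same packing as the REAL_IRQ_TO_* macros above */
        unsigned int hwirq =
                (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
                + function;

        /* NULL selects the default host; its .map() callback installs
         * the PIC chip and handle_fasteoi_irq, so nothing else is
         * needed here.
         */
        return irq_create_mapping(NULL, hwirq, IRQ_TYPE_NONE);
}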
@@ -331,10 +308,9 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus, | |||
331 | /* | 308 | /* |
332 | * Get the next pending IRQ. | 309 | * Get the next pending IRQ. |
333 | */ | 310 | */ |
334 | int iSeries_get_irq(struct pt_regs *regs) | 311 | unsigned int iSeries_get_irq(struct pt_regs *regs) |
335 | { | 312 | { |
336 | /* -2 means ignore this interrupt */ | 313 | int irq = NO_IRQ_IGNORE; |
337 | int irq = -2; | ||
338 | 314 | ||
339 | #ifdef CONFIG_SMP | 315 | #ifdef CONFIG_SMP |
340 | if (get_lppaca()->int_dword.fields.ipi_cnt) { | 316 | if (get_lppaca()->int_dword.fields.ipi_cnt) { |
@@ -357,9 +333,57 @@ int iSeries_get_irq(struct pt_regs *regs) | |||
357 | } | 333 | } |
358 | spin_unlock(&pending_irqs_lock); | 334 | spin_unlock(&pending_irqs_lock); |
359 | if (irq >= NR_IRQS) | 335 | if (irq >= NR_IRQS) |
360 | irq = -2; | 336 | irq = NO_IRQ_IGNORE; |
361 | } | 337 | } |
362 | #endif | 338 | #endif |
363 | 339 | ||
364 | return irq; | 340 | return irq; |
365 | } | 341 | } |
342 | |||
343 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, | ||
344 | irq_hw_number_t hw, unsigned int flags) | ||
345 | { | ||
346 | set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); | ||
347 | |||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static struct irq_host_ops iseries_irq_host_ops = { | ||
352 | .map = iseries_irq_host_map, | ||
353 | }; | ||
354 | |||
355 | /* | ||
356 | * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c | ||
357 | * It must be called before the bus walk. | ||
358 | */ | ||
359 | void __init iSeries_init_IRQ(void) | ||
360 | { | ||
361 | /* Register PCI event handler and open an event path */ | ||
362 | struct irq_host *host; | ||
363 | int ret; | ||
364 | |||
365 | /* | ||
366 | * The Hypervisor only allows us up to 256 interrupt | ||
367 | * sources (the irq number is passed in a u8). | ||
368 | */ | ||
369 | irq_set_virq_count(256); | ||
370 | |||
371 | /* Create irq host. No need for a revmap since HV will give us | ||
372 | * back our virtual irq number | ||
373 | */ | ||
374 | host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &iseries_irq_host_ops, 0); | ||
375 | BUG_ON(host == NULL); | ||
376 | irq_set_default_host(host); | ||
377 | |||
378 | ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | ||
379 | &pci_event_handler); | ||
380 | if (ret == 0) { | ||
381 | ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | ||
382 | if (ret != 0) | ||
383 | printk(KERN_ERR "iseries_init_IRQ: open event path " | ||
384 | "failed with rc 0x%x\n", ret); | ||
385 | } else | ||
386 | printk(KERN_ERR "iseries_init_IRQ: register handler " | ||
387 | "failed with rc 0x%x\n", ret); | ||
388 | } | ||
389 | |||
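Editorial note: the relocated iSeries_init_IRQ is a compact example of the "no-map" host style: cap the virtual irq space to what the hypervisor can express, allocate an IRQ_HOST_MAP_NOMAP host whose .map() hook attaches chip and flow handler, and make it the default so irq_create_mapping(NULL, ...) callers land on it. Stripped to just the registration, with illustrative names (example_pic, example_map, example_host_ops) and the chip's hooks omitted:

static struct irq_chip example_pic;     /* .startup/.enable/.eoi set elsewhere */

static int example_map(struct irq_host *h, unsigned int virq,
                       irq_hw_number_t hw, unsigned int flags)
{
        set_irq_chip_and_handler(virq, &example_pic, handle_fasteoi_irq);
        return 0;
}

static struct irq_host_ops example_host_ops = {
        .map = example_map,
};

static void __init example_register_nomap_host(void)
{
        struct irq_host *host;

        irq_set_virq_count(256);        /* firmware passes irq numbers in a u8 */

        /* No reverse map needed: the hypervisor hands our virq back to us */
        host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &example_host_ops, 0);
        BUG_ON(host == NULL);
        irq_set_default_host(host);
}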
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h index 188aa808abd7..1ee8985140e5 100644 --- a/arch/powerpc/platforms/iseries/irq.h +++ b/arch/powerpc/platforms/iseries/irq.h | |||
@@ -4,6 +4,6 @@ | |||
4 | extern void iSeries_init_IRQ(void); | 4 | extern void iSeries_init_IRQ(void); |
5 | extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32); | 5 | extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32); |
6 | extern void iSeries_activate_IRQs(void); | 6 | extern void iSeries_activate_IRQs(void); |
7 | extern int iSeries_get_irq(struct pt_regs *); | 7 | extern unsigned int iSeries_get_irq(struct pt_regs *); |
8 | 8 | ||
9 | #endif /* _ISERIES_IRQ_H */ | 9 | #endif /* _ISERIES_IRQ_H */ |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index c877074745b2..c9605d773a77 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -294,8 +294,6 @@ static void __init iSeries_init_early(void) | |||
294 | { | 294 | { |
295 | DBG(" -> iSeries_init_early()\n"); | 295 | DBG(" -> iSeries_init_early()\n"); |
296 | 296 | ||
297 | ppc64_interrupt_controller = IC_ISERIES; | ||
298 | |||
299 | #if defined(CONFIG_BLK_DEV_INITRD) | 297 | #if defined(CONFIG_BLK_DEV_INITRD) |
300 | /* | 298 | /* |
301 | * If the init RAM disk has been configured and there is | 299 | * If the init RAM disk has been configured and there is |
@@ -659,12 +657,6 @@ static int __init iseries_probe(void) | |||
659 | powerpc_firmware_features |= FW_FEATURE_ISERIES; | 657 | powerpc_firmware_features |= FW_FEATURE_ISERIES; |
660 | powerpc_firmware_features |= FW_FEATURE_LPAR; | 658 | powerpc_firmware_features |= FW_FEATURE_LPAR; |
661 | 659 | ||
662 | /* | ||
663 | * The Hypervisor only allows us up to 256 interrupt | ||
664 | * sources (the irq number is passed in a u8). | ||
665 | */ | ||
666 | virt_irq_max = 255; | ||
667 | |||
668 | hpte_init_iSeries(); | 660 | hpte_init_iSeries(); |
669 | 661 | ||
670 | return 1; | 662 | return 1; |
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index f7170ff86dab..63a1670d3bfd 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c | |||
@@ -443,18 +443,23 @@ void __init maple_pci_init(void) | |||
443 | int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) | 443 | int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) |
444 | { | 444 | { |
445 | struct device_node *np; | 445 | struct device_node *np; |
446 | int irq = channel ? 15 : 14; | 446 | unsigned int defirq = channel ? 15 : 14; |
447 | unsigned int irq; | ||
447 | 448 | ||
448 | if (pdev->vendor != PCI_VENDOR_ID_AMD || | 449 | if (pdev->vendor != PCI_VENDOR_ID_AMD || |
449 | pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) | 450 | pdev->device != PCI_DEVICE_ID_AMD_8111_IDE) |
450 | return irq; | 451 | return defirq; |
451 | 452 | ||
452 | np = pci_device_to_OF_node(pdev); | 453 | np = pci_device_to_OF_node(pdev); |
453 | if (np == NULL) | 454 | if (np == NULL) |
454 | return irq; | 455 | return defirq; |
455 | if (np->n_intrs < 2) | 456 | irq = irq_of_parse_and_map(np, channel & 0x1); |
456 | return irq; | 457 | if (irq == NO_IRQ) { |
457 | return np->intrs[channel & 0x1].line; | 458 | printk("Failed to map onboard IDE interrupt for channel %d\n", |
459 | channel); | ||
460 | return defirq; | ||
461 | } | ||
462 | return irq; | ||
458 | } | 463 | } |
459 | 464 | ||
460 | /* XXX: To remove once all firmwares are ok */ | 465 | /* XXX: To remove once all firmwares are ok */ |
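Editorial note: maple_pci_get_legacy_ide_irq keeps the hard-wired 14/15 values only as a fallback and prefers whatever irq_of_parse_and_map() resolves from the IDE node. The same map-or-fall-back shape as a small sketch (example_legacy_irq is not a kernel function; the printed message is illustrative):

static unsigned int example_legacy_irq(struct device_node *np,
                                       unsigned int index,
                                       unsigned int defirq)
{
        unsigned int virq;

        if (np == NULL)
                return defirq;
        virq = irq_of_parse_and_map(np, index);
        if (virq == NO_IRQ) {
                printk(KERN_WARNING "%s: no interrupt mapping, using %u\n",
                       np->full_name, defirq);
                return defirq;
        }
        return virq;
}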
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 611ca8e979e5..cb528c9de4c3 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c | |||
@@ -198,50 +198,81 @@ static void __init maple_init_early(void) | |||
198 | { | 198 | { |
199 | DBG(" -> maple_init_early\n"); | 199 | DBG(" -> maple_init_early\n"); |
200 | 200 | ||
201 | /* Setup interrupt mapping options */ | ||
202 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
203 | |||
204 | iommu_init_early_dart(); | 201 | iommu_init_early_dart(); |
205 | 202 | ||
206 | DBG(" <- maple_init_early\n"); | 203 | DBG(" <- maple_init_early\n"); |
207 | } | 204 | } |
208 | 205 | ||
209 | 206 | /* | |
210 | static __init void maple_init_IRQ(void) | 207 | * This is almost identical to pSeries and CHRP. We need to make that |
208 | * code generic at one point, with appropriate bits in the device-tree to | ||
209 | * identify the presence of an HT APIC | ||
210 | */ | ||
211 | static void __init maple_init_IRQ(void) | ||
211 | { | 212 | { |
212 | struct device_node *root; | 213 | struct device_node *root, *np, *mpic_node = NULL; |
213 | unsigned int *opprop; | 214 | unsigned int *opprop; |
214 | unsigned long opic_addr; | 215 | unsigned long openpic_addr = 0; |
216 | int naddr, n, i, opplen, has_isus = 0; | ||
215 | struct mpic *mpic; | 217 | struct mpic *mpic; |
216 | unsigned char senses[128]; | 218 | unsigned int flags = MPIC_PRIMARY; |
217 | int n; | ||
218 | 219 | ||
219 | DBG(" -> maple_init_IRQ\n"); | 220 | /* Locate MPIC in the device-tree. Note that there is a bug |
221 | * in Maple device-tree where the type of the controller is | ||
222 | * open-pic and not interrupt-controller | ||
223 | */ | ||
224 | for_each_node_by_type(np, "open-pic") { | ||
225 | mpic_node = np; | ||
226 | break; | ||
227 | } | ||
228 | if (mpic_node == NULL) { | ||
229 | printk(KERN_ERR | ||
230 | "Failed to locate the MPIC interrupt controller\n"); | ||
231 | return; | ||
232 | } | ||
220 | 233 | ||
221 | /* XXX: Non standard, replace that with a proper openpic/mpic node | 234 | /* Find address list in /platform-open-pic */ |
222 | * in the device-tree. Find the Open PIC if present */ | ||
223 | root = of_find_node_by_path("/"); | 235 | root = of_find_node_by_path("/"); |
224 | opprop = (unsigned int *) get_property(root, | 236 | naddr = prom_n_addr_cells(root); |
225 | "platform-open-pic", NULL); | 237 | opprop = (unsigned int *) get_property(root, "platform-open-pic", |
226 | if (opprop == 0) | 238 | &opplen); |
227 | panic("OpenPIC not found !\n"); | 239 | if (opprop != 0) { |
228 | 240 | openpic_addr = of_read_number(opprop, naddr); | |
229 | n = prom_n_addr_cells(root); | 241 | has_isus = (opplen > naddr); |
230 | for (opic_addr = 0; n > 0; --n) | 242 | printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n", |
231 | opic_addr = (opic_addr << 32) + *opprop++; | 243 | openpic_addr, has_isus); |
244 | } | ||
232 | of_node_put(root); | 245 | of_node_put(root); |
233 | 246 | ||
234 | /* Obtain sense values from device-tree */ | 247 | BUG_ON(openpic_addr == 0); |
235 | prom_get_irq_senses(senses, 0, 128); | ||
236 | 248 | ||
237 | mpic = mpic_alloc(opic_addr, | 249 | /* Check for a big endian MPIC */ |
238 | MPIC_PRIMARY | MPIC_BIG_ENDIAN | | 250 | if (get_property(np, "big-endian", NULL) != NULL) |
239 | MPIC_BROKEN_U3 | MPIC_WANTS_RESET, | 251 | flags |= MPIC_BIG_ENDIAN; |
240 | 0, 0, 128, 128, senses, 128, "U3-MPIC"); | 252 | |
253 | /* XXX Maple specific bits */ | ||
254 | flags |= MPIC_BROKEN_U3 | MPIC_WANTS_RESET; | ||
255 | |||
256 | /* Setup the openpic driver. More device-tree junks, we hard code no | ||
257 | * ISUs for now. I'll have to revisit some stuffs with the folks doing | ||
258 | * the firmware for those | ||
259 | */ | ||
260 | mpic = mpic_alloc(mpic_node, openpic_addr, flags, | ||
261 | /*has_isus ? 16 :*/ 0, 0, " MPIC "); | ||
241 | BUG_ON(mpic == NULL); | 262 | BUG_ON(mpic == NULL); |
242 | mpic_init(mpic); | ||
243 | 263 | ||
244 | DBG(" <- maple_init_IRQ\n"); | 264 | /* Add ISUs */ |
265 | opplen /= sizeof(u32); | ||
266 | for (n = 0, i = naddr; i < opplen; i += naddr, n++) { | ||
267 | unsigned long isuaddr = of_read_number(opprop + i, naddr); | ||
268 | mpic_assign_isu(mpic, n, isuaddr); | ||
269 | } | ||
270 | |||
271 | /* All ISUs are setup, complete initialization */ | ||
272 | mpic_init(mpic); | ||
273 | ppc_md.get_irq = mpic_get_irq; | ||
274 | of_node_put(mpic_node); | ||
275 | of_node_put(root); | ||
245 | } | 276 | } |
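Editorial note: maple_init_IRQ now derives everything from the root node's platform-open-pic property: the first naddr cells give the main MPIC register block, and each further naddr-cell group is one interrupt source unit. The ISU walk, isolated into a sketch (the helper and parameter names are assumptions; prom_n_addr_cells(), of_read_number() and mpic_assign_isu() are the calls used in the hunk):

static void __init example_assign_isus(struct mpic *mpic, unsigned int *opprop,
                                       int opplen, int naddr)
{
        int i, n;

        /* opprop/opplen come from get_property(root, "platform-open-pic",
         * &opplen); naddr from prom_n_addr_cells(root).
         */
        opplen /= sizeof(u32);
        for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
                unsigned long isuaddr = of_read_number(opprop + i, naddr);

                mpic_assign_isu(mpic, n, isuaddr);      /* one ISU per entry */
        }
        mpic_init(mpic);        /* finish initialisation once all ISUs are known */
}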
246 | 277 | ||
247 | static void __init maple_progress(char *s, unsigned short hex) | 278 | static void __init maple_progress(char *s, unsigned short hex) |
@@ -279,7 +310,6 @@ define_machine(maple_md) { | |||
279 | .setup_arch = maple_setup_arch, | 310 | .setup_arch = maple_setup_arch, |
280 | .init_early = maple_init_early, | 311 | .init_early = maple_init_early, |
281 | .init_IRQ = maple_init_IRQ, | 312 | .init_IRQ = maple_init_IRQ, |
282 | .get_irq = mpic_get_irq, | ||
283 | .pcibios_fixup = maple_pcibios_fixup, | 313 | .pcibios_fixup = maple_pcibios_fixup, |
284 | .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, | 314 | .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, |
285 | .restart = maple_restart, | 315 | .restart = maple_restart, |
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index 5685ad9e88e8..e63d52f227ee 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c | |||
@@ -162,6 +162,8 @@ static void __init bootx_add_chosen_props(unsigned long base, | |||
162 | { | 162 | { |
163 | u32 val; | 163 | u32 val; |
164 | 164 | ||
165 | bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end); | ||
166 | |||
165 | if (bootx_info->kernelParamsOffset) { | 167 | if (bootx_info->kernelParamsOffset) { |
166 | char *args = (char *)((unsigned long)bootx_info) + | 168 | char *args = (char *)((unsigned long)bootx_info) + |
167 | bootx_info->kernelParamsOffset; | 169 | bootx_info->kernelParamsOffset; |
@@ -228,7 +230,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base, | |||
228 | 230 | ||
229 | if (!strcmp(namep, "/chosen")) { | 231 | if (!strcmp(namep, "/chosen")) { |
230 | DBG(" detected /chosen ! adding properties names !\n"); | 232 | DBG(" detected /chosen ! adding properties names !\n"); |
231 | bootx_dt_add_string("linux,platform", mem_end); | 233 | bootx_dt_add_string("linux,bootx", mem_end); |
232 | bootx_dt_add_string("linux,stdout-path", mem_end); | 234 | bootx_dt_add_string("linux,stdout-path", mem_end); |
233 | bootx_dt_add_string("linux,initrd-start", mem_end); | 235 | bootx_dt_add_string("linux,initrd-start", mem_end); |
234 | bootx_dt_add_string("linux,initrd-end", mem_end); | 236 | bootx_dt_add_string("linux,initrd-end", mem_end); |
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index ceafaf52a668..8677f50c2586 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c | |||
@@ -522,10 +522,11 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) | |||
522 | host->speed = KW_I2C_MODE_25KHZ; | 522 | host->speed = KW_I2C_MODE_25KHZ; |
523 | break; | 523 | break; |
524 | } | 524 | } |
525 | if (np->n_intrs > 0) | 525 | host->irq = irq_of_parse_and_map(np, 0); |
526 | host->irq = np->intrs[0].line; | 526 | if (host->irq == NO_IRQ) |
527 | else | 527 | printk(KERN_WARNING |
528 | host->irq = NO_IRQ; | 528 | "low_i2c: Failed to map interrupt for %s\n", |
529 | np->full_name); | ||
529 | 530 | ||
530 | host->base = ioremap((*addrp), 0x1000); | 531 | host->base = ioremap((*addrp), 0x1000); |
531 | if (host->base == NULL) { | 532 | if (host->base == NULL) { |
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index 41fa2409482a..6a36ea9bf673 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
30 | #include <asm/nvram.h> | 30 | #include <asm/nvram.h> |
31 | 31 | ||
32 | #include "pmac.h" | ||
33 | |||
32 | #define DEBUG | 34 | #define DEBUG |
33 | 35 | ||
34 | #ifdef DEBUG | 36 | #ifdef DEBUG |
@@ -80,9 +82,6 @@ static int nvram_partitions[3]; | |||
80 | // XXX Turn that into a sem | 82 | // XXX Turn that into a sem |
81 | static DEFINE_SPINLOCK(nv_lock); | 83 | static DEFINE_SPINLOCK(nv_lock); |
82 | 84 | ||
83 | extern int pmac_newworld; | ||
84 | extern int system_running; | ||
85 | |||
86 | static int (*core99_write_bank)(int bank, u8* datas); | 85 | static int (*core99_write_bank)(int bank, u8* datas); |
87 | static int (*core99_erase_bank)(int bank); | 86 | static int (*core99_erase_bank)(int bank); |
88 | 87 | ||
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index d524a915aa86..556b349797e8 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -46,6 +46,9 @@ static int has_uninorth; | |||
46 | static struct pci_controller *u3_agp; | 46 | static struct pci_controller *u3_agp; |
47 | static struct pci_controller *u4_pcie; | 47 | static struct pci_controller *u4_pcie; |
48 | static struct pci_controller *u3_ht; | 48 | static struct pci_controller *u3_ht; |
49 | #define has_second_ohare 0 | ||
50 | #else | ||
51 | static int has_second_ohare; | ||
49 | #endif /* CONFIG_PPC64 */ | 52 | #endif /* CONFIG_PPC64 */ |
50 | 53 | ||
51 | extern u8 pci_cache_line_size; | 54 | extern u8 pci_cache_line_size; |
@@ -647,6 +650,33 @@ static void __init init_p2pbridge(void) | |||
647 | early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val); | 650 | early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val); |
648 | } | 651 | } |
649 | 652 | ||
653 | static void __init init_second_ohare(void) | ||
654 | { | ||
655 | struct device_node *np = of_find_node_by_name(NULL, "pci106b,7"); | ||
656 | unsigned char bus, devfn; | ||
657 | unsigned short cmd; | ||
658 | |||
659 | if (np == NULL) | ||
660 | return; | ||
661 | |||
662 | /* This must run before we initialize the PICs since the second | ||
663 | * ohare hosts a PIC that will be accessed there. | ||
664 | */ | ||
665 | if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { | ||
666 | struct pci_controller* hose = | ||
667 | pci_find_hose_for_OF_device(np); | ||
668 | if (!hose) { | ||
669 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); | ||
670 | return; | ||
671 | } | ||
672 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
673 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
674 | cmd &= ~PCI_COMMAND_IO; | ||
675 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
676 | } | ||
677 | has_second_ohare = 1; | ||
678 | } | ||
679 | |||
650 | /* | 680 | /* |
651 | * Some Apple desktop machines have a NEC PD720100A USB2 controller | 681 | * Some Apple desktop machines have a NEC PD720100A USB2 controller |
652 | * on the motherboard. Open Firmware, on these, will disable the | 682 | * on the motherboard. Open Firmware, on these, will disable the |
@@ -688,9 +718,6 @@ static void __init fixup_nec_usb2(void) | |||
688 | " EHCI, fixing up...\n"); | 718 | " EHCI, fixing up...\n"); |
689 | data &= ~1UL; | 719 | data &= ~1UL; |
690 | early_write_config_dword(hose, bus, devfn, 0xe4, data); | 720 | early_write_config_dword(hose, bus, devfn, 0xe4, data); |
691 | early_write_config_byte(hose, bus, | ||
692 | devfn | 2, PCI_INTERRUPT_LINE, | ||
693 | nec->intrs[0].line); | ||
694 | } | 721 | } |
695 | } | 722 | } |
696 | } | 723 | } |
@@ -958,32 +985,28 @@ static int __init add_bridge(struct device_node *dev) | |||
958 | return 0; | 985 | return 0; |
959 | } | 986 | } |
960 | 987 | ||
961 | static void __init pcibios_fixup_OF_interrupts(void) | 988 | void __init pmac_pcibios_fixup(void) |
962 | { | 989 | { |
963 | struct pci_dev* dev = NULL; | 990 | struct pci_dev* dev = NULL; |
964 | 991 | ||
965 | /* | ||
966 | * Open Firmware often doesn't initialize the | ||
967 | * PCI_INTERRUPT_LINE config register properly, so we | ||
968 | * should find the device node and apply the interrupt | ||
969 | * obtained from the OF device-tree | ||
970 | */ | ||
971 | for_each_pci_dev(dev) { | 992 | for_each_pci_dev(dev) { |
972 | struct device_node *node; | 993 | /* Read interrupt from the device-tree */ |
973 | node = pci_device_to_OF_node(dev); | 994 | pci_read_irq_line(dev); |
974 | /* this is the node, see if it has interrupts */ | 995 | |
975 | if (node && node->n_intrs > 0) | 996 | /* Fixup interrupt for the modem/ethernet combo controller. |
976 | dev->irq = node->intrs[0].line; | 997 | * on machines with a second ohare chip. |
977 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); | 998 | * The number in the device tree (27) is bogus (correct for |
999 | * the ethernet-only board but not the combo ethernet/modem | ||
1000 | * board). The real interrupt is 28 on the second controller | ||
1001 | * -> 28+32 = 60. | ||
1002 | */ | ||
1003 | if (has_second_ohare && | ||
1004 | dev->vendor == PCI_VENDOR_ID_DEC && | ||
1005 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) | ||
1006 | dev->irq = irq_create_mapping(NULL, 60, 0); | ||
978 | } | 1007 | } |
979 | } | 1008 | } |
980 | 1009 | ||
981 | void __init pmac_pcibios_fixup(void) | ||
982 | { | ||
983 | /* Fixup interrupts according to OF tree */ | ||
984 | pcibios_fixup_OF_interrupts(); | ||
985 | } | ||
986 | |||
987 | #ifdef CONFIG_PPC64 | 1010 | #ifdef CONFIG_PPC64 |
988 | static void __init pmac_fixup_phb_resources(void) | 1011 | static void __init pmac_fixup_phb_resources(void) |
989 | { | 1012 | { |
@@ -1071,6 +1094,7 @@ void __init pmac_pci_init(void) | |||
1071 | 1094 | ||
1072 | #else /* CONFIG_PPC64 */ | 1095 | #else /* CONFIG_PPC64 */ |
1073 | init_p2pbridge(); | 1096 | init_p2pbridge(); |
1097 | init_second_ohare(); | ||
1074 | fixup_nec_usb2(); | 1098 | fixup_nec_usb2(); |
1075 | 1099 | ||
1076 | /* We are still having some issues with the Xserve G4, enabling | 1100 | /* We are still having some issues with the Xserve G4, enabling |
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index d6eab8b3f7de..6d66359ec8c8 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c | |||
@@ -24,19 +24,18 @@ static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs) | |||
24 | 24 | ||
25 | static int macio_do_gpio_irq_enable(struct pmf_function *func) | 25 | static int macio_do_gpio_irq_enable(struct pmf_function *func) |
26 | { | 26 | { |
27 | if (func->node->n_intrs < 1) | 27 | unsigned int irq = irq_of_parse_and_map(func->node, 0); |
28 | if (irq == NO_IRQ) | ||
28 | return -EINVAL; | 29 | return -EINVAL; |
29 | 30 | return request_irq(irq, macio_gpio_irq, 0, func->node->name, func); | |
30 | return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0, | ||
31 | func->node->name, func); | ||
32 | } | 31 | } |
33 | 32 | ||
34 | static int macio_do_gpio_irq_disable(struct pmf_function *func) | 33 | static int macio_do_gpio_irq_disable(struct pmf_function *func) |
35 | { | 34 | { |
36 | if (func->node->n_intrs < 1) | 35 | unsigned int irq = irq_of_parse_and_map(func->node, 0); |
36 | if (irq == NO_IRQ) | ||
37 | return -EINVAL; | 37 | return -EINVAL; |
38 | 38 | free_irq(irq, func); | |
39 | free_irq(func->node->intrs[0].line, func); | ||
40 | return 0; | 39 | return 0; |
41 | } | 40 | } |
42 | 41 | ||
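Editorial note: both pfunc GPIO hooks now resolve the interrupt from the function's node at call time; since the mapping layer hands back the existing virtual number for a source that is already mapped, the free_irq() in the disable path pairs up with the request_irq() done in the enable path. A driver-style sketch of that pairing (all example_* names are placeholders):

#include <linux/interrupt.h>

static irqreturn_t example_gpio_irq(int irq, void *data, struct pt_regs *regs)
{
        /* acknowledge / handle the GPIO event here */
        return IRQ_HANDLED;
}

static int example_gpio_enable(struct device_node *np, void *cookie)
{
        unsigned int virq = irq_of_parse_and_map(np, 0);

        if (virq == NO_IRQ)
                return -EINVAL;
        return request_irq(virq, example_gpio_irq, 0, np->name, cookie);
}

static void example_gpio_disable(struct device_node *np, void *cookie)
{
        /* Re-resolving the same node/index yields the same virq */
        unsigned int virq = irq_of_parse_and_map(np, 0);

        if (virq != NO_IRQ)
                free_irq(virq, cookie);
}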
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 58a4c7b90b8b..3d328bc1f7e0 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -65,13 +65,11 @@ static u32 level_mask[4]; | |||
65 | 65 | ||
66 | static DEFINE_SPINLOCK(pmac_pic_lock); | 66 | static DEFINE_SPINLOCK(pmac_pic_lock); |
67 | 67 | ||
68 | #define GATWICK_IRQ_POOL_SIZE 10 | ||
69 | static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE]; | ||
70 | |||
71 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) | 68 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) |
72 | static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; | 69 | static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; |
73 | static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; | 70 | static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; |
74 | static int pmac_irq_cascade = -1; | 71 | static int pmac_irq_cascade = -1; |
72 | static struct irq_host *pmac_pic_host; | ||
75 | 73 | ||
76 | static void __pmac_retrigger(unsigned int irq_nr) | 74 | static void __pmac_retrigger(unsigned int irq_nr) |
77 | { | 75 | { |
@@ -86,18 +84,16 @@ static void __pmac_retrigger(unsigned int irq_nr) | |||
86 | } | 84 | } |
87 | } | 85 | } |
88 | 86 | ||
89 | static void pmac_mask_and_ack_irq(unsigned int irq_nr) | 87 | static void pmac_mask_and_ack_irq(unsigned int virq) |
90 | { | 88 | { |
91 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 89 | unsigned int src = irq_map[virq].hwirq; |
92 | int i = irq_nr >> 5; | 90 | unsigned long bit = 1UL << (virq & 0x1f); |
91 | int i = virq >> 5; | ||
93 | unsigned long flags; | 92 | unsigned long flags; |
94 | 93 | ||
95 | if ((unsigned)irq_nr >= max_irqs) | ||
96 | return; | ||
97 | |||
98 | spin_lock_irqsave(&pmac_pic_lock, flags); | 94 | spin_lock_irqsave(&pmac_pic_lock, flags); |
99 | __clear_bit(irq_nr, ppc_cached_irq_mask); | 95 | __clear_bit(src, ppc_cached_irq_mask); |
100 | if (__test_and_clear_bit(irq_nr, ppc_lost_interrupts)) | 96 | if (__test_and_clear_bit(src, ppc_lost_interrupts)) |
101 | atomic_dec(&ppc_n_lost_interrupts); | 97 | atomic_dec(&ppc_n_lost_interrupts); |
102 | out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); | 98 | out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); |
103 | out_le32(&pmac_irq_hw[i]->ack, bit); | 99 | out_le32(&pmac_irq_hw[i]->ack, bit); |
@@ -110,17 +106,15 @@ static void pmac_mask_and_ack_irq(unsigned int irq_nr) | |||
110 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 106 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
111 | } | 107 | } |
112 | 108 | ||
113 | static void pmac_ack_irq(unsigned int irq_nr) | 109 | static void pmac_ack_irq(unsigned int virq) |
114 | { | 110 | { |
115 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 111 | unsigned int src = irq_map[virq].hwirq; |
116 | int i = irq_nr >> 5; | 112 | unsigned long bit = 1UL << (src & 0x1f); |
113 | int i = src >> 5; | ||
117 | unsigned long flags; | 114 | unsigned long flags; |
118 | 115 | ||
119 | if ((unsigned)irq_nr >= max_irqs) | ||
120 | return; | ||
121 | |||
122 | spin_lock_irqsave(&pmac_pic_lock, flags); | 116 | spin_lock_irqsave(&pmac_pic_lock, flags); |
123 | if (__test_and_clear_bit(irq_nr, ppc_lost_interrupts)) | 117 | if (__test_and_clear_bit(src, ppc_lost_interrupts)) |
124 | atomic_dec(&ppc_n_lost_interrupts); | 118 | atomic_dec(&ppc_n_lost_interrupts); |
125 | out_le32(&pmac_irq_hw[i]->ack, bit); | 119 | out_le32(&pmac_irq_hw[i]->ack, bit); |
126 | (void)in_le32(&pmac_irq_hw[i]->ack); | 120 | (void)in_le32(&pmac_irq_hw[i]->ack); |
@@ -157,48 +151,51 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) | |||
157 | /* When an irq gets requested for the first client, if it's an | 151 | /* When an irq gets requested for the first client, if it's an |
158 | * edge interrupt, we clear any previous one on the controller | 152 | * edge interrupt, we clear any previous one on the controller |
159 | */ | 153 | */ |
160 | static unsigned int pmac_startup_irq(unsigned int irq_nr) | 154 | static unsigned int pmac_startup_irq(unsigned int virq) |
161 | { | 155 | { |
162 | unsigned long flags; | 156 | unsigned long flags; |
163 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 157 | unsigned int src = irq_map[virq].hwirq; |
164 | int i = irq_nr >> 5; | 158 | unsigned long bit = 1UL << (src & 0x1f); |
159 | int i = src >> 5; | ||
165 | 160 | ||
166 | spin_lock_irqsave(&pmac_pic_lock, flags); | 161 | spin_lock_irqsave(&pmac_pic_lock, flags); |
167 | if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0) | 162 | if ((irq_desc[virq].status & IRQ_LEVEL) == 0) |
168 | out_le32(&pmac_irq_hw[i]->ack, bit); | 163 | out_le32(&pmac_irq_hw[i]->ack, bit); |
169 | __set_bit(irq_nr, ppc_cached_irq_mask); | 164 | __set_bit(src, ppc_cached_irq_mask); |
170 | __pmac_set_irq_mask(irq_nr, 0); | 165 | __pmac_set_irq_mask(src, 0); |
171 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 166 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
172 | 167 | ||
173 | return 0; | 168 | return 0; |
174 | } | 169 | } |
175 | 170 | ||
176 | static void pmac_mask_irq(unsigned int irq_nr) | 171 | static void pmac_mask_irq(unsigned int virq) |
177 | { | 172 | { |
178 | unsigned long flags; | 173 | unsigned long flags; |
174 | unsigned int src = irq_map[virq].hwirq; | ||
179 | 175 | ||
180 | spin_lock_irqsave(&pmac_pic_lock, flags); | 176 | spin_lock_irqsave(&pmac_pic_lock, flags); |
181 | __clear_bit(irq_nr, ppc_cached_irq_mask); | 177 | __clear_bit(src, ppc_cached_irq_mask); |
182 | __pmac_set_irq_mask(irq_nr, 0); | 178 | __pmac_set_irq_mask(src, 0); |
183 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 179 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
184 | } | 180 | } |
185 | 181 | ||
186 | static void pmac_unmask_irq(unsigned int irq_nr) | 182 | static void pmac_unmask_irq(unsigned int virq) |
187 | { | 183 | { |
188 | unsigned long flags; | 184 | unsigned long flags; |
185 | unsigned int src = irq_map[virq].hwirq; | ||
189 | 186 | ||
190 | spin_lock_irqsave(&pmac_pic_lock, flags); | 187 | spin_lock_irqsave(&pmac_pic_lock, flags); |
191 | __set_bit(irq_nr, ppc_cached_irq_mask); | 188 | __set_bit(src, ppc_cached_irq_mask); |
192 | __pmac_set_irq_mask(irq_nr, 0); | 189 | __pmac_set_irq_mask(src, 0); |
193 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 190 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
194 | } | 191 | } |
195 | 192 | ||
196 | static int pmac_retrigger(unsigned int irq_nr) | 193 | static int pmac_retrigger(unsigned int virq) |
197 | { | 194 | { |
198 | unsigned long flags; | 195 | unsigned long flags; |
199 | 196 | ||
200 | spin_lock_irqsave(&pmac_pic_lock, flags); | 197 | spin_lock_irqsave(&pmac_pic_lock, flags); |
201 | __pmac_retrigger(irq_nr); | 198 | __pmac_retrigger(irq_map[virq].hwirq); |
202 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 199 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
203 | return 1; | 200 | return 1; |
204 | } | 201 | } |
@@ -238,7 +235,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs) | |||
238 | return rc; | 235 | return rc; |
239 | } | 236 | } |
240 | 237 | ||
241 | static int pmac_get_irq(struct pt_regs *regs) | 238 | static unsigned int pmac_pic_get_irq(struct pt_regs *regs) |
242 | { | 239 | { |
243 | int irq; | 240 | int irq; |
244 | unsigned long bits = 0; | 241 | unsigned long bits = 0; |
@@ -250,7 +247,7 @@ static int pmac_get_irq(struct pt_regs *regs) | |||
250 | /* IPI's are a hack on the powersurge -- Cort */ | 247 | /* IPI's are a hack on the powersurge -- Cort */ |
251 | if ( smp_processor_id() != 0 ) { | 248 | if ( smp_processor_id() != 0 ) { |
252 | psurge_smp_message_recv(regs); | 249 | psurge_smp_message_recv(regs); |
253 | return -2; /* ignore, already handled */ | 250 | return NO_IRQ_IGNORE; /* ignore, already handled */ |
254 | } | 251 | } |
255 | #endif /* CONFIG_SMP */ | 252 | #endif /* CONFIG_SMP */ |
256 | spin_lock_irqsave(&pmac_pic_lock, flags); | 253 | spin_lock_irqsave(&pmac_pic_lock, flags); |
@@ -266,133 +263,9 @@ static int pmac_get_irq(struct pt_regs *regs) | |||
266 | break; | 263 | break; |
267 | } | 264 | } |
268 | spin_unlock_irqrestore(&pmac_pic_lock, flags); | 265 | spin_unlock_irqrestore(&pmac_pic_lock, flags); |
269 | 266 | if (unlikely(irq < 0)) | |
270 | return irq; | 267 | return NO_IRQ; |
271 | } | 268 | return irq_linear_revmap(pmac_pic_host, irq); |
272 | |||
273 | /* This routine will fix some missing interrupt values in the device tree | ||
274 | * on the gatwick mac-io controller used by some PowerBooks | ||
275 | * | ||
276 | * Walking of OF nodes could use a bit more fixing up here, but it's not | ||
277 | * very important as this is all boot time code on static portions of the | ||
278 | * device-tree. | ||
279 | * | ||
280 | * However, the modifications done to "intrs" will have to be removed and | ||
281 | * replaced with proper updates of the "interrupts" properties or | ||
282 | * AAPL,interrupts, yet to be decided, once the dynamic parsing is there. | ||
283 | */ | ||
284 | static void __init pmac_fix_gatwick_interrupts(struct device_node *gw, | ||
285 | int irq_base) | ||
286 | { | ||
287 | struct device_node *node; | ||
288 | int count; | ||
289 | |||
290 | memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool)); | ||
291 | count = 0; | ||
292 | for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) { | ||
293 | /* Fix SCC */ | ||
294 | if ((strcasecmp(node->name, "escc") == 0) && node->child) { | ||
295 | if (node->child->n_intrs < 3) { | ||
296 | node->child->intrs = &gatwick_int_pool[count]; | ||
297 | count += 3; | ||
298 | } | ||
299 | node->child->n_intrs = 3; | ||
300 | node->child->intrs[0].line = 15+irq_base; | ||
301 | node->child->intrs[1].line = 4+irq_base; | ||
302 | node->child->intrs[2].line = 5+irq_base; | ||
303 | printk(KERN_INFO "irq: fixed SCC on gatwick" | ||
304 | " (%d,%d,%d)\n", | ||
305 | node->child->intrs[0].line, | ||
306 | node->child->intrs[1].line, | ||
307 | node->child->intrs[2].line); | ||
308 | } | ||
309 | /* Fix media-bay & left SWIM */ | ||
310 | if (strcasecmp(node->name, "media-bay") == 0) { | ||
311 | struct device_node* ya_node; | ||
312 | |||
313 | if (node->n_intrs == 0) | ||
314 | node->intrs = &gatwick_int_pool[count++]; | ||
315 | node->n_intrs = 1; | ||
316 | node->intrs[0].line = 29+irq_base; | ||
317 | printk(KERN_INFO "irq: fixed media-bay on gatwick" | ||
318 | " (%d)\n", node->intrs[0].line); | ||
319 | |||
320 | ya_node = node->child; | ||
321 | while(ya_node) { | ||
322 | if (strcasecmp(ya_node->name, "floppy") == 0) { | ||
323 | if (ya_node->n_intrs < 2) { | ||
324 | ya_node->intrs = &gatwick_int_pool[count]; | ||
325 | count += 2; | ||
326 | } | ||
327 | ya_node->n_intrs = 2; | ||
328 | ya_node->intrs[0].line = 19+irq_base; | ||
329 | ya_node->intrs[1].line = 1+irq_base; | ||
330 | printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n", | ||
331 | ya_node->intrs[0].line, ya_node->intrs[1].line); | ||
332 | } | ||
333 | if (strcasecmp(ya_node->name, "ata4") == 0) { | ||
334 | if (ya_node->n_intrs < 2) { | ||
335 | ya_node->intrs = &gatwick_int_pool[count]; | ||
336 | count += 2; | ||
337 | } | ||
338 | ya_node->n_intrs = 2; | ||
339 | ya_node->intrs[0].line = 14+irq_base; | ||
340 | ya_node->intrs[1].line = 3+irq_base; | ||
341 | printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n", | ||
342 | ya_node->intrs[0].line, ya_node->intrs[1].line); | ||
343 | } | ||
344 | ya_node = ya_node->sibling; | ||
345 | } | ||
346 | } | ||
347 | } | ||
348 | if (count > 10) { | ||
349 | printk("WARNING !! Gatwick interrupt pool overflow\n"); | ||
350 | printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE); | ||
351 | printk(" requested = %d\n", count); | ||
352 | } | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * The PowerBook 3400/2400/3500 can have a combo ethernet/modem | ||
357 | * card which includes an ohare chip that acts as a second interrupt | ||
358 | * controller. If we find this second ohare, set it up and fix the | ||
359 | * interrupt value in the device tree for the ethernet chip. | ||
360 | */ | ||
361 | static void __init enable_second_ohare(struct device_node *np) | ||
362 | { | ||
363 | unsigned char bus, devfn; | ||
364 | unsigned short cmd; | ||
365 | struct device_node *ether; | ||
366 | |||
367 | /* This code doesn't strictly belong here, it could be part of | ||
368 | * either the PCI initialisation or the feature code. It's kept | ||
369 | * here for historical reasons. | ||
370 | */ | ||
371 | if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { | ||
372 | struct pci_controller* hose = | ||
373 | pci_find_hose_for_OF_device(np); | ||
374 | if (!hose) { | ||
375 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); | ||
376 | return; | ||
377 | } | ||
378 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
379 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
380 | cmd &= ~PCI_COMMAND_IO; | ||
381 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
382 | } | ||
383 | |||
384 | /* Fix interrupt for the modem/ethernet combo controller. The number | ||
385 | * in the device tree (27) is bogus (correct for the ethernet-only | ||
386 | * board but not the combo ethernet/modem board). | ||
387 | * The real interrupt is 28 on the second controller -> 28+32 = 60. | ||
388 | */ | ||
389 | ether = of_find_node_by_name(NULL, "pci1011,14"); | ||
390 | if (ether && ether->n_intrs > 0) { | ||
391 | ether->intrs[0].line = 60; | ||
392 | printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", | ||
393 | ether->intrs[0].line); | ||
394 | } | ||
395 | of_node_put(ether); | ||
396 | } | 269 | } |
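Editorial note: with the linear host in place the translation runs both ways: the chip callbacks above convert the virtual number they receive into a hardware source via irq_map[virq].hwirq before touching mask registers, and pmac_pic_get_irq() goes the other way with irq_linear_revmap(). Two tiny sketches of those conversions (example_* names and the cached-mask array are illustrative):

static unsigned long example_cached_mask[4];

/* virq -> hwirq, as in the mask/unmask/ack callbacks above */
static void example_mask(unsigned int virq)
{
        unsigned int src = irq_map[virq].hwirq;

        /* Mask by hardware source number, not by virtual number */
        __clear_bit(src, example_cached_mask);
}

/* hwirq -> virq, as at the end of pmac_pic_get_irq() */
static unsigned int example_to_virq(struct irq_host *host, int hw)
{
        if (hw < 0)
                return NO_IRQ;
        return irq_linear_revmap(host, hw);     /* table lookup, no allocation */
}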
397 | 270 | ||
398 | #ifdef CONFIG_XMON | 271 | #ifdef CONFIG_XMON |
@@ -411,6 +284,50 @@ static struct irqaction gatwick_cascade_action = { | |||
411 | .name = "cascade", | 284 | .name = "cascade", |
412 | }; | 285 | }; |
413 | 286 | ||
287 | static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) | ||
288 | { | ||
289 | /* We match all, we don't always have a node anyway */ | ||
290 | return 1; | ||
291 | } | ||
292 | |||
293 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | ||
294 | irq_hw_number_t hw, unsigned int flags) | ||
295 | { | ||
296 | struct irq_desc *desc = get_irq_desc(virq); | ||
297 | int level; | ||
298 | |||
299 | if (hw >= max_irqs) | ||
300 | return -EINVAL; | ||
301 | |||
302 | /* Mark level interrupts, set delayed disable for edge ones and set | ||
303 | * handlers | ||
304 | */ | ||
305 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); | ||
306 | if (level) | ||
307 | desc->status |= IRQ_LEVEL; | ||
308 | else | ||
309 | desc->status |= IRQ_DELAYED_DISABLE; | ||
310 | set_irq_chip_and_handler(virq, &pmac_pic, level ? | ||
311 | handle_level_irq : handle_edge_irq); | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, | ||
316 | u32 *intspec, unsigned int intsize, | ||
317 | irq_hw_number_t *out_hwirq, | ||
318 | unsigned int *out_flags) | ||
319 | |||
320 | { | ||
321 | *out_hwirq = *intspec; | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | static struct irq_host_ops pmac_pic_host_ops = { | ||
326 | .match = pmac_pic_host_match, | ||
327 | .map = pmac_pic_host_map, | ||
328 | .xlate = pmac_pic_host_xlate, | ||
329 | }; | ||
330 | |||
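Editorial note: the three host callbacks split the work: .match() decides whether a controller node belongs to this host, .xlate() turns an "interrupts" specifier into a hardware number plus sense flags, and .map() is where each freshly created virtual irq gets its chip and flow handler (here choosing level vs. edge from level_mask). pmac_pic_host_xlate() leaves the flags untouched because the Apple PIC encodes no sense information; a variant that makes that explicit might look like this (hypothetical example, not part of the patch):

static int example_xlate(struct irq_host *h, struct device_node *ct,
                         u32 *intspec, unsigned int intsize,
                         irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        if (intsize < 1)
                return -EINVAL;
        *out_hwirq = intspec[0];        /* single cell: the source number */
        *out_flags = IRQ_TYPE_NONE;     /* no sense encoded in the specifier */
        return 0;
}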
414 | static void __init pmac_pic_probe_oldstyle(void) | 331 | static void __init pmac_pic_probe_oldstyle(void) |
415 | { | 332 | { |
416 | int i; | 333 | int i; |
@@ -420,7 +337,7 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
420 | struct resource r; | 337 | struct resource r; |
421 | 338 | ||
422 | /* Set our get_irq function */ | 339 | /* Set our get_irq function */ |
423 | ppc_md.get_irq = pmac_get_irq; | 340 | ppc_md.get_irq = pmac_pic_get_irq; |
424 | 341 | ||
425 | /* | 342 | /* |
426 | * Find the interrupt controller type & node | 343 | * Find the interrupt controller type & node |
@@ -438,7 +355,6 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
438 | if (slave) { | 355 | if (slave) { |
439 | max_irqs = 64; | 356 | max_irqs = 64; |
440 | level_mask[1] = OHARE_LEVEL_MASK; | 357 | level_mask[1] = OHARE_LEVEL_MASK; |
441 | enable_second_ohare(slave); | ||
442 | } | 358 | } |
443 | } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { | 359 | } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { |
444 | max_irqs = max_real_irqs = 64; | 360 | max_irqs = max_real_irqs = 64; |
@@ -462,21 +378,18 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
462 | max_irqs = 128; | 378 | max_irqs = 128; |
463 | level_mask[2] = HEATHROW_LEVEL_MASK; | 379 | level_mask[2] = HEATHROW_LEVEL_MASK; |
464 | level_mask[3] = 0; | 380 | level_mask[3] = 0; |
465 | pmac_fix_gatwick_interrupts(slave, max_real_irqs); | ||
466 | } | 381 | } |
467 | } | 382 | } |
468 | BUG_ON(master == NULL); | 383 | BUG_ON(master == NULL); |
469 | 384 | ||
470 | /* Mark level interrupts and set handlers */ | 385 | /* |
471 | for (i = 0; i < max_irqs; i++) { | 386 | * Allocate an irq host |
472 | int level = !!(level_mask[i >> 5] & (1UL << (i & 0x1f))); | 387 | */ |
473 | if (level) | 388 | pmac_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, max_irqs, |
474 | irq_desc[i].status |= IRQ_LEVEL; | 389 | &pmac_pic_host_ops, |
475 | else | 390 | max_irqs); |
476 | irq_desc[i].status |= IRQ_DELAYED_DISABLE; | 391 | BUG_ON(pmac_pic_host == NULL); |
477 | set_irq_chip_and_handler(i, &pmac_pic, level ? | 392 | irq_set_default_host(pmac_pic_host); |
478 | handle_level_irq : handle_edge_irq); | ||
479 | } | ||
480 | 393 | ||
481 | /* Get addresses of first controller if we have a node for it */ | 394 | /* Get addresses of first controller if we have a node for it */ |
482 | BUG_ON(of_address_to_resource(master, 0, &r)); | 395 | BUG_ON(of_address_to_resource(master, 0, &r)); |
@@ -503,7 +416,7 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
503 | pmac_irq_hw[i++] = | 416 | pmac_irq_hw[i++] = |
504 | (volatile struct pmac_irq_hw __iomem *) | 417 | (volatile struct pmac_irq_hw __iomem *) |
505 | (addr + 0x10); | 418 | (addr + 0x10); |
506 | pmac_irq_cascade = slave->intrs[0].line; | 419 | pmac_irq_cascade = irq_of_parse_and_map(slave, 0); |
507 | 420 | ||
508 | printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" | 421 | printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" |
509 | " cascade: %d\n", slave->full_name, | 422 | " cascade: %d\n", slave->full_name, |
@@ -516,12 +429,12 @@ static void __init pmac_pic_probe_oldstyle(void) | |||
516 | out_le32(&pmac_irq_hw[i]->enable, 0); | 429 | out_le32(&pmac_irq_hw[i]->enable, 0); |
517 | 430 | ||
518 | /* Hookup cascade irq */ | 431 | /* Hookup cascade irq */ |
519 | if (slave) | 432 | if (slave && pmac_irq_cascade != NO_IRQ) |
520 | setup_irq(pmac_irq_cascade, &gatwick_cascade_action); | 433 | setup_irq(pmac_irq_cascade, &gatwick_cascade_action); |
521 | 434 | ||
522 | printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); | 435 | printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); |
523 | #ifdef CONFIG_XMON | 436 | #ifdef CONFIG_XMON |
524 | setup_irq(20, &xmon_action); | 437 | setup_irq(irq_create_mapping(NULL, 20, 0), &xmon_action); |
525 | #endif | 438 | #endif |
526 | } | 439 | } |
527 | #endif /* CONFIG_PPC32 */ | 440 | #endif /* CONFIG_PPC32 */ |
@@ -530,16 +443,11 @@ static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc, | |||
530 | struct pt_regs *regs) | 443 | struct pt_regs *regs) |
531 | { | 444 | { |
532 | struct mpic *mpic = desc->handler_data; | 445 | struct mpic *mpic = desc->handler_data; |
533 | unsigned int max = 100; | ||
534 | 446 | ||
535 | while(max--) { | 447 | unsigned int cascade_irq = mpic_get_one_irq(mpic, regs); |
536 | int cascade_irq = mpic_get_one_irq(mpic, regs); | 448 | if (cascade_irq != NO_IRQ) |
537 | if (max == 99) | ||
538 | desc->chip->eoi(irq); | ||
539 | if (irq < 0) | ||
540 | break; | ||
541 | generic_handle_irq(cascade_irq, regs); | 449 | generic_handle_irq(cascade_irq, regs); |
542 | }; | 450 | desc->chip->eoi(irq); |
543 | } | 451 | } |
544 | 452 | ||
545 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | 453 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) |
@@ -549,21 +457,20 @@ static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | |||
549 | int nmi_irq; | 457 | int nmi_irq; |
550 | 458 | ||
551 | pswitch = of_find_node_by_name(NULL, "programmer-switch"); | 459 | pswitch = of_find_node_by_name(NULL, "programmer-switch"); |
552 | if (pswitch && pswitch->n_intrs) { | 460 | if (pswitch) { |
553 | nmi_irq = pswitch->intrs[0].line; | 461 | nmi_irq = irq_of_parse_and_map(pswitch, 0); |
554 | mpic_irq_set_priority(nmi_irq, 9); | 462 | if (nmi_irq != NO_IRQ) { |
555 | setup_irq(nmi_irq, &xmon_action); | 463 | mpic_irq_set_priority(nmi_irq, 9); |
464 | setup_irq(nmi_irq, &xmon_action); | ||
465 | } | ||
466 | of_node_put(pswitch); | ||
556 | } | 467 | } |
557 | of_node_put(pswitch); | ||
558 | #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ | 468 | #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ |
559 | } | 469 | } |
560 | 470 | ||
561 | static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | 471 | static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, |
562 | int master) | 472 | int master) |
563 | { | 473 | { |
564 | unsigned char senses[128]; | ||
565 | int offset = master ? 0 : 128; | ||
566 | int count = master ? 128 : 124; | ||
567 | const char *name = master ? " MPIC 1 " : " MPIC 2 "; | 474 | const char *name = master ? " MPIC 1 " : " MPIC 2 "; |
568 | struct resource r; | 475 | struct resource r; |
569 | struct mpic *mpic; | 476 | struct mpic *mpic; |
@@ -576,8 +483,6 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | |||
576 | 483 | ||
577 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); | 484 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); |
578 | 485 | ||
579 | prom_get_irq_senses(senses, offset, offset + count); | ||
580 | |||
581 | flags |= MPIC_WANTS_RESET; | 486 | flags |= MPIC_WANTS_RESET; |
582 | if (get_property(np, "big-endian", NULL)) | 487 | if (get_property(np, "big-endian", NULL)) |
583 | flags |= MPIC_BIG_ENDIAN; | 488 | flags |= MPIC_BIG_ENDIAN; |
@@ -588,8 +493,7 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | |||
588 | if (master && (flags & MPIC_BIG_ENDIAN)) | 493 | if (master && (flags & MPIC_BIG_ENDIAN)) |
589 | flags |= MPIC_BROKEN_U3; | 494 | flags |= MPIC_BROKEN_U3; |
590 | 495 | ||
591 | mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 252 : 0, | 496 | mpic = mpic_alloc(np, r.start, flags, 0, 0, name); |
592 | senses, count, name); | ||
593 | if (mpic == NULL) | 497 | if (mpic == NULL) |
594 | return NULL; | 498 | return NULL; |
595 | 499 | ||
@@ -602,6 +506,7 @@ static int __init pmac_pic_probe_mpic(void) | |||
602 | { | 506 | { |
603 | struct mpic *mpic1, *mpic2; | 507 | struct mpic *mpic1, *mpic2; |
604 | struct device_node *np, *master = NULL, *slave = NULL; | 508 | struct device_node *np, *master = NULL, *slave = NULL; |
509 | unsigned int cascade; | ||
605 | 510 | ||
606 | /* We can have up to 2 MPICs cascaded */ | 511 | /* We can have up to 2 MPICs cascaded */ |
607 | for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) | 512 | for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) |
@@ -638,8 +543,15 @@ static int __init pmac_pic_probe_mpic(void) | |||
638 | of_node_put(master); | 543 | of_node_put(master); |
639 | 544 | ||
640 | /* No slave, let's go out */ | 545 | /* No slave, let's go out */ |
641 | if (slave == NULL || slave->n_intrs < 1) | 546 | if (slave == NULL) |
547 | return 0; | ||
548 | |||
549 | /* Get/Map slave interrupt */ | ||
550 | cascade = irq_of_parse_and_map(slave, 0); | ||
551 | if (cascade == NO_IRQ) { | ||
552 | printk(KERN_ERR "Failed to map cascade IRQ\n"); | ||
642 | return 0; | 553 | return 0; |
554 | } | ||
643 | 555 | ||
644 | mpic2 = pmac_setup_one_mpic(slave, 0); | 556 | mpic2 = pmac_setup_one_mpic(slave, 0); |
645 | if (mpic2 == NULL) { | 557 | if (mpic2 == NULL) { |
@@ -647,8 +559,8 @@ static int __init pmac_pic_probe_mpic(void) | |||
647 | of_node_put(slave); | 559 | of_node_put(slave); |
648 | return 0; | 560 | return 0; |
649 | } | 561 | } |
650 | set_irq_data(slave->intrs[0].line, mpic2); | 562 | set_irq_data(cascade, mpic2); |
651 | set_irq_chained_handler(slave->intrs[0].line, pmac_u3_cascade); | 563 | set_irq_chained_handler(cascade, pmac_u3_cascade); |
652 | 564 | ||
653 | of_node_put(slave); | 565 | of_node_put(slave); |
654 | return 0; | 566 | return 0; |
@@ -657,6 +569,19 @@ static int __init pmac_pic_probe_mpic(void) | |||
657 | 569 | ||
658 | void __init pmac_pic_init(void) | 570 | void __init pmac_pic_init(void) |
659 | { | 571 | { |
572 | unsigned int flags = 0; | ||
573 | |||
574 | /* We configure the OF parsing based on our oldworld vs. newworld | ||
575 | * platform type and whether we were booted by BootX. | ||
576 | */ | ||
577 | #ifdef CONFIG_PPC32 | ||
578 | if (!pmac_newworld) | ||
579 | flags |= OF_IMAP_OLDWORLD_MAC; | ||
580 | if (get_property(of_chosen, "linux,bootx", NULL) != NULL) | ||
581 | flags |= OF_IMAP_NO_PHANDLE; | ||
582 | of_irq_map_init(flags); | ||
583 | #endif /* CONFIG_PPC32 */ | ||
584 | |||
660 | /* We first try to detect Apple's new Core99 chipset, since mac-io | 585 | /* We first try to detect Apple's new Core99 chipset, since mac-io |
661 | * is quite different on those machines and contains an IBM MPIC2. | 586 | * is quite different on those machines and contains an IBM MPIC2. |
662 | */ | 587 | */ |
@@ -679,6 +604,7 @@ unsigned long sleep_save_mask[2]; | |||
679 | 604 | ||
680 | /* This used to be passed by the PMU driver but that link got | 605 | /* This used to be passed by the PMU driver but that link got |
681 | * broken with the new driver model. We use this tweak for now... | 606 | * broken with the new driver model. We use this tweak for now... |
607 | * We really want to do things differently though... | ||
682 | */ | 608 | */ |
683 | static int pmacpic_find_viaint(void) | 609 | static int pmacpic_find_viaint(void) |
684 | { | 610 | { |
@@ -692,7 +618,7 @@ static int pmacpic_find_viaint(void) | |||
692 | np = of_find_node_by_name(NULL, "via-pmu"); | 618 | np = of_find_node_by_name(NULL, "via-pmu"); |
693 | if (np == NULL) | 619 | if (np == NULL) |
694 | goto not_found; | 620 | goto not_found; |
695 | viaint = np->intrs[0].line; | 621 | viaint = irq_of_parse_and_map(np, 0); |
696 | #endif /* CONFIG_ADB_PMU */ | 622 | #endif /* CONFIG_ADB_PMU */ |
697 | 623 | ||
698 | not_found: | 624 | not_found: |
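The powermac changes above replace every direct read of np->intrs[] with irq_of_parse_and_map(), which walks the device tree's interrupt parents and hands back a ready-to-use virtual irq. Below is a minimal sketch of that pattern (not part of the patch), assuming the API exactly as it appears here; the node lookup, handler and "my-dev" name are invented examples.

/* Sketch only, not part of the patch: obtain and request a device irq
 * through the new OF parsing helpers. my_dev_intr and "my-dev" are
 * made-up examples.
 */
#include <linux/interrupt.h>
#include <asm/prom.h>
#include <asm/irq.h>

static irqreturn_t my_dev_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        /* device-specific handling would go here */
        return IRQ_HANDLED;
}

static int __init my_dev_hookup(struct device_node *np)
{
        unsigned int virq;

        /* Parse index 0 of the node's "interrupts" property and map it
         * through its interrupt parent; NO_IRQ means no usable mapping.
         */
        virq = irq_of_parse_and_map(np, 0);
        if (virq == NO_IRQ)
                return -EINVAL;

        /* The returned virq feeds request_irq() directly; the old
         * virt_irq_create_mapping()/irq_offset_up() steps are gone.
         */
        return request_irq(virq, my_dev_intr, 0, "my-dev", NULL);
}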
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index 21c7b0f8f329..94e7b24b840b 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h | |||
@@ -12,6 +12,8 @@ | |||
12 | 12 | ||
13 | struct rtc_time; | 13 | struct rtc_time; |
14 | 14 | ||
15 | extern int pmac_newworld; | ||
16 | |||
15 | extern long pmac_time_init(void); | 17 | extern long pmac_time_init(void); |
16 | extern unsigned long pmac_get_boot_time(void); | 18 | extern unsigned long pmac_get_boot_time(void); |
17 | extern void pmac_get_rtc_time(struct rtc_time *); | 19 | extern void pmac_get_rtc_time(struct rtc_time *); |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 8654b5f07836..31a9da769fa2 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -613,9 +613,6 @@ static void __init pmac_init_early(void) | |||
613 | udbg_adb_init(!!strstr(cmd_line, "btextdbg")); | 613 | udbg_adb_init(!!strstr(cmd_line, "btextdbg")); |
614 | 614 | ||
615 | #ifdef CONFIG_PPC64 | 615 | #ifdef CONFIG_PPC64 |
616 | /* Setup interrupt mapping options */ | ||
617 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
618 | |||
619 | iommu_init_early_dart(); | 616 | iommu_init_early_dart(); |
620 | #endif | 617 | #endif |
621 | } | 618 | } |
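Both the powermac code above and the pseries code further below now hand mpic_alloc() the controller's device node instead of a hand-built sense array and irq offsets. A sketch of the new call shape follows (not part of the patch), assuming the six-argument signature used in this patch; the flags, name and helper are placeholders.

/* Sketch only, not part of the patch: allocate and init an MPIC with the
 * node-based mpic_alloc() used above. Argument values are placeholders.
 */
static struct mpic * __init example_mpic_probe(struct device_node *np,
                                               unsigned long phys_addr)
{
        struct mpic *mpic;

        /* node, physical address, flags, isu size, irq count, name -
         * the powermac call above passes 0 for isu size and irq count.
         */
        mpic = mpic_alloc(np, phys_addr, MPIC_PRIMARY | MPIC_WANTS_RESET,
                          0, 0, " EXAMPLE MPIC ");
        if (mpic == NULL)
                return NULL;

        mpic_init(mpic);
        return mpic;
}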
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 9639c66b453d..9df783088b61 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -72,32 +72,62 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id, | |||
72 | 72 | ||
73 | /* #define DEBUG */ | 73 | /* #define DEBUG */ |
74 | 74 | ||
75 | static void request_ras_irqs(struct device_node *np, char *propname, | 75 | |
76 | static void request_ras_irqs(struct device_node *np, | ||
76 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | 77 | irqreturn_t (*handler)(int, void *, struct pt_regs *), |
77 | const char *name) | 78 | const char *name) |
78 | { | 79 | { |
79 | unsigned int *ireg, len, i; | 80 | int i, index, count = 0; |
80 | int virq, n_intr; | 81 | struct of_irq oirq; |
81 | 82 | u32 *opicprop; | |
82 | ireg = (unsigned int *)get_property(np, propname, &len); | 83 | unsigned int opicplen; |
83 | if (ireg == NULL) | 84 | unsigned int virqs[16]; |
84 | return; | 85 | |
85 | n_intr = prom_n_intr_cells(np); | 86 | /* Check for obsolete "open-pic-interrupt" property. If present, then |
86 | len /= n_intr * sizeof(*ireg); | 87 | * map those interrupts using the default interrupt host and default |
87 | 88 | * trigger | |
88 | for (i = 0; i < len; i++) { | 89 | */ |
89 | virq = virt_irq_create_mapping(*ireg); | 90 | opicprop = (u32 *)get_property(np, "open-pic-interrupt", &opicplen); |
90 | if (virq == NO_IRQ) { | 91 | if (opicprop) { |
91 | printk(KERN_ERR "Unable to allocate interrupt " | 92 | opicplen /= sizeof(u32); |
92 | "number for %s\n", np->full_name); | 93 | for (i = 0; i < opicplen; i++) { |
93 | return; | 94 | if (count > 15) |
95 | break; | ||
96 | virqs[count] = irq_create_mapping(NULL, *(opicprop++), | ||
97 | IRQ_TYPE_NONE); | ||
98 | if (virqs[count] == NO_IRQ) | ||
99 | printk(KERN_ERR "Unable to allocate interrupt " | ||
100 | "number for %s\n", np->full_name); | ||
101 | else | ||
102 | count++; | ||
103 | |||
94 | } | 104 | } |
95 | if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) { | 105 | } |
106 | /* Else use normal interrupt tree parsing */ | ||
107 | else { | ||
108 | /* First try to do a proper OF tree parsing */ | ||
109 | for (index = 0; of_irq_map_one(np, index, &oirq) == 0; | ||
110 | index++) { | ||
111 | if (count > 15) | ||
112 | break; | ||
113 | virqs[count] = irq_create_of_mapping(oirq.controller, | ||
114 | oirq.specifier, | ||
115 | oirq.size); | ||
116 | if (virqs[count] == NO_IRQ) | ||
117 | printk(KERN_ERR "Unable to allocate interrupt " | ||
118 | "number for %s\n", np->full_name); | ||
119 | else | ||
120 | count++; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | /* Now request them */ | ||
125 | for (i = 0; i < count; i++) { | ||
126 | if (request_irq(virqs[i], handler, 0, name, NULL)) { | ||
96 | printk(KERN_ERR "Unable to request interrupt %d for " | 127 | printk(KERN_ERR "Unable to request interrupt %d for " |
97 | "%s\n", irq_offset_up(virq), np->full_name); | 128 | "%s\n", virqs[i], np->full_name); |
98 | return; | 129 | return; |
99 | } | 130 | } |
100 | ireg += n_intr; | ||
101 | } | 131 | } |
102 | } | 132 | } |
103 | 133 | ||
@@ -115,20 +145,14 @@ static int __init init_ras_IRQ(void) | |||
115 | /* Internal Errors */ | 145 | /* Internal Errors */ |
116 | np = of_find_node_by_path("/event-sources/internal-errors"); | 146 | np = of_find_node_by_path("/event-sources/internal-errors"); |
117 | if (np != NULL) { | 147 | if (np != NULL) { |
118 | request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt, | 148 | request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR"); |
119 | "RAS_ERROR"); | ||
120 | request_ras_irqs(np, "interrupts", ras_error_interrupt, | ||
121 | "RAS_ERROR"); | ||
122 | of_node_put(np); | 149 | of_node_put(np); |
123 | } | 150 | } |
124 | 151 | ||
125 | /* EPOW Events */ | 152 | /* EPOW Events */ |
126 | np = of_find_node_by_path("/event-sources/epow-events"); | 153 | np = of_find_node_by_path("/event-sources/epow-events"); |
127 | if (np != NULL) { | 154 | if (np != NULL) { |
128 | request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt, | 155 | request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW"); |
129 | "RAS_EPOW"); | ||
130 | request_ras_irqs(np, "interrupts", ras_epow_interrupt, | ||
131 | "RAS_EPOW"); | ||
132 | of_node_put(np); | 156 | of_node_put(np); |
133 | } | 157 | } |
134 | 158 | ||
@@ -162,7 +186,7 @@ ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
162 | 186 | ||
163 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 187 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
164 | RAS_VECTOR_OFFSET, | 188 | RAS_VECTOR_OFFSET, |
165 | virt_irq_to_real(irq_offset_down(irq)), | 189 | irq_map[irq].hwirq, |
166 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, | 190 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, |
167 | critical, __pa(&ras_log_buf), | 191 | critical, __pa(&ras_log_buf), |
168 | rtas_get_error_log_max()); | 192 | rtas_get_error_log_max()); |
@@ -198,7 +222,7 @@ ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
198 | 222 | ||
199 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 223 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
200 | RAS_VECTOR_OFFSET, | 224 | RAS_VECTOR_OFFSET, |
201 | virt_irq_to_real(irq_offset_down(irq)), | 225 | irq_map[irq].hwirq, |
202 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, | 226 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, |
203 | __pa(&ras_log_buf), | 227 | __pa(&ras_log_buf), |
204 | rtas_get_error_log_max()); | 228 | rtas_get_error_log_max()); |
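With the new code the RTAS calls above no longer translate via virt_irq_to_real(irq_offset_down(irq)); the hardware number is read straight out of the global irq_map[] table. The one-line helper below (not part of the patch, name invented) is shown only to make that pattern explicit.

/* Sketch only, not part of the patch: map a Linux virtual irq back to the
 * controller-level (hardware) interrupt number under the new scheme.
 */
#include <asm/irq.h>

static inline unsigned int example_virq_to_hw(unsigned int virq)
{
        /* irq_map[] is the virq descriptor table added by the new irq
         * code; .hwirq is what the RTAS check-exception calls now use.
         */
        return (unsigned int)irq_map[virq].hwirq;
}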
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 476b564a208b..54a52437265c 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -76,6 +76,9 @@ | |||
76 | #define DBG(fmt...) | 76 | #define DBG(fmt...) |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | /* move those away to a .h */ | ||
80 | extern void smp_init_pseries_mpic(void); | ||
81 | extern void smp_init_pseries_xics(void); | ||
79 | extern void find_udbg_vterm(void); | 82 | extern void find_udbg_vterm(void); |
80 | 83 | ||
81 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ | 84 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ |
@@ -83,7 +86,7 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */ | |||
83 | static void pseries_shared_idle_sleep(void); | 86 | static void pseries_shared_idle_sleep(void); |
84 | static void pseries_dedicated_idle_sleep(void); | 87 | static void pseries_dedicated_idle_sleep(void); |
85 | 88 | ||
86 | struct mpic *pSeries_mpic; | 89 | static struct device_node *pSeries_mpic_node; |
87 | 90 | ||
88 | static void pSeries_show_cpuinfo(struct seq_file *m) | 91 | static void pSeries_show_cpuinfo(struct seq_file *m) |
89 | { | 92 | { |
@@ -118,78 +121,92 @@ static void __init fwnmi_init(void) | |||
118 | fwnmi_active = 1; | 121 | fwnmi_active = 1; |
119 | } | 122 | } |
120 | 123 | ||
121 | void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc, | 124 | void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, |
122 | struct pt_regs *regs) | 125 | struct pt_regs *regs) |
123 | { | 126 | { |
124 | unsigned int max = 100; | 127 | unsigned int cascade_irq = i8259_irq(regs); |
125 | 128 | if (cascade_irq != NO_IRQ) | |
126 | while(max--) { | ||
127 | int cascade_irq = i8259_irq(regs); | ||
128 | if (max == 99) | ||
129 | desc->chip->eoi(irq); | ||
130 | if (cascade_irq < 0) | ||
131 | break; | ||
132 | generic_handle_irq(cascade_irq, regs); | 129 | generic_handle_irq(cascade_irq, regs); |
133 | }; | 130 | desc->chip->eoi(irq); |
134 | } | 131 | } |
135 | 132 | ||
136 | static void __init pSeries_init_mpic(void) | 133 | static void __init pseries_mpic_init_IRQ(void) |
137 | { | 134 | { |
135 | struct device_node *np, *old, *cascade = NULL; | ||
138 | unsigned int *addrp; | 136 | unsigned int *addrp; |
139 | struct device_node *np; | ||
140 | unsigned long intack = 0; | 137 | unsigned long intack = 0; |
141 | |||
142 | /* All ISUs are setup, complete initialization */ | ||
143 | mpic_init(pSeries_mpic); | ||
144 | |||
145 | /* Check what kind of cascade ACK we have */ | ||
146 | if (!(np = of_find_node_by_name(NULL, "pci")) | ||
147 | || !(addrp = (unsigned int *) | ||
148 | get_property(np, "8259-interrupt-acknowledge", NULL))) | ||
149 | printk(KERN_ERR "Cannot find pci to get ack address\n"); | ||
150 | else | ||
151 | intack = addrp[prom_n_addr_cells(np)-1]; | ||
152 | of_node_put(np); | ||
153 | |||
154 | /* Setup the legacy interrupts & controller */ | ||
155 | i8259_init(intack, 0); | ||
156 | |||
157 | /* Hook cascade to mpic */ | ||
158 | set_irq_chained_handler(NUM_ISA_INTERRUPTS, pSeries_8259_cascade); | ||
159 | } | ||
160 | |||
161 | static void __init pSeries_setup_mpic(void) | ||
162 | { | ||
163 | unsigned int *opprop; | 138 | unsigned int *opprop; |
164 | unsigned long openpic_addr = 0; | 139 | unsigned long openpic_addr = 0; |
165 | unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS]; | 140 | unsigned int cascade_irq; |
166 | struct device_node *root; | 141 | int naddr, n, i, opplen; |
167 | int irq_count; | 142 | struct mpic *mpic; |
168 | 143 | ||
169 | /* Find the Open PIC if present */ | 144 | np = of_find_node_by_path("/"); |
170 | root = of_find_node_by_path("/"); | 145 | naddr = prom_n_addr_cells(np); |
171 | opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL); | 146 | opprop = (unsigned int *) get_property(np, "platform-open-pic", &opplen); |
172 | if (opprop != 0) { | 147 | if (opprop != 0) { |
173 | int n = prom_n_addr_cells(root); | 148 | openpic_addr = of_read_number(opprop, naddr); |
174 | |||
175 | for (openpic_addr = 0; n > 0; --n) | ||
176 | openpic_addr = (openpic_addr << 32) + *opprop++; | ||
177 | printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); | 149 | printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); |
178 | } | 150 | } |
179 | of_node_put(root); | 151 | of_node_put(np); |
180 | 152 | ||
181 | BUG_ON(openpic_addr == 0); | 153 | BUG_ON(openpic_addr == 0); |
182 | 154 | ||
183 | /* Get the sense values from OF */ | ||
184 | prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS); | ||
185 | |||
186 | /* Setup the openpic driver */ | 155 | /* Setup the openpic driver */ |
187 | irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */ | 156 | mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, |
188 | pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY, | 157 | MPIC_PRIMARY, |
189 | 16, 16, irq_count, /* isu size, irq offset, irq count */ | 158 | 16, 250, /* isu size, irq count */ |
190 | NR_IRQS - 4, /* ipi offset */ | 159 | " MPIC "); |
191 | senses, irq_count, /* sense & sense size */ | 160 | BUG_ON(mpic == NULL); |
192 | " MPIC "); | 161 | |
162 | /* Add ISUs */ | ||
163 | opplen /= sizeof(u32); | ||
164 | for (n = 0, i = naddr; i < opplen; i += naddr, n++) { | ||
165 | unsigned long isuaddr = of_read_number(opprop + i, naddr); | ||
166 | mpic_assign_isu(mpic, n, isuaddr); | ||
167 | } | ||
168 | |||
169 | /* All ISUs are setup, complete initialization */ | ||
170 | mpic_init(mpic); | ||
171 | |||
172 | /* Look for cascade */ | ||
173 | for_each_node_by_type(np, "interrupt-controller") | ||
174 | if (device_is_compatible(np, "chrp,iic")) { | ||
175 | cascade = np; | ||
176 | break; | ||
177 | } | ||
178 | if (cascade == NULL) | ||
179 | return; | ||
180 | |||
181 | cascade_irq = irq_of_parse_and_map(cascade, 0); | ||
182 | if (cascade_irq == NO_IRQ) { | ||
183 | printk(KERN_ERR "mpic: failed to map cascade interrupt\n"); | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | /* Check ACK type */ | ||
188 | for (old = of_node_get(cascade); old != NULL ; old = np) { | ||
189 | np = of_get_parent(old); | ||
190 | of_node_put(old); | ||
191 | if (np == NULL) | ||
192 | break; | ||
193 | if (strcmp(np->name, "pci") != 0) | ||
194 | continue; | ||
195 | addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", | ||
196 | NULL); | ||
197 | if (addrp == NULL) | ||
198 | continue; | ||
199 | naddr = prom_n_addr_cells(np); | ||
200 | intack = addrp[naddr-1]; | ||
201 | if (naddr > 1) | ||
202 | intack |= ((unsigned long)addrp[naddr-2]) << 32; | ||
203 | } | ||
204 | if (intack) | ||
205 | printk(KERN_DEBUG "mpic: PCI 8259 intack at 0x%016lx\n", | ||
206 | intack); | ||
207 | i8259_init(cascade, intack); | ||
208 | of_node_put(cascade); | ||
209 | set_irq_chained_handler(cascade_irq, pseries_8259_cascade); | ||
193 | } | 210 | } |
194 | 211 | ||
195 | static void pseries_lpar_enable_pmcs(void) | 212 | static void pseries_lpar_enable_pmcs(void) |
@@ -207,21 +224,67 @@ static void pseries_lpar_enable_pmcs(void) | |||
207 | get_lppaca()->pmcregs_in_use = 1; | 224 | get_lppaca()->pmcregs_in_use = 1; |
208 | } | 225 | } |
209 | 226 | ||
210 | static void __init pSeries_setup_arch(void) | 227 | #ifdef CONFIG_KEXEC |
228 | static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary) | ||
211 | { | 229 | { |
212 | /* Fixup ppc_md depending on the type of interrupt controller */ | 230 | mpic_teardown_this_cpu(secondary); |
213 | if (ppc64_interrupt_controller == IC_OPEN_PIC) { | 231 | } |
214 | ppc_md.init_IRQ = pSeries_init_mpic; | ||
215 | ppc_md.get_irq = mpic_get_irq; | ||
216 | /* Allocate the mpic now, so that find_and_init_phbs() can | ||
217 | * fill the ISUs */ | ||
218 | pSeries_setup_mpic(); | ||
219 | } else | ||
220 | ppc_md.init_IRQ = xics_init_IRQ; | ||
221 | 232 | ||
233 | static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) | ||
234 | { | ||
235 | /* Don't risk a hypervisor call if we're crashing */ | ||
236 | if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { | ||
237 | unsigned long vpa = __pa(get_lppaca()); | ||
238 | |||
239 | if (unregister_vpa(hard_smp_processor_id(), vpa)) { | ||
240 | printk("VPA deregistration of cpu %u (hw_cpu_id %d) " | ||
241 | "failed\n", smp_processor_id(), | ||
242 | hard_smp_processor_id()); | ||
243 | } | ||
244 | } | ||
245 | xics_teardown_cpu(secondary); | ||
246 | } | ||
247 | #endif /* CONFIG_KEXEC */ | ||
248 | |||
249 | static void __init pseries_discover_pic(void) | ||
250 | { | ||
251 | struct device_node *np; | ||
252 | char *typep; | ||
253 | |||
254 | for (np = NULL; (np = of_find_node_by_name(np, | ||
255 | "interrupt-controller"));) { | ||
256 | typep = (char *)get_property(np, "compatible", NULL); | ||
257 | if (strstr(typep, "open-pic")) { | ||
258 | pSeries_mpic_node = of_node_get(np); | ||
259 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; | ||
260 | ppc_md.get_irq = mpic_get_irq; | ||
261 | #ifdef CONFIG_KEXEC | ||
262 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic; | ||
263 | #endif | ||
222 | #ifdef CONFIG_SMP | 264 | #ifdef CONFIG_SMP |
223 | smp_init_pSeries(); | 265 | smp_init_pseries_mpic(); |
224 | #endif | 266 | #endif |
267 | return; | ||
268 | } else if (strstr(typep, "ppc-xicp")) { | ||
269 | ppc_md.init_IRQ = xics_init_IRQ; | ||
270 | #ifdef CONFIG_KEXEC | ||
271 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; | ||
272 | #endif | ||
273 | #ifdef CONFIG_SMP | ||
274 | smp_init_pseries_xics(); | ||
275 | #endif | ||
276 | return; | ||
277 | } | ||
278 | } | ||
279 | printk(KERN_ERR "pSeries_discover_pic: failed to recognize" | ||
280 | " interrupt-controller\n"); | ||
281 | } | ||
282 | |||
283 | static void __init pSeries_setup_arch(void) | ||
284 | { | ||
285 | /* Discover PIC type and setup ppc_md accordingly */ | ||
286 | pseries_discover_pic(); | ||
287 | |||
225 | /* openpic global configuration register (64-bit format). */ | 288 | /* openpic global configuration register (64-bit format). */ |
226 | /* openpic Interrupt Source Unit pointer (64-bit format). */ | 289 | /* openpic Interrupt Source Unit pointer (64-bit format). */ |
227 | /* python0 facility area (mmio) (64-bit format) REAL address. */ | 290 | /* python0 facility area (mmio) (64-bit format) REAL address. */ |
@@ -273,33 +336,6 @@ static int __init pSeries_init_panel(void) | |||
273 | } | 336 | } |
274 | arch_initcall(pSeries_init_panel); | 337 | arch_initcall(pSeries_init_panel); |
275 | 338 | ||
276 | static void __init pSeries_discover_pic(void) | ||
277 | { | ||
278 | struct device_node *np; | ||
279 | char *typep; | ||
280 | |||
281 | /* | ||
282 | * Setup interrupt mapping options that are needed for finish_device_tree | ||
283 | * to properly parse the OF interrupt tree & do the virtual irq mapping | ||
284 | */ | ||
285 | __irq_offset_value = NUM_ISA_INTERRUPTS; | ||
286 | ppc64_interrupt_controller = IC_INVALID; | ||
287 | for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) { | ||
288 | typep = (char *)get_property(np, "compatible", NULL); | ||
289 | if (strstr(typep, "open-pic")) { | ||
290 | ppc64_interrupt_controller = IC_OPEN_PIC; | ||
291 | break; | ||
292 | } else if (strstr(typep, "ppc-xicp")) { | ||
293 | ppc64_interrupt_controller = IC_PPC_XIC; | ||
294 | break; | ||
295 | } | ||
296 | } | ||
297 | if (ppc64_interrupt_controller == IC_INVALID) | ||
298 | printk("pSeries_discover_pic: failed to recognize" | ||
299 | " interrupt-controller\n"); | ||
300 | |||
301 | } | ||
302 | |||
303 | static void pSeries_mach_cpu_die(void) | 339 | static void pSeries_mach_cpu_die(void) |
304 | { | 340 | { |
305 | local_irq_disable(); | 341 | local_irq_disable(); |
@@ -342,8 +378,6 @@ static void __init pSeries_init_early(void) | |||
342 | 378 | ||
343 | iommu_init_early_pSeries(); | 379 | iommu_init_early_pSeries(); |
344 | 380 | ||
345 | pSeries_discover_pic(); | ||
346 | |||
347 | DBG(" <- pSeries_init_early()\n"); | 381 | DBG(" <- pSeries_init_early()\n"); |
348 | } | 382 | } |
349 | 383 | ||
@@ -515,27 +549,6 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus) | |||
515 | return PCI_PROBE_NORMAL; | 549 | return PCI_PROBE_NORMAL; |
516 | } | 550 | } |
517 | 551 | ||
518 | #ifdef CONFIG_KEXEC | ||
519 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) | ||
520 | { | ||
521 | /* Don't risk a hypervisor call if we're crashing */ | ||
522 | if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { | ||
523 | unsigned long vpa = __pa(get_lppaca()); | ||
524 | |||
525 | if (unregister_vpa(hard_smp_processor_id(), vpa)) { | ||
526 | printk("VPA deregistration of cpu %u (hw_cpu_id %d) " | ||
527 | "failed\n", smp_processor_id(), | ||
528 | hard_smp_processor_id()); | ||
529 | } | ||
530 | } | ||
531 | |||
532 | if (ppc64_interrupt_controller == IC_OPEN_PIC) | ||
533 | mpic_teardown_this_cpu(secondary); | ||
534 | else | ||
535 | xics_teardown_cpu(secondary); | ||
536 | } | ||
537 | #endif | ||
538 | |||
539 | define_machine(pseries) { | 552 | define_machine(pseries) { |
540 | .name = "pSeries", | 553 | .name = "pSeries", |
541 | .probe = pSeries_probe, | 554 | .probe = pSeries_probe, |
@@ -560,7 +573,6 @@ define_machine(pseries) { | |||
560 | .system_reset_exception = pSeries_system_reset_exception, | 573 | .system_reset_exception = pSeries_system_reset_exception, |
561 | .machine_check_exception = pSeries_machine_check_exception, | 574 | .machine_check_exception = pSeries_machine_check_exception, |
562 | #ifdef CONFIG_KEXEC | 575 | #ifdef CONFIG_KEXEC |
563 | .kexec_cpu_down = pseries_kexec_cpu_down, | ||
564 | .machine_kexec = default_machine_kexec, | 576 | .machine_kexec = default_machine_kexec, |
565 | .machine_kexec_prepare = default_machine_kexec_prepare, | 577 | .machine_kexec_prepare = default_machine_kexec_prepare, |
566 | .machine_crash_shutdown = default_machine_crash_shutdown, | 578 | .machine_crash_shutdown = default_machine_crash_shutdown, |
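The rewritten pseries_8259_cascade() above (and pmac_u3_cascade() earlier) show the flow-handler idiom this patch settles on: fetch at most one interrupt from the secondary PIC, dispatch it, then EOI the cascade on the parent. A generic sketch of that idiom follows (not part of the patch); my_pic_get_irq() is a hypothetical stand-in for i8259_irq()/mpic_get_one_irq().

/* Sketch only, not part of the patch: the general shape of a chained
 * (cascade) handler under the new flow-handler scheme.
 */
static void example_cascade(unsigned int irq, struct irq_desc *desc,
                            struct pt_regs *regs)
{
        unsigned int cascade_irq = my_pic_get_irq(regs);   /* hypothetical */

        /* dispatch the child interrupt, if the secondary PIC had one */
        if (cascade_irq != NO_IRQ)
                generic_handle_irq(cascade_irq, regs);

        /* EOI the cascade input on the parent controller, exactly once */
        desc->chip->eoi(irq);
}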
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 4ad144df49c2..ac61098ff401 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
@@ -416,27 +416,12 @@ static struct smp_ops_t pSeries_xics_smp_ops = { | |||
416 | #endif | 416 | #endif |
417 | 417 | ||
418 | /* This is called very early */ | 418 | /* This is called very early */ |
419 | void __init smp_init_pSeries(void) | 419 | static void __init smp_init_pseries(void) |
420 | { | 420 | { |
421 | int i; | 421 | int i; |
422 | 422 | ||
423 | DBG(" -> smp_init_pSeries()\n"); | 423 | DBG(" -> smp_init_pSeries()\n"); |
424 | 424 | ||
425 | switch (ppc64_interrupt_controller) { | ||
426 | #ifdef CONFIG_MPIC | ||
427 | case IC_OPEN_PIC: | ||
428 | smp_ops = &pSeries_mpic_smp_ops; | ||
429 | break; | ||
430 | #endif | ||
431 | #ifdef CONFIG_XICS | ||
432 | case IC_PPC_XIC: | ||
433 | smp_ops = &pSeries_xics_smp_ops; | ||
434 | break; | ||
435 | #endif | ||
436 | default: | ||
437 | panic("Invalid interrupt controller"); | ||
438 | } | ||
439 | |||
440 | #ifdef CONFIG_HOTPLUG_CPU | 425 | #ifdef CONFIG_HOTPLUG_CPU |
441 | smp_ops->cpu_disable = pSeries_cpu_disable; | 426 | smp_ops->cpu_disable = pSeries_cpu_disable; |
442 | smp_ops->cpu_die = pSeries_cpu_die; | 427 | smp_ops->cpu_die = pSeries_cpu_die; |
@@ -471,3 +456,18 @@ void __init smp_init_pSeries(void) | |||
471 | DBG(" <- smp_init_pSeries()\n"); | 456 | DBG(" <- smp_init_pSeries()\n"); |
472 | } | 457 | } |
473 | 458 | ||
459 | #ifdef CONFIG_MPIC | ||
460 | void __init smp_init_pseries_mpic(void) | ||
461 | { | ||
462 | smp_ops = &pSeries_mpic_smp_ops; | ||
463 | |||
464 | smp_init_pseries(); | ||
465 | } | ||
466 | #endif | ||
467 | |||
468 | void __init smp_init_pseries_xics(void) | ||
469 | { | ||
470 | smp_ops = &pSeries_xics_smp_ops; | ||
471 | |||
472 | smp_init_pseries(); | ||
473 | } | ||
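The smp_ops selection that used to key off ppc64_interrupt_controller now lives in per-PIC wrappers that the PIC discovery code in setup.c calls directly. Any further controller flavour would follow the same two-line pattern; the sketch below is not part of the patch and its names are hypothetical.

/* Sketch only, not part of the patch: a wrapper for a hypothetical third
 * interrupt controller, mirroring smp_init_pseries_mpic/_xics above.
 */
void __init smp_init_pseries_examplepic(void)
{
        smp_ops = &pSeries_examplepic_smp_ops;  /* hypothetical ops table */

        smp_init_pseries();                     /* common pSeries SMP setup */
}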
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index c7f04420066d..716972aa9777 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | |||
12 | #undef DEBUG | ||
13 | |||
11 | #include <linux/types.h> | 14 | #include <linux/types.h> |
12 | #include <linux/threads.h> | 15 | #include <linux/threads.h> |
13 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -19,6 +22,7 @@ | |||
19 | #include <linux/gfp.h> | 22 | #include <linux/gfp.h> |
20 | #include <linux/radix-tree.h> | 23 | #include <linux/radix-tree.h> |
21 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
25 | |||
22 | #include <asm/firmware.h> | 26 | #include <asm/firmware.h> |
23 | #include <asm/prom.h> | 27 | #include <asm/prom.h> |
24 | #include <asm/io.h> | 28 | #include <asm/io.h> |
@@ -31,9 +35,6 @@ | |||
31 | 35 | ||
32 | #include "xics.h" | 36 | #include "xics.h" |
33 | 37 | ||
34 | /* This is used to map real irq numbers to virtual */ | ||
35 | static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC); | ||
36 | |||
37 | #define XICS_IPI 2 | 38 | #define XICS_IPI 2 |
38 | #define XICS_IRQ_SPURIOUS 0 | 39 | #define XICS_IRQ_SPURIOUS 0 |
39 | 40 | ||
@@ -64,12 +65,12 @@ struct xics_ipl { | |||
64 | 65 | ||
65 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; | 66 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; |
66 | 67 | ||
67 | static int xics_irq_8259_cascade = 0; | ||
68 | static int xics_irq_8259_cascade_real = 0; | ||
69 | static unsigned int default_server = 0xFF; | 68 | static unsigned int default_server = 0xFF; |
70 | static unsigned int default_distrib_server = 0; | 69 | static unsigned int default_distrib_server = 0; |
71 | static unsigned int interrupt_server_size = 8; | 70 | static unsigned int interrupt_server_size = 8; |
72 | 71 | ||
72 | static struct irq_host *xics_host; | ||
73 | |||
73 | /* | 74 | /* |
74 | * XICS only has a single IPI, so encode the messages per CPU | 75 | * XICS only has a single IPI, so encode the messages per CPU |
75 | */ | 76 | */ |
@@ -85,7 +86,7 @@ static int ibm_int_off; | |||
85 | /* Direct HW low level accessors */ | 86 | /* Direct HW low level accessors */ |
86 | 87 | ||
87 | 88 | ||
88 | static inline int direct_xirr_info_get(int n_cpu) | 89 | static inline unsigned int direct_xirr_info_get(int n_cpu) |
89 | { | 90 | { |
90 | return in_be32(&xics_per_cpu[n_cpu]->xirr.word); | 91 | return in_be32(&xics_per_cpu[n_cpu]->xirr.word); |
91 | } | 92 | } |
@@ -130,7 +131,7 @@ static inline long plpar_xirr(unsigned long *xirr_ret) | |||
130 | return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); | 131 | return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); |
131 | } | 132 | } |
132 | 133 | ||
133 | static inline int lpar_xirr_info_get(int n_cpu) | 134 | static inline unsigned int lpar_xirr_info_get(int n_cpu) |
134 | { | 135 | { |
135 | unsigned long lpar_rc; | 136 | unsigned long lpar_rc; |
136 | unsigned long return_value; | 137 | unsigned long return_value; |
@@ -138,7 +139,7 @@ static inline int lpar_xirr_info_get(int n_cpu) | |||
138 | lpar_rc = plpar_xirr(&return_value); | 139 | lpar_rc = plpar_xirr(&return_value); |
139 | if (lpar_rc != H_SUCCESS) | 140 | if (lpar_rc != H_SUCCESS) |
140 | panic(" bad return code xirr - rc = %lx \n", lpar_rc); | 141 | panic(" bad return code xirr - rc = %lx \n", lpar_rc); |
141 | return (int)return_value; | 142 | return (unsigned int)return_value; |
142 | } | 143 | } |
143 | 144 | ||
144 | static inline void lpar_xirr_info_set(int n_cpu, int value) | 145 | static inline void lpar_xirr_info_set(int n_cpu, int value) |
@@ -175,11 +176,11 @@ static inline void lpar_qirr_info(int n_cpu , u8 value) | |||
175 | 176 | ||
176 | 177 | ||
177 | #ifdef CONFIG_SMP | 178 | #ifdef CONFIG_SMP |
178 | static int get_irq_server(unsigned int irq) | 179 | static int get_irq_server(unsigned int virq) |
179 | { | 180 | { |
180 | unsigned int server; | 181 | unsigned int server; |
181 | /* For the moment only implement delivery to all cpus or one cpu */ | 182 | /* For the moment only implement delivery to all cpus or one cpu */ |
182 | cpumask_t cpumask = irq_desc[irq].affinity; | 183 | cpumask_t cpumask = irq_desc[virq].affinity; |
183 | cpumask_t tmp = CPU_MASK_NONE; | 184 | cpumask_t tmp = CPU_MASK_NONE; |
184 | 185 | ||
185 | if (!distribute_irqs) | 186 | if (!distribute_irqs) |
@@ -200,7 +201,7 @@ static int get_irq_server(unsigned int irq) | |||
200 | 201 | ||
201 | } | 202 | } |
202 | #else | 203 | #else |
203 | static int get_irq_server(unsigned int irq) | 204 | static int get_irq_server(unsigned int virq) |
204 | { | 205 | { |
205 | return default_server; | 206 | return default_server; |
206 | } | 207 | } |
@@ -213,9 +214,11 @@ static void xics_unmask_irq(unsigned int virq) | |||
213 | int call_status; | 214 | int call_status; |
214 | unsigned int server; | 215 | unsigned int server; |
215 | 216 | ||
216 | irq = virt_irq_to_real(irq_offset_down(virq)); | 217 | pr_debug("xics: unmask virq %d\n", virq); |
217 | WARN_ON(irq == NO_IRQ); | 218 | |
218 | if (irq == XICS_IPI || irq == NO_IRQ) | 219 | irq = (unsigned int)irq_map[virq].hwirq; |
220 | pr_debug(" -> map to hwirq 0x%x\n", irq); | ||
221 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
219 | return; | 222 | return; |
220 | 223 | ||
221 | server = get_irq_server(virq); | 224 | server = get_irq_server(virq); |
@@ -267,75 +270,57 @@ static void xics_mask_irq(unsigned int virq) | |||
267 | { | 270 | { |
268 | unsigned int irq; | 271 | unsigned int irq; |
269 | 272 | ||
270 | irq = virt_irq_to_real(irq_offset_down(virq)); | 273 | pr_debug("xics: mask virq %d\n", virq); |
271 | WARN_ON(irq == NO_IRQ); | 274 | |
272 | if (irq != NO_IRQ) | 275 | irq = (unsigned int)irq_map[virq].hwirq; |
273 | xics_mask_real_irq(irq); | 276 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) |
277 | return; | ||
278 | xics_mask_real_irq(irq); | ||
274 | } | 279 | } |
275 | 280 | ||
276 | static void xics_set_irq_revmap(unsigned int virq) | 281 | static unsigned int xics_startup(unsigned int virq) |
277 | { | 282 | { |
278 | unsigned int irq; | 283 | unsigned int irq; |
279 | 284 | ||
280 | irq = irq_offset_down(virq); | 285 | /* force a reverse mapping of the interrupt so it gets in the cache */ |
281 | if (radix_tree_insert(&irq_map, virt_irq_to_real(irq), | 286 | irq = (unsigned int)irq_map[virq].hwirq; |
282 | &virt_irq_to_real_map[irq]) == -ENOMEM) | 287 | irq_radix_revmap(xics_host, irq); |
283 | printk(KERN_CRIT "Out of memory creating real -> virtual" | ||
284 | " IRQ mapping for irq %u (real 0x%x)\n", | ||
285 | virq, virt_irq_to_real(irq)); | ||
286 | } | ||
287 | 288 | ||
288 | static unsigned int xics_startup(unsigned int virq) | 289 | /* unmask it */ |
289 | { | ||
290 | xics_set_irq_revmap(virq); | ||
291 | xics_unmask_irq(virq); | 290 | xics_unmask_irq(virq); |
292 | return 0; | 291 | return 0; |
293 | } | 292 | } |
294 | 293 | ||
295 | static unsigned int real_irq_to_virt(unsigned int real_irq) | 294 | static void xics_eoi_direct(unsigned int virq) |
296 | { | ||
297 | unsigned int *ptr; | ||
298 | |||
299 | ptr = radix_tree_lookup(&irq_map, real_irq); | ||
300 | if (ptr == NULL) | ||
301 | return NO_IRQ; | ||
302 | return ptr - virt_irq_to_real_map; | ||
303 | } | ||
304 | |||
305 | static void xics_eoi_direct(unsigned int irq) | ||
306 | { | 295 | { |
307 | int cpu = smp_processor_id(); | 296 | int cpu = smp_processor_id(); |
297 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
308 | 298 | ||
309 | iosync(); | 299 | iosync(); |
310 | direct_xirr_info_set(cpu, ((0xff << 24) | | 300 | direct_xirr_info_set(cpu, (0xff << 24) | irq); |
311 | (virt_irq_to_real(irq_offset_down(irq))))); | ||
312 | } | 301 | } |
313 | 302 | ||
314 | 303 | ||
315 | static void xics_eoi_lpar(unsigned int irq) | 304 | static void xics_eoi_lpar(unsigned int virq) |
316 | { | 305 | { |
317 | int cpu = smp_processor_id(); | 306 | int cpu = smp_processor_id(); |
307 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
318 | 308 | ||
319 | iosync(); | 309 | iosync(); |
320 | lpar_xirr_info_set(cpu, ((0xff << 24) | | 310 | lpar_xirr_info_set(cpu, (0xff << 24) | irq); |
321 | (virt_irq_to_real(irq_offset_down(irq))))); | ||
322 | |||
323 | } | 311 | } |
324 | 312 | ||
325 | static inline int xics_remap_irq(int vec) | 313 | static inline unsigned int xics_remap_irq(unsigned int vec) |
326 | { | 314 | { |
327 | int irq; | 315 | unsigned int irq; |
328 | 316 | ||
329 | vec &= 0x00ffffff; | 317 | vec &= 0x00ffffff; |
330 | 318 | ||
331 | if (vec == XICS_IRQ_SPURIOUS) | 319 | if (vec == XICS_IRQ_SPURIOUS) |
332 | return NO_IRQ; | 320 | return NO_IRQ; |
333 | 321 | irq = irq_radix_revmap(xics_host, vec); | |
334 | irq = real_irq_to_virt(vec); | ||
335 | if (irq == NO_IRQ) | ||
336 | irq = real_irq_to_virt_slowpath(vec); | ||
337 | if (likely(irq != NO_IRQ)) | 322 | if (likely(irq != NO_IRQ)) |
338 | return irq_offset_up(irq); | 323 | return irq; |
339 | 324 | ||
340 | printk(KERN_ERR "Interrupt %u (real) is invalid," | 325 | printk(KERN_ERR "Interrupt %u (real) is invalid," |
341 | " disabling it.\n", vec); | 326 | " disabling it.\n", vec); |
@@ -343,14 +328,14 @@ static inline int xics_remap_irq(int vec) | |||
343 | return NO_IRQ; | 328 | return NO_IRQ; |
344 | } | 329 | } |
345 | 330 | ||
346 | static int xics_get_irq_direct(struct pt_regs *regs) | 331 | static unsigned int xics_get_irq_direct(struct pt_regs *regs) |
347 | { | 332 | { |
348 | unsigned int cpu = smp_processor_id(); | 333 | unsigned int cpu = smp_processor_id(); |
349 | 334 | ||
350 | return xics_remap_irq(direct_xirr_info_get(cpu)); | 335 | return xics_remap_irq(direct_xirr_info_get(cpu)); |
351 | } | 336 | } |
352 | 337 | ||
353 | static int xics_get_irq_lpar(struct pt_regs *regs) | 338 | static unsigned int xics_get_irq_lpar(struct pt_regs *regs) |
354 | { | 339 | { |
355 | unsigned int cpu = smp_processor_id(); | 340 | unsigned int cpu = smp_processor_id(); |
356 | 341 | ||
@@ -437,8 +422,8 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | |||
437 | unsigned long newmask; | 422 | unsigned long newmask; |
438 | cpumask_t tmp = CPU_MASK_NONE; | 423 | cpumask_t tmp = CPU_MASK_NONE; |
439 | 424 | ||
440 | irq = virt_irq_to_real(irq_offset_down(virq)); | 425 | irq = (unsigned int)irq_map[virq].hwirq; |
441 | if (irq == XICS_IPI || irq == NO_IRQ) | 426 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) |
442 | return; | 427 | return; |
443 | 428 | ||
444 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | 429 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); |
@@ -469,6 +454,24 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | |||
469 | } | 454 | } |
470 | } | 455 | } |
471 | 456 | ||
457 | void xics_setup_cpu(void) | ||
458 | { | ||
459 | int cpu = smp_processor_id(); | ||
460 | |||
461 | xics_set_cpu_priority(cpu, 0xff); | ||
462 | |||
463 | /* | ||
464 | * Put the calling processor into the GIQ. This is really only | ||
465 | * necessary from a secondary thread as the OF start-cpu interface | ||
466 | * performs this function for us on primary threads. | ||
467 | * | ||
468 | * XXX: undo of teardown on kexec needs this too, as may hotplug | ||
469 | */ | ||
470 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | ||
471 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); | ||
472 | } | ||
473 | |||
474 | |||
472 | static struct irq_chip xics_pic_direct = { | 475 | static struct irq_chip xics_pic_direct = { |
473 | .typename = " XICS ", | 476 | .typename = " XICS ", |
474 | .startup = xics_startup, | 477 | .startup = xics_startup, |
@@ -489,90 +492,245 @@ static struct irq_chip xics_pic_lpar = { | |||
489 | }; | 492 | }; |
490 | 493 | ||
491 | 494 | ||
492 | void xics_setup_cpu(void) | 495 | static int xics_host_match(struct irq_host *h, struct device_node *node) |
493 | { | 496 | { |
494 | int cpu = smp_processor_id(); | 497 | /* IBM machines have interrupt parents of various funky types for things |
498 | * like vdevices, events, etc... The trick we use here is to match | ||
499 | * everything here except the legacy 8259 which is compatible "chrp,iic" | ||
500 | */ | ||
501 | return !device_is_compatible(node, "chrp,iic"); | ||
502 | } | ||
495 | 503 | ||
496 | xics_set_cpu_priority(cpu, 0xff); | 504 | static int xics_host_map_direct(struct irq_host *h, unsigned int virq, |
505 | irq_hw_number_t hw, unsigned int flags) | ||
506 | { | ||
507 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; | ||
497 | 508 | ||
498 | /* | 509 | pr_debug("xics: map_direct virq %d, hwirq 0x%lx, flags: 0x%x\n", |
499 | * Put the calling processor into the GIQ. This is really only | 510 | virq, hw, flags); |
500 | * necessary from a secondary thread as the OF start-cpu interface | 511 | |
501 | * performs this function for us on primary threads. | 512 | if (sense && sense != IRQ_TYPE_LEVEL_LOW) |
502 | * | 513 | printk(KERN_WARNING "xics: using unsupported sense 0x%x" |
503 | * XXX: undo of teardown on kexec needs this too, as may hotplug | 514 | " for irq %d (h: 0x%lx)\n", flags, virq, hw); |
515 | |||
516 | get_irq_desc(virq)->status |= IRQ_LEVEL; | ||
517 | set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq); | ||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static int xics_host_map_lpar(struct irq_host *h, unsigned int virq, | ||
522 | irq_hw_number_t hw, unsigned int flags) | ||
523 | { | ||
524 | unsigned int sense = flags & IRQ_TYPE_SENSE_MASK; | ||
525 | |||
526 | pr_debug("xics: map_lpar virq %d, hwirq 0x%lx, flags: 0x%x\n", | ||
527 | virq, hw, flags); | ||
528 | |||
529 | if (sense && sense != IRQ_TYPE_LEVEL_LOW) | ||
530 | printk(KERN_WARNING "xics: using unsupported sense 0x%x" | ||
531 | " for irq %d (h: 0x%lx)\n", flags, virq, hw); | ||
532 | |||
533 | get_irq_desc(virq)->status |= IRQ_LEVEL; | ||
534 | set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq); | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
539 | u32 *intspec, unsigned int intsize, | ||
540 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
541 | |||
542 | { | ||
543 | /* Current xics implementation translates everything | ||
544 | * to level. It is not technically right for MSIs but this | ||
545 | * is irrelevant at this point. We might get smarter in the future | ||
504 | */ | 546 | */ |
505 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 547 | *out_hwirq = intspec[0]; |
506 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); | 548 | *out_flags = IRQ_TYPE_LEVEL_LOW; |
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | static struct irq_host_ops xics_host_direct_ops = { | ||
554 | .match = xics_host_match, | ||
555 | .map = xics_host_map_direct, | ||
556 | .xlate = xics_host_xlate, | ||
557 | }; | ||
558 | |||
559 | static struct irq_host_ops xics_host_lpar_ops = { | ||
560 | .match = xics_host_match, | ||
561 | .map = xics_host_map_lpar, | ||
562 | .xlate = xics_host_xlate, | ||
563 | }; | ||
564 | |||
565 | static void __init xics_init_host(void) | ||
566 | { | ||
567 | struct irq_host_ops *ops; | ||
568 | |||
569 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
570 | ops = &xics_host_lpar_ops; | ||
571 | else | ||
572 | ops = &xics_host_direct_ops; | ||
573 | xics_host = irq_alloc_host(IRQ_HOST_MAP_TREE, 0, ops, | ||
574 | XICS_IRQ_SPURIOUS); | ||
575 | BUG_ON(xics_host == NULL); | ||
576 | irq_set_default_host(xics_host); | ||
507 | } | 577 | } |
508 | 578 | ||
509 | void xics_init_IRQ(void) | 579 | static void __init xics_map_one_cpu(int hw_id, unsigned long addr, |
580 | unsigned long size) | ||
510 | { | 581 | { |
582 | #ifdef CONFIG_SMP | ||
511 | int i; | 583 | int i; |
512 | unsigned long intr_size = 0; | ||
513 | struct device_node *np; | ||
514 | uint *ireg, ilen, indx = 0; | ||
515 | unsigned long intr_base = 0; | ||
516 | struct xics_interrupt_node { | ||
517 | unsigned long addr; | ||
518 | unsigned long size; | ||
519 | } intnodes[NR_CPUS]; | ||
520 | struct irq_chip *chip; | ||
521 | 584 | ||
522 | ppc64_boot_msg(0x20, "XICS Init"); | 585 | /* This may look gross but it's good enough for now; we don't quite |
586 | * have a hard -> linux processor id mapping yet. | ||
587 | */ | ||
588 | for_each_possible_cpu(i) { | ||
589 | if (!cpu_present(i)) | ||
590 | continue; | ||
591 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
592 | xics_per_cpu[i] = ioremap(addr, size); | ||
593 | return; | ||
594 | } | ||
595 | } | ||
596 | #else | ||
597 | if (hw_id != 0) | ||
598 | return; | ||
599 | xics_per_cpu[0] = ioremap(addr, size); | ||
600 | #endif /* CONFIG_SMP */ | ||
601 | } | ||
523 | 602 | ||
524 | ibm_get_xive = rtas_token("ibm,get-xive"); | 603 | static void __init xics_init_one_node(struct device_node *np, |
525 | ibm_set_xive = rtas_token("ibm,set-xive"); | 604 | unsigned int *indx) |
526 | ibm_int_on = rtas_token("ibm,int-on"); | 605 | { |
527 | ibm_int_off = rtas_token("ibm,int-off"); | 606 | unsigned int ilen; |
607 | u32 *ireg; | ||
528 | 608 | ||
529 | np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); | 609 | /* This code makes the theoretically broken assumption that the interrupt |
530 | if (!np) | 610 | * server numbers are the same as the hard CPU numbers. |
531 | panic("xics_init_IRQ: can't find interrupt presentation"); | 611 | * This happens to be the case so far but we are playing with fire... |
612 | * should be fixed one of these days. -BenH. | ||
613 | */ | ||
614 | ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL); | ||
532 | 615 | ||
533 | nextnode: | 616 | /* Does that ever happen? We'll know soon enough... but even good old |
534 | ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL); | 617 | * f80 does have that property. |
618 | */ | ||
619 | WARN_ON(ireg == NULL); | ||
535 | if (ireg) { | 620 | if (ireg) { |
536 | /* | 621 | /* |
537 | * set node starting index for this node | 622 | * set node starting index for this node |
538 | */ | 623 | */ |
539 | indx = *ireg; | 624 | *indx = *ireg; |
540 | } | 625 | } |
541 | 626 | ireg = (u32 *)get_property(np, "reg", &ilen); | |
542 | ireg = (uint *)get_property(np, "reg", &ilen); | ||
543 | if (!ireg) | 627 | if (!ireg) |
544 | panic("xics_init_IRQ: can't find interrupt reg property"); | 628 | panic("xics_init_IRQ: can't find interrupt reg property"); |
545 | 629 | ||
546 | while (ilen) { | 630 | while (ilen >= (4 * sizeof(u32))) { |
547 | intnodes[indx].addr = (unsigned long)*ireg++ << 32; | 631 | unsigned long addr, size; |
548 | ilen -= sizeof(uint); | 632 | |
549 | intnodes[indx].addr |= *ireg++; | 633 | /* XXX Use proper OF parsing code here !!! */ |
550 | ilen -= sizeof(uint); | 634 | addr = (unsigned long)*ireg++ << 32; |
551 | intnodes[indx].size = (unsigned long)*ireg++ << 32; | 635 | ilen -= sizeof(u32); |
552 | ilen -= sizeof(uint); | 636 | addr |= *ireg++; |
553 | intnodes[indx].size |= *ireg++; | 637 | ilen -= sizeof(u32); |
554 | ilen -= sizeof(uint); | 638 | size = (unsigned long)*ireg++ << 32; |
555 | indx++; | 639 | ilen -= sizeof(u32); |
556 | if (indx >= NR_CPUS) break; | 640 | size |= *ireg++; |
641 | ilen -= sizeof(u32); | ||
642 | xics_map_one_cpu(*indx, addr, size); | ||
643 | (*indx)++; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | |||
648 | static void __init xics_setup_8259_cascade(void) | ||
649 | { | ||
650 | struct device_node *np, *old, *found = NULL; | ||
651 | int cascade, naddr; | ||
652 | u32 *addrp; | ||
653 | unsigned long intack = 0; | ||
654 | |||
655 | for_each_node_by_type(np, "interrupt-controller") | ||
656 | if (device_is_compatible(np, "chrp,iic")) { | ||
657 | found = np; | ||
658 | break; | ||
659 | } | ||
660 | if (found == NULL) { | ||
661 | printk(KERN_DEBUG "xics: no ISA interrupt controller\n"); | ||
662 | return; | ||
557 | } | 663 | } |
664 | cascade = irq_of_parse_and_map(found, 0); | ||
665 | if (cascade == NO_IRQ) { | ||
666 | printk(KERN_ERR "xics: failed to map cascade interrupt"); | ||
667 | return; | ||
668 | } | ||
669 | pr_debug("xics: cascade mapped to irq %d\n", cascade); | ||
670 | |||
671 | for (old = of_node_get(found); old != NULL ; old = np) { | ||
672 | np = of_get_parent(old); | ||
673 | of_node_put(old); | ||
674 | if (np == NULL) | ||
675 | break; | ||
676 | if (strcmp(np->name, "pci") != 0) | ||
677 | continue; | ||
678 | addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL); | ||
679 | if (addrp == NULL) | ||
680 | continue; | ||
681 | naddr = prom_n_addr_cells(np); | ||
682 | intack = addrp[naddr-1]; | ||
683 | if (naddr > 1) | ||
684 | intack |= ((unsigned long)addrp[naddr-2]) << 32; | ||
685 | } | ||
686 | if (intack) | ||
687 | printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack); | ||
688 | i8259_init(found, intack); | ||
689 | of_node_put(found); | ||
690 | set_irq_chained_handler(cascade, pseries_8259_cascade); | ||
691 | } | ||
558 | 692 | ||
559 | np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation"); | 693 | void __init xics_init_IRQ(void) |
560 | if ((indx < NR_CPUS) && np) goto nextnode; | 694 | { |
695 | int i; | ||
696 | struct device_node *np; | ||
697 | u32 *ireg, ilen, indx = 0; | ||
698 | int found = 0; | ||
699 | |||
700 | ppc64_boot_msg(0x20, "XICS Init"); | ||
701 | |||
702 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
703 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
704 | ibm_int_on = rtas_token("ibm,int-on"); | ||
705 | ibm_int_off = rtas_token("ibm,int-off"); | ||
706 | |||
707 | for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { | ||
708 | found = 1; | ||
709 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
710 | break; | ||
711 | xics_init_one_node(np, &indx); | ||
712 | } | ||
713 | if (found == 0) | ||
714 | return; | ||
715 | |||
716 | xics_init_host(); | ||
561 | 717 | ||
562 | /* Find the server numbers for the boot cpu. */ | 718 | /* Find the server numbers for the boot cpu. */ |
563 | for (np = of_find_node_by_type(NULL, "cpu"); | 719 | for (np = of_find_node_by_type(NULL, "cpu"); |
564 | np; | 720 | np; |
565 | np = of_find_node_by_type(np, "cpu")) { | 721 | np = of_find_node_by_type(np, "cpu")) { |
566 | ireg = (uint *)get_property(np, "reg", &ilen); | 722 | ireg = (u32 *)get_property(np, "reg", &ilen); |
567 | if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { | 723 | if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { |
568 | ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", | 724 | ireg = (u32 *)get_property(np, |
569 | &ilen); | 725 | "ibm,ppc-interrupt-gserver#s", |
726 | &ilen); | ||
570 | i = ilen / sizeof(int); | 727 | i = ilen / sizeof(int); |
571 | if (ireg && i > 0) { | 728 | if (ireg && i > 0) { |
572 | default_server = ireg[0]; | 729 | default_server = ireg[0]; |
573 | default_distrib_server = ireg[i-1]; /* take last element */ | 730 | /* take last element */ |
731 | default_distrib_server = ireg[i-1]; | ||
574 | } | 732 | } |
575 | ireg = (uint *)get_property(np, | 733 | ireg = (u32 *)get_property(np, |
576 | "ibm,interrupt-server#-size", NULL); | 734 | "ibm,interrupt-server#-size", NULL); |
577 | if (ireg) | 735 | if (ireg) |
578 | interrupt_server_size = *ireg; | 736 | interrupt_server_size = *ireg; |
@@ -581,102 +739,48 @@ nextnode: | |||
581 | } | 739 | } |
582 | of_node_put(np); | 740 | of_node_put(np); |
583 | 741 | ||
584 | intr_base = intnodes[0].addr; | 742 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
585 | intr_size = intnodes[0].size; | 743 | ppc_md.get_irq = xics_get_irq_lpar; |
586 | 744 | else | |
587 | if (firmware_has_feature(FW_FEATURE_LPAR)) { | ||
588 | ppc_md.get_irq = xics_get_irq_lpar; | ||
589 | chip = &xics_pic_lpar; | ||
590 | } else { | ||
591 | #ifdef CONFIG_SMP | ||
592 | for_each_possible_cpu(i) { | ||
593 | int hard_id; | ||
594 | |||
595 | /* FIXME: Do this dynamically! --RR */ | ||
596 | if (!cpu_present(i)) | ||
597 | continue; | ||
598 | |||
599 | hard_id = get_hard_smp_processor_id(i); | ||
600 | xics_per_cpu[i] = ioremap(intnodes[hard_id].addr, | ||
601 | intnodes[hard_id].size); | ||
602 | } | ||
603 | #else | ||
604 | xics_per_cpu[0] = ioremap(intr_base, intr_size); | ||
605 | #endif /* CONFIG_SMP */ | ||
606 | ppc_md.get_irq = xics_get_irq_direct; | 745 | ppc_md.get_irq = xics_get_irq_direct; |
607 | chip = &xics_pic_direct; | ||
608 | |||
609 | } | ||
610 | |||
611 | for (i = irq_offset_value(); i < NR_IRQS; ++i) { | ||
612 | /* All IRQs on XICS are level for now. MSI code may want to modify | ||
613 | * that for reporting purposes | ||
614 | */ | ||
615 | get_irq_desc(i)->status |= IRQ_LEVEL; | ||
616 | set_irq_chip_and_handler(i, chip, handle_fasteoi_irq); | ||
617 | } | ||
618 | 746 | ||
619 | xics_setup_cpu(); | 747 | xics_setup_cpu(); |
620 | 748 | ||
621 | ppc64_boot_msg(0x21, "XICS Done"); | 749 | xics_setup_8259_cascade(); |
622 | } | ||
623 | 750 | ||
624 | static int xics_setup_8259_cascade(void) | 751 | ppc64_boot_msg(0x21, "XICS Done"); |
625 | { | ||
626 | struct device_node *np; | ||
627 | uint *ireg; | ||
628 | |||
629 | np = of_find_node_by_type(NULL, "interrupt-controller"); | ||
630 | if (np == NULL) { | ||
631 | printk(KERN_WARNING "xics: no ISA interrupt controller\n"); | ||
632 | xics_irq_8259_cascade_real = -1; | ||
633 | xics_irq_8259_cascade = -1; | ||
634 | return 0; | ||
635 | } | ||
636 | |||
637 | ireg = (uint *) get_property(np, "interrupts", NULL); | ||
638 | if (!ireg) | ||
639 | panic("xics_init_IRQ: can't find ISA interrupts property"); | ||
640 | |||
641 | xics_irq_8259_cascade_real = *ireg; | ||
642 | xics_irq_8259_cascade = irq_offset_up | ||
643 | (virt_irq_create_mapping(xics_irq_8259_cascade_real)); | ||
644 | i8259_init(0, 0); | ||
645 | of_node_put(np); | ||
646 | |||
647 | xics_set_irq_revmap(xics_irq_8259_cascade); | ||
648 | set_irq_chained_handler(xics_irq_8259_cascade, pSeries_8259_cascade); | ||
649 | |||
650 | return 0; | ||
651 | } | 752 | } |
652 | arch_initcall(xics_setup_8259_cascade); | ||
653 | 753 | ||
654 | 754 | ||
655 | #ifdef CONFIG_SMP | 755 | #ifdef CONFIG_SMP |
656 | void xics_request_IPIs(void) | 756 | void xics_request_IPIs(void) |
657 | { | 757 | { |
658 | virt_irq_to_real_map[XICS_IPI] = XICS_IPI; | 758 | unsigned int ipi; |
759 | |||
760 | ipi = irq_create_mapping(xics_host, XICS_IPI, 0); | ||
761 | BUG_ON(ipi == NO_IRQ); | ||
659 | 762 | ||
660 | /* | 763 | /* |
661 | * IPIs are marked IRQF_DISABLED as they must run with irqs | 764 | * IPIs are marked IRQF_DISABLED as they must run with irqs |
662 | * disabled | 765 | * disabled |
663 | */ | 766 | */ |
664 | set_irq_handler(irq_offset_up(XICS_IPI), handle_percpu_irq); | 767 | set_irq_handler(ipi, handle_percpu_irq); |
665 | if (firmware_has_feature(FW_FEATURE_LPAR)) | 768 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
666 | request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_lpar, | 769 | request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED, |
667 | SA_INTERRUPT, "IPI", NULL); | 770 | "IPI", NULL); |
668 | else | 771 | else |
669 | request_irq(irq_offset_up(XICS_IPI), xics_ipi_action_direct, | 772 | request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED, |
670 | SA_INTERRUPT, "IPI", NULL); | 773 | "IPI", NULL); |
671 | } | 774 | } |
672 | #endif /* CONFIG_SMP */ | 775 | #endif /* CONFIG_SMP */ |
673 | 776 | ||
674 | void xics_teardown_cpu(int secondary) | 777 | void xics_teardown_cpu(int secondary) |
675 | { | 778 | { |
676 | struct irq_desc *desc = get_irq_desc(irq_offset_up(XICS_IPI)); | ||
677 | int cpu = smp_processor_id(); | 779 | int cpu = smp_processor_id(); |
780 | unsigned int ipi; | ||
781 | struct irq_desc *desc; | ||
678 | 782 | ||
679 | xics_set_cpu_priority(cpu, 0); | 783 | xics_set_cpu_priority(cpu, 0); |
680 | 784 | ||
681 | /* | 785 | /* |
682 | * we need to EOI the IPI if we got here from kexec down IPI | 786 | * we need to EOI the IPI if we got here from kexec down IPI |
@@ -685,6 +789,11 @@ void xics_teardown_cpu(int secondary) | |||
685 | * should we be flagging idle loop instead? | 789 | * should we be flagging idle loop instead? |
686 | * or creating some task to be scheduled? | 790 | * or creating some task to be scheduled? |
687 | */ | 791 | */ |
792 | |||
793 | ipi = irq_find_mapping(xics_host, XICS_IPI); | ||
794 | if (ipi == XICS_IRQ_SPURIOUS) | ||
795 | return; | ||
796 | desc = get_irq_desc(ipi); | ||
688 | if (desc->chip && desc->chip->eoi) | 797 | if (desc->chip && desc->chip->eoi) |
689 | desc->chip->eoi(XICS_IPI); | 798 | desc->chip->eoi(ipi); |
690 | 799 | ||
@@ -694,8 +803,8 @@ void xics_teardown_cpu(int secondary) | |||
694 | */ | 803 | */ |
695 | if (secondary) | 804 | if (secondary) |
696 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | 805 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, |
697 | (1UL << interrupt_server_size) - 1 - | 806 | (1UL << interrupt_server_size) - 1 - |
698 | default_distrib_server, 0); | 807 | default_distrib_server, 0); |
699 | } | 808 | } |
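Editor's note: xics_teardown_cpu() now calls irq_find_mapping() rather than irq_create_mapping(), since it only wants a mapping that already exists. Judging by the check above, a hwirq that was never mapped comes back as the host's invalid value, XICS_IRQ_SPURIOUS for this host. A hedged sketch of that lookup, written as if it sat in xics.c next to the code above:

/* Sketch only: find the virq behind an already-mapped hwirq */
static struct irq_desc *example_find_ipi_desc(void)
{
	unsigned int virq = irq_find_mapping(xics_host, XICS_IPI);

	/* unmapped hwirqs come back as the host's invalid value */
	if (virq == XICS_IRQ_SPURIOUS)
		return NULL;
	return get_irq_desc(virq);
}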
700 | 809 | ||
701 | #ifdef CONFIG_HOTPLUG_CPU | 810 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -723,15 +832,15 @@ void xics_migrate_irqs_away(void) | |||
723 | unsigned long flags; | 832 | unsigned long flags; |
724 | 833 | ||
725 | /* We cant set affinity on ISA interrupts */ | 834 | /* We cant set affinity on ISA interrupts */ |
726 | if (virq < irq_offset_value()) | 835 | if (virq < NUM_ISA_INTERRUPTS) |
727 | continue; | 836 | continue; |
728 | 837 | if (irq_map[virq].host != xics_host) | |
729 | desc = get_irq_desc(virq); | 838 | continue; |
730 | irq = virt_irq_to_real(irq_offset_down(virq)); | 839 | irq = (unsigned int)irq_map[virq].hwirq; |
731 | |||
732 | /* We need to get IPIs still. */ | 840 | /* We need to get IPIs still. */ |
733 | if (irq == XICS_IPI || irq == NO_IRQ) | 841 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) |
734 | continue; | 842 | continue; |
843 | desc = get_irq_desc(virq); | ||
735 | 844 | ||
736 | /* We only need to migrate enabled IRQS */ | 845 | /* We only need to migrate enabled IRQS */ |
737 | if (desc == NULL || desc->chip == NULL | 846 | if (desc == NULL || desc->chip == NULL |
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h index 67dedf3514ec..6ee1055b0ffb 100644 --- a/arch/powerpc/platforms/pseries/xics.h +++ b/arch/powerpc/platforms/pseries/xics.h | |||
@@ -31,7 +31,7 @@ struct xics_ipi_struct { | |||
31 | extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; | 31 | extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; |
32 | 32 | ||
33 | struct irq_desc; | 33 | struct irq_desc; |
34 | extern void pSeries_8259_cascade(unsigned int irq, struct irq_desc *desc, | 34 | extern void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, |
35 | struct pt_regs *regs); | 35 | struct pt_regs *regs); |
36 | 36 | ||
37 | #endif /* _POWERPC_KERNEL_XICS_H */ | 37 | #endif /* _POWERPC_KERNEL_XICS_H */ |
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index c2e9465871aa..72c73a6105cd 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c | |||
@@ -6,11 +6,16 @@ | |||
6 | * as published by the Free Software Foundation; either version | 6 | * as published by the Free Software Foundation; either version |
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | */ | 8 | */ |
9 | #undef DEBUG | ||
10 | |||
9 | #include <linux/init.h> | 11 | #include <linux/init.h> |
10 | #include <linux/ioport.h> | 12 | #include <linux/ioport.h> |
11 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel.h> | ||
15 | #include <linux/delay.h> | ||
12 | #include <asm/io.h> | 16 | #include <asm/io.h> |
13 | #include <asm/i8259.h> | 17 | #include <asm/i8259.h> |
18 | #include <asm/prom.h> | ||
14 | 19 | ||
15 | static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */ | 20 | static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */ |
16 | 21 | ||
@@ -20,7 +25,8 @@ static unsigned char cached_8259[2] = { 0xff, 0xff }; | |||
20 | 25 | ||
21 | static DEFINE_SPINLOCK(i8259_lock); | 26 | static DEFINE_SPINLOCK(i8259_lock); |
22 | 27 | ||
23 | static int i8259_pic_irq_offset; | 28 | static struct device_node *i8259_node; |
29 | static struct irq_host *i8259_host; | ||
24 | 30 | ||
25 | /* | 31 | /* |
26 | * Acknowledge the IRQ using either the PCI host bridge's interrupt | 32 | * Acknowledge the IRQ using either the PCI host bridge's interrupt |
@@ -28,16 +34,18 @@ static int i8259_pic_irq_offset; | |||
28 | * which is called. It should be noted that polling is broken on some | 34 | * which is called. It should be noted that polling is broken on some |
29 | * IBM and Motorola PReP boxes so we must use the int-ack feature on them. | 35 | * IBM and Motorola PReP boxes so we must use the int-ack feature on them. |
30 | */ | 36 | */ |
31 | int i8259_irq(struct pt_regs *regs) | 37 | unsigned int i8259_irq(struct pt_regs *regs) |
32 | { | 38 | { |
33 | int irq; | 39 | int irq; |
34 | 40 | int lock = 0; | |
35 | spin_lock(&i8259_lock); | ||
36 | 41 | ||
37 | /* Either int-ack or poll for the IRQ */ | 42 | /* Either int-ack or poll for the IRQ */ |
38 | if (pci_intack) | 43 | if (pci_intack) |
39 | irq = readb(pci_intack); | 44 | irq = readb(pci_intack); |
40 | else { | 45 | else { |
46 | spin_lock(&i8259_lock); | ||
47 | lock = 1; | ||
48 | |||
41 | /* Perform an interrupt acknowledge cycle on controller 1. */ | 49 | /* Perform an interrupt acknowledge cycle on controller 1. */ |
42 | outb(0x0C, 0x20); /* prepare for poll */ | 50 | outb(0x0C, 0x20); /* prepare for poll */ |
43 | irq = inb(0x20) & 7; | 51 | irq = inb(0x20) & 7; |
@@ -62,11 +70,13 @@ int i8259_irq(struct pt_regs *regs) | |||
62 | if (!pci_intack) | 70 | if (!pci_intack) |
63 | outb(0x0B, 0x20); /* ISR register */ | 71 | outb(0x0B, 0x20); /* ISR register */ |
64 | if(~inb(0x20) & 0x80) | 72 | if(~inb(0x20) & 0x80) |
65 | irq = -1; | 73 | irq = NO_IRQ; |
66 | } | 74 | } else if (irq == 0xff) |
75 | irq = NO_IRQ; | ||
67 | 76 | ||
68 | spin_unlock(&i8259_lock); | 77 | if (lock) |
69 | return irq + i8259_pic_irq_offset; | 78 | spin_unlock(&i8259_lock); |
79 | return irq; | ||
70 | } | 80 | } |
71 | 81 | ||
72 | static void i8259_mask_and_ack_irq(unsigned int irq_nr) | 82 | static void i8259_mask_and_ack_irq(unsigned int irq_nr) |
@@ -74,7 +84,6 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr) | |||
74 | unsigned long flags; | 84 | unsigned long flags; |
75 | 85 | ||
76 | spin_lock_irqsave(&i8259_lock, flags); | 86 | spin_lock_irqsave(&i8259_lock, flags); |
77 | irq_nr -= i8259_pic_irq_offset; | ||
78 | if (irq_nr > 7) { | 87 | if (irq_nr > 7) { |
79 | cached_A1 |= 1 << (irq_nr-8); | 88 | cached_A1 |= 1 << (irq_nr-8); |
80 | inb(0xA1); /* DUMMY */ | 89 | inb(0xA1); /* DUMMY */ |
@@ -100,8 +109,9 @@ static void i8259_mask_irq(unsigned int irq_nr) | |||
100 | { | 109 | { |
101 | unsigned long flags; | 110 | unsigned long flags; |
102 | 111 | ||
112 | pr_debug("i8259_mask_irq(%d)\n", irq_nr); | ||
113 | |||
103 | spin_lock_irqsave(&i8259_lock, flags); | 114 | spin_lock_irqsave(&i8259_lock, flags); |
104 | irq_nr -= i8259_pic_irq_offset; | ||
105 | if (irq_nr < 8) | 115 | if (irq_nr < 8) |
106 | cached_21 |= 1 << irq_nr; | 116 | cached_21 |= 1 << irq_nr; |
107 | else | 117 | else |
@@ -114,8 +124,9 @@ static void i8259_unmask_irq(unsigned int irq_nr) | |||
114 | { | 124 | { |
115 | unsigned long flags; | 125 | unsigned long flags; |
116 | 126 | ||
127 | pr_debug("i8259_unmask_irq(%d)\n", irq_nr); | ||
128 | |||
117 | spin_lock_irqsave(&i8259_lock, flags); | 129 | spin_lock_irqsave(&i8259_lock, flags); |
118 | irq_nr -= i8259_pic_irq_offset; | ||
119 | if (irq_nr < 8) | 130 | if (irq_nr < 8) |
120 | cached_21 &= ~(1 << irq_nr); | 131 | cached_21 &= ~(1 << irq_nr); |
121 | else | 132 | else |
@@ -152,25 +163,84 @@ static struct resource pic_edgectrl_iores = { | |||
152 | .flags = IORESOURCE_BUSY, | 163 | .flags = IORESOURCE_BUSY, |
153 | }; | 164 | }; |
154 | 165 | ||
155 | static struct irqaction i8259_irqaction = { | 166 | static int i8259_host_match(struct irq_host *h, struct device_node *node) |
156 | .handler = no_action, | 167 | { |
157 | .flags = IRQF_DISABLED, | 168 | return i8259_node == NULL || i8259_node == node; |
158 | .mask = CPU_MASK_NONE, | 169 | } |
159 | .name = "82c59 secondary cascade", | 170 | |
171 | static int i8259_host_map(struct irq_host *h, unsigned int virq, | ||
172 | irq_hw_number_t hw, unsigned int flags) | ||
173 | { | ||
174 | pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); | ||
175 | |||
176 | /* We block the internal cascade */ | ||
177 | if (hw == 2) | ||
178 | get_irq_desc(virq)->status |= IRQ_NOREQUEST; | ||
179 | |||
180 | /* We use the level stuff only for now, we might want to | ||
181 | * be more cautious here but that works for now | ||
182 | */ | ||
183 | get_irq_desc(virq)->status |= IRQ_LEVEL; | ||
184 | set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq); | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static void i8259_host_unmap(struct irq_host *h, unsigned int virq) | ||
189 | { | ||
190 | /* Make sure irq is masked in hardware */ | ||
191 | i8259_mask_irq(virq); | ||
192 | |||
193 | /* remove chip and handler */ | ||
194 | set_irq_chip_and_handler(virq, NULL, NULL); | ||
195 | |||
196 | /* Make sure it's completed */ | ||
197 | synchronize_irq(virq); | ||
198 | } | ||
199 | |||
200 | static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, | ||
201 | u32 *intspec, unsigned int intsize, | ||
202 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
203 | { | ||
204 | static unsigned char map_isa_senses[4] = { | ||
205 | IRQ_TYPE_LEVEL_LOW, | ||
206 | IRQ_TYPE_LEVEL_HIGH, | ||
207 | IRQ_TYPE_EDGE_FALLING, | ||
208 | IRQ_TYPE_EDGE_RISING, | ||
209 | }; | ||
210 | |||
211 | *out_hwirq = intspec[0]; | ||
212 | if (intsize > 1 && intspec[1] < 4) | ||
213 | *out_flags = map_isa_senses[intspec[1]]; | ||
214 | else | ||
215 | *out_flags = IRQ_TYPE_NONE; | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static struct irq_host_ops i8259_host_ops = { | ||
221 | .match = i8259_host_match, | ||
222 | .map = i8259_host_map, | ||
223 | .unmap = i8259_host_unmap, | ||
224 | .xlate = i8259_host_xlate, | ||
160 | }; | 225 | }; |
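Editor's note: these host ops replace the old fixed irq offset: .xlate decodes a device-tree interrupt specifier into a hardware number plus sense flags, and .map attaches the chip and flow handler when a source is first used. As an illustration (not part of the patch) of how the xlate above behaves, written as if it sat in i8259.c where the static function is visible, a two-cell ISA specifier <3 0> decodes to hwirq 3 with a low-level sense:

/* Illustration only */
static void example_isa_xlate(void)
{
	u32 intspec[2] = { 3, 0 };	/* IRQ 3, sense code 0 */
	irq_hw_number_t hwirq;
	unsigned int flags;

	i8259_host_xlate(i8259_host, NULL, intspec, 2, &hwirq, &flags);
	/* hwirq == 3, flags == IRQ_TYPE_LEVEL_LOW */
}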
161 | 226 | ||
162 | /* | 227 | /**** |
163 | * i8259_init() | 228 | * i8259_init - Initialize the legacy controller |
164 | * intack_addr - PCI interrupt acknowledge (real) address which will return | 229 | * @node: device node of the legacy PIC (can be NULL, but then, it will match |
165 | * the active irq from the 8259 | 230 | * all interrupts, so beware) |
231 | * @intack_addr: PCI interrupt acknowledge (real) address which will return | ||
232 | * the active irq from the 8259 | ||
166 | */ | 233 | */ |
167 | void __init i8259_init(unsigned long intack_addr, int offset) | 234 | void i8259_init(struct device_node *node, unsigned long intack_addr) |
168 | { | 235 | { |
169 | unsigned long flags; | 236 | unsigned long flags; |
170 | int i; | ||
171 | 237 | ||
238 | /* initialize the controller */ | ||
172 | spin_lock_irqsave(&i8259_lock, flags); | 239 | spin_lock_irqsave(&i8259_lock, flags); |
173 | i8259_pic_irq_offset = offset; | 240 | |
241 | /* Mask all first */ | ||
242 | outb(0xff, 0xA1); | ||
243 | outb(0xff, 0x21); | ||
174 | 244 | ||
175 | /* init master interrupt controller */ | 245 | /* init master interrupt controller */ |
176 | outb(0x11, 0x20); /* Start init sequence */ | 246 | outb(0x11, 0x20); /* Start init sequence */ |
@@ -184,24 +254,36 @@ void __init i8259_init(unsigned long intack_addr, int offset) | |||
184 | outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */ | 254 | outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */ |
185 | outb(0x01, 0xA1); /* Select 8086 mode */ | 255 | outb(0x01, 0xA1); /* Select 8086 mode */ |
186 | 256 | ||
257 | /* That thing is slow */ | ||
258 | udelay(100); | ||
259 | |||
187 | /* always read ISR */ | 260 | /* always read ISR */ |
188 | outb(0x0B, 0x20); | 261 | outb(0x0B, 0x20); |
189 | outb(0x0B, 0xA0); | 262 | outb(0x0B, 0xA0); |
190 | 263 | ||
191 | /* Mask all interrupts */ | 264 | /* Unmask the internal cascade */ |
265 | cached_21 &= ~(1 << 2); | ||
266 | |||
267 | /* Set interrupt masks */ | ||
192 | outb(cached_A1, 0xA1); | 268 | outb(cached_A1, 0xA1); |
193 | outb(cached_21, 0x21); | 269 | outb(cached_21, 0x21); |
194 | 270 | ||
195 | spin_unlock_irqrestore(&i8259_lock, flags); | 271 | spin_unlock_irqrestore(&i8259_lock, flags); |
196 | 272 | ||
197 | for (i = 0; i < NUM_ISA_INTERRUPTS; ++i) { | 273 | /* create a legacy host */ |
198 | set_irq_chip_and_handler(offset + i, &i8259_pic, | 274 | if (node) |
199 | handle_level_irq); | 275 | i8259_node = of_node_get(node); |
200 | irq_desc[offset + i].status |= IRQ_LEVEL; | 276 | i8259_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &i8259_host_ops, 0); |
277 | if (i8259_host == NULL) { | ||
278 | printk(KERN_ERR "i8259: failed to allocate irq host !\n"); | ||
279 | return; | ||
201 | } | 280 | } |
202 | 281 | ||
203 | /* reserve our resources */ | 282 | /* reserve our resources */ |
204 | setup_irq(offset + 2, &i8259_irqaction); | 283 | /* XXX should we continue doing that ? it seems to cause problems |
284 | * with further requesting of PCI IO resources for that range... | ||
285 | * need to look into it. | ||
286 | */ | ||
205 | request_resource(&ioport_resource, &pic1_iores); | 287 | request_resource(&ioport_resource, &pic1_iores); |
206 | request_resource(&ioport_resource, &pic2_iores); | 288 | request_resource(&ioport_resource, &pic2_iores); |
207 | request_resource(&ioport_resource, &pic_edgectrl_iores); | 289 | request_resource(&ioport_resource, &pic_edgectrl_iores); |
@@ -209,4 +291,5 @@ void __init i8259_init(unsigned long intack_addr, int offset) | |||
209 | if (intack_addr != 0) | 291 | if (intack_addr != 0) |
210 | pci_intack = ioremap(intack_addr, 1); | 292 | pci_intack = ioremap(intack_addr, 1); |
211 | 293 | ||
294 | printk(KERN_INFO "i8259 legacy interrupt controller initialized\n"); | ||
212 | } | 295 | } |
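Editor's note: with the offset argument gone, a platform now passes i8259_init() the PIC's device node (or NULL, which makes the legacy host match everything) plus an optional interrupt-acknowledge address, and clients resolve their interrupts through the host. A hedged sketch of a caller; the node lookup below is purely illustrative, real platforms locate their own ISA PIC node.

#include <asm/i8259.h>
#include <asm/prom.h>

/* Sketch only */
static void __init example_setup_isa_pic(void)
{
	struct device_node *np;

	np = of_find_node_by_type(NULL, "interrupt-controller");
	if (np == NULL)
		return;

	/* 0: no PCI interrupt-acknowledge address on this board */
	i8259_init(np, 0);
	of_node_put(np);	/* i8259_init() takes its own reference */
}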
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 9a95f16c19a5..7d31d7cc392d 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -340,27 +340,19 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic) | |||
340 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | 340 | #endif /* CONFIG_MPIC_BROKEN_U3 */ |
341 | 341 | ||
342 | 342 | ||
343 | #define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | ||
344 | |||
343 | /* Find an mpic associated with a given linux interrupt */ | 345 | /* Find an mpic associated with a given linux interrupt */ |
344 | static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi) | 346 | static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi) |
345 | { | 347 | { |
346 | struct mpic *mpic = mpics; | 348 | unsigned int src = mpic_irq_to_hw(irq); |
347 | 349 | ||
348 | while(mpic) { | 350 | if (irq < NUM_ISA_INTERRUPTS) |
349 | /* search IPIs first since they may override the main interrupts */ | 351 | return NULL; |
350 | if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) { | 352 | if (is_ipi) |
351 | if (is_ipi) | 353 | *is_ipi = (src >= MPIC_VEC_IPI_0 && src <= MPIC_VEC_IPI_3); |
352 | *is_ipi = 1; | 354 | |
353 | return mpic; | 355 | return irq_desc[irq].chip_data; |
354 | } | ||
355 | if (irq >= mpic->irq_offset && | ||
356 | irq < (mpic->irq_offset + mpic->irq_count)) { | ||
357 | if (is_ipi) | ||
358 | *is_ipi = 0; | ||
359 | return mpic; | ||
360 | } | ||
361 | mpic = mpic -> next; | ||
362 | } | ||
363 | return NULL; | ||
364 | } | 356 | } |
365 | 357 | ||
366 | /* Convert a cpu mask from logical to physical cpu numbers. */ | 358 | /* Convert a cpu mask from logical to physical cpu numbers. */ |
@@ -398,9 +390,7 @@ static inline void mpic_eoi(struct mpic *mpic) | |||
398 | #ifdef CONFIG_SMP | 390 | #ifdef CONFIG_SMP |
399 | static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | 391 | static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) |
400 | { | 392 | { |
401 | struct mpic *mpic = dev_id; | 393 | smp_message_recv(mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0, regs); |
402 | |||
403 | smp_message_recv(irq - mpic->ipi_offset, regs); | ||
404 | return IRQ_HANDLED; | 394 | return IRQ_HANDLED; |
405 | } | 395 | } |
406 | #endif /* CONFIG_SMP */ | 396 | #endif /* CONFIG_SMP */ |
@@ -414,7 +404,7 @@ static void mpic_unmask_irq(unsigned int irq) | |||
414 | { | 404 | { |
415 | unsigned int loops = 100000; | 405 | unsigned int loops = 100000; |
416 | struct mpic *mpic = mpic_from_irq(irq); | 406 | struct mpic *mpic = mpic_from_irq(irq); |
417 | unsigned int src = irq - mpic->irq_offset; | 407 | unsigned int src = mpic_irq_to_hw(irq); |
418 | 408 | ||
419 | DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); | 409 | DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); |
420 | 410 | ||
@@ -435,7 +425,7 @@ static void mpic_mask_irq(unsigned int irq) | |||
435 | { | 425 | { |
436 | unsigned int loops = 100000; | 426 | unsigned int loops = 100000; |
437 | struct mpic *mpic = mpic_from_irq(irq); | 427 | struct mpic *mpic = mpic_from_irq(irq); |
438 | unsigned int src = irq - mpic->irq_offset; | 428 | unsigned int src = mpic_irq_to_hw(irq); |
439 | 429 | ||
440 | DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); | 430 | DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); |
441 | 431 | ||
@@ -472,7 +462,7 @@ static void mpic_end_irq(unsigned int irq) | |||
472 | static void mpic_unmask_ht_irq(unsigned int irq) | 462 | static void mpic_unmask_ht_irq(unsigned int irq) |
473 | { | 463 | { |
474 | struct mpic *mpic = mpic_from_irq(irq); | 464 | struct mpic *mpic = mpic_from_irq(irq); |
475 | unsigned int src = irq - mpic->irq_offset; | 465 | unsigned int src = mpic_irq_to_hw(irq); |
476 | 466 | ||
477 | mpic_unmask_irq(irq); | 467 | mpic_unmask_irq(irq); |
478 | 468 | ||
@@ -483,7 +473,7 @@ static void mpic_unmask_ht_irq(unsigned int irq) | |||
483 | static unsigned int mpic_startup_ht_irq(unsigned int irq) | 473 | static unsigned int mpic_startup_ht_irq(unsigned int irq) |
484 | { | 474 | { |
485 | struct mpic *mpic = mpic_from_irq(irq); | 475 | struct mpic *mpic = mpic_from_irq(irq); |
486 | unsigned int src = irq - mpic->irq_offset; | 476 | unsigned int src = mpic_irq_to_hw(irq); |
487 | 477 | ||
488 | mpic_unmask_irq(irq); | 478 | mpic_unmask_irq(irq); |
489 | mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status); | 479 | mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status); |
@@ -494,7 +484,7 @@ static unsigned int mpic_startup_ht_irq(unsigned int irq) | |||
494 | static void mpic_shutdown_ht_irq(unsigned int irq) | 484 | static void mpic_shutdown_ht_irq(unsigned int irq) |
495 | { | 485 | { |
496 | struct mpic *mpic = mpic_from_irq(irq); | 486 | struct mpic *mpic = mpic_from_irq(irq); |
497 | unsigned int src = irq - mpic->irq_offset; | 487 | unsigned int src = mpic_irq_to_hw(irq); |
498 | 488 | ||
499 | mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status); | 489 | mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status); |
500 | mpic_mask_irq(irq); | 490 | mpic_mask_irq(irq); |
@@ -503,7 +493,7 @@ static void mpic_shutdown_ht_irq(unsigned int irq) | |||
503 | static void mpic_end_ht_irq(unsigned int irq) | 493 | static void mpic_end_ht_irq(unsigned int irq) |
504 | { | 494 | { |
505 | struct mpic *mpic = mpic_from_irq(irq); | 495 | struct mpic *mpic = mpic_from_irq(irq); |
506 | unsigned int src = irq - mpic->irq_offset; | 496 | unsigned int src = mpic_irq_to_hw(irq); |
507 | 497 | ||
508 | #ifdef DEBUG_IRQ | 498 | #ifdef DEBUG_IRQ |
509 | DBG("%s: end_irq: %d\n", mpic->name, irq); | 499 | DBG("%s: end_irq: %d\n", mpic->name, irq); |
@@ -525,7 +515,7 @@ static void mpic_end_ht_irq(unsigned int irq) | |||
525 | static void mpic_unmask_ipi(unsigned int irq) | 515 | static void mpic_unmask_ipi(unsigned int irq) |
526 | { | 516 | { |
527 | struct mpic *mpic = mpic_from_ipi(irq); | 517 | struct mpic *mpic = mpic_from_ipi(irq); |
528 | unsigned int src = irq - mpic->ipi_offset; | 518 | unsigned int src = mpic_irq_to_hw(irq) - MPIC_VEC_IPI_0; |
529 | 519 | ||
530 | DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); | 520 | DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); |
531 | mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); | 521 | mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); |
@@ -555,15 +545,46 @@ static void mpic_end_ipi(unsigned int irq) | |||
555 | static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | 545 | static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) |
556 | { | 546 | { |
557 | struct mpic *mpic = mpic_from_irq(irq); | 547 | struct mpic *mpic = mpic_from_irq(irq); |
548 | unsigned int src = mpic_irq_to_hw(irq); | ||
558 | 549 | ||
559 | cpumask_t tmp; | 550 | cpumask_t tmp; |
560 | 551 | ||
561 | cpus_and(tmp, cpumask, cpu_online_map); | 552 | cpus_and(tmp, cpumask, cpu_online_map); |
562 | 553 | ||
563 | mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION, | 554 | mpic_irq_write(src, MPIC_IRQ_DESTINATION, |
564 | mpic_physmask(cpus_addr(tmp)[0])); | 555 | mpic_physmask(cpus_addr(tmp)[0])); |
565 | } | 556 | } |
566 | 557 | ||
558 | static unsigned int mpic_flags_to_vecpri(unsigned int flags, int *level) | ||
559 | { | ||
560 | unsigned int vecpri; | ||
561 | |||
562 | /* Now convert sense value */ | ||
563 | switch(flags & IRQ_TYPE_SENSE_MASK) { | ||
564 | case IRQ_TYPE_EDGE_RISING: | ||
565 | vecpri = MPIC_VECPRI_SENSE_EDGE | | ||
566 | MPIC_VECPRI_POLARITY_POSITIVE; | ||
567 | *level = 0; | ||
568 | break; | ||
569 | case IRQ_TYPE_EDGE_FALLING: | ||
570 | vecpri = MPIC_VECPRI_SENSE_EDGE | | ||
571 | MPIC_VECPRI_POLARITY_NEGATIVE; | ||
572 | *level = 0; | ||
573 | break; | ||
574 | case IRQ_TYPE_LEVEL_HIGH: | ||
575 | vecpri = MPIC_VECPRI_SENSE_LEVEL | | ||
576 | MPIC_VECPRI_POLARITY_POSITIVE; | ||
577 | *level = 1; | ||
578 | break; | ||
579 | case IRQ_TYPE_LEVEL_LOW: | ||
580 | default: | ||
581 | vecpri = MPIC_VECPRI_SENSE_LEVEL | | ||
582 | MPIC_VECPRI_POLARITY_NEGATIVE; | ||
583 | *level = 1; | ||
584 | } | ||
585 | return vecpri; | ||
586 | } | ||
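Editor's note: mpic_flags_to_vecpri() centralizes the sense/polarity munging that used to be open-coded at init time. A small illustration (values follow the switch above):

/* Illustration only */
static void example_vecpri(void)
{
	unsigned int vecpri;
	int level;

	vecpri = mpic_flags_to_vecpri(IRQ_TYPE_EDGE_FALLING, &level);
	/* vecpri == (MPIC_VECPRI_SENSE_EDGE |
	 *	      MPIC_VECPRI_POLARITY_NEGATIVE), level == 0 */

	vecpri = mpic_flags_to_vecpri(IRQ_TYPE_NONE, &level);
	/* unspecified sense falls back to level/low: level == 1 */
}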
587 | |||
567 | static struct irq_chip mpic_irq_chip = { | 588 | static struct irq_chip mpic_irq_chip = { |
568 | .mask = mpic_mask_irq, | 589 | .mask = mpic_mask_irq, |
569 | .unmask = mpic_unmask_irq, | 590 | .unmask = mpic_unmask_irq, |
@@ -589,19 +610,111 @@ static struct irq_chip mpic_irq_ht_chip = { | |||
589 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | 610 | #endif /* CONFIG_MPIC_BROKEN_U3 */ |
590 | 611 | ||
591 | 612 | ||
613 | static int mpic_host_match(struct irq_host *h, struct device_node *node) | ||
614 | { | ||
615 | struct mpic *mpic = h->host_data; | ||
616 | |||
617 | /* Exact match, unless mpic node is NULL */ | ||
618 | return mpic->of_node == NULL || mpic->of_node == node; | ||
619 | } | ||
620 | |||
621 | static int mpic_host_map(struct irq_host *h, unsigned int virq, | ||
622 | irq_hw_number_t hw, unsigned int flags) | ||
623 | { | ||
624 | struct irq_desc *desc = get_irq_desc(virq); | ||
625 | struct irq_chip *chip; | ||
626 | struct mpic *mpic = h->host_data; | ||
627 | unsigned int vecpri = MPIC_VECPRI_SENSE_LEVEL | | ||
628 | MPIC_VECPRI_POLARITY_NEGATIVE; | ||
629 | int level; | ||
630 | |||
631 | pr_debug("mpic: map virq %d, hwirq 0x%lx, flags: 0x%x\n", | ||
632 | virq, hw, flags); | ||
633 | |||
634 | if (hw == MPIC_VEC_SPURRIOUS) | ||
635 | return -EINVAL; | ||
636 | #ifdef CONFIG_SMP | ||
637 | else if (hw >= MPIC_VEC_IPI_0) { | ||
638 | WARN_ON(!(mpic->flags & MPIC_PRIMARY)); | ||
639 | |||
640 | pr_debug("mpic: mapping as IPI\n"); | ||
641 | set_irq_chip_data(virq, mpic); | ||
642 | set_irq_chip_and_handler(virq, &mpic->hc_ipi, | ||
643 | handle_percpu_irq); | ||
644 | return 0; | ||
645 | } | ||
646 | #endif /* CONFIG_SMP */ | ||
647 | |||
648 | if (hw >= mpic->irq_count) | ||
649 | return -EINVAL; | ||
650 | |||
651 | /* If no sense provided, check default sense array */ | ||
652 | if (((flags & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE) && | ||
653 | mpic->senses && hw < mpic->senses_count) | ||
654 | flags |= mpic->senses[hw]; | ||
655 | |||
656 | vecpri = mpic_flags_to_vecpri(flags, &level); | ||
657 | if (level) | ||
658 | desc->status |= IRQ_LEVEL; | ||
659 | chip = &mpic->hc_irq; | ||
660 | |||
661 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
662 | /* Check for HT interrupts, override vecpri */ | ||
663 | if (mpic_is_ht_interrupt(mpic, hw)) { | ||
664 | vecpri &= ~(MPIC_VECPRI_SENSE_MASK | | ||
665 | MPIC_VECPRI_POLARITY_MASK); | ||
666 | vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; | ||
667 | chip = &mpic->hc_ht_irq; | ||
668 | } | ||
669 | #endif | ||
670 | |||
671 | /* Reconfigure irq */ | ||
672 | vecpri |= MPIC_VECPRI_MASK | hw | (8 << MPIC_VECPRI_PRIORITY_SHIFT); | ||
673 | mpic_irq_write(hw, MPIC_IRQ_VECTOR_PRI, vecpri); | ||
674 | |||
675 | pr_debug("mpic: mapping as IRQ\n"); | ||
676 | |||
677 | set_irq_chip_data(virq, mpic); | ||
678 | set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq); | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, | ||
683 | u32 *intspec, unsigned int intsize, | ||
684 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
685 | |||
686 | { | ||
687 | static unsigned char map_mpic_senses[4] = { | ||
688 | IRQ_TYPE_EDGE_RISING, | ||
689 | IRQ_TYPE_LEVEL_LOW, | ||
690 | IRQ_TYPE_LEVEL_HIGH, | ||
691 | IRQ_TYPE_EDGE_FALLING, | ||
692 | }; | ||
693 | |||
694 | *out_hwirq = intspec[0]; | ||
695 | if (intsize > 1 && intspec[1] < 4) | ||
696 | *out_flags = map_mpic_senses[intspec[1]]; | ||
697 | else | ||
698 | *out_flags = IRQ_TYPE_NONE; | ||
699 | |||
700 | return 0; | ||
701 | } | ||
702 | |||
703 | static struct irq_host_ops mpic_host_ops = { | ||
704 | .match = mpic_host_match, | ||
705 | .map = mpic_host_map, | ||
706 | .xlate = mpic_host_xlate, | ||
707 | }; | ||
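Editor's note: the MPIC's xlate uses its own sense table, which differs from the ISA one above; here cell value 1 means level/low. A worked illustration (not in the patch), written as if it sat in mpic.c where the static xlate is visible:

/* Illustration only */
static void example_mpic_xlate(struct mpic *mpic)
{
	u32 intspec[2] = { 0x10, 1 };
	irq_hw_number_t hwirq;
	unsigned int flags;

	mpic_host_xlate(mpic->irqhost, NULL, intspec, 2, &hwirq, &flags);
	/* hwirq == 0x10, flags == IRQ_TYPE_LEVEL_LOW */
}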
708 | |||
592 | /* | 709 | /* |
593 | * Exported functions | 710 | * Exported functions |
594 | */ | 711 | */ |
595 | 712 | ||
596 | 713 | struct mpic * __init mpic_alloc(struct device_node *node, | |
597 | struct mpic * __init mpic_alloc(unsigned long phys_addr, | 714 | unsigned long phys_addr, |
598 | unsigned int flags, | 715 | unsigned int flags, |
599 | unsigned int isu_size, | 716 | unsigned int isu_size, |
600 | unsigned int irq_offset, | ||
601 | unsigned int irq_count, | 717 | unsigned int irq_count, |
602 | unsigned int ipi_offset, | ||
603 | unsigned char *senses, | ||
604 | unsigned int senses_count, | ||
605 | const char *name) | 718 | const char *name) |
606 | { | 719 | { |
607 | struct mpic *mpic; | 720 | struct mpic *mpic; |
@@ -613,10 +726,19 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr, | |||
613 | if (mpic == NULL) | 726 | if (mpic == NULL) |
614 | return NULL; | 727 | return NULL; |
615 | 728 | ||
616 | |||
617 | memset(mpic, 0, sizeof(struct mpic)); | 729 | memset(mpic, 0, sizeof(struct mpic)); |
618 | mpic->name = name; | 730 | mpic->name = name; |
731 | mpic->of_node = node ? of_node_get(node) : NULL; | ||
619 | 732 | ||
733 | mpic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 256, | ||
734 | &mpic_host_ops, | ||
735 | MPIC_VEC_SPURRIOUS); | ||
736 | if (mpic->irqhost == NULL) { | ||
737 | of_node_put(node); | ||
738 | return NULL; | ||
739 | } | ||
740 | |||
741 | mpic->irqhost->host_data = mpic; | ||
620 | mpic->hc_irq = mpic_irq_chip; | 742 | mpic->hc_irq = mpic_irq_chip; |
621 | mpic->hc_irq.typename = name; | 743 | mpic->hc_irq.typename = name; |
622 | if (flags & MPIC_PRIMARY) | 744 | if (flags & MPIC_PRIMARY) |
@@ -628,18 +750,14 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr, | |||
628 | mpic->hc_ht_irq.set_affinity = mpic_set_affinity; | 750 | mpic->hc_ht_irq.set_affinity = mpic_set_affinity; |
629 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | 751 | #endif /* CONFIG_MPIC_BROKEN_U3 */ |
630 | #ifdef CONFIG_SMP | 752 | #ifdef CONFIG_SMP |
631 | mpic->hc_ipi.typename = name; | ||
632 | mpic->hc_ipi = mpic_ipi_chip; | 753 | mpic->hc_ipi = mpic_ipi_chip; |
754 | mpic->hc_ipi.typename = name; | ||
633 | #endif /* CONFIG_SMP */ | 755 | #endif /* CONFIG_SMP */ |
634 | 756 | ||
635 | mpic->flags = flags; | 757 | mpic->flags = flags; |
636 | mpic->isu_size = isu_size; | 758 | mpic->isu_size = isu_size; |
637 | mpic->irq_offset = irq_offset; | ||
638 | mpic->irq_count = irq_count; | 759 | mpic->irq_count = irq_count; |
639 | mpic->ipi_offset = ipi_offset; | ||
640 | mpic->num_sources = 0; /* so far */ | 760 | mpic->num_sources = 0; /* so far */ |
641 | mpic->senses = senses; | ||
642 | mpic->senses_count = senses_count; | ||
643 | 761 | ||
644 | /* Map the global registers */ | 762 | /* Map the global registers */ |
645 | mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000); | 763 | mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000); |
@@ -707,8 +825,10 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr, | |||
707 | mpic->next = mpics; | 825 | mpic->next = mpics; |
708 | mpics = mpic; | 826 | mpics = mpic; |
709 | 827 | ||
710 | if (flags & MPIC_PRIMARY) | 828 | if (flags & MPIC_PRIMARY) { |
711 | mpic_primary = mpic; | 829 | mpic_primary = mpic; |
830 | irq_set_default_host(mpic->irqhost); | ||
831 | } | ||
712 | 832 | ||
713 | return mpic; | 833 | return mpic; |
714 | } | 834 | } |
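Editor's note: mpic_alloc() now takes the controller's device node and loses the irq/ipi offsets and the sense array arguments; default senses go through mpic_set_default_senses(), which appears a little further down in this diff. A hedged sketch of a platform caller under the new signature, with placeholder sizes that do not describe any real board:

#include <asm/mpic.h>
#include <asm/prom.h>

/* Sketch only */
static struct mpic * __init example_mpic_setup(struct device_node *np,
					       unsigned long physaddr)
{
	struct mpic *mpic;

	mpic = mpic_alloc(np, physaddr, MPIC_PRIMARY,
			  0 /* isu_size: placeholder, no external ISUs */,
			  64 /* irq_count: placeholder */,
			  " EXAMPLE-MPIC ");
	if (mpic == NULL)
		return NULL;

	/* A board with external ISUs would call mpic_assign_isu() here;
	 * default senses now go through mpic_set_default_senses() (added
	 * further down in this diff) instead of mpic_alloc() arguments.
	 */
	mpic_init(mpic);
	return mpic;
}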
@@ -725,11 +845,22 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | |||
725 | mpic->num_sources = isu_first + mpic->isu_size; | 845 | mpic->num_sources = isu_first + mpic->isu_size; |
726 | } | 846 | } |
727 | 847 | ||
848 | void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count) | ||
849 | { | ||
850 | mpic->senses = senses; | ||
851 | mpic->senses_count = count; | ||
852 | } | ||
853 | |||
728 | void __init mpic_init(struct mpic *mpic) | 854 | void __init mpic_init(struct mpic *mpic) |
729 | { | 855 | { |
730 | int i; | 856 | int i; |
731 | 857 | ||
732 | BUG_ON(mpic->num_sources == 0); | 858 | BUG_ON(mpic->num_sources == 0); |
859 | WARN_ON(mpic->num_sources > MPIC_VEC_IPI_0); | ||
860 | |||
861 | /* Sanitize source count */ | ||
862 | if (mpic->num_sources > MPIC_VEC_IPI_0) | ||
863 | mpic->num_sources = MPIC_VEC_IPI_0; | ||
733 | 864 | ||
734 | printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); | 865 | printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); |
735 | 866 | ||
@@ -753,14 +884,6 @@ void __init mpic_init(struct mpic *mpic) | |||
753 | MPIC_VECPRI_MASK | | 884 | MPIC_VECPRI_MASK | |
754 | (10 << MPIC_VECPRI_PRIORITY_SHIFT) | | 885 | (10 << MPIC_VECPRI_PRIORITY_SHIFT) | |
755 | (MPIC_VEC_IPI_0 + i)); | 886 | (MPIC_VEC_IPI_0 + i)); |
756 | #ifdef CONFIG_SMP | ||
757 | if (!(mpic->flags & MPIC_PRIMARY)) | ||
758 | continue; | ||
759 | set_irq_chip_data(mpic->ipi_offset+i, mpic); | ||
760 | set_irq_chip_and_handler(mpic->ipi_offset+i, | ||
761 | &mpic->hc_ipi, | ||
762 | handle_percpu_irq); | ||
763 | #endif /* CONFIG_SMP */ | ||
764 | } | 887 | } |
765 | 888 | ||
766 | /* Initialize interrupt sources */ | 889 | /* Initialize interrupt sources */ |
@@ -777,25 +900,15 @@ void __init mpic_init(struct mpic *mpic) | |||
777 | for (i = 0; i < mpic->num_sources; i++) { | 900 | for (i = 0; i < mpic->num_sources; i++) { |
778 | /* start with vector = source number, and masked */ | 901 | /* start with vector = source number, and masked */ |
779 | u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); | 902 | u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); |
780 | int level = 0; | 903 | int level = 1; |
781 | 904 | ||
782 | /* if it's an IPI, we skip it */ | ||
783 | if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) && | ||
784 | (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4)) | ||
785 | continue; | ||
786 | |||
787 | /* do senses munging */ | 905 | /* do senses munging */ |
788 | if (mpic->senses && i < mpic->senses_count) { | 906 | if (mpic->senses && i < mpic->senses_count) |
789 | if (mpic->senses[i] & IRQ_SENSE_LEVEL) | 907 | vecpri = mpic_flags_to_vecpri(mpic->senses[i], |
790 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; | 908 | &level); |
791 | if (mpic->senses[i] & IRQ_POLARITY_POSITIVE) | 909 | else |
792 | vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; | ||
793 | } else | ||
794 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; | 910 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; |
795 | 911 | ||
796 | /* remember if it was a level interrupts */ | ||
797 | level = (vecpri & MPIC_VECPRI_SENSE_LEVEL); | ||
798 | |||
799 | /* deal with broken U3 */ | 912 | /* deal with broken U3 */ |
800 | if (mpic->flags & MPIC_BROKEN_U3) { | 913 | if (mpic->flags & MPIC_BROKEN_U3) { |
801 | #ifdef CONFIG_MPIC_BROKEN_U3 | 914 | #ifdef CONFIG_MPIC_BROKEN_U3 |
@@ -816,21 +929,6 @@ void __init mpic_init(struct mpic *mpic) | |||
816 | mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); | 929 | mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); |
817 | mpic_irq_write(i, MPIC_IRQ_DESTINATION, | 930 | mpic_irq_write(i, MPIC_IRQ_DESTINATION, |
818 | 1 << hard_smp_processor_id()); | 931 | 1 << hard_smp_processor_id()); |
819 | |||
820 | /* init linux descriptors */ | ||
821 | if (i < mpic->irq_count) { | ||
822 | struct irq_chip *chip = &mpic->hc_irq; | ||
823 | |||
824 | irq_desc[mpic->irq_offset+i].status |= | ||
825 | level ? IRQ_LEVEL : 0; | ||
826 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
827 | if (mpic_is_ht_interrupt(mpic, i)) | ||
828 | chip = &mpic->hc_ht_irq; | ||
829 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
830 | set_irq_chip_data(mpic->irq_offset+i, mpic); | ||
831 | set_irq_chip_and_handler(mpic->irq_offset+i, chip, | ||
832 | handle_fasteoi_irq); | ||
833 | } | ||
834 | } | 932 | } |
835 | 933 | ||
836 | /* Init spurrious vector */ | 934 | /* Init spurrious vector */ |
@@ -871,19 +969,20 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) | |||
871 | { | 969 | { |
872 | int is_ipi; | 970 | int is_ipi; |
873 | struct mpic *mpic = mpic_find(irq, &is_ipi); | 971 | struct mpic *mpic = mpic_find(irq, &is_ipi); |
972 | unsigned int src = mpic_irq_to_hw(irq); | ||
874 | unsigned long flags; | 973 | unsigned long flags; |
875 | u32 reg; | 974 | u32 reg; |
876 | 975 | ||
877 | spin_lock_irqsave(&mpic_lock, flags); | 976 | spin_lock_irqsave(&mpic_lock, flags); |
878 | if (is_ipi) { | 977 | if (is_ipi) { |
879 | reg = mpic_ipi_read(irq - mpic->ipi_offset) & | 978 | reg = mpic_ipi_read(src - MPIC_VEC_IPI_0) & |
880 | ~MPIC_VECPRI_PRIORITY_MASK; | 979 | ~MPIC_VECPRI_PRIORITY_MASK; |
881 | mpic_ipi_write(irq - mpic->ipi_offset, | 980 | mpic_ipi_write(src - MPIC_VEC_IPI_0, |
882 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | 981 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); |
883 | } else { | 982 | } else { |
884 | reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI) | 983 | reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
885 | & ~MPIC_VECPRI_PRIORITY_MASK; | 984 | & ~MPIC_VECPRI_PRIORITY_MASK; |
886 | mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI, | 985 | mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, |
887 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | 986 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); |
888 | } | 987 | } |
889 | spin_unlock_irqrestore(&mpic_lock, flags); | 988 | spin_unlock_irqrestore(&mpic_lock, flags); |
@@ -893,14 +992,15 @@ unsigned int mpic_irq_get_priority(unsigned int irq) | |||
893 | { | 992 | { |
894 | int is_ipi; | 993 | int is_ipi; |
895 | struct mpic *mpic = mpic_find(irq, &is_ipi); | 994 | struct mpic *mpic = mpic_find(irq, &is_ipi); |
995 | unsigned int src = mpic_irq_to_hw(irq); | ||
896 | unsigned long flags; | 996 | unsigned long flags; |
897 | u32 reg; | 997 | u32 reg; |
898 | 998 | ||
899 | spin_lock_irqsave(&mpic_lock, flags); | 999 | spin_lock_irqsave(&mpic_lock, flags); |
900 | if (is_ipi) | 1000 | if (is_ipi) |
901 | reg = mpic_ipi_read(irq - mpic->ipi_offset); | 1001 | reg = mpic_ipi_read(src - MPIC_VEC_IPI_0); |
902 | else | 1002 | else |
903 | reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI); | 1003 | reg = mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI); |
904 | spin_unlock_irqrestore(&mpic_lock, flags); | 1004 | spin_unlock_irqrestore(&mpic_lock, flags); |
905 | return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; | 1005 | return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; |
906 | } | 1006 | } |
@@ -995,29 +1095,20 @@ void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask) | |||
995 | mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); | 1095 | mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); |
996 | } | 1096 | } |
997 | 1097 | ||
998 | int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs) | 1098 | unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs) |
999 | { | 1099 | { |
1000 | u32 irq; | 1100 | u32 src; |
1001 | 1101 | ||
1002 | irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; | 1102 | src = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; |
1003 | #ifdef DEBUG_LOW | 1103 | #ifdef DEBUG_LOW |
1004 | DBG("%s: get_one_irq(): %d\n", mpic->name, irq); | 1104 | DBG("%s: get_one_irq(): %d\n", mpic->name, src); |
1005 | #endif | ||
1006 | if (unlikely(irq == MPIC_VEC_SPURRIOUS)) | ||
1007 | return -1; | ||
1008 | if (irq < MPIC_VEC_IPI_0) { | ||
1009 | #ifdef DEBUG_IRQ | ||
1010 | DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset); | ||
1011 | #endif | 1105 | #endif |
1012 | return irq + mpic->irq_offset; | 1106 | if (unlikely(src == MPIC_VEC_SPURRIOUS)) |
1013 | } | 1107 | return NO_IRQ; |
1014 | #ifdef DEBUG_IPI | 1108 | return irq_linear_revmap(mpic->irqhost, src); |
1015 | DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0); | ||
1016 | #endif | ||
1017 | return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset; | ||
1018 | } | 1109 | } |
1019 | 1110 | ||
1020 | int mpic_get_irq(struct pt_regs *regs) | 1111 | unsigned int mpic_get_irq(struct pt_regs *regs) |
1021 | { | 1112 | { |
1022 | struct mpic *mpic = mpic_primary; | 1113 | struct mpic *mpic = mpic_primary; |
1023 | 1114 | ||
@@ -1031,25 +1122,27 @@ int mpic_get_irq(struct pt_regs *regs) | |||
1031 | void mpic_request_ipis(void) | 1122 | void mpic_request_ipis(void) |
1032 | { | 1123 | { |
1033 | struct mpic *mpic = mpic_primary; | 1124 | struct mpic *mpic = mpic_primary; |
1034 | 1125 | int i; | |
1126 | static char *ipi_names[] = { | ||
1127 | "IPI0 (call function)", | ||
1128 | "IPI1 (reschedule)", | ||
1129 | "IPI2 (unused)", | ||
1130 | "IPI3 (debugger break)", | ||
1131 | }; | ||
1035 | BUG_ON(mpic == NULL); | 1132 | BUG_ON(mpic == NULL); |
1036 | |||
1037 | printk("requesting IPIs ... \n"); | ||
1038 | 1133 | ||
1039 | /* | 1134 | printk(KERN_INFO "mpic: requesting IPIs ... \n"); |
1040 | * IPIs are marked IRQF_DISABLED as they must run with irqs | 1135 | |
1041 | * disabled | 1136 | for (i = 0; i < 4; i++) { |
1042 | */ | 1137 | unsigned int vipi = irq_create_mapping(mpic->irqhost, |
1043 | request_irq(mpic->ipi_offset+0, mpic_ipi_action, IRQF_DISABLED, | 1138 | MPIC_VEC_IPI_0 + i, 0); |
1044 | "IPI0 (call function)", mpic); | 1139 | if (vipi == NO_IRQ) { |
1045 | request_irq(mpic->ipi_offset+1, mpic_ipi_action, IRQF_DISABLED, | 1140 | printk(KERN_ERR "Failed to map IPI %d\n", i); |
1046 | "IPI1 (reschedule)", mpic); | 1141 | break; |
1047 | request_irq(mpic->ipi_offset+2, mpic_ipi_action, IRQF_DISABLED, | 1142 | } |
1048 | "IPI2 (unused)", mpic); | 1143 | request_irq(vipi, mpic_ipi_action, IRQF_DISABLED, |
1049 | request_irq(mpic->ipi_offset+3, mpic_ipi_action, IRQF_DISABLED, | 1144 | ipi_names[i], mpic); |
1050 | "IPI3 (debugger break)", mpic); | 1145 | } |
1051 | |||
1052 | printk("IPIs requested... \n"); | ||
1053 | } | 1146 | } |
1054 | 1147 | ||
1055 | void smp_mpic_message_pass(int target, int msg) | 1148 | void smp_mpic_message_pass(int target, int msg) |
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 8dc205b275e3..56612a2dca6b 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c | |||
@@ -1299,13 +1299,12 @@ static int __init hvsi_console_init(void) | |||
1299 | hp->inbuf_end = hp->inbuf; | 1299 | hp->inbuf_end = hp->inbuf; |
1300 | hp->state = HVSI_CLOSED; | 1300 | hp->state = HVSI_CLOSED; |
1301 | hp->vtermno = *vtermno; | 1301 | hp->vtermno = *vtermno; |
1302 | hp->virq = virt_irq_create_mapping(irq[0]); | 1302 | hp->virq = irq_create_mapping(NULL, irq[0], 0); |
1303 | if (hp->virq == NO_IRQ) { | 1303 | if (hp->virq == NO_IRQ) { |
1304 | printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", | 1304 | printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", |
1305 | __FUNCTION__, hp->virq); | 1305 | __FUNCTION__, irq[0]); |
1306 | continue; | 1306 | continue; |
1307 | } else | 1307 | } |
1308 | hp->virq = irq_offset_up(hp->virq); | ||
1309 | 1308 | ||
1310 | hvsi_count++; | 1309 | hvsi_count++; |
1311 | } | 1310 | } |
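Editor's note: the hvsi conversion above shows the driver-side idiom for code that only has a raw hardware interrupt number and no device node at hand: pass a NULL host, which falls back to whatever default host the platform registered (the MPIC hunk earlier shows irq_set_default_host() doing exactly that for the primary MPIC). A minimal sketch, with the hwirq value treated as opaque and the message text illustrative:

#include <linux/kernel.h>
#include <linux/irq.h>
#include <asm/irq.h>

/* Sketch only */
static unsigned int example_map_raw(u32 hwirq)
{
	unsigned int virq = irq_create_mapping(NULL, hwirq, 0);

	if (virq == NO_IRQ)
		printk(KERN_ERR "example: no mapping for hwirq 0x%x\n", hwirq);
	return virq;	/* usable with request_irq() when != NO_IRQ */
}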
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c index 314fc0830d90..4b08852c35ee 100644 --- a/drivers/macintosh/macio-adb.c +++ b/drivers/macintosh/macio-adb.c | |||
@@ -90,22 +90,12 @@ int macio_init(void) | |||
90 | { | 90 | { |
91 | struct device_node *adbs; | 91 | struct device_node *adbs; |
92 | struct resource r; | 92 | struct resource r; |
93 | unsigned int irq; | ||
93 | 94 | ||
94 | adbs = find_compatible_devices("adb", "chrp,adb0"); | 95 | adbs = find_compatible_devices("adb", "chrp,adb0"); |
95 | if (adbs == 0) | 96 | if (adbs == 0) |
96 | return -ENXIO; | 97 | return -ENXIO; |
97 | 98 | ||
98 | #if 0 | ||
99 | { int i = 0; | ||
100 | |||
101 | printk("macio_adb_init: node = %p, addrs =", adbs->node); | ||
102 | while(!of_address_to_resource(adbs, i, &r)) | ||
103 | printk(" %x(%x)", r.start, r.end - r.start); | ||
104 | printk(", intrs ="); | ||
105 | for (i = 0; i < adbs->n_intrs; ++i) | ||
106 | printk(" %x", adbs->intrs[i].line); | ||
107 | printk("\n"); } | ||
108 | #endif | ||
109 | if (of_address_to_resource(adbs, 0, &r)) | 99 | if (of_address_to_resource(adbs, 0, &r)) |
110 | return -ENXIO; | 100 | return -ENXIO; |
111 | adb = ioremap(r.start, sizeof(struct adb_regs)); | 101 | adb = ioremap(r.start, sizeof(struct adb_regs)); |
@@ -117,10 +107,9 @@ int macio_init(void) | |||
117 | out_8(&adb->active_lo.r, 0xff); | 107 | out_8(&adb->active_lo.r, 0xff); |
118 | out_8(&adb->autopoll.r, APE); | 108 | out_8(&adb->autopoll.r, APE); |
119 | 109 | ||
120 | if (request_irq(adbs->intrs[0].line, macio_adb_interrupt, | 110 | irq = irq_of_parse_and_map(adbs, 0); |
121 | 0, "ADB", (void *)0)) { | 111 | if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) { |
122 | printk(KERN_ERR "ADB: can't get irq %d\n", | 112 | printk(KERN_ERR "ADB: can't get irq %d\n", irq); |
123 | adbs->intrs[0].line); | ||
124 | return -EAGAIN; | 113 | return -EAGAIN; |
125 | } | 114 | } |
126 | out_8(&adb->intr_enb.r, DFB | TAG); | 115 | out_8(&adb->intr_enb.r, DFB | TAG); |
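Editor's note: macio-adb now resolves its interrupt by parsing the device node instead of reading the pre-parsed intrs[] array, and the same three-line conversion recurs across the drivers below. A hedged sketch of the generic form; the handler and "example" name are placeholders:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/prom.h>

/* Sketch only */
static int example_attach(struct device_node *np,
			  irqreturn_t (*handler)(int, void *,
						 struct pt_regs *))
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (irq == NO_IRQ)
		return -ENODEV;
	if (request_irq(irq, handler, 0, "example", NULL))
		return -EAGAIN;
	return 0;
}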
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 40ae7b6a939d..80c0c665b5f6 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c | |||
@@ -280,75 +280,128 @@ static void macio_release_dev(struct device *dev) | |||
280 | static int macio_resource_quirks(struct device_node *np, struct resource *res, | 280 | static int macio_resource_quirks(struct device_node *np, struct resource *res, |
281 | int index) | 281 | int index) |
282 | { | 282 | { |
283 | if (res->flags & IORESOURCE_MEM) { | 283 | /* Only quirks for memory resources for now */ |
284 | /* Grand Central has too large resource 0 on some machines */ | 284 | if ((res->flags & IORESOURCE_MEM) == 0) |
285 | if (index == 0 && !strcmp(np->name, "gc")) | 285 | return 0; |
286 | res->end = res->start + 0x1ffff; | 286 | |
287 | /* Grand Central has too large resource 0 on some machines */ | ||
288 | if (index == 0 && !strcmp(np->name, "gc")) | ||
289 | res->end = res->start + 0x1ffff; | ||
287 | 290 | ||
288 | /* Airport has bogus resource 2 */ | 291 | /* Airport has bogus resource 2 */ |
289 | if (index >= 2 && !strcmp(np->name, "radio")) | 292 | if (index >= 2 && !strcmp(np->name, "radio")) |
290 | return 1; | 293 | return 1; |
291 | 294 | ||
292 | #ifndef CONFIG_PPC64 | 295 | #ifndef CONFIG_PPC64 |
293 | /* DBDMAs may have bogus sizes */ | 296 | /* DBDMAs may have bogus sizes */ |
294 | if ((res->start & 0x0001f000) == 0x00008000) | 297 | if ((res->start & 0x0001f000) == 0x00008000) |
295 | res->end = res->start + 0xff; | 298 | res->end = res->start + 0xff; |
296 | #endif /* CONFIG_PPC64 */ | 299 | #endif /* CONFIG_PPC64 */ |
297 | 300 | ||
298 | /* ESCC parent eats child resources. We could have added a | 301 | /* ESCC parent eats child resources. We could have added a |
299 | * level of hierarchy, but I don't really feel the need | 302 | * level of hierarchy, but I don't really feel the need |
300 | * for it | 303 | * for it |
301 | */ | 304 | */ |
302 | if (!strcmp(np->name, "escc")) | 305 | if (!strcmp(np->name, "escc")) |
303 | return 1; | 306 | return 1; |
304 | 307 | ||
305 | /* ESCC has bogus resources >= 3 */ | 308 | /* ESCC has bogus resources >= 3 */ |
306 | if (index >= 3 && !(strcmp(np->name, "ch-a") && | 309 | if (index >= 3 && !(strcmp(np->name, "ch-a") && |
307 | strcmp(np->name, "ch-b"))) | 310 | strcmp(np->name, "ch-b"))) |
308 | return 1; | 311 | return 1; |
309 | 312 | ||
310 | /* Media bay has too many resources, keep only first one */ | 313 | /* Media bay has too many resources, keep only first one */ |
311 | if (index > 0 && !strcmp(np->name, "media-bay")) | 314 | if (index > 0 && !strcmp(np->name, "media-bay")) |
312 | return 1; | 315 | return 1; |
313 | 316 | ||
314 | /* Some older IDE resources have bogus sizes */ | 317 | /* Some older IDE resources have bogus sizes */ |
315 | if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") && | 318 | if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") && |
316 | strcmp(np->type, "ide") && strcmp(np->type, "ata"))) { | 319 | strcmp(np->type, "ide") && strcmp(np->type, "ata"))) { |
317 | if (index == 0 && (res->end - res->start) > 0xfff) | 320 | if (index == 0 && (res->end - res->start) > 0xfff) |
318 | res->end = res->start + 0xfff; | 321 | res->end = res->start + 0xfff; |
319 | if (index == 1 && (res->end - res->start) > 0xff) | 322 | if (index == 1 && (res->end - res->start) > 0xff) |
320 | res->end = res->start + 0xff; | 323 | res->end = res->start + 0xff; |
321 | } | ||
322 | } | 324 | } |
323 | return 0; | 325 | return 0; |
324 | } | 326 | } |
325 | 327 | ||
328 | static void macio_create_fixup_irq(struct macio_dev *dev, int index, | ||
329 | unsigned int line) | ||
330 | { | ||
331 | unsigned int irq; | ||
326 | 332 | ||
327 | static void macio_setup_interrupts(struct macio_dev *dev) | 333 | irq = irq_create_mapping(NULL, line, 0); |
334 | if (irq != NO_IRQ) { | ||
335 | dev->interrupt[index].start = irq; | ||
336 | dev->interrupt[index].flags = IORESOURCE_IRQ; | ||
337 | dev->interrupt[index].name = dev->ofdev.dev.bus_id; | ||
338 | } | ||
339 | if (dev->n_interrupts <= index) | ||
340 | dev->n_interrupts = index + 1; | ||
341 | } | ||
342 | |||
343 | static void macio_add_missing_resources(struct macio_dev *dev) | ||
328 | { | 344 | { |
329 | struct device_node *np = dev->ofdev.node; | 345 | struct device_node *np = dev->ofdev.node; |
330 | int i,j; | 346 | unsigned int irq_base; |
347 | |||
348 | /* Gatwick has some missing interrupts on child nodes */ | ||
349 | if (dev->bus->chip->type != macio_gatwick) | ||
350 | return; | ||
331 | 351 | ||
332 | /* For now, we use pre-parsed entries in the device-tree for | 352 | /* irq_base is always 64 on gatwick. I have no cleaner way to get |
333 | * interrupt routing and addresses, but we should change that | 353 | * that value from here at this point |
334 | * to dynamically parsed entries and so get rid of most of the | ||
335 | * clutter in struct device_node | ||
336 | */ | 354 | */ |
337 | for (i = j = 0; i < np->n_intrs; i++) { | 355 | irq_base = 64; |
356 | |||
357 | /* Fix SCC */ | ||
358 | if (strcmp(np->name, "ch-a") == 0) { | ||
359 | macio_create_fixup_irq(dev, 0, 15 + irq_base); | ||
360 | macio_create_fixup_irq(dev, 1, 4 + irq_base); | ||
361 | macio_create_fixup_irq(dev, 2, 5 + irq_base); | ||
362 | printk(KERN_INFO "macio: fixed SCC irqs on gatwick\n"); | ||
363 | } | ||
364 | |||
365 | /* Fix media-bay */ | ||
366 | if (strcmp(np->name, "media-bay") == 0) { | ||
367 | macio_create_fixup_irq(dev, 0, 29 + irq_base); | ||
368 | printk(KERN_INFO "macio: fixed media-bay irq on gatwick\n"); | ||
369 | } | ||
370 | |||
371 | /* Fix left media bay children */ | ||
372 | if (dev->media_bay != NULL && strcmp(np->name, "floppy") == 0) { | ||
373 | macio_create_fixup_irq(dev, 0, 19 + irq_base); | ||
374 | macio_create_fixup_irq(dev, 1, 1 + irq_base); | ||
375 | printk(KERN_INFO "macio: fixed left floppy irqs\n"); | ||
376 | } | ||
377 | if (dev->media_bay != NULL && strcasecmp(np->name, "ata4") == 0) { | ||
378 | macio_create_fixup_irq(dev, 0, 14 + irq_base); | ||
379 | macio_create_fixup_irq(dev, 1, 3 + irq_base); | ||
380 | printk(KERN_INFO "macio: fixed left ide irqs\n"); | ||
381 | } | ||
382 | } | ||
383 | |||
384 | static void macio_setup_interrupts(struct macio_dev *dev) | ||
385 | { | ||
386 | struct device_node *np = dev->ofdev.node; | ||
387 | unsigned int irq; | ||
388 | int i = 0, j = 0; | ||
389 | |||
390 | for (;;) { | ||
338 | struct resource *res = &dev->interrupt[j]; | 391 | struct resource *res = &dev->interrupt[j]; |
339 | 392 | ||
340 | if (j >= MACIO_DEV_COUNT_IRQS) | 393 | if (j >= MACIO_DEV_COUNT_IRQS) |
341 | break; | 394 | break; |
342 | res->start = np->intrs[i].line; | 395 | irq = irq_of_parse_and_map(np, i++); |
343 | res->flags = IORESOURCE_IO; | 396 | if (irq == NO_IRQ) |
344 | if (np->intrs[j].sense) | 397 | break; |
345 | res->flags |= IORESOURCE_IRQ_LOWLEVEL; | 398 | res->start = irq; |
346 | else | 399 | res->flags = IORESOURCE_IRQ; |
347 | res->flags |= IORESOURCE_IRQ_HIGHEDGE; | ||
348 | res->name = dev->ofdev.dev.bus_id; | 400 | res->name = dev->ofdev.dev.bus_id; |
349 | if (macio_resource_quirks(np, res, i)) | 401 | if (macio_resource_quirks(np, res, i - 1)) { |
350 | memset(res, 0, sizeof(struct resource)); | 402 | memset(res, 0, sizeof(struct resource)); |
351 | else | 403 | continue; |
404 | } else | ||
352 | j++; | 405 | j++; |
353 | } | 406 | } |
354 | dev->n_interrupts = j; | 407 | dev->n_interrupts = j; |
@@ -445,6 +498,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, | |||
445 | /* Setup interrupts & resources */ | 498 | /* Setup interrupts & resources */ |
446 | macio_setup_interrupts(dev); | 499 | macio_setup_interrupts(dev); |
447 | macio_setup_resources(dev, parent_res); | 500 | macio_setup_resources(dev, parent_res); |
501 | macio_add_missing_resources(dev); | ||
448 | 502 | ||
449 | /* Register with core */ | 503 | /* Register with core */ |
450 | if (of_device_register(&dev->ofdev) != 0) { | 504 | if (of_device_register(&dev->ofdev) != 0) { |
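Editor's note: macio_setup_interrupts() now simply walks interrupt indices until irq_of_parse_and_map() fails, instead of iterating np->n_intrs. A small sketch of that enumeration pattern, assuming only the APIs used above:

/* Sketch only: count the mappable interrupts of a node */
static int example_count_irqs(struct device_node *np)
{
	int i = 0;

	while (irq_of_parse_and_map(np, i) != NO_IRQ)
		i++;
	return i;	/* number of mappable interrupts */
}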
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index ff6d9bfdc3d2..f139a74696fe 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c | |||
@@ -497,8 +497,7 @@ int __init smu_init (void) | |||
497 | smu->doorbell = *data; | 497 | smu->doorbell = *data; |
498 | if (smu->doorbell < 0x50) | 498 | if (smu->doorbell < 0x50) |
499 | smu->doorbell += 0x50; | 499 | smu->doorbell += 0x50; |
500 | if (np->n_intrs > 0) | 500 | smu->db_irq = irq_of_parse_and_map(np, 0); |
501 | smu->db_irq = np->intrs[0].line; | ||
502 | 501 | ||
503 | of_node_put(np); | 502 | of_node_put(np); |
504 | 503 | ||
@@ -515,8 +514,7 @@ int __init smu_init (void) | |||
515 | smu->msg = *data; | 514 | smu->msg = *data; |
516 | if (smu->msg < 0x50) | 515 | if (smu->msg < 0x50) |
517 | smu->msg += 0x50; | 516 | smu->msg += 0x50; |
518 | if (np->n_intrs > 0) | 517 | smu->msg_irq = irq_of_parse_and_map(np, 0); |
519 | smu->msg_irq = np->intrs[0].line; | ||
520 | of_node_put(np); | 518 | of_node_put(np); |
521 | } while(0); | 519 | } while(0); |
522 | 520 | ||
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c index 6501db50fb83..69d5452fd22f 100644 --- a/drivers/macintosh/via-cuda.c +++ b/drivers/macintosh/via-cuda.c | |||
@@ -34,13 +34,6 @@ | |||
34 | static volatile unsigned char __iomem *via; | 34 | static volatile unsigned char __iomem *via; |
35 | static DEFINE_SPINLOCK(cuda_lock); | 35 | static DEFINE_SPINLOCK(cuda_lock); |
36 | 36 | ||
37 | #ifdef CONFIG_MAC | ||
38 | #define CUDA_IRQ IRQ_MAC_ADB | ||
39 | #define eieio() | ||
40 | #else | ||
41 | #define CUDA_IRQ vias->intrs[0].line | ||
42 | #endif | ||
43 | |||
44 | /* VIA registers - spaced 0x200 bytes apart */ | 37 | /* VIA registers - spaced 0x200 bytes apart */ |
45 | #define RS 0x200 /* skip between registers */ | 38 | #define RS 0x200 /* skip between registers */ |
46 | #define B 0 /* B-side data */ | 39 | #define B 0 /* B-side data */ |
@@ -189,11 +182,24 @@ int __init find_via_cuda(void) | |||
189 | 182 | ||
190 | static int __init via_cuda_start(void) | 183 | static int __init via_cuda_start(void) |
191 | { | 184 | { |
185 | unsigned int irq; | ||
186 | |||
192 | if (via == NULL) | 187 | if (via == NULL) |
193 | return -ENODEV; | 188 | return -ENODEV; |
194 | 189 | ||
195 | if (request_irq(CUDA_IRQ, cuda_interrupt, 0, "ADB", cuda_interrupt)) { | 190 | #ifdef CONFIG_MAC |
196 | printk(KERN_ERR "cuda_init: can't get irq %d\n", CUDA_IRQ); | 191 | irq = IRQ_MAC_ADB; |
192 | #else /* CONFIG_MAC */ | ||
193 | irq = irq_of_parse_and_map(vias, 0); | ||
194 | if (irq == NO_IRQ) { | ||
195 | printk(KERN_ERR "via-cuda: can't map interrupts for %s\n", | ||
196 | vias->full_name); | ||
197 | return -ENODEV; | ||
198 | } | ||
199 | #endif /* CONFIG_MAC */ | ||
200 | |||
201 | if (request_irq(irq, cuda_interrupt, 0, "ADB", cuda_interrupt)) { | ||
202 | printk(KERN_ERR "via-cuda: can't request irq %d\n", irq); | ||
197 | return -EAGAIN; | 203 | return -EAGAIN; |
198 | } | 204 | } |
199 | 205 | ||
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index c1193d34ec9e..06ca80bfd6b9 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
@@ -64,10 +64,6 @@ | |||
64 | #include <asm/backlight.h> | 64 | #include <asm/backlight.h> |
65 | #endif | 65 | #endif |
66 | 66 | ||
67 | #ifdef CONFIG_PPC32 | ||
68 | #include <asm/open_pic.h> | ||
69 | #endif | ||
70 | |||
71 | #include "via-pmu-event.h" | 67 | #include "via-pmu-event.h" |
72 | 68 | ||
73 | /* Some compile options */ | 69 | /* Some compile options */ |
@@ -151,7 +147,7 @@ static int pmu_fully_inited = 0; | |||
151 | static int pmu_has_adb; | 147 | static int pmu_has_adb; |
152 | static struct device_node *gpio_node; | 148 | static struct device_node *gpio_node; |
153 | static unsigned char __iomem *gpio_reg = NULL; | 149 | static unsigned char __iomem *gpio_reg = NULL; |
154 | static int gpio_irq = -1; | 150 | static int gpio_irq = NO_IRQ; |
155 | static int gpio_irq_enabled = -1; | 151 | static int gpio_irq_enabled = -1; |
156 | static volatile int pmu_suspended = 0; | 152 | static volatile int pmu_suspended = 0; |
157 | static spinlock_t pmu_lock; | 153 | static spinlock_t pmu_lock; |
@@ -403,22 +399,21 @@ static int __init pmu_init(void) | |||
403 | */ | 399 | */ |
404 | static int __init via_pmu_start(void) | 400 | static int __init via_pmu_start(void) |
405 | { | 401 | { |
402 | unsigned int irq; | ||
403 | |||
406 | if (vias == NULL) | 404 | if (vias == NULL) |
407 | return -ENODEV; | 405 | return -ENODEV; |
408 | 406 | ||
409 | batt_req.complete = 1; | 407 | batt_req.complete = 1; |
410 | 408 | ||
411 | #ifndef CONFIG_PPC_MERGE | 409 | irq = irq_of_parse_and_map(vias, 0); |
412 | if (pmu_kind == PMU_KEYLARGO_BASED) | 410 | if (irq == NO_IRQ) { |
413 | openpic_set_irq_priority(vias->intrs[0].line, | 411 | printk(KERN_ERR "via-pmu: can't map interrupt\n"); |
414 | OPENPIC_PRIORITY_DEFAULT + 1); | 412 | return -ENODEV; |
415 | #endif | 413 | } |
416 | 414 | if (request_irq(irq, via_pmu_interrupt, 0, "VIA-PMU", (void *)0)) { | |
417 | if (request_irq(vias->intrs[0].line, via_pmu_interrupt, 0, "VIA-PMU", | 415 | printk(KERN_ERR "via-pmu: can't request irq %d\n", irq); |
418 | (void *)0)) { | 416 | return -ENODEV; |
419 | printk(KERN_ERR "VIA-PMU: can't get irq %d\n", | ||
420 | vias->intrs[0].line); | ||
421 | return -EAGAIN; | ||
422 | } | 417 | } |
423 | 418 | ||
424 | if (pmu_kind == PMU_KEYLARGO_BASED) { | 419 | if (pmu_kind == PMU_KEYLARGO_BASED) { |
@@ -426,10 +421,10 @@ static int __init via_pmu_start(void) | |||
426 | if (gpio_node == NULL) | 421 | if (gpio_node == NULL) |
427 | gpio_node = of_find_node_by_name(NULL, | 422 | gpio_node = of_find_node_by_name(NULL, |
428 | "pmu-interrupt"); | 423 | "pmu-interrupt"); |
429 | if (gpio_node && gpio_node->n_intrs > 0) | 424 | if (gpio_node) |
430 | gpio_irq = gpio_node->intrs[0].line; | 425 | gpio_irq = irq_of_parse_and_map(gpio_node, 0); |
431 | 426 | ||
432 | if (gpio_irq != -1) { | 427 | if (gpio_irq != NO_IRQ) { |
433 | if (request_irq(gpio_irq, gpio1_interrupt, 0, | 428 | if (request_irq(gpio_irq, gpio1_interrupt, 0, |
434 | "GPIO1 ADB", (void *)0)) | 429 | "GPIO1 ADB", (void *)0)) |
435 | printk(KERN_ERR "pmu: can't get irq %d" | 430 | printk(KERN_ERR "pmu: can't get irq %d" |
diff --git a/drivers/net/mace.c b/drivers/net/mace.c index f2c0bf89f0c7..29e4b5aa6ead 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c | |||
@@ -242,12 +242,12 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i | |||
242 | } | 242 | } |
243 | rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); | 243 | rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); |
244 | if (rc) { | 244 | if (rc) { |
245 | printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line); | 245 | printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr); |
246 | goto err_free_irq; | 246 | goto err_free_irq; |
247 | } | 247 | } |
248 | rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); | 248 | rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); |
249 | if (rc) { | 249 | if (rc) { |
250 | printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line); | 250 | printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr); |
251 | goto err_free_tx_irq; | 251 | goto err_free_tx_irq; |
252 | } | 252 | } |
253 | 253 | ||
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c index 459c0231aef3..bfd2a22759eb 100644 --- a/drivers/serial/pmac_zilog.c +++ b/drivers/serial/pmac_zilog.c | |||
@@ -1443,8 +1443,8 @@ static int __init pmz_init_port(struct uart_pmac_port *uap) | |||
1443 | uap->flags &= ~PMACZILOG_FLAG_HAS_DMA; | 1443 | uap->flags &= ~PMACZILOG_FLAG_HAS_DMA; |
1444 | goto no_dma; | 1444 | goto no_dma; |
1445 | } | 1445 | } |
1446 | uap->tx_dma_irq = np->intrs[1].line; | 1446 | uap->tx_dma_irq = irq_of_parse_and_map(np, 1); |
1447 | uap->rx_dma_irq = np->intrs[2].line; | 1447 | uap->rx_dma_irq = irq_of_parse_and_map(np, 2); |
1448 | } | 1448 | } |
1449 | no_dma: | 1449 | no_dma: |
1450 | 1450 | ||
@@ -1491,7 +1491,7 @@ no_dma: | |||
1491 | * Init remaining bits of "port" structure | 1491 | * Init remaining bits of "port" structure |
1492 | */ | 1492 | */ |
1493 | uap->port.iotype = UPIO_MEM; | 1493 | uap->port.iotype = UPIO_MEM; |
1494 | uap->port.irq = np->intrs[0].line; | 1494 | uap->port.irq = irq_of_parse_and_map(np, 0); |
1495 | uap->port.uartclk = ZS_CLOCK; | 1495 | uap->port.uartclk = ZS_CLOCK; |
1496 | uap->port.fifosize = 1; | 1496 | uap->port.fifosize = 1; |
1497 | uap->port.ops = &pmz_pops; | 1497 | uap->port.ops = &pmz_pops; |
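
The driver hunks above (via-cuda, via-pmu, mace, pmac_zilog) all apply the same conversion: instead of reading a pre-resolved line out of np->intrs[], the driver asks the new remapper for a virtual irq with irq_of_parse_and_map() and checks the result against NO_IRQ before calling request_irq(). A minimal sketch of that pattern follows; the foo_* names and the handler body are placeholders for illustration, not code from this patch.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/prom.h>

/* Hypothetical handler, using the request_irq() prototype of this tree. */
static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct device_node *np)
{
	unsigned int irq;

	irq = irq_of_parse_and_map(np, 0);	/* first entry of "interrupts" */
	if (irq == NO_IRQ) {
		printk(KERN_ERR "foo: can't map interrupt\n");
		return -ENODEV;
	}
	if (request_irq(irq, foo_interrupt, 0, "foo", NULL)) {
		printk(KERN_ERR "foo: can't request irq %d\n", irq);
		return -ENODEV;
	}
	return 0;
}
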
diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h index ff31cb90325f..c80e113052cd 100644 --- a/include/asm-powerpc/i8259.h +++ b/include/asm-powerpc/i8259.h | |||
@@ -4,8 +4,13 @@ | |||
4 | 4 | ||
5 | #include <linux/irq.h> | 5 | #include <linux/irq.h> |
6 | 6 | ||
7 | #ifdef CONFIG_PPC_MERGE | ||
8 | extern void i8259_init(struct device_node *node, unsigned long intack_addr); | ||
9 | extern unsigned int i8259_irq(struct pt_regs *regs); | ||
10 | #else | ||
7 | extern void i8259_init(unsigned long intack_addr, int offset); | 11 | extern void i8259_init(unsigned long intack_addr, int offset); |
8 | extern int i8259_irq(struct pt_regs *regs); | 12 | extern int i8259_irq(struct pt_regs *regs); |
13 | #endif | ||
9 | 14 | ||
10 | #endif /* __KERNEL__ */ | 15 | #endif /* __KERNEL__ */ |
11 | #endif /* _ASM_POWERPC_I8259_H */ | 16 | #endif /* _ASM_POWERPC_I8259_H */ |
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index 13fa2ef38dc7..e05754752028 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h | |||
@@ -9,26 +9,14 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/config.h> | ||
12 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
14 | #include <linux/list.h> | ||
15 | #include <linux/radix-tree.h> | ||
13 | 16 | ||
14 | #include <asm/types.h> | 17 | #include <asm/types.h> |
15 | #include <asm/atomic.h> | 18 | #include <asm/atomic.h> |
16 | 19 | ||
17 | /* this number is used when no interrupt has been assigned */ | ||
18 | #define NO_IRQ (-1) | ||
19 | |||
20 | /* | ||
21 | * These constants are used for passing information about interrupt | ||
22 | * signal polarity and level/edge sensing to the low-level PIC chip | ||
23 | * drivers. | ||
24 | */ | ||
25 | #define IRQ_SENSE_MASK 0x1 | ||
26 | #define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */ | ||
27 | #define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */ | ||
28 | |||
29 | #define IRQ_POLARITY_MASK 0x2 | ||
30 | #define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ | ||
31 | #define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ | ||
32 | 20 | ||
33 | #define get_irq_desc(irq) (&irq_desc[(irq)]) | 21 | #define get_irq_desc(irq) (&irq_desc[(irq)]) |
34 | 22 | ||
@@ -36,50 +24,325 @@ | |||
36 | #define for_each_irq(i) \ | 24 | #define for_each_irq(i) \ |
37 | for ((i) = 0; (i) < NR_IRQS; ++(i)) | 25 | for ((i) = 0; (i) < NR_IRQS; ++(i)) |
38 | 26 | ||
39 | #ifdef CONFIG_PPC64 | 27 | extern atomic_t ppc_n_lost_interrupts; |
40 | 28 | ||
41 | /* | 29 | #ifdef CONFIG_PPC_MERGE |
42 | * Maximum number of interrupt sources that we can handle. | 30 | |
31 | /* This number is used when no interrupt has been assigned */ | ||
32 | #define NO_IRQ (0) | ||
33 | |||
34 | /* This is a special irq number to return from get_irq() to tell that | ||
35 | * no interrupt happened _and_ ignore it (don't count it as bad). Some | ||
36 | * platforms like iSeries rely on that. | ||
43 | */ | 37 | */ |
38 | #define NO_IRQ_IGNORE ((unsigned int)-1) | ||
39 | |||
40 | /* Total number of virq in the platform (make it a CONFIG_* option ?) */ | ||
44 | #define NR_IRQS 512 | 41 | #define NR_IRQS 512 |
45 | 42 | ||
46 | /* Interrupt numbers are virtual in case they are sparsely | 43 | /* Number of irqs reserved for the legacy controller */ |
47 | * distributed by the hardware. | 44 | #define NUM_ISA_INTERRUPTS 16 |
45 | |||
46 | /* This type is the placeholder for a hardware interrupt number. It has to | ||
47 | * be big enough to enclose whatever representation is used by a given | ||
48 | * platform. | ||
49 | */ | ||
50 | typedef unsigned long irq_hw_number_t; | ||
51 | |||
52 | /* Interrupt controller "host" data structure. This could be defined as a | ||
53 | * irq domain controller. That is, it handles the mapping between hardware | ||
54 | * and virtual interrupt numbers for a given interrupt domain. The host | ||
55 | * structure is generally created by the PIC code for a given PIC instance | ||
56 | * (though a host can cover more than one PIC if they have a flat number | ||
57 | * model). It's the host callbacks that are responsible for setting the | ||
58 | * irq_chip on a given irq_desc after it's been mapped. | ||
59 | * | ||
60 | * The host code and data structures are fairly agnostic to the fact that | ||
61 | * we use an open firmware device-tree. We do have references to struct | ||
62 | * device_node in two places: in irq_find_host() to find the host matching | ||
63 | * a given interrupt controller node, and of course as an argument to its | ||
64 | * counterpart host->ops->match() callback. However, those are treated as | ||
65 | * generic pointers by the core and the fact that it's actually a device-node | ||
66 | * pointer is purely a convention between callers and implementation. This | ||
67 | * code could thus be used on other architectures by replacing those two | ||
68 | * by some sort of arch-specific void * "token" used to identify interrupt | ||
69 | * controllers. | ||
70 | */ | ||
71 | struct irq_host; | ||
72 | struct radix_tree_root; | ||
73 | |||
74 | /* Functions below are provided by the host and called whenever a new mapping | ||
75 | * is created or an old mapping is disposed. The host can then proceed to | ||
76 | * whatever internal data structures management is required. It also needs | ||
77 | * to setup the irq_desc when returning from map(). | ||
78 | */ | ||
79 | struct irq_host_ops { | ||
80 | /* Match an interrupt controller device node to a host, returns | ||
81 | * 1 on a match | ||
82 | */ | ||
83 | int (*match)(struct irq_host *h, struct device_node *node); | ||
84 | |||
85 | /* Create or update a mapping between a virtual irq number and a hw | ||
86 | * irq number. This can be called several times for the same mapping | ||
87 | * but with different flags, though unmap shall always be called | ||
88 | * before the virq->hw mapping is changed. | ||
89 | */ | ||
90 | int (*map)(struct irq_host *h, unsigned int virq, | ||
91 | irq_hw_number_t hw, unsigned int flags); | ||
92 | |||
93 | /* Dispose of such a mapping */ | ||
94 | void (*unmap)(struct irq_host *h, unsigned int virq); | ||
95 | |||
96 | /* Translate device-tree interrupt specifier from raw format coming | ||
97 | * from the firmware to an irq_hw_number_t (interrupt line number) and | ||
98 | * trigger flags that can be passed to irq_create_mapping(). | ||
99 | * If no translation is provided, raw format is assumed to be one cell | ||
100 | * for interrupt line and default sense. | ||
101 | */ | ||
102 | int (*xlate)(struct irq_host *h, struct device_node *ctrler, | ||
103 | u32 *intspec, unsigned int intsize, | ||
104 | irq_hw_number_t *out_hwirq, unsigned int *out_flags); | ||
105 | }; | ||
106 | |||
107 | struct irq_host { | ||
108 | struct list_head link; | ||
109 | |||
110 | /* type of reverse mapping technique */ | ||
111 | unsigned int revmap_type; | ||
112 | #define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */ | ||
113 | #define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */ | ||
114 | #define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */ | ||
115 | #define IRQ_HOST_MAP_TREE 3 /* radix tree */ | ||
116 | union { | ||
117 | struct { | ||
118 | unsigned int size; | ||
119 | unsigned int *revmap; | ||
120 | } linear; | ||
121 | struct radix_tree_root tree; | ||
122 | } revmap_data; | ||
123 | struct irq_host_ops *ops; | ||
124 | void *host_data; | ||
125 | irq_hw_number_t inval_irq; | ||
126 | }; | ||
127 | |||
128 | /* The main irq map itself is an array of NR_IRQS entries containing the | ||
129 | * associated host and irq number. An entry with a host of NULL is free. | ||
130 | * An entry can be allocated if it's free; the allocator always then sets | ||
131 | * hwirq first to the host's invalid irq number and then fills ops. | ||
132 | */ | ||
133 | struct irq_map_entry { | ||
134 | irq_hw_number_t hwirq; | ||
135 | struct irq_host *host; | ||
136 | }; | ||
137 | |||
138 | extern struct irq_map_entry irq_map[NR_IRQS]; | ||
139 | |||
140 | |||
141 | /*** | ||
142 | * irq_alloc_host - Allocate a new irq_host data structure | ||
143 | * @node: device-tree node of the interrupt controller | ||
144 | * @revmap_type: type of reverse mapping to use | ||
145 | * @revmap_arg: for IRQ_HOST_MAP_LINEAR only: size of the map | ||
146 | * @ops: map/unmap host callbacks | ||
147 | * @inval_irq: provide a hw number in that host space that is always invalid | ||
148 | * | ||
149 | * Allocates and initializes an irq_host structure. Note that in the case of | ||
150 | * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns | ||
151 | * for all legacy interrupts except 0 (which is always the invalid irq for | ||
152 | * a legacy controller). For an IRQ_HOST_MAP_LINEAR, the map is allocated by | ||
153 | * this call as well. For an IRQ_HOST_MAP_TREE, the radix tree will be allocated | ||
154 | * later during boot automatically (the reverse mapping will use the slow path | ||
155 | * until that happens). | ||
156 | */ | ||
157 | extern struct irq_host *irq_alloc_host(unsigned int revmap_type, | ||
158 | unsigned int revmap_arg, | ||
159 | struct irq_host_ops *ops, | ||
160 | irq_hw_number_t inval_irq); | ||
161 | |||
162 | |||
163 | /*** | ||
164 | * irq_find_host - Locates a host for a given device node | ||
165 | * @node: device-tree node of the interrupt controller | ||
166 | */ | ||
167 | extern struct irq_host *irq_find_host(struct device_node *node); | ||
168 | |||
169 | |||
170 | /*** | ||
171 | * irq_set_default_host - Set a "default" host | ||
172 | * @host: default host pointer | ||
173 | * | ||
174 | * For convenience, it's possible to set a "default" host that will be used | ||
175 | * whenever NULL is passed to irq_create_mapping(). It makes life easier for | ||
176 | * platforms that want to manipulate a few hard coded interrupt numbers that | ||
177 | * aren't properly represented in the device-tree. | ||
178 | */ | ||
179 | extern void irq_set_default_host(struct irq_host *host); | ||
180 | |||
181 | |||
182 | /*** | ||
183 | * irq_set_virq_count - Set the maximum number of virt irqs | ||
184 | * @count: number of linux virtual irqs, capped with NR_IRQS | ||
185 | * | ||
186 | * This is mainly for use by platforms like iSeries that want to program | ||
187 | * the virtual irq number in the controller to avoid the reverse mapping | ||
188 | */ | ||
189 | extern void irq_set_virq_count(unsigned int count); | ||
190 | |||
191 | |||
192 | /*** | ||
193 | * irq_create_mapping - Map a hardware interrupt into linux virq space | ||
194 | * @host: host owning this hardware interrupt or NULL for default host | ||
195 | * @hwirq: hardware irq number in that host space | ||
196 | * @flags: flags passed to the controller; contains the trigger type among | ||
197 | * others. Use IRQ_TYPE_* defined in include/linux/irq.h | ||
198 | * | ||
199 | * Only one mapping per hardware interrupt is permitted. Returns a linux | ||
200 | * virq number. The flags can be used to provide sense information to the | ||
201 | * controller (typically extracted from the device-tree). If no information | ||
202 | * is passed, the controller defaults will apply (for example, xics can only | ||
203 | * do edge so flags are irrelevant for some pseries specific irqs). | ||
204 | * | ||
205 | * The device-tree generally contains the trigger info in an encoding that is | ||
206 | * specific to a given type of controller. In that case, you can directly use | ||
207 | * host->ops->xlate() to translate that. | ||
208 | * | ||
209 | * It is recommended that new PICs that don't have existing OF bindings choose | ||
210 | * to use a representation of triggers identical to linux. | ||
211 | */ | ||
212 | extern unsigned int irq_create_mapping(struct irq_host *host, | ||
213 | irq_hw_number_t hwirq, | ||
214 | unsigned int flags); | ||
215 | |||
216 | |||
217 | /*** | ||
218 | * irq_dispose_mapping - Unmap an interrupt | ||
219 | * @virq: linux virq number of the interrupt to unmap | ||
48 | */ | 220 | */ |
49 | extern unsigned int virt_irq_to_real_map[NR_IRQS]; | 221 | extern void irq_dispose_mapping(unsigned int virq); |
50 | 222 | ||
51 | /* The maximum virtual IRQ number that we support. This | 223 | /*** |
52 | * can be set by the platform and will be reduced by the | 224 | * irq_find_mapping - Find a linux virq from an hw irq number. |
53 | * value of __irq_offset_value. It defaults to and is | 225 | * @host: host owning this hardware interrupt |
54 | * capped by (NR_IRQS - 1). | 226 | * @hwirq: hardware irq number in that host space |
227 | * | ||
228 | * This is a slow path, for use by generic code. It's expected that an | ||
229 | * irq controller implementation directly calls the appropriate low level | ||
230 | * mapping function. | ||
55 | */ | 231 | */ |
56 | extern unsigned int virt_irq_max; | 232 | extern unsigned int irq_find_mapping(struct irq_host *host, |
233 | irq_hw_number_t hwirq); | ||
57 | 234 | ||
58 | /* Create a mapping for a real_irq if it doesn't already exist. | 235 | |
59 | * Return the virtual irq as a convenience. | 236 | /*** |
237 | * irq_radix_revmap - Find a linux virq from a hw irq number. | ||
238 | * @host: host owning this hardware interrupt | ||
239 | * @hwirq: hardware irq number in that host space | ||
240 | * | ||
241 | * This is a fast path, for use by irq controller code that uses radix tree | ||
242 | * revmaps | ||
243 | */ | ||
244 | extern unsigned int irq_radix_revmap(struct irq_host *host, | ||
245 | irq_hw_number_t hwirq); | ||
246 | |||
247 | /*** | ||
248 | * irq_linear_revmap - Find a linux virq from a hw irq number. | ||
249 | * @host: host owning this hardware interrupt | ||
250 | * @hwirq: hardware irq number in that host space | ||
251 | * | ||
252 | * This is a fast path, for use by irq controller code that uses linear | ||
253 | * revmaps. It falls back to the slow path if the revmap doesn't exist | ||
254 | * yet and will create the revmap entry with appropriate locking | ||
255 | */ | ||
256 | |||
257 | extern unsigned int irq_linear_revmap(struct irq_host *host, | ||
258 | irq_hw_number_t hwirq); | ||
259 | |||
260 | |||
261 | |||
262 | /*** | ||
263 | * irq_alloc_virt - Allocate virtual irq numbers | ||
264 | * @host: host owning these new virtual irqs | ||
265 | * @count: number of consecutive numbers to allocate | ||
266 | * @hint: pass a hint number, the allocator will try to use a 1:1 mapping | ||
267 | * | ||
268 | * This is a low level function that is used internally by irq_create_mapping() | ||
269 | * and that can be used by some irq controller implementations for things | ||
270 | * like allocating ranges of numbers for MSIs. The revmaps are left untouched. | ||
60 | */ | 271 | */ |
61 | int virt_irq_create_mapping(unsigned int real_irq); | 272 | extern unsigned int irq_alloc_virt(struct irq_host *host, |
62 | void virt_irq_init(void); | 273 | unsigned int count, |
274 | unsigned int hint); | ||
275 | |||
276 | /*** | ||
277 | * irq_free_virt - Free virtual irq numbers | ||
278 | * @virq: virtual irq number of the first interrupt to free | ||
279 | * @count: number of interrupts to free | ||
280 | * | ||
281 | * This function is the opposite of irq_alloc_virt. It will not clear reverse | ||
282 | * maps; this should be done beforehand by unmapping the interrupt. In fact, | ||
283 | * all interrupts covered by the range being freed should have been unmapped | ||
284 | * prior to calling this. | ||
285 | */ | ||
286 | extern void irq_free_virt(unsigned int virq, unsigned int count); | ||
287 | |||
288 | |||
289 | /* -- OF helpers -- */ | ||
290 | |||
291 | /* irq_create_of_mapping - Map a hardware interrupt into linux virq space | ||
292 | * @controller: Device node of the interrupt controller | ||
293 | * @intspec: Interrupt specifier from the device-tree | ||
294 | * @intsize: Size of the interrupt specifier from the device-tree | ||
295 | * | ||
296 | * This function is identical to irq_create_mapping except that it takes | ||
297 | * as input information straight from the device-tree (typically the results | ||
298 | * of the of_irq_map_*() functions). | ||
299 | */ | ||
300 | extern unsigned int irq_create_of_mapping(struct device_node *controller, | ||
301 | u32 *intspec, unsigned int intsize); | ||
302 | |||
303 | |||
304 | /* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space | ||
305 | * @device: Device node of the device whose interrupt is to be mapped | ||
306 | * @index: Index of the interrupt to map | ||
307 | * | ||
308 | * This function is a wrapper that chains of_irq_map_one() and | ||
309 | * irq_create_of_mapping() to make things easier for callers | ||
310 | */ | ||
311 | extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index); | ||
312 | |||
313 | /* -- End OF helpers -- */ | ||
63 | 314 | ||
64 | static inline unsigned int virt_irq_to_real(unsigned int virt_irq) | 315 | /*** |
316 | * irq_early_init - Init irq remapping subsystem | ||
317 | */ | ||
318 | extern void irq_early_init(void); | ||
319 | |||
320 | static __inline__ int irq_canonicalize(int irq) | ||
65 | { | 321 | { |
66 | return virt_irq_to_real_map[virt_irq]; | 322 | return irq; |
67 | } | 323 | } |
68 | 324 | ||
69 | extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq); | 325 | |
326 | #else /* CONFIG_PPC_MERGE */ | ||
327 | |||
328 | /* This number is used when no interrupt has been assigned */ | ||
329 | #define NO_IRQ (-1) | ||
330 | #define NO_IRQ_IGNORE (-2) | ||
331 | |||
70 | 332 | ||
71 | /* | 333 | /* |
72 | * List of interrupt controllers. | 334 | * These constants are used for passing information about interrupt |
335 | * signal polarity and level/edge sensing to the low-level PIC chip | ||
336 | * drivers. | ||
73 | */ | 337 | */ |
74 | #define IC_INVALID 0 | 338 | #define IRQ_SENSE_MASK 0x1 |
75 | #define IC_OPEN_PIC 1 | 339 | #define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */ |
76 | #define IC_PPC_XIC 2 | 340 | #define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */ |
77 | #define IC_CELL_PIC 3 | ||
78 | #define IC_ISERIES 4 | ||
79 | 341 | ||
80 | extern u64 ppc64_interrupt_controller; | 342 | #define IRQ_POLARITY_MASK 0x2 |
343 | #define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ | ||
344 | #define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ | ||
81 | 345 | ||
82 | #else /* 32-bit */ | ||
83 | 346 | ||
84 | #if defined(CONFIG_40x) | 347 | #if defined(CONFIG_40x) |
85 | #include <asm/ibm4xx.h> | 348 | #include <asm/ibm4xx.h> |
@@ -512,19 +775,11 @@ extern u64 ppc64_interrupt_controller; | |||
512 | 775 | ||
513 | #endif /* CONFIG_8260 */ | 776 | #endif /* CONFIG_8260 */ |
514 | 777 | ||
515 | #endif | 778 | #endif /* Whatever way too big #ifdef */ |
516 | 779 | ||
517 | #ifndef CONFIG_PPC_MERGE | ||
518 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) | 780 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) |
519 | /* pedantic: these are long because they are used with set_bit --RR */ | 781 | /* pedantic: these are long because they are used with set_bit --RR */ |
520 | extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; | 782 | extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; |
521 | #endif | ||
522 | |||
523 | extern atomic_t ppc_n_lost_interrupts; | ||
524 | |||
525 | #define virt_irq_create_mapping(x) (x) | ||
526 | |||
527 | #endif | ||
528 | 783 | ||
529 | /* | 784 | /* |
530 | * Because many systems have two overlapping name spaces for | 785 | * Because many systems have two overlapping name spaces for |
@@ -563,6 +818,7 @@ static __inline__ int irq_canonicalize(int irq) | |||
563 | irq = 9; | 818 | irq = 9; |
564 | return irq; | 819 | return irq; |
565 | } | 820 | } |
821 | #endif /* CONFIG_PPC_MERGE */ | ||
566 | 822 | ||
567 | extern int distribute_irqs; | 823 | extern int distribute_irqs; |
568 | 824 | ||
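
The interface added to asm-powerpc/irq.h above is host-centric: a PIC driver fills in an irq_host_ops, allocates a host with irq_alloc_host(), and hardware interrupt numbers are then turned into Linux virqs through irq_create_mapping() (or, from device-tree specifiers, through irq_create_of_mapping()/irq_of_parse_and_map()). The fragment below is only a rough sketch of a linear-revmap PIC driver under this interface, not code from the patch: the foo_pic_* names, the 64-source size, the one-cell default-sense specifier assumption and the use of the generic-irq helpers set_irq_chip_and_handler()/handle_level_irq() are all assumptions made for illustration.

#include <linux/init.h>
#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/prom.h>

static struct irq_chip foo_pic_chip;	/* mask/unmask/eoi callbacks elided */
static struct irq_host *foo_pic_host;

static int foo_pic_match(struct irq_host *h, struct device_node *node)
{
	/* Claim only the controller node this host was created for. */
	return h->host_data == node;
}

static int foo_pic_map(struct irq_host *h, unsigned int virq,
		       irq_hw_number_t hw, unsigned int flags)
{
	/* Attach our irq_chip to the freshly mapped descriptor. */
	set_irq_chip_and_handler(virq, &foo_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops foo_pic_host_ops = {
	.match	= foo_pic_match,
	.map	= foo_pic_map,
	/* no .xlate: one-cell specifiers with default sense are assumed */
};

void __init foo_pic_init(struct device_node *node)
{
	/* Linear reverse map of 64 sources; hw irq 0 used as the invalid one. */
	foo_pic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 64,
				      &foo_pic_host_ops, 0);
	foo_pic_host->host_data = node;
}

/* Example use: map hardware source 3 of this PIC onto a Linux virq
 * (0 here means "no trigger information, use controller defaults"). */
unsigned int foo_pic_map_source3(void)
{
	return irq_create_mapping(foo_pic_host, 3, 0);
}
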
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index eba133d149a7..c17c13742401 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h | |||
@@ -97,7 +97,7 @@ struct machdep_calls { | |||
97 | void (*show_percpuinfo)(struct seq_file *m, int i); | 97 | void (*show_percpuinfo)(struct seq_file *m, int i); |
98 | 98 | ||
99 | void (*init_IRQ)(void); | 99 | void (*init_IRQ)(void); |
100 | int (*get_irq)(struct pt_regs *); | 100 | unsigned int (*get_irq)(struct pt_regs *); |
101 | #ifdef CONFIG_KEXEC | 101 | #ifdef CONFIG_KEXEC |
102 | void (*kexec_cpu_down)(int crash_shutdown, int secondary); | 102 | void (*kexec_cpu_down)(int crash_shutdown, int secondary); |
103 | #endif | 103 | #endif |
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h index a2277cb77ddc..eb241c99c457 100644 --- a/include/asm-powerpc/mpic.h +++ b/include/asm-powerpc/mpic.h | |||
@@ -129,6 +129,12 @@ struct mpic_irq_fixup | |||
129 | /* The instance data of a given MPIC */ | 129 | /* The instance data of a given MPIC */ |
130 | struct mpic | 130 | struct mpic |
131 | { | 131 | { |
132 | /* The device node of the interrupt controller */ | ||
133 | struct device_node *of_node; | ||
134 | |||
135 | /* The remapper for this MPIC */ | ||
136 | struct irq_host *irqhost; | ||
137 | |||
132 | /* The "linux" controller struct */ | 138 | /* The "linux" controller struct */ |
133 | struct irq_chip hc_irq; | 139 | struct irq_chip hc_irq; |
134 | #ifdef CONFIG_MPIC_BROKEN_U3 | 140 | #ifdef CONFIG_MPIC_BROKEN_U3 |
@@ -144,16 +150,12 @@ struct mpic | |||
144 | unsigned int isu_size; | 150 | unsigned int isu_size; |
145 | unsigned int isu_shift; | 151 | unsigned int isu_shift; |
146 | unsigned int isu_mask; | 152 | unsigned int isu_mask; |
147 | /* Offset of irq vector numbers */ | ||
148 | unsigned int irq_offset; | ||
149 | unsigned int irq_count; | 153 | unsigned int irq_count; |
150 | /* Offset of ipi vector numbers */ | ||
151 | unsigned int ipi_offset; | ||
152 | /* Number of sources */ | 154 | /* Number of sources */ |
153 | unsigned int num_sources; | 155 | unsigned int num_sources; |
154 | /* Number of CPUs */ | 156 | /* Number of CPUs */ |
155 | unsigned int num_cpus; | 157 | unsigned int num_cpus; |
156 | /* senses array */ | 158 | /* default senses array */ |
157 | unsigned char *senses; | 159 | unsigned char *senses; |
158 | unsigned int senses_count; | 160 | unsigned int senses_count; |
159 | 161 | ||
@@ -209,14 +211,11 @@ struct mpic | |||
209 | * The values in the array start at the first source of the MPIC, | 211 | * The values in the array start at the first source of the MPIC, |
210 | * that is senses[0] corresponds to linux irq "irq_offset". | 212 | * that is senses[0] corresponds to linux irq "irq_offset". |
211 | */ | 213 | */ |
212 | extern struct mpic *mpic_alloc(unsigned long phys_addr, | 214 | extern struct mpic *mpic_alloc(struct device_node *node, |
215 | unsigned long phys_addr, | ||
213 | unsigned int flags, | 216 | unsigned int flags, |
214 | unsigned int isu_size, | 217 | unsigned int isu_size, |
215 | unsigned int irq_offset, | ||
216 | unsigned int irq_count, | 218 | unsigned int irq_count, |
217 | unsigned int ipi_offset, | ||
218 | unsigned char *senses, | ||
219 | unsigned int senses_num, | ||
220 | const char *name); | 219 | const char *name); |
221 | 220 | ||
222 | /* Assign ISUs, to call before mpic_init() | 221 | /* Assign ISUs, to call before mpic_init() |
@@ -228,6 +227,22 @@ extern struct mpic *mpic_alloc(unsigned long phys_addr, | |||
228 | extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | 227 | extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, |
229 | unsigned long phys_addr); | 228 | unsigned long phys_addr); |
230 | 229 | ||
230 | /* Set default sense codes | ||
231 | * | ||
232 | * @mpic: controller | ||
233 | * @senses: array of sense codes | ||
234 | * @count: size of above array | ||
235 | * | ||
236 | * Optionally provide an array (indexed on hardware interrupt numbers | ||
237 | * for this MPIC) of default sense codes for the chip. Those are linux | ||
238 | * sense codes IRQ_TYPE_* | ||
239 | * | ||
240 | * The driver gets ownership of the pointer, don't dispose of it or | ||
241 | * anything like that. __init only. | ||
242 | */ | ||
243 | extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count); | ||
244 | |||
245 | |||
231 | /* Initialize the controller. After this has been called, none of the above | 246 | /* Initialize the controller. After this has been called, none of the above |
232 | * should be called again for this mpic | 247 | * should be called again for this mpic |
233 | */ | 248 | */ |
@@ -269,9 +284,9 @@ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask); | |||
269 | void smp_mpic_message_pass(int target, int msg); | 284 | void smp_mpic_message_pass(int target, int msg); |
270 | 285 | ||
271 | /* Fetch interrupt from a given mpic */ | 286 | /* Fetch interrupt from a given mpic */ |
272 | extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); | 287 | extern unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); |
273 | /* This one gets to the primary mpic */ | 288 | /* This one gets to the primary mpic */ |
274 | extern int mpic_get_irq(struct pt_regs *regs); | 289 | extern unsigned int mpic_get_irq(struct pt_regs *regs); |
275 | 290 | ||
276 | /* Set the EPIC clock ratio */ | 291 | /* Set the EPIC clock ratio */ |
277 | void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio); | 292 | void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio); |
@@ -279,8 +294,5 @@ void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio); | |||
279 | /* Enable/Disable EPIC serial interrupt mode */ | 294 | /* Enable/Disable EPIC serial interrupt mode */ |
280 | void mpic_set_serial_int(struct mpic *mpic, int enable); | 295 | void mpic_set_serial_int(struct mpic *mpic, int enable); |
281 | 296 | ||
282 | /* global mpic for pSeries */ | ||
283 | extern struct mpic *pSeries_mpic; | ||
284 | |||
285 | #endif /* __KERNEL__ */ | 297 | #endif /* __KERNEL__ */ |
286 | #endif /* _ASM_POWERPC_MPIC_H */ | 298 | #endif /* _ASM_POWERPC_MPIC_H */ |
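
mpic_alloc() now takes the controller's device node and has lost the irq/ipi offset and sense-array arguments; default sense codes go through the separate mpic_set_default_senses() call instead. Below is a hypothetical board-setup fragment under the new prototype, not code from this patch: the node lookup, the register base address, the flags value of 0, the 64-source count and the sense-table size are placeholders.

#include <linux/init.h>
#include <asm/mpic.h>
#include <asm/prom.h>

static u8 board_senses[4];	/* default IRQ_TYPE_* codes, board-specific */

void __init board_init_IRQ(void)
{
	struct device_node *np;
	struct mpic *mpic;

	np = of_find_node_by_type(NULL, "open-pic");	/* placeholder lookup */
	if (np == NULL)
		return;

	/* node, register base, flags, isu_size (0 = none), irq_count, name */
	mpic = mpic_alloc(np, 0x80040000UL, 0, 0, 64, " Board MPIC ");
	if (mpic == NULL)
		return;

	mpic_set_default_senses(mpic, board_senses, 4);
	mpic_init(mpic);
}
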
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 48bef401bc19..b095a285c84b 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h | |||
@@ -64,11 +64,6 @@ struct boot_param_header | |||
64 | typedef u32 phandle; | 64 | typedef u32 phandle; |
65 | typedef u32 ihandle; | 65 | typedef u32 ihandle; |
66 | 66 | ||
67 | struct interrupt_info { | ||
68 | int line; | ||
69 | int sense; /* +ve/-ve logic, edge or level, etc. */ | ||
70 | }; | ||
71 | |||
72 | struct property { | 67 | struct property { |
73 | char *name; | 68 | char *name; |
74 | int length; | 69 | int length; |
@@ -81,8 +76,6 @@ struct device_node { | |||
81 | char *type; | 76 | char *type; |
82 | phandle node; | 77 | phandle node; |
83 | phandle linux_phandle; | 78 | phandle linux_phandle; |
84 | int n_intrs; | ||
85 | struct interrupt_info *intrs; | ||
86 | char *full_name; | 79 | char *full_name; |
87 | 80 | ||
88 | struct property *properties; | 81 | struct property *properties; |
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h index 9609d3ee8798..c02d105d8294 100644 --- a/include/asm-powerpc/spu.h +++ b/include/asm-powerpc/spu.h | |||
@@ -117,6 +117,7 @@ struct spu { | |||
117 | struct list_head sched_list; | 117 | struct list_head sched_list; |
118 | int number; | 118 | int number; |
119 | int nid; | 119 | int nid; |
120 | unsigned int irqs[3]; | ||
120 | u32 isrc; | 121 | u32 isrc; |
121 | u32 node; | 122 | u32 node; |
122 | u64 flags; | 123 | u64 flags; |
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c index bab97547a052..7ae0c0bdfad8 100644 --- a/sound/aoa/core/snd-aoa-gpio-feature.c +++ b/sound/aoa/core/snd-aoa-gpio-feature.c | |||
@@ -112,12 +112,7 @@ static struct device_node *get_gpio(char *name, | |||
112 | 112 | ||
113 | static void get_irq(struct device_node * np, int *irqptr) | 113 | static void get_irq(struct device_node * np, int *irqptr) |
114 | { | 114 | { |
115 | *irqptr = -1; | 115 | *irqptr = irq_of_parse_and_map(np, 0); |
116 | if (!np) | ||
117 | return; | ||
118 | if (np->n_intrs != 1) | ||
119 | return; | ||
120 | *irqptr = np->intrs[0].line; | ||
121 | } | 116 | } |
122 | 117 | ||
123 | /* 0x4 is outenable, 0x1 is out, thus 4 or 5 */ | 118 | /* 0x4 is outenable, 0x1 is out, thus 4 or 5 */ |
diff --git a/sound/aoa/soundbus/i2sbus/i2sbus-core.c b/sound/aoa/soundbus/i2sbus/i2sbus-core.c index f268dacdaa00..01c0724335a3 100644 --- a/sound/aoa/soundbus/i2sbus/i2sbus-core.c +++ b/sound/aoa/soundbus/i2sbus/i2sbus-core.c | |||
@@ -129,7 +129,7 @@ static int i2sbus_add_dev(struct macio_dev *macio, | |||
129 | if (strncmp(np->name, "i2s-", 4)) | 129 | if (strncmp(np->name, "i2s-", 4)) |
130 | return 0; | 130 | return 0; |
131 | 131 | ||
132 | if (np->n_intrs != 3) | 132 | if (macio_irq_count(macio) != 3) |
133 | return 0; | 133 | return 0; |
134 | 134 | ||
135 | dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL); | 135 | dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL); |
@@ -183,9 +183,10 @@ static int i2sbus_add_dev(struct macio_dev *macio, | |||
183 | snprintf(dev->rnames[i], sizeof(dev->rnames[i]), rnames[i], np->name); | 183 | snprintf(dev->rnames[i], sizeof(dev->rnames[i]), rnames[i], np->name); |
184 | } | 184 | } |
185 | for (i=0;i<3;i++) { | 185 | for (i=0;i<3;i++) { |
186 | if (request_irq(np->intrs[i].line, ints[i], 0, dev->rnames[i], dev)) | 186 | if (request_irq(macio_irq(macio, i), ints[i], 0, |
187 | dev->rnames[i], dev)) | ||
187 | goto err; | 188 | goto err; |
188 | dev->interrupts[i] = np->intrs[i].line; | 189 | dev->interrupts[i] = macio_irq(macio, i); |
189 | } | 190 | } |
190 | 191 | ||
191 | for (i=0;i<3;i++) { | 192 | for (i=0;i<3;i++) { |
diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c index de454ca39226..4359903f4376 100644 --- a/sound/oss/dmasound/dmasound_awacs.c +++ b/sound/oss/dmasound/dmasound_awacs.c | |||
@@ -374,10 +374,7 @@ setup_audio_gpio(const char *name, const char* compatible, int *gpio_addr, int* | |||
374 | *gpio_pol = *pp; | 374 | *gpio_pol = *pp; |
375 | else | 375 | else |
376 | *gpio_pol = 1; | 376 | *gpio_pol = 1; |
377 | if (np->n_intrs > 0) | 377 | return irq_of_parse_and_map(np, 0); |
378 | return np->intrs[0].line; | ||
379 | |||
380 | return 0; | ||
381 | } | 378 | } |
382 | 379 | ||
383 | static inline void | 380 | static inline void |
@@ -2864,14 +2861,13 @@ printk("dmasound_pmac: couldn't find a Codec we can handle\n"); | |||
2864 | * other info if necessary (early AWACS we want to read chip ids) | 2861 | * other info if necessary (early AWACS we want to read chip ids) |
2865 | */ | 2862 | */ |
2866 | 2863 | ||
2867 | if (of_get_address(io, 2, NULL, NULL) == NULL || io->n_intrs < 3) { | 2864 | if (of_get_address(io, 2, NULL, NULL) == NULL) { |
2868 | /* OK - maybe we need to use the 'awacs' node (on earlier | 2865 | /* OK - maybe we need to use the 'awacs' node (on earlier |
2869 | * machines). | 2866 | * machines). |
2870 | */ | 2867 | */ |
2871 | if (awacs_node) { | 2868 | if (awacs_node) { |
2872 | io = awacs_node ; | 2869 | io = awacs_node ; |
2873 | if (of_get_address(io, 2, NULL, NULL) == NULL || | 2870 | if (of_get_address(io, 2, NULL, NULL) == NULL) { |
2874 | io->n_intrs < 3) { | ||
2875 | printk("dmasound_pmac: can't use %s\n", | 2871 | printk("dmasound_pmac: can't use %s\n", |
2876 | io->full_name); | 2872 | io->full_name); |
2877 | return -ENODEV; | 2873 | return -ENODEV; |
@@ -2940,9 +2936,9 @@ printk("dmasound_pmac: couldn't find a Codec we can handle\n"); | |||
2940 | if (awacs_revision == AWACS_SCREAMER && awacs) | 2936 | if (awacs_revision == AWACS_SCREAMER && awacs) |
2941 | awacs_recalibrate(); | 2937 | awacs_recalibrate(); |
2942 | 2938 | ||
2943 | awacs_irq = io->intrs[0].line; | 2939 | awacs_irq = irq_of_parse_and_map(io, 0); |
2944 | awacs_tx_irq = io->intrs[1].line; | 2940 | awacs_tx_irq = irq_of_parse_and_map(io, 1); |
2945 | awacs_rx_irq = io->intrs[2].line; | 2941 | awacs_rx_irq = irq_of_parse_and_map(io, 2); |
2946 | 2942 | ||
2947 | /* Hack for legacy crap that will be killed someday */ | 2943 | /* Hack for legacy crap that will be killed someday */ |
2948 | awacs_node = io; | 2944 | awacs_node = io; |
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c index 90db9a1d1e0a..641430631505 100644 --- a/sound/ppc/pmac.c +++ b/sound/ppc/pmac.c | |||
@@ -1120,6 +1120,7 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) | |||
1120 | struct snd_pmac *chip; | 1120 | struct snd_pmac *chip; |
1121 | struct device_node *np; | 1121 | struct device_node *np; |
1122 | int i, err; | 1122 | int i, err; |
1123 | unsigned int irq; | ||
1123 | unsigned long ctrl_addr, txdma_addr, rxdma_addr; | 1124 | unsigned long ctrl_addr, txdma_addr, rxdma_addr; |
1124 | static struct snd_device_ops ops = { | 1125 | static struct snd_device_ops ops = { |
1125 | .dev_free = snd_pmac_dev_free, | 1126 | .dev_free = snd_pmac_dev_free, |
@@ -1153,10 +1154,6 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) | |||
1153 | if (chip->is_k2) { | 1154 | if (chip->is_k2) { |
1154 | static char *rnames[] = { | 1155 | static char *rnames[] = { |
1155 | "Sound Control", "Sound DMA" }; | 1156 | "Sound Control", "Sound DMA" }; |
1156 | if (np->n_intrs < 3) { | ||
1157 | err = -ENODEV; | ||
1158 | goto __error; | ||
1159 | } | ||
1160 | for (i = 0; i < 2; i ++) { | 1157 | for (i = 0; i < 2; i ++) { |
1161 | if (of_address_to_resource(np->parent, i, | 1158 | if (of_address_to_resource(np->parent, i, |
1162 | &chip->rsrc[i])) { | 1159 | &chip->rsrc[i])) { |
@@ -1185,10 +1182,6 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) | |||
1185 | } else { | 1182 | } else { |
1186 | static char *rnames[] = { | 1183 | static char *rnames[] = { |
1187 | "Sound Control", "Sound Tx DMA", "Sound Rx DMA" }; | 1184 | "Sound Control", "Sound Tx DMA", "Sound Rx DMA" }; |
1188 | if (np->n_intrs < 3) { | ||
1189 | err = -ENODEV; | ||
1190 | goto __error; | ||
1191 | } | ||
1192 | for (i = 0; i < 3; i ++) { | 1185 | for (i = 0; i < 3; i ++) { |
1193 | if (of_address_to_resource(np, i, | 1186 | if (of_address_to_resource(np, i, |
1194 | &chip->rsrc[i])) { | 1187 | &chip->rsrc[i])) { |
@@ -1220,28 +1213,30 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return) | |||
1220 | chip->playback.dma = ioremap(txdma_addr, 0x100); | 1213 | chip->playback.dma = ioremap(txdma_addr, 0x100); |
1221 | chip->capture.dma = ioremap(rxdma_addr, 0x100); | 1214 | chip->capture.dma = ioremap(rxdma_addr, 0x100); |
1222 | if (chip->model <= PMAC_BURGUNDY) { | 1215 | if (chip->model <= PMAC_BURGUNDY) { |
1223 | if (request_irq(np->intrs[0].line, snd_pmac_ctrl_intr, 0, | 1216 | irq = irq_of_parse_and_map(np, 0); |
1217 | if (request_irq(irq, snd_pmac_ctrl_intr, 0, | ||
1224 | "PMac", (void*)chip)) { | 1218 | "PMac", (void*)chip)) { |
1225 | snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[0].line); | 1219 | snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", |
1220 | irq); | ||
1226 | err = -EBUSY; | 1221 | err = -EBUSY; |
1227 | goto __error; | 1222 | goto __error; |
1228 | } | 1223 | } |
1229 | chip->irq = np->intrs[0].line; | 1224 | chip->irq = irq; |
1230 | } | 1225 | } |
1231 | if (request_irq(np->intrs[1].line, snd_pmac_tx_intr, 0, | 1226 | irq = irq_of_parse_and_map(np, 1); |
1232 | "PMac Output", (void*)chip)) { | 1227 | if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){ |
1233 | snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[1].line); | 1228 | snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); |
1234 | err = -EBUSY; | 1229 | err = -EBUSY; |
1235 | goto __error; | 1230 | goto __error; |
1236 | } | 1231 | } |
1237 | chip->tx_irq = np->intrs[1].line; | 1232 | chip->tx_irq = irq; |
1238 | if (request_irq(np->intrs[2].line, snd_pmac_rx_intr, 0, | 1233 | irq = irq_of_parse_and_map(np, 2); |
1239 | "PMac Input", (void*)chip)) { | 1234 | if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) { |
1240 | snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", np->intrs[2].line); | 1235 | snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq); |
1241 | err = -EBUSY; | 1236 | err = -EBUSY; |
1242 | goto __error; | 1237 | goto __error; |
1243 | } | 1238 | } |
1244 | chip->rx_irq = np->intrs[2].line; | 1239 | chip->rx_irq = irq; |
1245 | 1240 | ||
1246 | snd_pmac_sound_feature(chip, 1); | 1241 | snd_pmac_sound_feature(chip, 1); |
1247 | 1242 | ||
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c index 70e4ebc70260..692c61177678 100644 --- a/sound/ppc/tumbler.c +++ b/sound/ppc/tumbler.c | |||
@@ -1121,7 +1121,7 @@ static long tumbler_find_device(const char *device, const char *platform, | |||
1121 | DBG("(I) GPIO device %s found, offset: %x, active state: %d !\n", | 1121 | DBG("(I) GPIO device %s found, offset: %x, active state: %d !\n", |
1122 | device, gp->addr, gp->active_state); | 1122 | device, gp->addr, gp->active_state); |
1123 | 1123 | ||
1124 | return (node->n_intrs > 0) ? node->intrs[0].line : 0; | 1124 | return irq_of_parse_and_map(node, 0); |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | /* reset audio */ | 1127 | /* reset audio */ |
@@ -1264,16 +1264,16 @@ static int __init tumbler_init(struct snd_pmac *chip) | |||
1264 | &mix->line_mute, 1); | 1264 | &mix->line_mute, 1); |
1265 | irq = tumbler_find_device("headphone-detect", | 1265 | irq = tumbler_find_device("headphone-detect", |
1266 | NULL, &mix->hp_detect, 0); | 1266 | NULL, &mix->hp_detect, 0); |
1267 | if (irq < 0) | 1267 | if (irq <= NO_IRQ) |
1268 | irq = tumbler_find_device("headphone-detect", | 1268 | irq = tumbler_find_device("headphone-detect", |
1269 | NULL, &mix->hp_detect, 1); | 1269 | NULL, &mix->hp_detect, 1); |
1270 | if (irq < 0) | 1270 | if (irq <= NO_IRQ) |
1271 | irq = tumbler_find_device("keywest-gpio15", | 1271 | irq = tumbler_find_device("keywest-gpio15", |
1272 | NULL, &mix->hp_detect, 1); | 1272 | NULL, &mix->hp_detect, 1); |
1273 | mix->headphone_irq = irq; | 1273 | mix->headphone_irq = irq; |
1274 | irq = tumbler_find_device("line-output-detect", | 1274 | irq = tumbler_find_device("line-output-detect", |
1275 | NULL, &mix->line_detect, 0); | 1275 | NULL, &mix->line_detect, 0); |
1276 | if (irq < 0) | 1276 | if (irq <= NO_IRQ) |
1277 | irq = tumbler_find_device("line-output-detect", | 1277 | irq = tumbler_find_device("line-output-detect", |
1278 | NULL, &mix->line_detect, 1); | 1278 | NULL, &mix->line_detect, 1); |
1279 | mix->lineout_irq = irq; | 1279 | mix->lineout_irq = irq; |
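
One consequence worth noting from the tumbler.c hunk: with CONFIG_PPC_MERGE, NO_IRQ is now 0 rather than -1, and tumbler_find_device() returns a long that may also carry a negative errno-style value, so the old "irq < 0" tests become "irq <= NO_IRQ". A small self-contained illustration of that idiom, with a hypothetical board_* helper standing in for tumbler_find_device():

#include <asm/irq.h>

/* Hypothetical helper: returns a virq, NO_IRQ (0) when the device has no
 * interrupt, or a negative errno when the device node isn't found at all. */
static long board_find_detect_irq(void)
{
	return NO_IRQ;	/* placeholder body */
}

static int board_has_detect_irq(void)
{
	long irq = board_find_detect_irq();

	/* With NO_IRQ == 0, a single "<= NO_IRQ" test rejects both "no
	 * interrupt assigned" (0) and error returns (negative values). */
	if (irq <= NO_IRQ)
		return 0;
	return 1;
}
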