Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c	333
1 file changed, 120 insertions(+), 213 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4a65386995d7..5b428e308666 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,7 +66,6 @@
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
 #include <asm/udbg.h>
-#include <asm/dbell.h>
 #include <asm/smp.h>
 
 #ifdef CONFIG_PPC64
@@ -116,7 +115,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-notrace void raw_local_irq_restore(unsigned long en)
+notrace void arch_local_irq_restore(unsigned long en)
 {
 	/*
 	 * get_paca()->soft_enabled = en;
@@ -160,7 +159,8 @@ notrace void raw_local_irq_restore(unsigned long en)
 
 #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
 	/* Check for pending doorbell interrupts and resend to ourself */
-	doorbell_check_self();
+	if (cpu_has_feature(CPU_FTR_DBELL))
+		smp_muxed_ipi_resend();
 #endif
 
 	/*
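The hunk above drops the direct doorbell_check_self() call in favour of the generic muxed-IPI path: several logical IPI messages share one doorbell interrupt, so "resending" to yourself amounts to re-running the demux over a per-CPU pending word. A minimal stand-alone sketch of that muxing idea in user-space C (ipi_mux_send and ipi_mux_demux are invented names, not kernel APIs):

    /* Model of a "muxed" IPI: logical messages are bits in a per-CPU
     * pending word behind a single hardware doorbell interrupt. */
    #include <stdatomic.h>
    #include <stdio.h>

    enum ipi_msg { IPI_CALL_FUNC, IPI_RESCHEDULE, IPI_NR_MSGS };

    static _Atomic unsigned int pending[4];     /* one word per CPU */

    static void ipi_mux_send(int cpu, enum ipi_msg m)
    {
            /* Setting the first bit is what would raise the single
             * hardware doorbell for that CPU. */
            atomic_fetch_or(&pending[cpu], 1u << m);
    }

    static void ipi_mux_demux(int cpu)
    {
            /* The doorbell handler drains every pending message. */
            unsigned int bits = atomic_exchange(&pending[cpu], 0);

            while (bits) {
                    int m = __builtin_ctz(bits);    /* lowest set bit */

                    bits &= bits - 1;
                    printf("cpu%d: handle message %d\n", cpu, m);
            }
    }

    int main(void)
    {
            ipi_mux_send(1, IPI_RESCHEDULE);
            ipi_mux_send(1, IPI_CALL_FUNC);
            ipi_mux_demux(1);   /* a self-resend re-runs this demux */
            return 0;
    }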
@@ -192,10 +192,10 @@ notrace void raw_local_irq_restore(unsigned long en)
 
 	__hard_irq_enable();
 }
-EXPORT_SYMBOL(raw_local_irq_restore);
+EXPORT_SYMBOL(arch_local_irq_restore);
 #endif /* CONFIG_PPC64 */
 
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	int j;
 
@@ -231,63 +231,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	return 0;
 }
 
-int show_interrupts(struct seq_file *p, void *v)
-{
-	unsigned long flags, any_count = 0;
-	int i = *(loff_t *) v, j, prec;
-	struct irqaction *action;
-	struct irq_desc *desc;
-
-	if (i > nr_irqs)
-		return 0;
-
-	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
-		j *= 10;
-
-	if (i == nr_irqs)
-		return show_other_interrupts(p, prec);
-
-	/* print header */
-	if (i == 0) {
-		seq_printf(p, "%*s", prec + 8, "");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d", j);
-		seq_putc(p, '\n');
-	}
-
-	desc = irq_to_desc(i);
-	if (!desc)
-		return 0;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	for_each_online_cpu(j)
-		any_count |= kstat_irqs_cpu(i, j);
-	action = desc->action;
-	if (!action && !any_count)
-		goto out;
-
-	seq_printf(p, "%*d: ", prec, i);
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-
-	if (desc->chip)
-		seq_printf(p, " %-16s", desc->chip->name);
-	else
-		seq_printf(p, " %-16s", "None");
-	seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
-
-	if (action) {
-		seq_printf(p, " %s", action->name);
-		while ((action = action->next) != NULL)
-			seq_printf(p, ", %s", action->name);
-	}
-
-	seq_putc(p, '\n');
-out:
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
-}
-
 /*
  * /proc/stat helpers
  */
@@ -303,30 +246,37 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
 	cpumask_var_t mask;
+	const struct cpumask *map = cpu_online_mask;
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
 	for_each_irq(irq) {
+		struct irq_data *data;
+		struct irq_chip *chip;
+
 		desc = irq_to_desc(irq);
 		if (!desc)
 			continue;
 
-		if (desc->status & IRQ_PER_CPU)
+		data = irq_desc_get_irq_data(desc);
+		if (irqd_is_per_cpu(data))
 			continue;
 
-		cpumask_and(mask, desc->affinity, map);
+		chip = irq_data_get_irq_chip(data);
+
+		cpumask_and(mask, data->affinity, map);
 		if (cpumask_any(mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			cpumask_copy(mask, map);
 		}
-		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+		if (chip->irq_set_affinity)
+			chip->irq_set_affinity(data, mask, true);
 		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
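The rewritten migrate_irqs() above goes through the irq_data/irq_chip accessors, but the core policy is unchanged: intersect each interrupt's affinity with the online mask, and if nothing survives, "break" affinity by falling back to all online CPUs. A stand-alone sketch of that clamping rule, with cpumasks reduced to plain bit words (illustrative only):

    #include <stdio.h>

    /* Intersect an irq's affinity with the online CPUs; if the result
     * is empty, break affinity and allow every online CPU. */
    static unsigned long clamp_affinity(unsigned long affinity,
                                        unsigned long online, int irq)
    {
            unsigned long mask = affinity & online;

            if (mask == 0) {
                    printf("Breaking affinity for irq %d\n", irq);
                    mask = online;
            }
            return mask;
    }

    int main(void)
    {
            /* irq 5 was bound to CPUs 2-3, but only CPUs 0-1 remain. */
            unsigned long mask = clamp_affinity(0xc, 0x3, 5);

            printf("irq 5 -> new mask 0x%lx\n", mask);   /* 0x3 */
            return 0;
    }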
@@ -345,17 +295,20 @@ static inline void handle_one_irq(unsigned int irq)
 	unsigned long saved_sp_limit;
 	struct irq_desc *desc;
 
+	desc = irq_to_desc(irq);
+	if (!desc)
+		return;
+
 	/* Switch to the irq stack to handle this */
 	curtp = current_thread_info();
 	irqtp = hardirq_ctx[smp_processor_id()];
 
 	if (curtp == irqtp) {
 		/* We're already on the irq stack, just handle it */
-		generic_handle_irq(irq);
+		desc->handle_irq(irq, desc);
 		return;
 	}
 
-	desc = irq_to_desc(irq);
 	saved_sp_limit = current->thread.ksp_limit;
 
 	irqtp->task = curtp->task;
@@ -447,24 +400,28 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
 void exc_lvl_ctx_init(void)
 {
 	struct thread_info *tp;
-	int i, hw_cpu;
+	int i, cpu_nr;
 
 	for_each_possible_cpu(i) {
-		hw_cpu = get_hard_smp_processor_id(i);
-		memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
-		tp = critirq_ctx[hw_cpu];
-		tp->cpu = i;
+#ifdef CONFIG_PPC64
+		cpu_nr = i;
+#else
+		cpu_nr = get_hard_smp_processor_id(i);
+#endif
+		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
+		tp = critirq_ctx[cpu_nr];
+		tp->cpu = cpu_nr;
 		tp->preempt_count = 0;
 
 #ifdef CONFIG_BOOKE
-		memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
-		tp = dbgirq_ctx[hw_cpu];
-		tp->cpu = i;
+		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
+		tp = dbgirq_ctx[cpu_nr];
+		tp->cpu = cpu_nr;
 		tp->preempt_count = 0;
 
-		memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
-		tp = mcheckirq_ctx[hw_cpu];
-		tp->cpu = i;
+		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
+		tp = mcheckirq_ctx[cpu_nr];
+		tp->cpu = cpu_nr;
 		tp->preempt_count = HARDIRQ_OFFSET;
 #endif
 	}
@@ -527,20 +484,41 @@ void do_softirq(void)
  * IRQ controller and virtual interrupts
  */
 
+/* The main irq map itself is an array of NR_IRQ entries containing the
+ * associate host and irq number. An entry with a host of NULL is free.
+ * An entry can be allocated if it's free, the allocator always then sets
+ * hwirq first to the host's invalid irq number and then fills ops.
+ */
+struct irq_map_entry {
+	irq_hw_number_t hwirq;
+	struct irq_host *host;
+};
+
 static LIST_HEAD(irq_hosts);
 static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static unsigned int revmap_trees_allocated;
 static DEFINE_MUTEX(revmap_trees_mutex);
-struct irq_map_entry irq_map[NR_IRQS];
+static struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
 
+irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+	return irq_map[d->irq].hwirq;
+}
+EXPORT_SYMBOL_GPL(irqd_to_hwirq);
+
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	return irq_map[virq].hwirq;
 }
 EXPORT_SYMBOL_GPL(virq_to_hw);
 
+bool virq_is_host(unsigned int virq, struct irq_host *host)
+{
+	return irq_map[virq].host == host;
+}
+EXPORT_SYMBOL_GPL(virq_is_host);
+
 static int default_irq_host_match(struct irq_host *h, struct device_node *np)
 {
 	return h->of_node != NULL && h->of_node == np;
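This hunk makes the irq_map table static and adds small accessors (irqd_to_hwirq, virq_is_host) so code outside this file no longer pokes at the array directly. A stand-alone sketch of that private-table-plus-accessors pattern, with simplified stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_IRQS 32

    struct irq_host;                        /* opaque to callers */

    struct irq_map_entry {
            unsigned long hwirq;
            struct irq_host *host;          /* NULL means entry is free */
    };

    /* static: the table is now private to this file */
    static struct irq_map_entry irq_map[NR_IRQS];

    unsigned long virq_to_hw(unsigned int virq)
    {
            return irq_map[virq].hwirq;
    }

    bool virq_is_host(unsigned int virq, struct irq_host *host)
    {
            return irq_map[virq].host == host;
    }

    int main(void)
    {
            irq_map[16].hwirq = 0x500;
            printf("virq 16 -> hwirq 0x%lx\n", virq_to_hw(16));
            printf("virq 16 unowned? %d\n", virq_is_host(16, NULL));
            return 0;
    }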
@@ -561,7 +539,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	/* Allocate structure and revmap table if using linear mapping */
 	if (revmap_type == IRQ_HOST_MAP_LINEAR)
 		size += revmap_arg * sizeof(unsigned int);
-	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
+	host = kzalloc(size, GFP_KERNEL);
 	if (host == NULL)
 		return NULL;
 
@@ -582,13 +560,8 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
 		if (irq_map[0].host != NULL) {
 			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-			/* If we are early boot, we can't free the structure,
-			 * too bad...
-			 * this will be fixed once slab is made available early
-			 * instead of the current cruft
-			 */
-			if (mem_init_done)
-				kfree(host);
+			of_node_put(host->of_node);
+			kfree(host);
 			return NULL;
 		}
 		irq_map[0].host = host;
@@ -609,14 +582,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 			irq_map[i].host = host;
 			smp_wmb();
 
-			/* Clear norequest flags */
-			irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
-
 			/* Legacy flags are left to default at this point,
 			 * one can then use irq_create_mapping() to
 			 * explicitly change them
 			 */
 			ops->map(host, i, i);
+
+			/* Clear norequest flags */
+			irq_clear_status_flags(i, IRQ_NOREQUEST);
 		}
 		break;
 	case IRQ_HOST_MAP_LINEAR:
@@ -627,6 +600,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 		smp_wmb();
 		host->revmap_data.linear.revmap = rmap;
 		break;
+	case IRQ_HOST_MAP_TREE:
+		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
+		break;
 	default:
 		break;
 	}
@@ -676,17 +652,14 @@ void irq_set_virq_count(unsigned int count)
 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
 			    irq_hw_number_t hwirq)
 {
-	struct irq_desc *desc;
+	int res;
 
-	desc = irq_to_desc_alloc_node(virq, 0);
-	if (!desc) {
+	res = irq_alloc_desc_at(virq, 0);
+	if (res != virq) {
 		pr_debug("irq: -> allocating desc failed\n");
 		goto error;
 	}
 
-	/* Clear IRQ_NOREQUEST flag */
-	desc->status &= ~IRQ_NOREQUEST;
-
 	/* map it */
 	smp_wmb();
 	irq_map[virq].hwirq = hwirq;
@@ -694,11 +667,15 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
 
 	if (host->ops->map(host, virq, hwirq)) {
 		pr_debug("irq: -> mapping failed, freeing\n");
-		goto error;
+		goto errdesc;
 	}
 
+	irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
 	return 0;
 
+errdesc:
+	irq_free_descs(virq, 1);
 error:
 	irq_free_virt(virq, 1);
 	return -1;
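irq_setup_virq() now owns a second resource (the descriptor from irq_alloc_desc_at), so the hunk above adds a second unwind label: each label frees exactly what was acquired before the failure, in reverse order. A stand-alone sketch of this two-level goto unwind (resource names invented for illustration):

    #include <stdio.h>

    static int alloc_desc(int v) { printf("alloc desc %d\n", v); return v; }
    static void free_desc(int v) { printf("free desc %d\n", v); }
    static void free_virt(int v) { printf("free virt %d\n", v); }
    static int map_hw(int v)     { (void)v; return -1; /* map() fails */ }

    static int setup_virq(int virq)
    {
            if (alloc_desc(virq) != virq)
                    goto error;         /* nothing else to undo yet */

            if (map_hw(virq)) {
                    printf("mapping failed, freeing\n");
                    goto errdesc;       /* undo the descriptor too */
            }
            return 0;

    errdesc:
            free_desc(virq);
    error:
            free_virt(virq);
            return -1;
    }

    int main(void)
    {
            return setup_virq(7) == -1 ? 0 : 1;
    }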
@@ -746,13 +723,9 @@ unsigned int irq_create_mapping(struct irq_host *host,
 	}
 	pr_debug("irq: -> using host @%p\n", host);
 
-	/* Check if mapping already exist, if it does, call
-	 * host->ops->map() to update the flags
-	 */
+	/* Check if mapping already exists */
 	virq = irq_find_mapping(host, hwirq);
 	if (virq != NO_IRQ) {
-		if (host->ops->remap)
-			host->ops->remap(host, virq, hwirq);
 		pr_debug("irq: -> existing mapping on virq %d\n", virq);
 		return virq;
 	}
@@ -818,8 +791,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 
 	/* Set type if specified and different than the current one */
 	if (type != IRQ_TYPE_NONE &&
-	    type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
-		set_irq_type(virq, type);
+	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+		irq_set_irq_type(virq, type);
 	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
@@ -833,16 +806,17 @@ void irq_dispose_mapping(unsigned int virq)
 		return;
 
 	host = irq_map[virq].host;
-	WARN_ON (host == NULL);
-	if (host == NULL)
+	if (WARN_ON(host == NULL))
 		return;
 
 	/* Never unmap legacy interrupts */
 	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
 		return;
 
+	irq_set_status_flags(virq, IRQ_NOREQUEST);
+
 	/* remove chip and handler */
-	set_irq_chip_and_handler(virq, NULL, NULL);
+	irq_set_chip_and_handler(virq, NULL, NULL);
 
 	/* Make sure it's completed */
 	synchronize_irq(virq);
@@ -860,13 +834,6 @@ void irq_dispose_mapping(unsigned int virq)
 		host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
 		break;
 	case IRQ_HOST_MAP_TREE:
-		/*
-		 * Check if radix tree allocated yet, if not then nothing to
-		 * remove.
-		 */
-		smp_rmb();
-		if (revmap_trees_allocated < 1)
-			break;
 		mutex_lock(&revmap_trees_mutex);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
 		mutex_unlock(&revmap_trees_mutex);
@@ -877,9 +844,7 @@ void irq_dispose_mapping(unsigned int virq)
 	smp_mb();
 	irq_map[virq].hwirq = host->inval_irq;
 
-	/* Set some flags */
-	irq_to_desc(virq)->status |= IRQ_NOREQUEST;
-
+	irq_free_descs(virq, 1);
 	/* Free it */
 	irq_free_virt(virq, 1);
 }
@@ -924,21 +889,17 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
 	struct irq_map_entry *ptr;
 	unsigned int virq;
 
-	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
-
-	/*
-	 * Check if the radix tree exists and has bee initialized.
-	 * If not, we fallback to slow mode
-	 */
-	if (revmap_trees_allocated < 2)
+	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
 		return irq_find_mapping(host, hwirq);
 
-	/* Now try to resolve */
 	/*
-	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
-	 * as it's referencing an entry in the static irq_map table.
+	 * The ptr returned references the static global irq_map.
+	 * but freeing an irq can delete nodes along the path to
+	 * do the lookup via call_rcu.
 	 */
+	rcu_read_lock();
 	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
+	rcu_read_unlock();
 
 	/*
 	 * If found in radix tree, then fine.
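With the revmap_trees_allocated state machine gone (the tree now exists from host creation; see the IRQ_HOST_MAP_TREE case added earlier), the lookup instead takes an RCU read lock: irq_free_virt() may delete tree nodes concurrently, but nodes are only reclaimed after a grace period, and the returned pointer stays valid because it points into the static irq_map. A stand-alone sketch of the read-side shape, with the rcu_* calls stubbed to no-ops so it compiles outside the kernel:

    #include <stdio.h>

    /* Stubs: in the kernel these delimit a region during which
     * concurrently deleted nodes cannot yet be freed. */
    static void rcu_read_lock(void)   { }
    static void rcu_read_unlock(void) { }

    struct entry { unsigned long hwirq; };

    static struct entry table[8] = { [3] = { .hwirq = 0x42 } };

    /* Stand-in for radix_tree_lookup(); may race with deletion. */
    static struct entry *tree_lookup(unsigned long key)
    {
            return key < 8 ? &table[key] : NULL;
    }

    int main(void)
    {
            struct entry *e;

            rcu_read_lock();
            e = tree_lookup(3);
            rcu_read_unlock();

            /* e points into the static table, so it is still valid. */
            if (e)
                    printf("hwirq 0x%lx\n", e->hwirq);
            return 0;
    }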
@@ -956,16 +917,7 @@
 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
 			  irq_hw_number_t hwirq)
 {
-
-	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
-
-	/*
-	 * Check if the radix tree exists yet.
-	 * If not, then the irq will be inserted into the tree when it gets
-	 * initialized.
-	 */
-	smp_rmb();
-	if (revmap_trees_allocated < 1)
+	if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
 		return;
 
 	if (virq != NO_IRQ) {
@@ -981,7 +933,8 @@ unsigned int irq_linear_revmap(struct irq_host *host,
 {
 	unsigned int *revmap;
 
-	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
+	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
+		return irq_find_mapping(host, hwirq);
 
 	/* Check revmap bounds */
 	if (unlikely(hwirq >= host->revmap_data.linear.size))
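irq_linear_revmap() now degrades gracefully on a host with the wrong revmap type by falling back to the slow irq_find_mapping() path; the fast path remains a bounds-checked array lookup, which also falls back when hwirq is out of range. A stand-alone sketch of that lookup (simplified stand-in types, not the kernel structures):

    #include <stdio.h>

    #define NO_IRQ 0

    struct linear_host {
            unsigned int *revmap;       /* hwirq -> virq */
            unsigned long size;
    };

    static unsigned int slow_find_mapping(unsigned long hwirq)
    {
            (void)hwirq;
            return NO_IRQ;              /* stand-in for irq_find_mapping() */
    }

    static unsigned int linear_revmap(struct linear_host *h,
                                      unsigned long hwirq)
    {
            if (hwirq >= h->size)       /* revmap bounds check */
                    return slow_find_mapping(hwirq);
            return h->revmap[hwirq];
    }

    int main(void)
    {
            unsigned int map[4] = { NO_IRQ, 17, 18, NO_IRQ };
            struct linear_host h = { .revmap = map, .size = 4 };

            printf("hwirq 1 -> virq %u\n", linear_revmap(&h, 1));
            printf("hwirq 9 -> virq %u\n", linear_revmap(&h, 9));
            return 0;
    }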
@@ -1054,14 +1007,23 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 	WARN_ON (virq < NUM_ISA_INTERRUPTS);
 	WARN_ON (count == 0 || (virq + count) > irq_virq_count);
 
+	if (virq < NUM_ISA_INTERRUPTS) {
+		if (virq + count < NUM_ISA_INTERRUPTS)
+			return;
+		count -= NUM_ISA_INTERRUPTS - virq;
+		virq = NUM_ISA_INTERRUPTS;
+	}
+
+	if (count > irq_virq_count || virq > irq_virq_count - count) {
+		if (virq > irq_virq_count)
+			return;
+		count = irq_virq_count - virq;
+	}
+
 	raw_spin_lock_irqsave(&irq_big_lock, flags);
 	for (i = virq; i < (virq + count); i++) {
 		struct irq_host *host;
 
-		if (i < NUM_ISA_INTERRUPTS ||
-		    (virq + count) > irq_virq_count)
-			continue;
-
 		host = irq_map[i].host;
 		irq_map[i].hwirq = host->inval_irq;
 		smp_wmb();
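The hunk above hoists the range validation out of the free loop: the old in-loop test was loop-invariant (it skipped either every entry or none), while the new code clamps [virq, virq + count) once against the legal window [NUM_ISA_INTERRUPTS, irq_virq_count) before touching the map. A worked, stand-alone example of that clamping arithmetic:

    #include <stdio.h>

    #define NUM_ISA_INTERRUPTS 16
    static unsigned int irq_virq_count = 512;

    /* Trim [*virq, *virq + *count) to the window
     * [NUM_ISA_INTERRUPTS, irq_virq_count); -1 means nothing is left. */
    static int clamp_range(unsigned int *virq, unsigned int *count)
    {
            if (*virq < NUM_ISA_INTERRUPTS) {
                    if (*virq + *count < NUM_ISA_INTERRUPTS)
                            return -1;                      /* all ISA */
                    *count -= NUM_ISA_INTERRUPTS - *virq;   /* drop head */
                    *virq = NUM_ISA_INTERRUPTS;
            }
            if (*count > irq_virq_count ||
                *virq > irq_virq_count - *count) {
                    if (*virq > irq_virq_count)
                            return -1;                      /* all above */
                    *count = irq_virq_count - *virq;        /* trim tail */
            }
            return 0;
    }

    int main(void)
    {
            unsigned int virq = 10, count = 20;

            /* 10..29 overlaps the ISA range 0..15: clamped to 16..29. */
            if (!clamp_range(&virq, &count))
                    printf("free virq %u, count %u\n", virq, count);
            return 0;   /* prints: free virq 16, count 14 */
    }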
@@ -1072,82 +1034,21 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 
 int arch_early_irq_init(void)
 {
-	struct irq_desc *desc;
-	int i;
-
-	for (i = 0; i < NR_IRQS; i++) {
-		desc = irq_to_desc(i);
-		if (desc)
-			desc->status |= IRQ_NOREQUEST;
-	}
-
-	return 0;
-}
-
-int arch_init_chip_data(struct irq_desc *desc, int node)
-{
-	desc->status |= IRQ_NOREQUEST;
 	return 0;
 }
 
-/* We need to create the radix trees late */
-static int irq_late_init(void)
-{
-	struct irq_host *h;
-	unsigned int i;
-
-	/*
-	 * No mutual exclusion with respect to accessors of the tree is needed
-	 * here as the synchronization is done via the state variable
-	 * revmap_trees_allocated.
-	 */
-	list_for_each_entry(h, &irq_hosts, link) {
-		if (h->revmap_type == IRQ_HOST_MAP_TREE)
-			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
-	}
-
-	/*
-	 * Make sure the radix trees inits are visible before setting
-	 * the flag
-	 */
-	smp_wmb();
-	revmap_trees_allocated = 1;
-
-	/*
-	 * Insert the reverse mapping for those interrupts already present
-	 * in irq_map[].
-	 */
-	mutex_lock(&revmap_trees_mutex);
-	for (i = 0; i < irq_virq_count; i++) {
-		if (irq_map[i].host &&
-		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
-			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
-					  irq_map[i].hwirq, &irq_map[i]);
-	}
-	mutex_unlock(&revmap_trees_mutex);
-
-	/*
-	 * Make sure the radix trees insertions are visible before setting
-	 * the flag
-	 */
-	smp_wmb();
-	revmap_trees_allocated = 2;
-
-	return 0;
-}
-arch_initcall(irq_late_init);
-
 #ifdef CONFIG_VIRQ_DEBUG
 static int virq_debug_show(struct seq_file *m, void *private)
 {
 	unsigned long flags;
 	struct irq_desc *desc;
 	const char *p;
-	char none[] = "none";
+	static const char none[] = "none";
+	void *data;
 	int i;
 
-	seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
-			"chip name", "host name");
+	seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
+			"chip name", "chip data", "host name");
 
 	for (i = 1; i < nr_irqs; i++) {
 		desc = irq_to_desc(i);
@@ -1157,15 +1058,21 @@ static int virq_debug_show(struct seq_file *m, void *private)
 		raw_spin_lock_irqsave(&desc->lock, flags);
 
 		if (desc->action && desc->action->handler) {
+			struct irq_chip *chip;
+
 			seq_printf(m, "%5d ", i);
-			seq_printf(m, "0x%05lx ", virq_to_hw(i));
+			seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
 
-			if (desc->chip && desc->chip->name)
-				p = desc->chip->name;
+			chip = irq_desc_get_chip(desc);
+			if (chip && chip->name)
+				p = chip->name;
 			else
 				p = none;
 			seq_printf(m, "%-15s ", p);
 
+			data = irq_desc_get_chip_data(desc);
+			seq_printf(m, "0x%16p ", data);
+
 			if (irq_map[i].host && irq_map[i].host->of_node)
 				p = irq_map[i].host->of_node->full_name;
 			else