author     Tony Luck <tony.luck@intel.com>   2013-03-20 13:30:15 -0400
committer  Tony Luck <tony.luck@intel.com>   2013-04-02 12:37:06 -0400
commit     d303e9e98fce56cdb3c6f2ac92f626fc2bd51c77
tree       80421d00fe3c7c7502124e641c8918f129e90876
parent     96edc754aa714e51d2044af91b96cc7420c5cb01
Fix initialization of CMCI/CMCP interrupts
Back in 2010, during a revamp of the irq code, some initializations
were moved from ia64_mca_init() to ia64_mca_late_init() in

    commit c75f2aa13f5b268aba369b5dc566088b5194377c
    Cannot use register_percpu_irq() from ia64_mca_init()

But this was hideously wrong. First of all, these initializations are
now done far too late: specifically, after all the other cpus have been
brought up and have initialized their own CMC vectors from smp_callin().
Also ia64_mca_late_init() may be called from any cpu, so the line:

	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */

is generally not executed on the BSP, and so the CMC vector isn't set up
at all on that processor.

Make use of the arch_early_irq_init() hook to get this code executed at
just the right moment: not too early, not too late.

Reported-by: Fred Hartnett <fred.hartnett@hp.com>
Tested-by: Fred Hartnett <fred.hartnett@hp.com>
Cc: stable@kernel.org # v2.6.37+
Signed-off-by: Tony Luck <tony.luck@intel.com>
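To see why the hook fires at "just the right moment", it helps to lay out
the boot ordering the message refers to: start_kernel() runs
early_irq_init(), which invokes the arch_early_irq_init() hook on the boot
processor; smp_init() brings up the other cpus later; and initcalls such as
ia64_mca_late_init() run later still. The sketch below is an illustrative,
userspace-compilable timeline with printf stubs standing in for the real
routines; only the ordering reflects the kernel, not the bodies.

	#include <stdio.h>

	/* Stub for the routine this patch adds; the real body registers
	 * the CMCI/CMCP percpu irqs and sets up the BSP's CMC vector. */
	static void ia64_mca_irq_init(void)
	{
		printf("BSP: CMCI/CMCP percpu irqs registered, CMC vector set up\n");
	}

	/* The arch hook this patch implements; called from early_irq_init(). */
	static int arch_early_irq_init(void)
	{
		ia64_mca_irq_init();
		return 0;
	}

	int main(void)
	{
		/* Boot sequence, heavily abridged: */
		arch_early_irq_init();	/* new home: before any AP exists */
		printf("smp_init(): APs boot; each calls "
		       "ia64_mca_cmc_vector_setup() from smp_callin()\n");
		printf("initcalls: ia64_mca_late_init() runs here, on an "
		       "arbitrary cpu -- too late for the old code\n");
		return 0;
	}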
-rw-r--r--  arch/ia64/include/asm/mca.h |  1
-rw-r--r--  arch/ia64/kernel/irq.c      |  8
-rw-r--r--  arch/ia64/kernel/mca.c      | 37
3 files changed, 33 insertions(+), 13 deletions(-)
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 43f96ab18fa0..8c7096168716 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
 extern int cpe_vector;
 extern int ia64_cpe_irq;
 extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ad69606613eb..f2c418281130 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 
+#include <asm/mca.h>
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
 
 #endif /* CONFIG_SMP */
 
+int __init arch_early_irq_init(void)
+{
+	ia64_mca_irq_init();
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 unsigned int vectors_in_migration[NR_IRQS];
 
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 65bf9cd39044..d7396dbb07bb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
 	printk(KERN_INFO "MCA related initialization done\n");
 }
 
+
 /*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init.  Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs  : None
- * Outputs : Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existant action. Called from arch_early_irq_init().
  */
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
 {
-	if (!mca_init)
-		return 0;
-
 	/*
 	 * Configure the CMCI/P vector and handler.  Interrupts for CMC are
 	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
 	/* Setup the CPEI/P handler */
 	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init.  Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs  : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+	if (!mca_init)
+		return 0;
 
 	register_hotcpu_notifier(&mca_cpu_notifier);
 