author	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit	c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree	ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/arm/kernel/irq.c
parent	ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent	6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'arch/arm/kernel/irq.c')
-rw-r--r--	arch/arm/kernel/irq.c	139
1 file changed, 55 insertions(+), 84 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c0d5c3b3a760..83bbad03fcc6 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -35,8 +35,10 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
+#include <linux/ftrace.h>
 
 #include <asm/system.h>
+#include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
@@ -47,56 +49,20 @@
 #define irq_finish(irq)	do { } while (0)
 #endif
 
-unsigned int arch_nr_irqs;
-void (*init_arch_irq)(void) __initdata = NULL;
 unsigned long irq_err_count;
 
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, cpu;
-	struct irq_desc *desc;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		char cpuname[12];
-
-		seq_printf(p, "    ");
-		for_each_present_cpu(cpu) {
-			sprintf(cpuname, "CPU%d", cpu);
-			seq_printf(p, " %10s", cpuname);
-		}
-		seq_putc(p, '\n');
-	}
-
-	if (i < nr_irqs) {
-		desc = irq_to_desc(i);
-		raw_spin_lock_irqsave(&desc->lock, flags);
-		action = desc->action;
-		if (!action)
-			goto unlock;
-
-		seq_printf(p, "%3d: ", i);
-		for_each_present_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-		seq_printf(p, " %10s", desc->chip->name ? : "-");
-		seq_printf(p, "  %s", action->name);
-		for (action = action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-
-		seq_putc(p, '\n');
-unlock:
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	} else if (i == nr_irqs) {
 #ifdef CONFIG_FIQ
-		show_fiq_list(p, v);
+	show_fiq_list(p, prec);
 #endif
 #ifdef CONFIG_SMP
-		show_ipi_list(p);
-		show_local_irqs(p);
+	show_ipi_list(p, prec);
 #endif
-		seq_printf(p, "Err: %10lu\n", irq_err_count);
-	}
+#ifdef CONFIG_LOCAL_TIMERS
+	show_local_irqs(p, prec);
+#endif
+	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
 
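The rewritten function above leaves the per-IRQ rows to generic code and only prints the ARM-specific FIQ/IPI/local-timer lines plus the error counter, using the field width prec handed in by genirq so every row stays aligned. A minimal user-space sketch of that "%*s" alignment follows; the prec value, IRQ number, count, and device name are made up for illustration:

/* Illustrative only: the "%*s" column alignment used by arch_show_interrupts()
 * above, reproduced with printf().  All values are placeholders. */
#include <stdio.h>

int main(void)
{
	int prec = 3;                      /* assumed width of the IRQ-number column */
	unsigned long irq_err_count = 7;   /* placeholder error count */

	printf("%*d: %10u  %s\n", prec, 17, 54321u, "eth0");  /* a normal IRQ row */
	printf("%*s: %10lu\n", prec, "Err", irq_err_count);   /* the error row, same width */
	return 0;
}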
@@ -105,7 +71,8 @@ unlock:
  * come via this function.  Instead, they should provide their
  * own 'handler'
  */
-asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+asmlinkage void __exception_irq_entry
+asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
@@ -132,56 +99,53 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void set_irq_flags(unsigned int irq, unsigned int iflags)
 {
-	struct irq_desc *desc;
-	unsigned long flags;
+	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 
 	if (irq >= nr_irqs) {
 		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
 		return;
 	}
 
-	desc = irq_to_desc(irq);
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	if (iflags & IRQF_VALID)
-		desc->status &= ~IRQ_NOREQUEST;
+		clr |= IRQ_NOREQUEST;
 	if (iflags & IRQF_PROBE)
-		desc->status &= ~IRQ_NOPROBE;
+		clr |= IRQ_NOPROBE;
 	if (!(iflags & IRQF_NOAUTOEN))
-		desc->status &= ~IRQ_NOAUTOEN;
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+		clr |= IRQ_NOAUTOEN;
+	/* Order is clear bits in "clr" then set bits in "set" */
+	irq_modify_status(irq, clr, set & ~clr);
 }
 
 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	for (irq = 0; irq < nr_irqs; irq++) {
-		desc = irq_to_desc_alloc_node(irq, 0);
-		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-	}
-
-	init_arch_irq();
+	machine_desc->init_irq();
 }
 
 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
-	nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
-	return 0;
+	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
+	return nr_irqs;
 }
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static bool migrate_one_irq(struct irq_data *d)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
+	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	bool ret = false;
+
+	if (cpu >= nr_cpu_ids) {
+		cpu = cpumask_any(cpu_online_mask);
+		ret = true;
+	}
 
-	raw_spin_lock_irq(&desc->lock);
-	desc->chip->set_affinity(irq, cpumask_of(cpu));
-	raw_spin_unlock_irq(&desc->lock);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
+
+	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+
+	return ret;
 }
 
 /*
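In the new set_irq_flags() above, the ARM convenience flags (IRQF_VALID, IRQF_PROBE, IRQF_NOAUTOEN) are translated into a clear mask and a set mask for genirq's irq_modify_status() instead of poking desc->status under the descriptor lock. A minimal standalone sketch of that mask arithmetic follows; the numeric flag values are placeholders, not the kernel's definitions:

/* Illustrative only: the clr/set translation performed by the new
 * set_irq_flags() above, reproduced as a user-space program. */
#include <stdio.h>

#define IRQ_NOREQUEST  (1u << 0)   /* placeholder values */
#define IRQ_NOPROBE    (1u << 1)
#define IRQ_NOAUTOEN   (1u << 2)

#define IRQF_VALID     (1u << 0)
#define IRQF_PROBE     (1u << 1)
#define IRQF_NOAUTOEN  (1u << 2)

static void show_masks(unsigned int iflags)
{
	unsigned int clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

	if (iflags & IRQF_VALID)
		clr |= IRQ_NOREQUEST;
	if (iflags & IRQF_PROBE)
		clr |= IRQ_NOPROBE;
	if (!(iflags & IRQF_NOAUTOEN))
		clr |= IRQ_NOAUTOEN;

	/* What irq_modify_status(irq, clr, set & ~clr) would receive. */
	printf("iflags=%#x -> clr=%#x set=%#x\n", iflags, clr, set & ~clr);
}

int main(void)
{
	show_masks(IRQF_VALID);               /* requestable, auto-enabled */
	show_masks(IRQF_VALID | IRQF_PROBE);  /* also probeable */
	show_masks(0);                        /* invalid: nothing cleared */
	return 0;
}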
@@ -193,23 +157,30 @@ void migrate_irqs(void)
 {
 	unsigned int i, cpu = smp_processor_id();
 	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	for_each_irq_desc(i, desc) {
-		if (desc->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(desc->affinity,
-							      cpu_online_mask);
-			if (newcpu >= nr_cpu_ids) {
-				if (printk_ratelimit())
-					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-					       i, cpu);
-
-				cpumask_setall(desc->affinity);
-				newcpu = cpumask_any_and(desc->affinity,
-							 cpu_online_mask);
-			}
-
-			route_irq(desc, i, newcpu);
-		}
+		struct irq_data *d = &desc->irq_data;
+		bool affinity_broken = false;
+
+		raw_spin_lock(&desc->lock);
+		do {
+			if (desc->action == NULL)
+				break;
+
+			if (d->node != cpu)
+				break;
+
+			affinity_broken = migrate_one_irq(d);
+		} while (0);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken && printk_ratelimit())
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
 	}
+
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
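migrate_one_irq() above picks a new target from the IRQ's affinity mask intersected with the online mask, and only falls back to an arbitrary online CPU, reporting the affinity as broken, when that intersection is empty. A minimal standalone sketch of that fallback follows, with cpumasks reduced to plain bitmasks; all names and values are illustrative:

/* Illustrative only: the CPU-selection fallback of migrate_one_irq() above,
 * with cpumasks as plain bitmasks.  Bit n set = CPU n present in the mask. */
#include <stdbool.h>
#include <stdio.h>

/* Lowest set bit's index, or -1 if the mask is empty
 * (stand-in for cpumask_any_and()/cpumask_any()). */
static int mask_any(unsigned int mask)
{
	for (int cpu = 0; cpu < 32; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return -1;
}

/* Pick a target CPU for an IRQ whose current CPU is going offline.
 * Returns true if the original affinity could not be honoured. */
static bool pick_target(unsigned int affinity, unsigned int online, int *target)
{
	bool affinity_broken = false;
	int cpu = mask_any(affinity & online);

	if (cpu < 0) {                  /* no online CPU left in the affinity mask */
		cpu = mask_any(online);
		affinity_broken = true;
	}
	*target = cpu;
	return affinity_broken;
}

int main(void)
{
	int cpu;
	bool broken = pick_target(0x2 /* CPU1 only */, 0x1 /* CPU0 online */, &cpu);

	printf("target=CPU%d affinity_broken=%d\n", cpu, broken);
	return 0;
}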