path: root/arch/sh/kernel/irq.c
author     Paul Mundt <lethal@linux-sh.org>   2011-01-13 01:06:28 -0500
committer  Paul Mundt <lethal@linux-sh.org>   2011-01-13 01:06:28 -0500
commit     f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree       b29722f6e965316e90ac97abf79923ced250dc21  /arch/sh/kernel/irq.c
parent     f8e53553f452dcbf67cb89c8cba63a1cd6eb4cc0 (diff)
parent     4162cf64973df51fc885825bc9ca4d055891c49f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework
Conflicts:
	arch/sh/kernel/cpu/sh2/setup-sh7619.c
	arch/sh/kernel/cpu/sh2a/setup-mxg.c
	arch/sh/kernel/cpu/sh2a/setup-sh7201.c
	arch/sh/kernel/cpu/sh2a/setup-sh7203.c
	arch/sh/kernel/cpu/sh2a/setup-sh7206.c
	arch/sh/kernel/cpu/sh3/setup-sh7705.c
	arch/sh/kernel/cpu/sh3/setup-sh770x.c
	arch/sh/kernel/cpu/sh3/setup-sh7710.c
	arch/sh/kernel/cpu/sh3/setup-sh7720.c
	arch/sh/kernel/cpu/sh4/setup-sh4-202.c
	arch/sh/kernel/cpu/sh4/setup-sh7750.c
	arch/sh/kernel/cpu/sh4/setup-sh7760.c
	arch/sh/kernel/cpu/sh4a/setup-sh7343.c
	arch/sh/kernel/cpu/sh4a/setup-sh7366.c
	arch/sh/kernel/cpu/sh4a/setup-sh7722.c
	arch/sh/kernel/cpu/sh4a/setup-sh7723.c
	arch/sh/kernel/cpu/sh4a/setup-sh7724.c
	arch/sh/kernel/cpu/sh4a/setup-sh7763.c
	arch/sh/kernel/cpu/sh4a/setup-sh7770.c
	arch/sh/kernel/cpu/sh4a/setup-sh7780.c
	arch/sh/kernel/cpu/sh4a/setup-sh7785.c
	arch/sh/kernel/cpu/sh4a/setup-sh7786.c
	arch/sh/kernel/cpu/sh4a/setup-shx3.c
	arch/sh/kernel/cpu/sh5/setup-sh5.c
	drivers/serial/sh-sci.c
	drivers/serial/sh-sci.h
	include/linux/serial_sci.h
Diffstat (limited to 'arch/sh/kernel/irq.c')
-rw-r--r--	arch/sh/kernel/irq.c	138
1 file changed, 95 insertions(+), 43 deletions(-)
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 3d09062f4682..68ecbe6c881a 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -11,6 +11,8 @@
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/ftrace.h>
+#include <linux/delay.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
@@ -36,7 +38,15 @@ void ack_bad_irq(unsigned int irq)
  */
 static int show_other_interrupts(struct seq_file *p, int prec)
 {
+	int j;
+
+	seq_printf(p, "%*s: ", prec, "NMI");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
+	seq_printf(p, " Non-maskable interrupts\n");
+
 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+
 	return 0;
 }
 
@@ -46,6 +56,8 @@ int show_interrupts(struct seq_file *p, void *v)
 	int i = *(loff_t *)v, j, prec;
 	struct irqaction *action;
 	struct irq_desc *desc;
+	struct irq_data *data;
+	struct irq_chip *chip;
 
 	if (i > nr_irqs)
 		return 0;
@@ -67,7 +79,10 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	data = irq_get_irq_data(i);
+	chip = irq_data_get_irq_chip(data);
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	for_each_online_cpu(j)
 		any_count |= kstat_irqs_cpu(i, j);
 	action = desc->action;
@@ -77,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	seq_printf(p, "%*d: ", prec, i);
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-	seq_printf(p, " %14s", desc->chip->name);
+	seq_printf(p, " %14s", chip->name);
 	seq_printf(p, "-%-8s", desc->name);
 
 	if (action) {
@@ -88,7 +103,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	seq_putc(p, '\n');
 out:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 #endif
@@ -104,36 +119,14 @@ union irq_ctx {
 
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
 
-asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static inline void handle_one_irq(unsigned int irq)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-#ifdef CONFIG_IRQSTACKS
 	union irq_ctx *curctx, *irqctx;
-#endif
-
-	irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 1KB free? */
-	{
-		long sp;
-
-		__asm__ __volatile__ ("and r15, %0" :
-				      "=r" (sp) : "0" (THREAD_SIZE - 1));
-
-		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-			printk("do_IRQ: stack overflow: %ld\n",
-			       sp - sizeof(struct thread_info));
-			dump_stack();
-		}
-	}
-#endif
 
-	irq = irq_demux(intc_evt2irq(irq));
-
-#ifdef CONFIG_IRQSTACKS
 	curctx = (union irq_ctx *)current_thread_info();
 	irqctx = hardirq_ctx[smp_processor_id()];
 
@@ -172,22 +165,9 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
 			  "r5", "r6", "r7", "r8", "t", "pr"
 		);
 	} else
-#endif
 		generic_handle_irq(irq);
-
-	irq_exit();
-
-	set_irq_regs(old_regs);
-	return 1;
 }
 
-#ifdef CONFIG_IRQSTACKS
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__section__(".bss.page_aligned")));
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__section__(".bss.page_aligned")));
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
@@ -267,8 +247,33 @@ asmlinkage void do_softirq(void)
 
 	local_irq_restore(flags);
 }
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+	generic_handle_irq(irq);
+}
 #endif
 
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	irq = irq_demux(irq_lookup(irq));
+
+	if (irq != NO_IRQ_IGNORE) {
+		handle_one_irq(irq);
+		irq_finish(irq);
+	}
+
+	irq_exit();
+
+	set_irq_regs(old_regs);
+
+	return IRQ_HANDLED;
+}
+
 void __init init_IRQ(void)
 {
 	plat_irq_setup();
@@ -277,6 +282,8 @@ void __init init_IRQ(void)
 	if (sh_mv.mv_init_irq)
 		sh_mv.mv_init_irq();
 
+	intc_finalize();
+
 	irq_ctx_init(smp_processor_id());
 }
 
@@ -284,6 +291,51 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
 	nr_irqs = sh_mv.mv_nr_irqs;
-	return 0;
+	return NR_IRQS_LEGACY;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+
+	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
+	       irq, data->node, cpu);
+
+	raw_spin_lock_irq(&desc->lock);
+	chip->irq_set_affinity(data, cpumask_of(cpu), false);
+	raw_spin_unlock_irq(&desc->lock);
+}
+
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	unsigned int irq, cpu = smp_processor_id();
+
+	for_each_active_irq(irq) {
+		struct irq_data *data = irq_get_irq_data(irq);
+
+		if (data->node == cpu) {
+			unsigned int newcpu = cpumask_any_and(data->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
+				if (printk_ratelimit())
+					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+					       irq, cpu);
+
+				cpumask_setall(data->affinity);
+				newcpu = cpumask_any_and(data->affinity,
+							 cpu_online_mask);
+			}
+
+			route_irq(data, irq, newcpu);
+		}
+	}
 }
 #endif
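
Reading aid (not part of the patch): the net effect of the irq.c changes above is that the old monolithic do_IRQ() is split into a thin entry routine plus a handle_one_irq() helper, which either switches onto the per-CPU hardirq stack (CONFIG_IRQSTACKS) or simply calls generic_handle_irq(). A condensed sketch of the resulting flow, with the stack switch and the IRQSTACKS variant elided, using only names that appear in the diff:

	/* Condensed sketch of the post-merge entry path; see the full diff above. */
	asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
	{
		struct pt_regs *old_regs = set_irq_regs(regs);

		irq_enter();

		/* Map the hardware event code to a Linux IRQ number. */
		irq = irq_demux(irq_lookup(irq));

		if (irq != NO_IRQ_IGNORE) {
			handle_one_irq(irq);	/* hardirq stack or generic_handle_irq() */
			irq_finish(irq);
		}

		irq_exit();
		set_irq_regs(old_regs);
		return IRQ_HANDLED;
	}

The patch provides handle_one_irq() in two flavours: the CONFIG_IRQSTACKS version that borrows the per-CPU hardirq stack before dispatching, and a trivial fallback that calls generic_handle_irq() directly.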