path: root/arch/sh/kernel/irq.c
author	Paul Mundt <lethal@linux-sh.org>	2010-04-15 00:13:52 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2010-04-15 00:13:52 -0400
commit	dc825b17904a06bbd2f79d720b23156e4c01a22f (patch)
tree	8f1e13b850a06264530f1f1bb680a541e73cef34 /arch/sh/kernel/irq.c
parent	fecf066c2d2fbc7e6a7e7e3a5af772a165bdd7b0 (diff)
sh: intc: IRQ auto-distribution support.
This implements support for hardware-managed IRQ balancing as implemented by SH-X3 cores (presently only hooked up for SH7786, but can probably be carried over to other SH-X3 cores, too).

CPUs need to specify their distribution register along with the mask definitions, as these follow the same format. Peripheral IRQs that don't opt out of balancing will be automatically distributed at the whim of the hardware block, while each CPU needs to verify whether it is handling the IRQ or not, especially before clearing the mask.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
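As a companion illustration (not part of this diff, which only touches the generic SH IRQ entry path): the message above says a CPU attaches its distribution register to the existing mask register descriptions, since both share the same bit layout. A minimal sketch of what such a description might look like follows; the register addresses and enum IDs are invented placeholders, and the struct intc_mask_reg layout, header path, and INTC_SMP_BALANCING() helper are assumed from the companion intc descriptor changes rather than shown here.

#include <linux/sh_intc.h>	/* header path assumed */

/* Placeholder enum IDs; a real CPU setup file defines its own. */
enum { EXAMPLE_IRQ_A = 1, EXAMPLE_IRQ_B, EXAMPLE_IRQ_C, EXAMPLE_IRQ_D };

/*
 * Sketch only: a mask set/clear register pair plus the matching
 * distribution register.  All addresses are invented, not SH7786 values.
 */
static struct intc_mask_reg mask_registers[] __initdata = {
	{ 0xfe782000, 0xfe782004, 32,		/* set / clear, 32-bit wide */
	  { EXAMPLE_IRQ_A, EXAMPLE_IRQ_B,
	    EXAMPLE_IRQ_C, EXAMPLE_IRQ_D },	/* bit layout shared with dist_reg */
	  INTC_SMP_BALANCING(0xfe782008) },	/* distribution register (assumed macro) */
};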
Diffstat (limited to 'arch/sh/kernel/irq.c')
-rw-r--r--	arch/sh/kernel/irq.c	49
1 files changed, 29 insertions, 20 deletions
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index d2d41d046657..f6a9319c28e2 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -113,19 +113,14 @@ union irq_ctx {
 
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
 
-asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static inline void handle_one_irq(unsigned int irq)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-#ifdef CONFIG_IRQSTACKS
 	union irq_ctx *curctx, *irqctx;
-#endif
-
-	irq_enter();
-	irq = irq_demux(irq);
 
-#ifdef CONFIG_IRQSTACKS
 	curctx = (union irq_ctx *)current_thread_info();
 	irqctx = hardirq_ctx[smp_processor_id()];
 
@@ -164,20 +159,9 @@ asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
 		  "r5", "r6", "r7", "r8", "t", "pr"
 		);
 	} else
-#endif
 		generic_handle_irq(irq);
-
-	irq_exit();
-
-	set_irq_regs(old_regs);
-	return 1;
 }
 
-#ifdef CONFIG_IRQSTACKS
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
@@ -257,8 +241,33 @@ asmlinkage void do_softirq(void)
 
 	local_irq_restore(flags);
 }
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+	generic_handle_irq(irq);
+}
 #endif
 
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	irq = irq_demux(irq_lookup(irq));
+
+	if (irq != NO_IRQ_IGNORE) {
+		handle_one_irq(irq);
+		irq_finish(irq);
+	}
+
+	irq_exit();
+
+	set_irq_regs(old_regs);
+
+	return IRQ_HANDLED;
+}
+
 void __init init_IRQ(void)
 {
 	plat_irq_setup();
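The irq != NO_IRQ_IGNORE test above is the per-CPU ownership check the commit message calls out: irq_lookup() is expected to return NO_IRQ_IGNORE when the hardware distributed the vector to another CPU, so that CPU skips the handler and never touches the mask, while irq_finish() gives the balancing code a hook after the handler has run. The fallback definitions below are a sketch of what a configuration without balancing might provide; they are an assumption for illustration, not part of this diff.

/*
 * Sketch only (assumed, not shown by this diff): without hardware IRQ
 * balancing the lookup is an identity mapping and there is nothing to
 * finish, so do_IRQ() behaves exactly as it did before this change.
 */
#ifndef CONFIG_INTC_BALANCING
#define irq_lookup(irq)		(irq)			/* every demuxed IRQ is ours */
#define irq_finish(irq)		do { } while (0)	/* no post-handler work */
#endif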