Diffstat (limited to 'arch/sh/kernel/irq.c')
-rw-r--r--  arch/sh/kernel/irq.c | 153
1 file changed, 149 insertions(+), 4 deletions(-)
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 7066611aeb72..c7ebd6aec951 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -1,5 +1,4 @@
-/* $Id: irq.c,v 1.20 2004/01/13 05:52:11 kkojima Exp $
- *
+/*
  * linux/arch/sh/kernel/irq.c
  *
  * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
@@ -7,13 +6,15 @@
  *
  * SuperH version: Copyright (C) 1999 Niibe Yutaka
  */
-
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <asm/irq.h>
 #include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/thread_info.h>
 #include <asm/cpu/mmu_context.h>
 
 /*
@@ -60,11 +61,27 @@ unlock:
 }
 #endif
 
+#ifdef CONFIG_4KSTACKS
+/*
+ * per-CPU IRQ handling contexts (thread information and stack)
+ */
+union irq_ctx {
+        struct thread_info tinfo;
+        u32 stack[THREAD_SIZE/sizeof(u32)];
+};
+
+static union irq_ctx *hardirq_ctx[NR_CPUS];
+static union irq_ctx *softirq_ctx[NR_CPUS];
+#endif
+
 asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
                       unsigned long r6, unsigned long r7,
                       struct pt_regs regs)
 {
         int irq = r4;
+#ifdef CONFIG_4KSTACKS
+        union irq_ctx *curctx, *irqctx;
+#endif
 
         irq_enter();
 
@@ -102,7 +119,135 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
 #endif
 
         irq = irq_demux(irq);
-        __do_IRQ(irq, &regs);
+
+#ifdef CONFIG_4KSTACKS
+        curctx = (union irq_ctx *)current_thread_info();
+        irqctx = hardirq_ctx[smp_processor_id()];
+
+        /*
+         * this is where we switch to the IRQ stack. However, if we are
+         * already using the IRQ stack (because we interrupted a hardirq
+         * handler) we can't do that and just have to keep using the
+         * current stack (which is the irq stack already after all)
+         */
+        if (curctx != irqctx) {
+                u32 *isp;
+
+                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+                irqctx->tinfo.task = curctx->tinfo.task;
+                irqctx->tinfo.previous_sp = current_stack_pointer;
+
+                __asm__ __volatile__ (
+                        "mov %0, r4 \n"
+                        "mov %1, r5 \n"
+                        "mov r15, r9 \n"
+                        "jsr @%2 \n"
+                        /* switch to the irq stack */
+                        " mov %3, r15 \n"
+                        /* restore the stack (ring zero) */
+                        "mov r9, r15 \n"
+                        : /* no outputs */
+                        : "r" (irq), "r" (&regs), "r" (__do_IRQ), "r" (isp)
+                        /* XXX: A somewhat excessive clobber list? -PFM */
+                        : "memory", "r0", "r1", "r2", "r3", "r4",
+                          "r5", "r6", "r7", "r8", "t", "pr"
+                );
+        } else
+#endif
+                __do_IRQ(irq, &regs);
+
         irq_exit();
+
         return 1;
 }
+
+#ifdef CONFIG_4KSTACKS
+/*
+ * These should really be __section__(".bss.page_aligned") as well, but
+ * gcc's 3.0 and earlier don't handle that correctly.
+ */
+static char softirq_stack[NR_CPUS * THREAD_SIZE]
+                __attribute__((__aligned__(THREAD_SIZE)));
+
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+                __attribute__((__aligned__(THREAD_SIZE)));
+
+/*
+ * allocate per-cpu stacks for hardirq and for softirq processing
+ */
+void irq_ctx_init(int cpu)
+{
+        union irq_ctx *irqctx;
+
+        if (hardirq_ctx[cpu])
+                return;
+
+        irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
+        irqctx->tinfo.task = NULL;
+        irqctx->tinfo.exec_domain = NULL;
+        irqctx->tinfo.cpu = cpu;
+        irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+        hardirq_ctx[cpu] = irqctx;
+
+        irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
+        irqctx->tinfo.task = NULL;
+        irqctx->tinfo.exec_domain = NULL;
+        irqctx->tinfo.cpu = cpu;
+        irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET;
+        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+        softirq_ctx[cpu] = irqctx;
+
+        printk("CPU %u irqstacks, hard=%p soft=%p\n",
+                cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
+}
+
+void irq_ctx_exit(int cpu)
+{
+        hardirq_ctx[cpu] = NULL;
+}
+
+extern asmlinkage void __do_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+        unsigned long flags;
+        struct thread_info *curctx;
+        union irq_ctx *irqctx;
+        u32 *isp;
+
+        if (in_interrupt())
+                return;
+
+        local_irq_save(flags);
+
+        if (local_softirq_pending()) {
+                curctx = current_thread_info();
+                irqctx = softirq_ctx[smp_processor_id()];
+                irqctx->tinfo.task = curctx->task;
+                irqctx->tinfo.previous_sp = current_stack_pointer;
+
+                /* build the stack frame on the softirq stack */
+                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+                __asm__ __volatile__ (
+                        "mov r15, r9 \n"
+                        "jsr @%0 \n"
+                        /* switch to the softirq stack */
+                        " mov %1, r15 \n"
+                        /* restore the thread stack */
+                        "mov r9, r15 \n"
+                        : /* no outputs */
+                        : "r" (__do_softirq), "r" (isp)
+                        /* XXX: A somewhat excessive clobber list? -PFM */
+                        : "memory", "r0", "r1", "r2", "r3", "r4",
+                          "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+                );
+        }
+
+        local_irq_restore(flags);
+}
+EXPORT_SYMBOL(do_softirq);
+#endif