Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/irq_32.c  163
1 file changed, 77 insertions(+), 86 deletions(-)
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 1c470d2e5af7..4e3e8ec60276 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -83,26 +83,28 @@ union irq_ctx {
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 
-static inline void call_on_stack(void *func, void *stack,
-                                 unsigned long arg1, void *arg2)
+static char softirq_stack[NR_CPUS * THREAD_SIZE]
+                __attribute__((__section__(".bss.page_aligned")));
+
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+                __attribute__((__section__(".bss.page_aligned")));
+
+static void call_on_stack(void *func, void *stack)
 {
-        unsigned long bx;
-
-        asm volatile(
-                "       xchgl   %%ebx,%%esp     \n"
-                "       call    *%%edi          \n"
-                "       movl    %%ebx,%%esp     \n"
-                : "=a" (arg1), "=d" (arg2), "=b" (bx)
-                : "0" (arg1), "1" (arg2), "2" (stack),
-                  "D" (func)
-                : "memory", "cc", "ecx");
+        asm volatile("xchgl     %%ebx,%%esp     \n"
+                     "call      *%%edi          \n"
+                     "movl      %%ebx,%%esp     \n"
+                     : "=b" (stack)
+                     : "0" (stack),
+                       "D"(func)
+                     : "memory", "cc", "edx", "ecx", "eax");
 }
 
 static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 {
         union irq_ctx *curctx, *irqctx;
-        u32 *isp;
+        u32 *isp, arg1, arg2;
 
         curctx = (union irq_ctx *) current_thread_info();
         irqctx = hardirq_ctx[smp_processor_id()];
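
The new call_on_stack() drops the two extra arguments and keeps only the
stack switch itself: %esp is exchanged with %ebx (a callee-saved register,
so it survives the call), func is called through %edi on the new stack, and
the original %esp is restored afterwards. Below is a minimal user-space
sketch of the same trick, assuming a 32-bit build (gcc -m32); demo_stack
and on_other_stack are hypothetical names for illustration, not part of
this patch.

#include <stdio.h>

static char demo_stack[8192] __attribute__((aligned(16)));

static void on_other_stack(void)
{
        int marker;
        /* prints an address inside demo_stack, not the main stack */
        printf("running near %p\n", (void *)&marker);
}

static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl %%ebx,%%esp \n"     /* switch to the new stack */
                     "call *%%edi       \n"     /* func() runs on it       */
                     "movl %%ebx,%%esp  \n"     /* switch back             */
                     : "=b" (stack)
                     : "0" (stack), "D" (func)
                     : "memory", "cc", "edx", "ecx", "eax");
}

int main(void)
{
        int marker;
        printf("main stack near %p\n", (void *)&marker);
        /* pass the highest address: the stack grows downwards */
        call_on_stack(on_other_stack, demo_stack + sizeof(demo_stack));
        return 0;
}
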
@@ -130,64 +132,22 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
                 (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
 
         if (unlikely(overflow))
-                call_on_stack(print_stack_overflow, isp, 0, NULL);
+                call_on_stack(print_stack_overflow, isp);
 
-        call_on_stack(desc->handle_irq, isp, irq, desc);
-
-        return 1;
-}
-
-#else
-static inline int
-execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
-#endif
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-unsigned int do_IRQ(struct pt_regs *regs)
-{
-        struct pt_regs *old_regs;
-        /* high bit used in ret_from_ code */
-        int overflow, irq = ~regs->orig_ax;
-        struct irq_desc *desc = irq_desc + irq;
-
-        if (unlikely((unsigned)irq >= NR_IRQS)) {
-                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-                                        __func__, irq);
-                BUG();
-        }
-
-        old_regs = set_irq_regs(regs);
-        irq_enter();
-
-        overflow = check_stack_overflow();
-
-        if (!execute_on_irq_stack(overflow, desc, irq)) {
-                if (unlikely(overflow))
-                        print_stack_overflow();
-                desc->handle_irq(irq, desc);
-        }
-
-        irq_exit();
-        set_irq_regs(old_regs);
+        asm volatile("xchgl     %%ebx,%%esp     \n"
+                     "call      *%%edi          \n"
+                     "movl      %%ebx,%%esp     \n"
+                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
+                     : "0" (irq), "1" (desc), "2" (isp),
+                       "D" (desc->handle_irq)
+                     : "memory", "cc", "ecx");
+
         return 1;
 }
 
-#ifdef CONFIG_4KSTACKS
-
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
-                __attribute__((__section__(".bss.page_aligned")));
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-                __attribute__((__section__(".bss.page_aligned")));
-
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
-void irq_ctx_init(int cpu)
+void __cpuinit irq_ctx_init(int cpu)
 {
         union irq_ctx *irqctx;
 
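
The handler invocation cannot go through the simplified call_on_stack(),
because desc->handle_irq() takes (irq, desc); the open-coded asm therefore
loads irq into %eax and desc into %edx before switching stacks. That
matches the 32-bit kernel's calling convention: it is built with
-mregparm=3, so the first arguments travel in %eax, %edx, %ecx rather than
on the stack (which would be the wrong stack at this point anyway). A small
sketch of that convention, assuming gcc -m32; handler is a hypothetical
demo function:

#include <stdio.h>

/* same ABI the kernel gets globally from -mregparm=3 */
static void __attribute__((regparm(3)))
handler(unsigned int irq, void *desc)
{
        /* irq arrived in %eax, desc in %edx - no stack slots involved */
        printf("irq=%u desc=%p\n", irq, desc);
}

int main(void)
{
        handler(42, (void *)0); /* compiler loads %eax/%edx, then calls */
        return 0;
}
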
@@ -195,25 +155,25 @@ void irq_ctx_init(int cpu)
                 return;
 
         irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
         irqctx->tinfo.task = NULL;
         irqctx->tinfo.exec_domain = NULL;
         irqctx->tinfo.cpu = cpu;
         irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
         irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
 
         hardirq_ctx[cpu] = irqctx;
 
         irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
         irqctx->tinfo.task = NULL;
         irqctx->tinfo.exec_domain = NULL;
         irqctx->tinfo.cpu = cpu;
         irqctx->tinfo.preempt_count = 0;
         irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
 
         softirq_ctx[cpu] = irqctx;
 
-        printk("CPU %u irqstacks, hard=%p soft=%p\n",
+        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
                cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
 }
 
 void irq_ctx_exit(int cpu)
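
irq_ctx_init() works because union irq_ctx overlays a struct thread_info
on the bottom of each THREAD_SIZE-sized, THREAD_SIZE-aligned stack, the
same layout normal task stacks use. For reference, the union as defined
near the top of irq_32.c (it is the enclosing context of the first hunk
above):

union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};

The initial stack pointer handed to call_on_stack() is taken from just
past the end of the union, so the stack grows down towards tinfo, and an
overrun clobbers the thread_info first, exactly as on task stacks.
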
@@ -242,25 +202,56 @@ asmlinkage void do_softirq(void)
                 /* build the stack frame on the softirq stack */
                 isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
 
-                asm volatile(
-                        "       xchgl   %%ebx,%%esp     \n"
-                        "       call    __do_softirq    \n"
-                        "       movl    %%ebx,%%esp     \n"
-                        : "=b"(isp)
-                        : "0"(isp)
-                        : "memory", "cc", "edx", "ecx", "eax"
-                );
+                call_on_stack(__do_softirq, isp);
                 /*
                  * Shouldnt happen, we returned above if in_interrupt():
                  */
                 WARN_ON_ONCE(softirq_count());
         }
 
         local_irq_restore(flags);
 }
+
+#else
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
 #endif
 
 /*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int do_IRQ(struct pt_regs *regs)
+{
+        struct pt_regs *old_regs;
+        /* high bit used in ret_from_ code */
+        int overflow, irq = ~regs->orig_ax;
+        struct irq_desc *desc = irq_desc + irq;
+
+        if (unlikely((unsigned)irq >= NR_IRQS)) {
+                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+                                        __func__, irq);
+                BUG();
+        }
+
+        old_regs = set_irq_regs(regs);
+        irq_enter();
+
+        overflow = check_stack_overflow();
+
+        if (!execute_on_irq_stack(overflow, desc, irq)) {
+                if (unlikely(overflow))
+                        print_stack_overflow();
+                desc->handle_irq(irq, desc);
+        }
+
+        irq_exit();
+        set_irq_regs(old_regs);
+        return 1;
+}
+
+/*
  * Interrupt statistics:
  */
 
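
do_IRQ() now sits outside the CONFIG_4KSTACKS block so both configurations
share one copy; with 4K stacks disabled, the inline execute_on_irq_stack()
stub returns 0 and the handler simply runs on the current task stack. The
vector is recovered as ~regs->orig_ax because the entry code stores it
inverted (the "high bit" comment). check_stack_overflow() is defined
earlier in the file, outside the hunks shown here; the sketch below is a
reconstruction of the idea rather than a quote of this commit, with
THREAD_SIZE, STACK_WARN and struct thread_info replaced by stand-in demo
definitions so it compiles stand-alone with gcc -m32.

#include <stdio.h>

#define THREAD_SIZE 4096                /* 4K stacks, power of two */
#define STACK_WARN  (THREAD_SIZE / 8)   /* hypothetical demo value */

struct thread_info { long dummy[14]; }; /* stand-in for the real struct */

static int check_stack_overflow(void)
{
        long sp;

        /* stacks are THREAD_SIZE aligned, so masking %esp with
         * THREAD_SIZE - 1 yields the offset from the stack base;
         * little room left above thread_info means trouble */
        asm volatile("andl %%esp,%0" : "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (long)(sizeof(struct thread_info) + STACK_WARN);
}

int main(void)
{
        /* meaningless in user space (stacks aren't THREAD_SIZE
         * aligned here), but shows the masking at work */
        printf("overflow flag: %d\n", check_stack_overflow());
        return 0;
}
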