author		Frederic Weisbecker <fweisbec@gmail.com>	2013-09-05 09:49:45 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2013-10-01 06:53:25 -0400
commit		7d65f4a6553203da6a22097821d151fbbe7e4956 (patch)
tree		4ac4162ca37756530112c29e8dbf1d2568313d81
parent		ded797547548a5b8e7b92383a41e4c0e6b0ecb7f (diff)
irq: Consolidate do_softirq() arch overridden implementations

All arch overridden implementations of do_softirq() share the following
common code: disable irqs (to avoid races with the pending check), check
if there are softirqs pending, then execute __do_softirq() on a specific
stack. Consolidate the common parts so that archs only need to worry
about the stack switch.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@au1.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
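For readability, the resulting generic path can be summarized as follows. This is a condensed sketch stitched together from the include/linux/interrupt.h and kernel/softirq.c hunks below, not a verbatim copy of either file: the core performs the irq-disable and pending check once, and each architecture only supplies the stack switch via do_softirq_own_stack().

/*
 * Condensed sketch of the consolidated generic path (see the
 * include/linux/interrupt.h and kernel/softirq.c hunks below).
 */
#ifdef __ARCH_HAS_DO_SOFTIRQ
/* The arch provides the stack switch and calls __do_softirq() itself. */
void do_softirq_own_stack(void);
#else
/* No dedicated softirq stack: run the softirqs in place. */
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	/* Disable irqs to avoid races with the pending check. */
	local_irq_save(flags);

	pending = local_softirq_pending();
	if (pending)
		do_softirq_own_stack();	/* arch hook: only the stack switch */

	WARN_ON_ONCE(softirq_count());
	local_irq_restore(flags);
}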
 arch/metag/kernel/irq.c    | 52
 arch/parisc/kernel/irq.c   | 17
 arch/powerpc/kernel/irq.c  | 17
 arch/s390/kernel/irq.c     | 52
 arch/sh/kernel/irq.c       | 57
 arch/sparc/kernel/irq_64.c | 31
 arch/x86/kernel/entry_64.S |  4
 arch/x86/kernel/irq_32.c   | 30
 arch/x86/kernel/irq_64.c   | 21
 include/linux/interrupt.h  | 11
 kernel/softirq.c           |  8
 11 files changed, 98 insertions(+), 202 deletions(-)
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
index 2a2c9d55187e..3b4b7f6c0950 100644
--- a/arch/metag/kernel/irq.c
+++ b/arch/metag/kernel/irq.c
@@ -159,44 +159,30 @@ void irq_ctx_exit(int cpu)
 
 extern asmlinkage void __do_softirq(void);
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
-
-		asm volatile (
-			"MOV D0.5,%0\n"
-			"SWAP A0StP,D0.5\n"
-			"CALLR D1RtP,___do_softirq\n"
-			"MOV A0StP,D0.5\n"
-			:
-			: "r" (isp)
-			: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
-			  "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
-			  "D0.5"
-			);
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
 
-	local_irq_restore(flags);
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
+
+	asm volatile (
+		"MOV D0.5,%0\n"
+		"SWAP A0StP,D0.5\n"
+		"CALLR D1RtP,___do_softirq\n"
+		"MOV A0StP,D0.5\n"
+		:
+		: "r" (isp)
+		: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
+		  "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
+		  "D0.5"
+		);
 }
 #endif
 
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 2e6443b1e922..ef5927685299 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -499,22 +499,9 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
 	*irq_stack_in_use = 1;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	__u32 pending;
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	pending = local_softirq_pending();
-
-	if (pending)
-		execute_on_irq_stack(__do_softirq, 0);
-
-	local_irq_restore(flags);
+	execute_on_irq_stack(__do_softirq, 0);
 }
 #endif /* CONFIG_IRQSTACKS */
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d286a78f86..5c4adfc6a6d0 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -593,7 +593,7 @@ void irq_ctx_init(void)
 	}
 }
 
-static inline void do_softirq_onstack(void)
+void do_softirq_own_stack(void)
 {
 	struct thread_info *curtp, *irqtp;
 
@@ -611,21 +611,6 @@ static inline void do_softirq_onstack(void)
 	set_bits(irqtp->flags, &curtp->flags);
 }
 
-void do_softirq(void)
-{
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending())
-		do_softirq_onstack();
-
-	local_irq_restore(flags);
-}
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8ac2097f13d4..bb27a262c44a 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -157,39 +157,29 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 /*
  * Switch to the asynchronous interrupt stack for softirq execution.
  */
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags, old, new;
+	unsigned long old, new;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		/* Get current stack pointer. */
-		asm volatile("la %0,0(15)" : "=a" (old));
-		/* Check against async. stack address range. */
-		new = S390_lowcore.async_stack;
-		if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
-			/* Need to switch to the async. stack. */
-			new -= STACK_FRAME_OVERHEAD;
-			((struct stack_frame *) new)->back_chain = old;
-
-			asm volatile(" la 15,0(%0)\n"
-				     " basr 14,%2\n"
-				     " la 15,0(%1)\n"
-				     : : "a" (new), "a" (old),
-					 "a" (__do_softirq)
-				     : "0", "1", "2", "3", "4", "5", "14",
-				       "cc", "memory" );
-		} else {
-			/* We are already on the async stack. */
-			__do_softirq();
-		}
+	/* Get current stack pointer. */
+	asm volatile("la %0,0(15)" : "=a" (old));
+	/* Check against async. stack address range. */
+	new = S390_lowcore.async_stack;
+	if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+		/* Need to switch to the async. stack. */
+		new -= STACK_FRAME_OVERHEAD;
+		((struct stack_frame *) new)->back_chain = old;
+		asm volatile(" la 15,0(%0)\n"
+			     " basr 14,%2\n"
+			     " la 15,0(%1)\n"
+			     : : "a" (new), "a" (old),
+				 "a" (__do_softirq)
+			     : "0", "1", "2", "3", "4", "5", "14",
+			       "cc", "memory" );
+	} else {
+		/* We are already on the async stack. */
+		__do_softirq();
 	}
-
-	local_irq_restore(flags);
 }
 
 /*
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 063af10ff3c1..0833736afa32 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,47 +149,32 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_sp = current_stack_pointer;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
-
-		__asm__ __volatile__ (
-			"mov	r15, r9		\n"
-			"jsr	@%0		\n"
-			/* switch to the softirq stack */
-			" mov	%1, r15		\n"
-			/* restore the thread stack */
-			"mov	r9, r15		\n"
-			: /* no outputs */
-			: "r" (__do_softirq), "r" (isp)
-			: "memory", "r0", "r1", "r2", "r3", "r4",
-			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-		);
-
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
-
-	local_irq_restore(flags);
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_sp = current_stack_pointer;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+	__asm__ __volatile__ (
+		"mov	r15, r9		\n"
+		"jsr	@%0		\n"
+		/* switch to the softirq stack */
+		" mov	%1, r15		\n"
+		/* restore the thread stack */
+		"mov	r9, r15		\n"
+		: /* no outputs */
+		: "r" (__do_softirq), "r" (isp)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+	);
 }
 #else
 static inline void handle_one_irq(unsigned int irq)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index d4840cec2c55..666193f4e8bb 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -698,30 +698,19 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
-void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
+	void *orig_sp, *sp = softirq_stack[smp_processor_id()];
 
-	if (local_softirq_pending()) {
-		void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-
-		sp += THREAD_SIZE - 192 - STACK_BIAS;
-
-		__asm__ __volatile__("mov %%sp, %0\n\t"
-				     "mov %1, %%sp"
-				     : "=&r" (orig_sp)
-				     : "r" (sp));
-		__do_softirq();
-		__asm__ __volatile__("mov %0, %%sp"
-				     : : "r" (orig_sp));
-	}
+	sp += THREAD_SIZE - 192 - STACK_BIAS;
 
-	local_irq_restore(flags);
+	__asm__ __volatile__("mov %%sp, %0\n\t"
+			     "mov %1, %%sp"
+			     : "=&r" (orig_sp)
+			     : "r" (sp));
+	__do_softirq();
+	__asm__ __volatile__("mov %0, %%sp"
+			     : : "r" (orig_sp));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b077f4cc225a..083da7c2f40d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1342,7 +1342,7 @@ bad_gs:
 	.previous
 
 /* Call softirq on interrupt stack. Interrupts are off. */
-ENTRY(call_softirq)
+ENTRY(do_softirq_own_stack)
 	CFI_STARTPROC
 	pushq_cfi %rbp
 	CFI_REL_OFFSET rbp,0
@@ -1359,7 +1359,7 @@ ENTRY(call_softirq)
 	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
-END(call_softirq)
+END(do_softirq_own_stack)
 
 #ifdef CONFIG_XEN
 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 4186755f1d7c..8a5bb01dbc0e 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -149,35 +149,21 @@ void irq_ctx_init(int cpu)
 	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = __this_cpu_read(softirq_ctx);
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_esp = current_stack_pointer;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+	curctx = current_thread_info();
+	irqctx = __this_cpu_read(softirq_ctx);
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_esp = current_stack_pointer;
 
-		call_on_stack(__do_softirq, isp);
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
 
-	local_irq_restore(flags);
+	call_on_stack(__do_softirq, isp);
 }
 
 bool handle_irq(unsigned irq, struct pt_regs *regs)
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index d04d3ecded62..4d1c746892eb 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -87,24 +87,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 	generic_handle_irq_desc(irq, desc);
 	return true;
 }
-
-
-extern void call_softirq(void);
-
-asmlinkage void do_softirq(void)
-{
-	__u32 pending;
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-	pending = local_softirq_pending();
-	/* Switch to interrupt stack */
-	if (pending) {
-		call_softirq();
-		WARN_ON_ONCE(softirq_count());
-	}
-	local_irq_restore(flags);
-}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5e865b554940..c9e831dc80bc 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -19,6 +19,7 @@
 
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
+#include <asm/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -374,6 +375,16 @@ struct softirq_action
 
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
+
+#ifdef __ARCH_HAS_DO_SOFTIRQ
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+	__do_softirq();
+}
+#endif
+
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
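One detail of this hunk is worth spelling out: __ARCH_HAS_DO_SOFTIRQ is expected to be defined by the architecture's <asm/irq.h>, which is why the patch adds that include to linux/interrupt.h (first hunk above) and drops it from kernel/softirq.c (next hunk below), so the #ifdef here can see the arch's opt-in. A minimal sketch of how an architecture would opt in might look like the following; the "foo" arch is hypothetical, and the execute_on_irq_stack() helper is borrowed from the parisc conversion above rather than a generic API.

/* arch/foo/include/asm/irq.h -- hypothetical example arch */
#define __ARCH_HAS_DO_SOFTIRQ

/* arch/foo/kernel/irq.c */
void do_softirq_own_stack(void)
{
	/*
	 * Called by the generic do_softirq() with irqs disabled and
	 * softirqs known to be pending: only the stack switch is left
	 * to the arch.  This sketch reuses a parisc-style helper; real
	 * arches do the switch with inline asm or a small assembly stub.
	 */
	execute_on_irq_stack(__do_softirq, 0);
}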
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d7d498d8cc4f..26ee72725d29 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,7 +29,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
 
-#include <asm/irq.h>
 /*
  - No shared variables, all the data are CPU local.
  - If a softirq needs serialization, let it serialize itself
@@ -283,7 +282,7 @@ restart:
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-#ifndef __ARCH_HAS_DO_SOFTIRQ
+
 
 asmlinkage void do_softirq(void)
 {
@@ -298,13 +297,12 @@ asmlinkage void do_softirq(void)
 	pending = local_softirq_pending();
 
 	if (pending)
-		__do_softirq();
+		do_softirq_own_stack();
 
+	WARN_ON_ONCE(softirq_count());
 	local_irq_restore(flags);
 }
 
-#endif
-
 /*
  * Enter an interrupt context.
  */