author     Thomas Gleixner <tglx@linutronix.de>   2008-05-05 09:58:15 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-05-12 15:28:06 -0400
commit     de9b10af1287bf25b9c0433de53a2e95ef611aa7 (patch)
tree       9bc83f349f9ca651ce9d7c56aee9c3c3c7ae79e0 /arch/x86/kernel/irq_32.c
parent     04b361abfdc522239e3a071f3afdebf5787d9f03 (diff)
x86: janitor stack overflow warning patch
Add KERN_WARNING to the printk as this could not be done in the
original patch, which allegedly only moves code around.
Un#ifdef do_IRQ.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
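
A note on the "Un#ifdef do_IRQ" part: the patch moves the CONFIG_DEBUG_STACKOVERFLOW and CONFIG_4KSTACKS conditionals out of do_IRQ() by giving each helper a no-op stub in the #else branch, so the call sites compile unconditionally. A minimal user-space sketch of that idiom (the CONFIG_STACK_DEBUG macro here is made up for illustration; the kernel's real toggles are the two config options above):

#include <stdio.h>

#define CONFIG_STACK_DEBUG	/* comment this out: callers still compile */

#ifdef CONFIG_STACK_DEBUG
static int check_stack_overflow(void) { return 1; }
static void print_stack_overflow(void) { puts("low stack detected"); }
#else
/* no-op stubs: the caller needs no #ifdef of its own */
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

int main(void)
{
	/* With the debug option off, the stub returns a constant 0 and
	 * the compiler removes the dead branch entirely. */
	if (check_stack_overflow())
		print_stack_overflow();
	return 0;
}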
Diffstat (limited to 'arch/x86/kernel/irq_32.c')

 -rw-r--r--  arch/x86/kernel/irq_32.c  136
 1 file changed, 75 insertions(+), 61 deletions(-)
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 3f76561da815..1c470d2e5af7 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -48,6 +48,29 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+/* Debugging check for stack overflow: is there less than 1KB free? */
+static int check_stack_overflow(void)
+{
+	long sp;
+
+	__asm__ __volatile__("andl %%esp,%0" :
+			     "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+	return sp < (sizeof(struct thread_info) + STACK_WARN);
+}
+
+static void print_stack_overflow(void)
+{
+	printk(KERN_WARNING "low stack detected by irq handler\n");
+	dump_stack();
+}
+
+#else
+static inline int check_stack_overflow(void) { return 0; }
+static inline void print_stack_overflow(void) { }
+#endif
+
 #ifdef CONFIG_4KSTACKS
 /*
  * per-CPU IRQ handling contexts (thread information and stack)
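
How check_stack_overflow() works: the thread stack is THREAD_SIZE bytes and THREAD_SIZE-aligned, with struct thread_info at its base, so esp & (THREAD_SIZE - 1) is the stack pointer's offset from the base. That is exactly what the inline asm computes: "andl %%esp,%0" with %0 preloaded to THREAD_SIZE - 1. A hedged user-space sketch of the same arithmetic (values assumed, and a user-space stack is not THREAD_SIZE-aligned, so the printed offset is illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Illustrative values; in the kernel they come from asm/thread_info.h
 * (THREAD_SIZE is 4KB with CONFIG_4KSTACKS, STACK_WARN a fraction of it). */
#define THREAD_SIZE 4096UL
#define STACK_WARN  (THREAD_SIZE / 8)

int main(void)
{
	int local;
	uintptr_t sp = (uintptr_t)&local;

	/* The mask yields the stack pointer's offset within a
	 * THREAD_SIZE-aligned stack.  The stack grows down toward the
	 * thread_info at the base, so a small offset means little
	 * usable stack remains. */
	uintptr_t offset = sp & (THREAD_SIZE - 1);

	printf("offset within a %lu-byte stack: %lu bytes\n",
	       THREAD_SIZE, (unsigned long)offset);
	/* Kernel check: offset < sizeof(struct thread_info) + STACK_WARN */
	return 0;
}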
@@ -59,18 +82,12 @@ union irq_ctx {
 
 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
-
-static void stack_overflow(void)
-{
-	printk("low stack detected by irq handler\n");
-	dump_stack();
-}
 
-static inline void call_on_stack2(void *func, void *stack,
-			unsigned long arg1, unsigned long arg2)
+static inline void call_on_stack(void *func, void *stack,
+				 unsigned long arg1, void *arg2)
 {
 	unsigned long bx;
+
 	asm volatile(
 		"	xchgl	%%ebx,%%esp	\n"
 		"	call	*%%edi		\n"
@@ -81,22 +98,61 @@ static inline void call_on_stack2(void *func, void *stack,
 		: "memory", "cc", "ecx");
 }
 
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+{
+	union irq_ctx *curctx, *irqctx;
+	u32 *isp;
+
+	curctx = (union irq_ctx *) current_thread_info();
+	irqctx = hardirq_ctx[smp_processor_id()];
+
+	/*
+	 * this is where we switch to the IRQ stack. However, if we are
+	 * already using the IRQ stack (because we interrupted a hardirq
+	 * handler) we can't do that and just have to keep using the
+	 * current stack (which is the irq stack already after all)
+	 */
+	if (unlikely(curctx == irqctx))
+		return 0;
+
+	/* build the stack frame on the IRQ stack */
+	isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
+	irqctx->tinfo.task = curctx->tinfo.task;
+	irqctx->tinfo.previous_esp = current_stack_pointer;
+
+	/*
+	 * Copy the softirq bits in preempt_count so that the
+	 * softirq checks work in the hardirq context.
+	 */
+	irqctx->tinfo.preempt_count =
+		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
+	if (unlikely(overflow))
+		call_on_stack(print_stack_overflow, isp, 0, NULL);
+
+	call_on_stack(desc->handle_irq, isp, irq, desc);
+
+	return 1;
+}
+
+#else
+static inline int
+execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
+#endif
+
 /*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
  * handlers).
  */
 unsigned int do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 	/* high bit used in ret_from_ code */
-	int irq = ~regs->orig_ax;
+	int overflow, irq = ~regs->orig_ax;
 	struct irq_desc *desc = irq_desc + irq;
-#ifdef CONFIG_4KSTACKS
-	union irq_ctx *curctx, *irqctx;
-	u32 *isp;
-#endif
-	int overflow = 0;
 
 	if (unlikely((unsigned)irq >= NR_IRQS)) {
 		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
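
The preempt_count splice in execute_on_irq_stack() deserves a note: only the softirq bits are copied from the interrupted context, so that in_softirq() and friends still answer correctly while running on the hardirq stack. A toy computation with an assumed bit layout (the real masks are defined in the kernel's hardirq/preempt headers, not here):

#include <stdio.h>

/* Illustrative layout; the real SOFTIRQ_MASK lives in the kernel
 * headers and the counts in each context's struct thread_info. */
#define SOFTIRQ_MASK 0x0000ff00u

int main(void)
{
	unsigned int irq_count  = 0x00010000u; /* IRQ ctx: one hardirq level */
	unsigned int task_count = 0x00000100u; /* interrupted task: in softirq */

	/* Keep the IRQ context's own bits, splice in the interrupted
	 * context's softirq bits. */
	irq_count = (irq_count & ~SOFTIRQ_MASK) | (task_count & SOFTIRQ_MASK);

	printf("merged preempt_count: %#010x\n", irq_count);
	return 0;
}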
@@ -106,54 +162,12 @@ unsigned int do_IRQ(struct pt_regs *regs)
 
 	old_regs = set_irq_regs(regs);
 	irq_enter();
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 1KB free? */
-	{
-		long sp;
-
-		__asm__ __volatile__("andl %%esp,%0" :
-					"=r" (sp) : "0" (THREAD_SIZE - 1));
-		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN)))
-			overflow = 1;
-	}
-#endif
-
-#ifdef CONFIG_4KSTACKS
 
-	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = hardirq_ctx[smp_processor_id()];
+	overflow = check_stack_overflow();
 
-	/*
-	 * this is where we switch to the IRQ stack. However, if we are
-	 * already using the IRQ stack (because we interrupted a hardirq
-	 * handler) we can't do that and just have to keep using the
-	 * current stack (which is the irq stack already after all)
-	 */
-	if (curctx != irqctx) {
-		/* build the stack frame on the IRQ stack */
-		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-		irqctx->tinfo.task = curctx->tinfo.task;
-		irqctx->tinfo.previous_esp = current_stack_pointer;
-
-		/*
-		 * Copy the softirq bits in preempt_count so that the
-		 * softirq checks work in the hardirq context.
-		 */
-		irqctx->tinfo.preempt_count =
-			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-
-		/* Execute warning on interrupt stack */
+	if (!execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
-			call_on_stack2(stack_overflow, isp, 0, 0);
-
-		call_on_stack2(desc->handle_irq, isp, irq, (unsigned long)desc);
-	} else
-#endif
-	{
-		/* AK: Slightly bogus here */
-		if (overflow)
-			stack_overflow();
+			print_stack_overflow();
 		desc->handle_irq(irq, desc);
 	}
 
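
After the patch, do_IRQ() itself is #ifdef-free: it always computes overflow, always tries the IRQ stack, and falls back to handling in place. A stub model of that control flow, with every kernel call replaced by a hypothetical printing stand-in:

#include <stdio.h>

/* Stub model only: both helpers always exist, possibly as no-op stubs,
 * which is what lets do_IRQ() drop its conditionals. */
static int check_stack_overflow(void) { return 0; }
static void print_stack_overflow(void) { puts("low stack!"); }

static int execute_on_irq_stack(int overflow, int irq)
{
	/* returns 0 when the IRQ stack can't be used (already on it,
	 * or built without CONFIG_4KSTACKS) */
	(void)overflow;
	printf("irq %d handled on the IRQ stack\n", irq);
	return 1;
}

static void handle_irq(int irq) { printf("irq %d handled in place\n", irq); }

static void do_IRQ(int irq)
{
	int overflow = check_stack_overflow();

	if (!execute_on_irq_stack(overflow, irq)) {
		/* fallback: warn if needed, handle on the current stack */
		if (overflow)
			print_stack_overflow();
		handle_irq(irq);
	}
}

int main(void)
{
	do_IRQ(7);
	return 0;
}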