path: root/arch/arm64/kernel/entry.S
author      James Morse <james.morse@arm.com>      2015-12-18 11:01:47 -0500
committer   Will Deacon <will.deacon@arm.com>      2015-12-21 12:26:01 -0500
commit      d224a69e3d80fe08f285d1f41d21b590bae4fa9f (patch)
tree        714dfa475b1e358c18285af1d809e08fc7e6f790   /arch/arm64/kernel/entry.S
parent      66b3923a1a0f77a563b43f43f6ad091354abbfe9 (diff)
arm64: remove irq_count and do_softirq_own_stack()
sysrq_handle_reboot() re-enables interrupts while on the irq stack. The
irq_stack implementation wrongly assumed this would only ever happen via
the softirq path, allowing it to update irq_count late, in
do_softirq_own_stack().

This means if an irq occurs in sysrq_handle_reboot(), during
emergency_restart() the stack will be corrupted, as irq_count wasn't
updated.

Lose the optimisation, and instead of moving the adding/subtracting of
irq_count into irq_stack_entry/irq_stack_exit, remove it, and compare
sp_el0 (struct thread_info) with sp & ~(THREAD_SIZE - 1). This tells us
if we are on a task stack, if so, we can safely switch to the irq stack.
Finally, remove do_softirq_own_stack(), we don't need it anymore.

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use get_thread_info macro]
Signed-off-by: Will Deacon <will.deacon@arm.com>
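The check the patch introduces relies on task stacks being THREAD_SIZE-aligned
with struct thread_info at their base: masking the low bits off sp yields the
stack base, which equals the thread_info pointer (kept in sp_el0 and loaded
into the tsk register) only while we are on a task stack. A minimal userspace
C sketch of that idea, not the kernel's code; the on_task_stack() helper name
and the 16K THREAD_SIZE value are illustrative assumptions:

    #include <stdint.h>
    #include <stdio.h>

    /* arm64 THREAD_SIZE with 4K pages at the time; assumed here for illustration */
    #define THREAD_SIZE 16384UL

    /*
     * Task stacks are THREAD_SIZE-aligned with struct thread_info at the base,
     * so masking sp with ~(THREAD_SIZE - 1) recovers the stack base; if it
     * matches the thread_info pointer, we are still on the task stack.
     */
    static int on_task_stack(uintptr_t sp, uintptr_t thread_info)
    {
        return (sp & ~(THREAD_SIZE - 1)) == thread_info;
    }

    int main(void)
    {
        uintptr_t ti = 0xffff000008000000UL;                      /* pretend thread_info */
        printf("%d\n", on_task_stack(ti + 0x3f00, ti));           /* 1: on the task stack */
        printf("%d\n", on_task_stack(0xffff000009004000UL, ti));  /* 0: some other stack */
        return 0;
    }

This is the same computation the assembly below performs with
"and x25, x19, #~(THREAD_SIZE - 1)" followed by "cmp x25, tsk".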
Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--   arch/arm64/kernel/entry.S   19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 0667fb7d8bb1..c0db321db7e1 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -181,19 +181,20 @@ alternative_endif
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
 
-	this_cpu_ptr irq_stack, x25, x26
-
 	/*
-	 * Check the lowest address on irq_stack for the irq_count value,
-	 * incremented by do_softirq_own_stack if we have re-enabled irqs
-	 * while on the irq_stack.
+	 * Compare sp with the current thread_info, if the top
+	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
+	 * should switch to the irq stack.
 	 */
-	ldr	x26, [x25]
-	cbnz	x26, 9998f		// recursive use?
+	and	x25, x19, #~(THREAD_SIZE - 1)
+	cmp	x25, tsk
+	b.ne	9998f
 
-	/* switch to the irq stack */
+	this_cpu_ptr irq_stack, x25, x26
 	mov	x26, #IRQ_STACK_START_SP
 	add	x26, x25, x26
+
+	/* switch to the irq stack */
 	mov	sp, x26
 
 	/*
@@ -405,10 +406,10 @@ el1_irq:
 	bl	trace_hardirqs_off
 #endif
 
+	get_thread_info tsk
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
-	get_thread_info tsk
 	ldr	w24, [tsk, #TI_PREEMPT]	// get preempt count
 	cbnz	w24, 1f			// preempt count != 0
 	ldr	x0, [tsk, #TI_FLAGS]	// get flags