author	Marc Zyngier <marc.zyngier@arm.com>	2016-07-20 12:46:58 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2016-07-20 12:54:35 -0400
commit	ab4c1325d4bf111a590a1f773e3d93bde7f40201
tree	85817cdc289aba4b54ee3cc4f6659d3b158356c0
parent	44bd887ce10eb8061f6a137f8a73f823957edd82
arm64: kprobes: Fix overflow when saving stack
The MIN_STACK_SIZE macro tries to evaluate how much stack space needs to be saved in the jprobes_stack array, sized at 128 bytes.

When using the IRQ stack, said macro can happily return up to IRQ_STACK_SIZE, which is 16kB. Mayhem follows.

This patch fixes things by getting rid of the crazy macro and limiting the copy to be at most the size of the jprobes_stack array, no matter which stack we're on.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--	arch/arm64/kernel/probes/kprobes.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index c89811d1e294..09f8ae98da5a 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -34,12 +34,6 @@
 
 #include "decode-insn.h"
 
-#define MIN_STACK_SIZE(addr)	(on_irq_stack(addr, raw_smp_processor_id()) ? \
-	min((unsigned long)IRQ_STACK_SIZE, \
-	    IRQ_STACK_PTR(raw_smp_processor_id()) - (addr)) : \
-	min((unsigned long)MAX_STACK_SIZE, \
-	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr)))
-
 void jprobe_return_break(void);
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -48,6 +42,18 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
+static inline unsigned long min_stack_size(unsigned long addr)
+{
+	unsigned long size;
+
+	if (on_irq_stack(addr, raw_smp_processor_id()))
+		size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
+	else
+		size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
+
+	return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
+}
+
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
@@ -495,7 +501,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	 * the argument area.
 	 */
 	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-	       MIN_STACK_SIZE(stack_ptr));
+	       min_stack_size(stack_ptr));
 
 	instruction_pointer_set(regs, (unsigned long) jp->entry);
 	preempt_disable();
@@ -548,7 +554,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	unpause_graph_tracing();
 	*regs = kcb->jprobe_saved_regs;
 	memcpy((void *)stack_addr, kcb->jprobes_stack,
-	       MIN_STACK_SIZE(stack_addr));
+	       min_stack_size(stack_addr));
 	preempt_enable_no_resched();
 	return 1;
 }
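
For illustration only, here is a minimal user-space C sketch of the bounding pattern the new min_stack_size() helper relies on. It is not part of the commit; names such as save_area, fake_stack and bounded_save_size are made up. The point is that the copy length is capped by both the space remaining on the source stack and the size of the destination buffer, so a 16kB IRQ stack can never overflow a 128-byte save area.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel values involved. */
#define SAVE_AREA_SIZE	128	/* like the 128-byte jprobes_stack array */
#define FAKE_STACK_SIZE	16384	/* like IRQ_STACK_SIZE, far larger than the save area */

static unsigned char fake_stack[FAKE_STACK_SIZE];
static unsigned char save_area[SAVE_AREA_SIZE];

/*
 * Bound the copy by both the bytes available above 'addr' and the size of
 * the destination, mirroring min(size, FIELD_SIZEOF(...)) in the patch.
 */
static size_t bounded_save_size(const unsigned char *addr)
{
	size_t avail = (size_t)((fake_stack + FAKE_STACK_SIZE) - addr);

	return avail < sizeof(save_area) ? avail : sizeof(save_area);
}

int main(void)
{
	const unsigned char *sp = fake_stack;	/* worst case: the whole fake stack lies above sp */
	size_t n = bounded_save_size(sp);

	memcpy(save_area, sp, n);	/* never copies more than 128 bytes */
	printf("copied %zu bytes\n", n);
	return 0;
}

Without the second bound, n would be 16384 here and the memcpy() would run far past the end of save_area, which is exactly the overflow the commit message describes.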