aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2016-10-11 08:13:38 -0400
committerIngo Molnar <mingo@kernel.org>2016-10-16 04:58:59 -0400
commit9254139ad083433c50ba62920107ed55fc4ca5e2 (patch)
treee1bd7b4240f0034483ee2397e47b40657ebaf823
parent1001354ca34179f3db924eb66672442a173147dc (diff)
kprobes: Avoid false KASAN reports during stack copy
Kprobes save and restore raw stack chunks with memcpy(). With KASAN these
chunks can contain poisoned stack redzones, as the result memcpy()
interceptor produces false stack out-of-bounds reports.

Use __memcpy() instead of memcpy() for stack copying. __memcpy() is not
instrumented by KASAN and does not lead to the false reports.

Currently there is a spew of KASAN reports during boot if
CONFIG_KPROBES_SANITY_TEST is enabled:

[   ] Kprobe smoke test: started
[   ] ==================================================================
[   ] BUG: KASAN: stack-out-of-bounds in setjmp_pre_handler+0x17c/0x280 at addr ffff88085259fba8
[   ] Read of size 64 by task swapper/0/1
[   ] page:ffffea00214967c0 count:0 mapcount:0 mapping:          (null) index:0x0
[   ] flags: 0x2fffff80000000()
[   ] page dumped because: kasan: bad access detected
[...]

Reported-by: CAI Qian <caiqian@redhat.com>
Tested-by: CAI Qian <caiqian@redhat.com>
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kasan-dev@googlegroups.com
[ Improved various details. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/kernel/kprobes/core.c7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 28cee019209c..f423b0ef23a7 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1057,9 +1057,10 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	 * tailcall optimization. So, to be absolutely safe
 	 * we also save and restore enough stack bytes to cover
 	 * the argument area.
+	 * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
+	 * raw stack chunk with redzones:
 	 */
-	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
-	       MIN_STACK_SIZE(addr));
+	__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
 	regs->flags &= ~X86_EFLAGS_IF;
 	trace_hardirqs_off();
 	regs->ip = (unsigned long)(jp->entry);
@@ -1118,7 +1119,7 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	/* It's OK to start function graph tracing again */
 	unpause_graph_tracing();
 	*regs = kcb->jprobe_saved_regs;
-	memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+	__memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
 	preempt_enable_no_resched();
 	return 1;
 }