summary refs log tree commit diff stats
diff options
context:
space:
mode:
author Anshuman Khandual <anshuman.khandual@arm.com> 2019-07-16 19:28:00 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2019-07-16 22:23:22 -0400
commit b98cca444d287a63dd96df04af7fb9793567599e (patch)
tree df031390e174d880e07b4c4e67fff0c752126579
parent 92bae787c483b015d8985c43784e5afb5ec67895 (diff)
mm, kprobes: generalize and rename notify_page_fault() as kprobe_page_fault()
Architectures which support kprobes have very similar boilerplate around calling kprobe_fault_handler(). Use a helper function in kprobes.h to unify them, based on the x86 code. This changes the behaviour for other architectures when preemption is enabled. Previously, they would have disabled preemption while calling the kprobe handler. However, preemption would be disabled if this fault was due to a kprobe, so we know the fault was not due to a kprobe handler and can simply return failure. This behaviour was introduced in commit a980c0ef9f6d ("x86/kprobes: Refactor kprobes_fault() like kprobe_exceptions_notify()") [anshuman.khandual@arm.com: export kprobe_fault_handler()] Link: http://lkml.kernel.org/r/1561133358-8876-1-git-send-email-anshuman.khandual@arm.com Link: http://lkml.kernel.org/r/1560420444-25737-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com> Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Christophe Leroy <christophe.leroy@c-s.fr> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Andrey Konovalov <andreyknvl@google.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Paul Mackerras <paulus@samba.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: "David S. 
Miller" <davem@davemloft.net> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: James Hogan <jhogan@kernel.org> Cc: Paul Burton <paul.burton@mips.com> Cc: Ralf Baechle <ralf@linux-mips.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/arm/mm/fault.c24
-rw-r--r--arch/arm64/mm/fault.c24
-rw-r--r--arch/ia64/mm/fault.c24
-rw-r--r--arch/mips/include/asm/kprobes.h1
-rw-r--r--arch/mips/kernel/kprobes.c2
-rw-r--r--arch/powerpc/mm/fault.c23
-rw-r--r--arch/s390/mm/fault.c16
-rw-r--r--arch/sh/mm/fault.c18
-rw-r--r--arch/sparc/mm/fault_64.c16
-rw-r--r--arch/x86/mm/fault.c21
-rw-r--r--include/linux/kprobes.h19
11 files changed, 32 insertions(+), 156 deletions(-)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0e417233dad7..890eeaac3cbb 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -27,28 +27,6 @@
27 27
28#ifdef CONFIG_MMU 28#ifdef CONFIG_MMU
29 29
30#ifdef CONFIG_KPROBES
31static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
32{
33 int ret = 0;
34
35 if (!user_mode(regs)) {
36 /* kprobe_running() needs smp_processor_id() */
37 preempt_disable();
38 if (kprobe_running() && kprobe_fault_handler(regs, fsr))
39 ret = 1;
40 preempt_enable();
41 }
42
43 return ret;
44}
45#else
46static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
47{
48 return 0;
49}
50#endif
51
52/* 30/*
53 * This is useful to dump out the page tables associated with 31 * This is useful to dump out the page tables associated with
54 * 'addr' in mm 'mm'. 32 * 'addr' in mm 'mm'.
@@ -265,7 +243,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
265 vm_fault_t fault; 243 vm_fault_t fault;
266 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 244 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
267 245
268 if (notify_page_fault(regs, fsr)) 246 if (kprobe_page_fault(regs, fsr))
269 return 0; 247 return 0;
270 248
271 tsk = current; 249 tsk = current;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c8c61b1eb479..9568c116ac7f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -59,28 +59,6 @@ static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
59 return debug_fault_info + DBG_ESR_EVT(esr); 59 return debug_fault_info + DBG_ESR_EVT(esr);
60} 60}
61 61
62#ifdef CONFIG_KPROBES
63static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
64{
65 int ret = 0;
66
67 /* kprobe_running() needs smp_processor_id() */
68 if (!user_mode(regs)) {
69 preempt_disable();
70 if (kprobe_running() && kprobe_fault_handler(regs, esr))
71 ret = 1;
72 preempt_enable();
73 }
74
75 return ret;
76}
77#else
78static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
79{
80 return 0;
81}
82#endif
83
84static void data_abort_decode(unsigned int esr) 62static void data_abort_decode(unsigned int esr)
85{ 63{
86 pr_alert("Data abort info:\n"); 64 pr_alert("Data abort info:\n");
@@ -434,7 +412,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
434 unsigned long vm_flags = VM_READ | VM_WRITE; 412 unsigned long vm_flags = VM_READ | VM_WRITE;
435 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 413 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
436 414
437 if (notify_page_fault(regs, esr)) 415 if (kprobe_page_fault(regs, esr))
438 return 0; 416 return 0;
439 417
440 /* 418 /*
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 3c3a283d3172..c2f299fe9e04 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -21,28 +21,6 @@
21 21
22extern int die(char *, struct pt_regs *, long); 22extern int die(char *, struct pt_regs *, long);
23 23
24#ifdef CONFIG_KPROBES
25static inline int notify_page_fault(struct pt_regs *regs, int trap)
26{
27 int ret = 0;
28
29 if (!user_mode(regs)) {
30 /* kprobe_running() needs smp_processor_id() */
31 preempt_disable();
32 if (kprobe_running() && kprobe_fault_handler(regs, trap))
33 ret = 1;
34 preempt_enable();
35 }
36
37 return ret;
38}
39#else
40static inline int notify_page_fault(struct pt_regs *regs, int trap)
41{
42 return 0;
43}
44#endif
45
46/* 24/*
47 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment 25 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
48 * (inside region 5, on ia64) and that page is present. 26 * (inside region 5, on ia64) and that page is present.
@@ -116,7 +94,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
116 /* 94 /*
117 * This is to handle the kprobes on user space access instructions 95 * This is to handle the kprobes on user space access instructions
118 */ 96 */
119 if (notify_page_fault(regs, TRAP_BRKPT)) 97 if (kprobe_page_fault(regs, TRAP_BRKPT))
120 return; 98 return;
121 99
122 if (user_mode(regs)) 100 if (user_mode(regs))
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 3cf8e4d5fa28..68b1e5d458cf 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -41,6 +41,7 @@ do { \
41#define kretprobe_blacklist_size 0 41#define kretprobe_blacklist_size 0
42 42
43void arch_remove_kprobe(struct kprobe *p); 43void arch_remove_kprobe(struct kprobe *p);
44int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
44 45
45/* Architecture specific copy of original instruction*/ 46/* Architecture specific copy of original instruction*/
46struct arch_specific_insn { 47struct arch_specific_insn {
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 81ba1d3c367c..6cfae2411c04 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -398,7 +398,7 @@ out:
398 return 1; 398 return 1;
399} 399}
400 400
401static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) 401int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
402{ 402{
403 struct kprobe *cur = kprobe_running(); 403 struct kprobe *cur = kprobe_running();
404 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 404 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d989592b6fc8..8432c281de92 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -42,26 +42,6 @@
42#include <asm/debug.h> 42#include <asm/debug.h>
43#include <asm/kup.h> 43#include <asm/kup.h>
44 44
45static inline bool notify_page_fault(struct pt_regs *regs)
46{
47 bool ret = false;
48
49#ifdef CONFIG_KPROBES
50 /* kprobe_running() needs smp_processor_id() */
51 if (!user_mode(regs)) {
52 preempt_disable();
53 if (kprobe_running() && kprobe_fault_handler(regs, 11))
54 ret = true;
55 preempt_enable();
56 }
57#endif /* CONFIG_KPROBES */
58
59 if (unlikely(debugger_fault_handler(regs)))
60 ret = true;
61
62 return ret;
63}
64
65/* 45/*
66 * Check whether the instruction inst is a store using 46 * Check whether the instruction inst is a store using
67 * an update addressing form which will update r1. 47 * an update addressing form which will update r1.
@@ -461,8 +441,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
461 int is_write = page_fault_is_write(error_code); 441 int is_write = page_fault_is_write(error_code);
462 vm_fault_t fault, major = 0; 442 vm_fault_t fault, major = 0;
463 bool must_retry = false; 443 bool must_retry = false;
444 bool kprobe_fault = kprobe_page_fault(regs, 11);
464 445
465 if (notify_page_fault(regs)) 446 if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
466 return 0; 447 return 0;
467 448
468 if (unlikely(page_fault_is_bad(error_code))) { 449 if (unlikely(page_fault_is_bad(error_code))) {
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 0ba174f779da..63507662828f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -67,20 +67,6 @@ static int __init fault_init(void)
67} 67}
68early_initcall(fault_init); 68early_initcall(fault_init);
69 69
70static inline int notify_page_fault(struct pt_regs *regs)
71{
72 int ret = 0;
73
74 /* kprobe_running() needs smp_processor_id() */
75 if (kprobes_built_in() && !user_mode(regs)) {
76 preempt_disable();
77 if (kprobe_running() && kprobe_fault_handler(regs, 14))
78 ret = 1;
79 preempt_enable();
80 }
81 return ret;
82}
83
84/* 70/*
85 * Find out which address space caused the exception. 71 * Find out which address space caused the exception.
86 */ 72 */
@@ -412,7 +398,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
412 */ 398 */
413 clear_pt_regs_flag(regs, PIF_PER_TRAP); 399 clear_pt_regs_flag(regs, PIF_PER_TRAP);
414 400
415 if (notify_page_fault(regs)) 401 if (kprobe_page_fault(regs, 14))
416 return 0; 402 return 0;
417 403
418 mm = tsk->mm; 404 mm = tsk->mm;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 3093bc372138..5f51456f4fc7 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -24,20 +24,6 @@
24#include <asm/tlbflush.h> 24#include <asm/tlbflush.h>
25#include <asm/traps.h> 25#include <asm/traps.h>
26 26
27static inline int notify_page_fault(struct pt_regs *regs, int trap)
28{
29 int ret = 0;
30
31 if (kprobes_built_in() && !user_mode(regs)) {
32 preempt_disable();
33 if (kprobe_running() && kprobe_fault_handler(regs, trap))
34 ret = 1;
35 preempt_enable();
36 }
37
38 return ret;
39}
40
41static void 27static void
42force_sig_info_fault(int si_signo, int si_code, unsigned long address) 28force_sig_info_fault(int si_signo, int si_code, unsigned long address)
43{ 29{
@@ -412,14 +398,14 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
412 if (unlikely(fault_in_kernel_space(address))) { 398 if (unlikely(fault_in_kernel_space(address))) {
413 if (vmalloc_fault(address) >= 0) 399 if (vmalloc_fault(address) >= 0)
414 return; 400 return;
415 if (notify_page_fault(regs, vec)) 401 if (kprobe_page_fault(regs, vec))
416 return; 402 return;
417 403
418 bad_area_nosemaphore(regs, error_code, address); 404 bad_area_nosemaphore(regs, error_code, address);
419 return; 405 return;
420 } 406 }
421 407
422 if (unlikely(notify_page_fault(regs, vec))) 408 if (unlikely(kprobe_page_fault(regs, vec)))
423 return; 409 return;
424 410
425 /* Only enable interrupts if they were on before the fault */ 411 /* Only enable interrupts if they were on before the fault */
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 83fda4d9c3b2..2371fb6b97e4 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -38,20 +38,6 @@
38 38
39int show_unhandled_signals = 1; 39int show_unhandled_signals = 1;
40 40
41static inline __kprobes int notify_page_fault(struct pt_regs *regs)
42{
43 int ret = 0;
44
45 /* kprobe_running() needs smp_processor_id() */
46 if (kprobes_built_in() && !user_mode(regs)) {
47 preempt_disable();
48 if (kprobe_running() && kprobe_fault_handler(regs, 0))
49 ret = 1;
50 preempt_enable();
51 }
52 return ret;
53}
54
55static void __kprobes unhandled_fault(unsigned long address, 41static void __kprobes unhandled_fault(unsigned long address,
56 struct task_struct *tsk, 42 struct task_struct *tsk,
57 struct pt_regs *regs) 43 struct pt_regs *regs)
@@ -285,7 +271,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
285 271
286 fault_code = get_thread_fault_code(); 272 fault_code = get_thread_fault_code();
287 273
288 if (notify_page_fault(regs)) 274 if (kprobe_page_fault(regs, 0))
289 goto exit_exception; 275 goto exit_exception;
290 276
291 si_code = SEGV_MAPERR; 277 si_code = SEGV_MAPERR;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 794f364cb882..d1634c59ed56 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -46,23 +46,6 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
46 return 0; 46 return 0;
47} 47}
48 48
49static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
50{
51 if (!kprobes_built_in())
52 return 0;
53 if (user_mode(regs))
54 return 0;
55 /*
56 * To be potentially processing a kprobe fault and to be allowed to call
57 * kprobe_running(), we have to be non-preemptible.
58 */
59 if (preemptible())
60 return 0;
61 if (!kprobe_running())
62 return 0;
63 return kprobe_fault_handler(regs, X86_TRAP_PF);
64}
65
66/* 49/*
67 * Prefetch quirks: 50 * Prefetch quirks:
68 * 51 *
@@ -1282,7 +1265,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
1282 return; 1265 return;
1283 1266
1284 /* kprobes don't want to hook the spurious faults: */ 1267 /* kprobes don't want to hook the spurious faults: */
1285 if (kprobes_fault(regs)) 1268 if (kprobe_page_fault(regs, X86_TRAP_PF))
1286 return; 1269 return;
1287 1270
1288 /* 1271 /*
@@ -1313,7 +1296,7 @@ void do_user_addr_fault(struct pt_regs *regs,
1313 mm = tsk->mm; 1296 mm = tsk->mm;
1314 1297
1315 /* kprobes don't want to hook the spurious faults: */ 1298 /* kprobes don't want to hook the spurious faults: */
1316 if (unlikely(kprobes_fault(regs))) 1299 if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
1317 return; 1300 return;
1318 1301
1319 /* 1302 /*
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 443d9800ca3f..04bdaf01112c 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
458} 458}
459#endif 459#endif
460 460
461/* Returns true if kprobes handled the fault */
462static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
463 unsigned int trap)
464{
465 if (!kprobes_built_in())
466 return false;
467 if (user_mode(regs))
468 return false;
469 /*
470 * To be potentially processing a kprobe fault and to be allowed
471 * to call kprobe_running(), we have to be non-preemptible.
472 */
473 if (preemptible())
474 return false;
475 if (!kprobe_running())
476 return false;
477 return kprobe_fault_handler(regs, trap);
478}
479
461#endif /* _LINUX_KPROBES_H */ 480#endif /* _LINUX_KPROBES_H */