author     Linus Torvalds <torvalds@linux-foundation.org>  2019-05-15 19:05:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-15 19:05:47 -0400
commit     d2d8b146043ae7e250aef1fb312971f6f479d487 (patch)
tree       22db8758a5aa0bc850ba8f83fe57b1f679924d0a
parent     2bbacd1a92788ee334c7e92b765ea16ebab68dfe (diff)
parent     693713cbdb3a4bda5a8a678c31f06560bbb14657 (diff)
Merge tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "The major changes in this tracing update includes:

   - Removal of non-DYNAMIC_FTRACE from 32bit x86

   - Removal of mcount support from x86

   - Emulating a call from int3 on x86_64, fixes live kernel patching

   - Consolidated Tracing Error logs file

  Minor updates:

   - Removal of klp_check_compiler_support()

   - kdb ftrace dumping output changes

   - Accessing and creating ftrace instances from inside the kernel

   - Clean up of #define if macro

   - Introduction of TRACE_EVENT_NOP() to disable trace events based on
     config options

  And other minor fixes and clean ups"

* tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
  x86: Hide the int3_emulate_call/jmp functions from UML
  livepatch: Remove klp_check_compiler_support()
  ftrace/x86: Remove mcount support
  ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
  tracing: Simplify "if" macro code
  tracing: Fix documentation about disabling options using trace_options
  tracing: Replace kzalloc with kcalloc
  tracing: Fix partial reading of trace event's id file
  tracing: Allow RCU to run between postponed startup tests
  tracing: Fix white space issues in parse_pred() function
  tracing: Eliminate const char[] auto variables
  ring-buffer: Fix mispelling of Calculate
  tracing: probeevent: Fix to make the type of $comm string
  tracing: probeevent: Do not accumulate on ret variable
  tracing: uprobes: Re-enable $comm support for uprobe events
  ftrace/x86_64: Emulate call function while updating in breakpoint handler
  x86_64: Allow breakpoints to emulate call instructions
  x86_64: Add gap to int3 to allow for call emulation
  tracing: kdb: Allow ftdump to skip all but the last few entries
  tracing: Add trace_total_entries() / trace_total_entries_cpu()
  ...
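[Editor's illustration of the "Accessing and creating ftrace instances from inside the kernel" item above. This is a hedged sketch only: trace_array_create() and trace_array_destroy() are part of this series but do not appear in the truncated diff below, so their exact signatures, the header they live in, and the "demo_instance" name are assumptions; trace_array_printk() and trace_printk_init_buffers() are the symbols exported in the kernel/trace/trace.c hunk further down.]

  /* Illustrative module-side use of an in-kernel ftrace instance. */
  #include <linux/module.h>
  #include <linux/trace.h>        /* assumed home of the instance API */

  static struct trace_array *tr;

  static int __init demo_init(void)
  {
          /* Create a named ftrace instance from kernel code (assumed signature). */
          tr = trace_array_create("demo_instance");
          if (IS_ERR(tr))
                  return PTR_ERR(tr);

          /* trace_array_printk() needs the trace_printk buffers allocated. */
          trace_printk_init_buffers();

          trace_array_printk(tr, _THIS_IP_, "hello from the demo instance\n");
          return 0;
  }

  static void __exit demo_exit(void)
  {
          trace_array_destroy(tr);        /* assumed counterpart of _create() */
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");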
-rw-r--r--  Documentation/trace/ftrace.rst | 31
-rw-r--r--  Documentation/trace/histogram.rst | 16
-rw-r--r--  arch/nds32/kernel/ftrace.c | 1
-rw-r--r--  arch/parisc/kernel/ftrace.c | 1
-rw-r--r--  arch/powerpc/include/asm/livepatch.h | 5
-rw-r--r--  arch/s390/include/asm/livepatch.h | 5
-rw-r--r--  arch/x86/Kconfig | 11
-rw-r--r--  arch/x86/entry/entry_64.S | 18
-rw-r--r--  arch/x86/include/asm/ftrace.h | 8
-rw-r--r--  arch/x86/include/asm/livepatch.h | 8
-rw-r--r--  arch/x86/include/asm/text-patching.h | 30
-rw-r--r--  arch/x86/kernel/ftrace.c | 32
-rw-r--r--  arch/x86/kernel/ftrace_32.S | 75
-rw-r--r--  arch/x86/kernel/ftrace_64.S | 28
-rw-r--r--  include/linux/compiler.h | 35
-rw-r--r--  include/linux/ftrace.h | 2
-rw-r--r--  include/linux/tracepoint.h | 15
-rw-r--r--  include/trace/define_trace.h | 8
-rw-r--r--  include/trace/events/rcu.h | 81
-rw-r--r--  include/trace/events/sched.h | 21
-rw-r--r--  kernel/livepatch/core.c | 8
-rw-r--r--  kernel/rcu/rcu.h | 9
-rw-r--r--  kernel/rcu/tree.c | 8
-rw-r--r--  kernel/trace/ftrace.c | 9
-rw-r--r--  kernel/trace/ring_buffer.c | 2
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c | 2
-rw-r--r--  kernel/trace/trace.c | 417
-rw-r--r--  kernel/trace/trace.h | 13
-rw-r--r--  kernel/trace/trace_events.c | 4
-rw-r--r--  kernel/trace/trace_events_filter.c | 84
-rw-r--r--  kernel/trace/trace_events_hist.c | 268
-rw-r--r--  kernel/trace/trace_events_trigger.c | 3
-rw-r--r--  kernel/trace/trace_kdb.c | 61
-rw-r--r--  kernel/trace/trace_kprobe.c | 77
-rw-r--r--  kernel/trace/trace_probe.c | 291
-rw-r--r--  kernel/trace/trace_probe.h | 78
-rw-r--r--  kernel/trace/trace_probe_tmpl.h | 2
-rw-r--r--  kernel/trace/trace_selftest.c | 5
-rw-r--r--  kernel/trace/trace_uprobe.c | 57
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc | 19
-rw-r--r--  tools/testing/selftests/ftrace/test.d/functions | 12
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc | 85
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc | 23
-rw-r--r--  tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc | 28
44 files changed, 1345 insertions(+), 651 deletions(-)
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index c3b9bd2fd512..f60079259669 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -765,6 +765,37 @@ Here is the list of current tracers that may be configured.
765 tracers from tracing simply echo "nop" into 765 tracers from tracing simply echo "nop" into
766 current_tracer. 766 current_tracer.
767 767
768Error conditions
769----------------
770
771 For most ftrace commands, failure modes are obvious and communicated
772 using standard return codes.
773
774 For other more involved commands, extended error information may be
775 available via the tracing/error_log file. For the commands that
776 support it, reading the tracing/error_log file after an error will
777 display more detailed information about what went wrong, if
778 information is available. The tracing/error_log file is a circular
779 error log displaying a small number (currently, 8) of ftrace errors
780 for the last (8) failed commands.
781
782 The extended error information and usage takes the form shown in
783 this example::
784
785 # echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
786 echo: write error: Invalid argument
787
788 # cat /sys/kernel/debug/tracing/error_log
789 [ 5348.887237] location: error: Couldn't yyy: zzz
790 Command: xxx
791 ^
792 [ 7517.023364] location: error: Bad rrr: sss
793 Command: ppp qqq
794 ^
795
796 To clear the error log, echo the empty string into it::
797
798 # echo > /sys/kernel/debug/tracing/error_log
768 799
769Examples of using the tracer 800Examples of using the tracer
770---------------------------- 801----------------------------
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index ddbaffa530f9..fb621a1c2638 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -199,20 +199,8 @@ Extended error information
199 199
200 For some error conditions encountered when invoking a hist trigger 200 For some error conditions encountered when invoking a hist trigger
201 command, extended error information is available via the 201 command, extended error information is available via the
202 corresponding event's 'hist' file. Reading the hist file after an 202 tracing/error_log file. See Error Conditions in
203 error will display more detailed information about what went wrong, 203 :file:`Documentation/trace/ftrace.rst` for details.
204 if information is available. This extended error information will
205 be available until the next hist trigger command for that event.
206
207 If available for a given error condition, the extended error
208 information and usage takes the following form::
209
210 # echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
211 echo: write error: Invalid argument
212
213 # cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/hist
214 ERROR: Couldn't yyy: zzz
215 Last command: xxx
216 204
2176.2 'hist' trigger examples 2056.2 'hist' trigger examples
218--------------------------- 206---------------------------
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index 8a41372551ff..fd2a54b8cd57 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -7,7 +7,6 @@
7#ifndef CONFIG_DYNAMIC_FTRACE 7#ifndef CONFIG_DYNAMIC_FTRACE
8extern void (*ftrace_trace_function)(unsigned long, unsigned long, 8extern void (*ftrace_trace_function)(unsigned long, unsigned long,
9 struct ftrace_ops*, struct pt_regs*); 9 struct ftrace_ops*, struct pt_regs*);
10extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
11extern void ftrace_graph_caller(void); 10extern void ftrace_graph_caller(void);
12 11
13noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, 12noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index e46a4157a894..a28f915993b1 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -51,7 +51,6 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
51 unsigned long org_sp_gr3) 51 unsigned long org_sp_gr3)
52{ 52{
53 extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */ 53 extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */
54 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
55 54
56 if (ftrace_trace_function != ftrace_stub) { 55 if (ftrace_trace_function != ftrace_stub) {
57 /* struct ftrace_ops *op, struct pt_regs *regs); */ 56 /* struct ftrace_ops *op, struct pt_regs *regs); */
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 5070df19d463..c005aee5ea43 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -24,11 +24,6 @@
24#include <linux/sched/task_stack.h> 24#include <linux/sched/task_stack.h>
25 25
26#ifdef CONFIG_LIVEPATCH 26#ifdef CONFIG_LIVEPATCH
27static inline int klp_check_compiler_support(void)
28{
29 return 0;
30}
31
32static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) 27static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
33{ 28{
34 regs->nip = ip; 29 regs->nip = ip;
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 672f95b12d40..818612b784cd 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -13,11 +13,6 @@
13 13
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15 15
16static inline int klp_check_compiler_support(void)
17{
18 return 0;
19}
20
21static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) 16static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
22{ 17{
23 regs->psw.addr = ip; 18 regs->psw.addr = ip;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 326b2d5bab9d..21e9f2fac04b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,17 @@ config X86_64
31 select SWIOTLB 31 select SWIOTLB
32 select ARCH_HAS_SYSCALL_WRAPPER 32 select ARCH_HAS_SYSCALL_WRAPPER
33 33
34config FORCE_DYNAMIC_FTRACE
35 def_bool y
36 depends on X86_32
37 depends on FUNCTION_TRACER
38 select DYNAMIC_FTRACE
39 help
40 We keep the static function tracing (!DYNAMIC_FTRACE) around
41 in order to test the non static function tracing in the
42 generic code, as other architectures still use it. But we
43 only need to keep it around for x86_64. No need to keep it
44 for x86_32. For x86_32, force DYNAMIC_FTRACE.
34# 45#
35# Arch settings 46# Arch settings
36# 47#
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 20e45d9b4e15..11aa3b2afa4d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -878,7 +878,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
878 * @paranoid == 2 is special: the stub will never switch stacks. This is for 878 * @paranoid == 2 is special: the stub will never switch stacks. This is for
879 * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS. 879 * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
880 */ 880 */
881.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 881.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0
882ENTRY(\sym) 882ENTRY(\sym)
883 UNWIND_HINT_IRET_REGS offset=\has_error_code*8 883 UNWIND_HINT_IRET_REGS offset=\has_error_code*8
884 884
@@ -898,6 +898,20 @@ ENTRY(\sym)
898 jnz .Lfrom_usermode_switch_stack_\@ 898 jnz .Lfrom_usermode_switch_stack_\@
899 .endif 899 .endif
900 900
901 .if \create_gap == 1
902 /*
903 * If coming from kernel space, create a 6-word gap to allow the
904 * int3 handler to emulate a call instruction.
905 */
906 testb $3, CS-ORIG_RAX(%rsp)
907 jnz .Lfrom_usermode_no_gap_\@
908 .rept 6
909 pushq 5*8(%rsp)
910 .endr
911 UNWIND_HINT_IRET_REGS offset=8
912.Lfrom_usermode_no_gap_\@:
913 .endif
914
901 .if \paranoid 915 .if \paranoid
902 call paranoid_entry 916 call paranoid_entry
903 .else 917 .else
@@ -1129,7 +1143,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
1129#endif /* CONFIG_HYPERV */ 1143#endif /* CONFIG_HYPERV */
1130 1144
1131idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET 1145idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
1132idtentry int3 do_int3 has_error_code=0 1146idtentry int3 do_int3 has_error_code=0 create_gap=1
1133idtentry stack_segment do_stack_segment has_error_code=1 1147idtentry stack_segment do_stack_segment has_error_code=1
1134 1148
1135#ifdef CONFIG_XEN_PV 1149#ifdef CONFIG_XEN_PV
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index cf350639e76d..287f1f7b2e52 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,12 +3,10 @@
3#define _ASM_X86_FTRACE_H 3#define _ASM_X86_FTRACE_H
4 4
5#ifdef CONFIG_FUNCTION_TRACER 5#ifdef CONFIG_FUNCTION_TRACER
6#ifdef CC_USING_FENTRY 6#ifndef CC_USING_FENTRY
7# define MCOUNT_ADDR ((unsigned long)(__fentry__)) 7# error Compiler does not support fentry?
8#else
9# define MCOUNT_ADDR ((unsigned long)(mcount))
10# define HAVE_FUNCTION_GRAPH_FP_TEST
11#endif 8#endif
9# define MCOUNT_ADDR ((unsigned long)(__fentry__))
12#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 10#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
13 11
14#ifdef CONFIG_DYNAMIC_FTRACE 12#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index ed80003ce3e2..a66f6706c2de 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -24,14 +24,6 @@
24#include <asm/setup.h> 24#include <asm/setup.h>
25#include <linux/ftrace.h> 25#include <linux/ftrace.h>
26 26
27static inline int klp_check_compiler_support(void)
28{
29#ifndef CC_USING_FENTRY
30 return 1;
31#endif
32 return 0;
33}
34
35static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) 27static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
36{ 28{
37 regs->ip = ip; 29 regs->ip = ip;
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index c90678fd391a..880b5515b1d6 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -42,4 +42,34 @@ extern int after_bootmem;
42extern __ro_after_init struct mm_struct *poking_mm; 42extern __ro_after_init struct mm_struct *poking_mm;
43extern __ro_after_init unsigned long poking_addr; 43extern __ro_after_init unsigned long poking_addr;
44 44
45#ifndef CONFIG_UML_X86
46static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
47{
48 regs->ip = ip;
49}
50
51#define INT3_INSN_SIZE 1
52#define CALL_INSN_SIZE 5
53
54#ifdef CONFIG_X86_64
55static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
56{
57 /*
58 * The int3 handler in entry_64.S adds a gap between the
59 * stack where the break point happened, and the saving of
60 * pt_regs. We can extend the original stack because of
61 * this gap. See the idtentry macro's create_gap option.
62 */
63 regs->sp -= sizeof(unsigned long);
64 *(unsigned long *)regs->sp = val;
65}
66
67static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
68{
69 int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
70 int3_emulate_jmp(regs, func);
71}
72#endif /* CONFIG_X86_64 */
73#endif /* !CONFIG_UML_X86 */
74
45#endif /* _ASM_X86_TEXT_PATCHING_H */ 75#endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0caf8122d680..0927bb158ffc 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,7 @@
29#include <asm/kprobes.h> 29#include <asm/kprobes.h>
30#include <asm/ftrace.h> 30#include <asm/ftrace.h>
31#include <asm/nops.h> 31#include <asm/nops.h>
32#include <asm/text-patching.h>
32 33
33#ifdef CONFIG_DYNAMIC_FTRACE 34#ifdef CONFIG_DYNAMIC_FTRACE
34 35
@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
231} 232}
232 233
233static unsigned long ftrace_update_func; 234static unsigned long ftrace_update_func;
235static unsigned long ftrace_update_func_call;
234 236
235static int update_ftrace_func(unsigned long ip, void *new) 237static int update_ftrace_func(unsigned long ip, void *new)
236{ 238{
@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
259 unsigned char *new; 261 unsigned char *new;
260 int ret; 262 int ret;
261 263
264 ftrace_update_func_call = (unsigned long)func;
265
262 new = ftrace_call_replace(ip, (unsigned long)func); 266 new = ftrace_call_replace(ip, (unsigned long)func);
263 ret = update_ftrace_func(ip, new); 267 ret = update_ftrace_func(ip, new);
264 268
@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
294 if (WARN_ON_ONCE(!regs)) 298 if (WARN_ON_ONCE(!regs))
295 return 0; 299 return 0;
296 300
297 ip = regs->ip - 1; 301 ip = regs->ip - INT3_INSN_SIZE;
298 if (!ftrace_location(ip) && !is_ftrace_caller(ip))
299 return 0;
300 302
301 regs->ip += MCOUNT_INSN_SIZE - 1; 303#ifdef CONFIG_X86_64
304 if (ftrace_location(ip)) {
305 int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
306 return 1;
307 } else if (is_ftrace_caller(ip)) {
308 if (!ftrace_update_func_call) {
309 int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
310 return 1;
311 }
312 int3_emulate_call(regs, ftrace_update_func_call);
313 return 1;
314 }
315#else
316 if (ftrace_location(ip) || is_ftrace_caller(ip)) {
317 int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
318 return 1;
319 }
320#endif
302 321
303 return 1; 322 return 0;
304} 323}
305NOKPROBE_SYMBOL(ftrace_int3_handler); 324NOKPROBE_SYMBOL(ftrace_int3_handler);
306 325
@@ -865,6 +884,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
865 884
866 func = ftrace_ops_get_func(ops); 885 func = ftrace_ops_get_func(ops);
867 886
887 ftrace_update_func_call = (unsigned long)func;
888
868 /* Do a safe modify in case the trampoline is executing */ 889 /* Do a safe modify in case the trampoline is executing */
869 new = ftrace_call_replace(ip, (unsigned long)func); 890 new = ftrace_call_replace(ip, (unsigned long)func);
870 ret = update_ftrace_func(ip, new); 891 ret = update_ftrace_func(ip, new);
@@ -966,6 +987,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
966{ 987{
967 unsigned char *new; 988 unsigned char *new;
968 989
990 ftrace_update_func_call = 0UL;
969 new = ftrace_jmp_replace(ip, (unsigned long)func); 991 new = ftrace_jmp_replace(ip, (unsigned long)func);
970 992
971 return update_ftrace_func(ip, new); 993 return update_ftrace_func(ip, new);
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 4c8440de3355..2ba914a34b06 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -10,22 +10,10 @@
10#include <asm/ftrace.h> 10#include <asm/ftrace.h>
11#include <asm/nospec-branch.h> 11#include <asm/nospec-branch.h>
12 12
13#ifdef CC_USING_FENTRY
14# define function_hook __fentry__ 13# define function_hook __fentry__
15EXPORT_SYMBOL(__fentry__) 14EXPORT_SYMBOL(__fentry__)
16#else
17# define function_hook mcount
18EXPORT_SYMBOL(mcount)
19#endif
20
21#ifdef CONFIG_DYNAMIC_FTRACE
22
23/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
24#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
25# define USING_FRAME_POINTER
26#endif
27 15
28#ifdef USING_FRAME_POINTER 16#ifdef CONFIG_FRAME_POINTER
29# define MCOUNT_FRAME 1 /* using frame = true */ 17# define MCOUNT_FRAME 1 /* using frame = true */
30#else 18#else
31# define MCOUNT_FRAME 0 /* using frame = false */ 19# define MCOUNT_FRAME 0 /* using frame = false */
@@ -37,8 +25,7 @@ END(function_hook)
37 25
38ENTRY(ftrace_caller) 26ENTRY(ftrace_caller)
39 27
40#ifdef USING_FRAME_POINTER 28#ifdef CONFIG_FRAME_POINTER
41# ifdef CC_USING_FENTRY
42 /* 29 /*
43 * Frame pointers are of ip followed by bp. 30 * Frame pointers are of ip followed by bp.
44 * Since fentry is an immediate jump, we are left with 31 * Since fentry is an immediate jump, we are left with
@@ -49,7 +36,7 @@ ENTRY(ftrace_caller)
49 pushl %ebp 36 pushl %ebp
50 movl %esp, %ebp 37 movl %esp, %ebp
51 pushl 2*4(%esp) /* function ip */ 38 pushl 2*4(%esp) /* function ip */
52# endif 39
53 /* For mcount, the function ip is directly above */ 40 /* For mcount, the function ip is directly above */
54 pushl %ebp 41 pushl %ebp
55 movl %esp, %ebp 42 movl %esp, %ebp
@@ -59,7 +46,7 @@ ENTRY(ftrace_caller)
59 pushl %edx 46 pushl %edx
60 pushl $0 /* Pass NULL as regs pointer */ 47 pushl $0 /* Pass NULL as regs pointer */
61 48
62#ifdef USING_FRAME_POINTER 49#ifdef CONFIG_FRAME_POINTER
63 /* Load parent ebp into edx */ 50 /* Load parent ebp into edx */
64 movl 4*4(%esp), %edx 51 movl 4*4(%esp), %edx
65#else 52#else
@@ -82,13 +69,11 @@ ftrace_call:
82 popl %edx 69 popl %edx
83 popl %ecx 70 popl %ecx
84 popl %eax 71 popl %eax
85#ifdef USING_FRAME_POINTER 72#ifdef CONFIG_FRAME_POINTER
86 popl %ebp 73 popl %ebp
87# ifdef CC_USING_FENTRY
88 addl $4,%esp /* skip function ip */ 74 addl $4,%esp /* skip function ip */
89 popl %ebp /* this is the orig bp */ 75 popl %ebp /* this is the orig bp */
90 addl $4, %esp /* skip parent ip */ 76 addl $4, %esp /* skip parent ip */
91# endif
92#endif 77#endif
93.Lftrace_ret: 78.Lftrace_ret:
94#ifdef CONFIG_FUNCTION_GRAPH_TRACER 79#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -133,11 +118,7 @@ ENTRY(ftrace_regs_caller)
133 118
134 movl 12*4(%esp), %eax /* Load ip (1st parameter) */ 119 movl 12*4(%esp), %eax /* Load ip (1st parameter) */
135 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ 120 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
136#ifdef CC_USING_FENTRY
137 movl 15*4(%esp), %edx /* Load parent ip (2nd parameter) */ 121 movl 15*4(%esp), %edx /* Load parent ip (2nd parameter) */
138#else
139 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
140#endif
141 movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ 122 movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
142 pushl %esp /* Save pt_regs as 4th parameter */ 123 pushl %esp /* Save pt_regs as 4th parameter */
143 124
@@ -170,43 +151,6 @@ GLOBAL(ftrace_regs_call)
170 lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */ 151 lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
171 152
172 jmp .Lftrace_ret 153 jmp .Lftrace_ret
173#else /* ! CONFIG_DYNAMIC_FTRACE */
174
175ENTRY(function_hook)
176 cmpl $__PAGE_OFFSET, %esp
177 jb ftrace_stub /* Paging not enabled yet? */
178
179 cmpl $ftrace_stub, ftrace_trace_function
180 jnz .Ltrace
181#ifdef CONFIG_FUNCTION_GRAPH_TRACER
182 cmpl $ftrace_stub, ftrace_graph_return
183 jnz ftrace_graph_caller
184
185 cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
186 jnz ftrace_graph_caller
187#endif
188.globl ftrace_stub
189ftrace_stub:
190 ret
191
192 /* taken from glibc */
193.Ltrace:
194 pushl %eax
195 pushl %ecx
196 pushl %edx
197 movl 0xc(%esp), %eax
198 movl 0x4(%ebp), %edx
199 subl $MCOUNT_INSN_SIZE, %eax
200
201 movl ftrace_trace_function, %ecx
202 CALL_NOSPEC %ecx
203
204 popl %edx
205 popl %ecx
206 popl %eax
207 jmp ftrace_stub
208END(function_hook)
209#endif /* CONFIG_DYNAMIC_FTRACE */
210 154
211#ifdef CONFIG_FUNCTION_GRAPH_TRACER 155#ifdef CONFIG_FUNCTION_GRAPH_TRACER
212ENTRY(ftrace_graph_caller) 156ENTRY(ftrace_graph_caller)
@@ -215,13 +159,8 @@ ENTRY(ftrace_graph_caller)
215 pushl %edx 159 pushl %edx
216 movl 3*4(%esp), %eax 160 movl 3*4(%esp), %eax
217 /* Even with frame pointers, fentry doesn't have one here */ 161 /* Even with frame pointers, fentry doesn't have one here */
218#ifdef CC_USING_FENTRY
219 lea 4*4(%esp), %edx 162 lea 4*4(%esp), %edx
220 movl $0, %ecx 163 movl $0, %ecx
221#else
222 lea 0x4(%ebp), %edx
223 movl (%ebp), %ecx
224#endif
225 subl $MCOUNT_INSN_SIZE, %eax 164 subl $MCOUNT_INSN_SIZE, %eax
226 call prepare_ftrace_return 165 call prepare_ftrace_return
227 popl %edx 166 popl %edx
@@ -234,11 +173,7 @@ END(ftrace_graph_caller)
234return_to_handler: 173return_to_handler:
235 pushl %eax 174 pushl %eax
236 pushl %edx 175 pushl %edx
237#ifdef CC_USING_FENTRY
238 movl $0, %eax 176 movl $0, %eax
239#else
240 movl %ebp, %eax
241#endif
242 call ftrace_return_to_handler 177 call ftrace_return_to_handler
243 movl %eax, %ecx 178 movl %eax, %ecx
244 popl %edx 179 popl %edx
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 75f2b36b41a6..10eb2760ef2c 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -13,22 +13,12 @@
13 .code64 13 .code64
14 .section .entry.text, "ax" 14 .section .entry.text, "ax"
15 15
16#ifdef CC_USING_FENTRY
17# define function_hook __fentry__ 16# define function_hook __fentry__
18EXPORT_SYMBOL(__fentry__) 17EXPORT_SYMBOL(__fentry__)
19#else
20# define function_hook mcount
21EXPORT_SYMBOL(mcount)
22#endif
23 18
24#ifdef CONFIG_FRAME_POINTER 19#ifdef CONFIG_FRAME_POINTER
25# ifdef CC_USING_FENTRY
26/* Save parent and function stack frames (rip and rbp) */ 20/* Save parent and function stack frames (rip and rbp) */
27# define MCOUNT_FRAME_SIZE (8+16*2) 21# define MCOUNT_FRAME_SIZE (8+16*2)
28# else
29/* Save just function stack frame (rip and rbp) */
30# define MCOUNT_FRAME_SIZE (8+16)
31# endif
32#else 22#else
33/* No need to save a stack frame */ 23/* No need to save a stack frame */
34# define MCOUNT_FRAME_SIZE 0 24# define MCOUNT_FRAME_SIZE 0
@@ -75,17 +65,13 @@ EXPORT_SYMBOL(mcount)
75 * fentry is called before the stack frame is set up, where as mcount 65 * fentry is called before the stack frame is set up, where as mcount
76 * is called afterward. 66 * is called afterward.
77 */ 67 */
78#ifdef CC_USING_FENTRY 68
79 /* Save the parent pointer (skip orig rbp and our return address) */ 69 /* Save the parent pointer (skip orig rbp and our return address) */
80 pushq \added+8*2(%rsp) 70 pushq \added+8*2(%rsp)
81 pushq %rbp 71 pushq %rbp
82 movq %rsp, %rbp 72 movq %rsp, %rbp
83 /* Save the return address (now skip orig rbp, rbp and parent) */ 73 /* Save the return address (now skip orig rbp, rbp and parent) */
84 pushq \added+8*3(%rsp) 74 pushq \added+8*3(%rsp)
85#else
86 /* Can't assume that rip is before this (unless added was zero) */
87 pushq \added+8(%rsp)
88#endif
89 pushq %rbp 75 pushq %rbp
90 movq %rsp, %rbp 76 movq %rsp, %rbp
91#endif /* CONFIG_FRAME_POINTER */ 77#endif /* CONFIG_FRAME_POINTER */
@@ -113,12 +99,7 @@ EXPORT_SYMBOL(mcount)
113 movq %rdx, RBP(%rsp) 99 movq %rdx, RBP(%rsp)
114 100
115 /* Copy the parent address into %rsi (second parameter) */ 101 /* Copy the parent address into %rsi (second parameter) */
116#ifdef CC_USING_FENTRY
117 movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi 102 movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
118#else
119 /* %rdx contains original %rbp */
120 movq 8(%rdx), %rsi
121#endif
122 103
123 /* Move RIP to its proper location */ 104 /* Move RIP to its proper location */
124 movq MCOUNT_REG_SIZE+\added(%rsp), %rdi 105 movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
@@ -303,15 +284,8 @@ ENTRY(ftrace_graph_caller)
303 /* Saves rbp into %rdx and fills first parameter */ 284 /* Saves rbp into %rdx and fills first parameter */
304 save_mcount_regs 285 save_mcount_regs
305 286
306#ifdef CC_USING_FENTRY
307 leaq MCOUNT_REG_SIZE+8(%rsp), %rsi 287 leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
308 movq $0, %rdx /* No framepointers needed */ 288 movq $0, %rdx /* No framepointers needed */
309#else
310 /* Save address of the return address of traced function */
311 leaq 8(%rdx), %rsi
312 /* ftrace does sanity checks against frame pointers */
313 movq (%rdx), %rdx
314#endif
315 call prepare_ftrace_return 289 call prepare_ftrace_return
316 290
317 restore_mcount_regs 291 restore_mcount_regs
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d58aa0db05f9..8aaf7cd026b0 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -53,23 +53,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
53 * "Define 'is'", Bill Clinton 53 * "Define 'is'", Bill Clinton
54 * "Define 'if'", Steven Rostedt 54 * "Define 'if'", Steven Rostedt
55 */ 55 */
56#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) 56#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )
57#define __trace_if(cond) \ 57
58 if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ 58#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
59 ({ \ 59
60 int ______r; \ 60#define __trace_if_value(cond) ({ \
61 static struct ftrace_branch_data \ 61 static struct ftrace_branch_data \
62 __aligned(4) \ 62 __aligned(4) \
63 __section("_ftrace_branch") \ 63 __section("_ftrace_branch") \
64 ______f = { \ 64 __if_trace = { \
65 .func = __func__, \ 65 .func = __func__, \
66 .file = __FILE__, \ 66 .file = __FILE__, \
67 .line = __LINE__, \ 67 .line = __LINE__, \
68 }; \ 68 }; \
69 ______r = !!(cond); \ 69 (cond) ? \
70 ______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\ 70 (__if_trace.miss_hit[1]++,1) : \
71 ______r; \ 71 (__if_trace.miss_hit[0]++,0); \
72 })) 72})
73
73#endif /* CONFIG_PROFILE_ALL_BRANCHES */ 74#endif /* CONFIG_PROFILE_ALL_BRANCHES */
74 75
75#else 76#else
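[Editor's note on the "Simplify "if" macro code" change above. With CONFIG_PROFILE_ALL_BRANCHES, an ordinary conditional now expands into a plain if whose condition routes through the branch-profiling counters; __trace_if_var() keeps compile-time-constant conditions untouched and sends everything else through __trace_if_value(). A rough sketch of the expansion for a non-constant condition, using the names from the new macros:]

  /* Source as written: */
  if (x > 0)
          do_something();

  /* Roughly what the new macros produce (x > 0 is not a compile-time
   * constant, so __trace_if_var() falls through to __trace_if_value()): */
  if (({
          static struct ftrace_branch_data __aligned(4)
                  __section("_ftrace_branch") __if_trace = {
                  .func = __func__,
                  .file = __FILE__,
                  .line = __LINE__,
          };
          (!!(x > 0)) ? (__if_trace.miss_hit[1]++, 1)
                      : (__if_trace.miss_hit[0]++, 0);
  }))
          do_something();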
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 20899919ead8..25e2995d4a4c 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -741,6 +741,8 @@ struct ftrace_graph_ret {
741typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ 741typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
742typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ 742typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
743 743
744extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
745
744#ifdef CONFIG_FUNCTION_GRAPH_TRACER 746#ifdef CONFIG_FUNCTION_GRAPH_TRACER
745 747
746struct fgraph_ops { 748struct fgraph_ops {
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 9c3186578ce0..86b019aa2839 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -548,4 +548,19 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
548 548
549#define TRACE_EVENT_PERF_PERM(event, expr...) 549#define TRACE_EVENT_PERF_PERM(event, expr...)
550 550
551#define DECLARE_EVENT_NOP(name, proto, args) \
552 static inline void trace_##name(proto) \
553 { } \
554 static inline bool trace_##name##_enabled(void) \
555 { \
556 return false; \
557 }
558
559#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print) \
560 DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))
561
562#define DECLARE_EVENT_CLASS_NOP(name, proto, args, tstruct, assign, print)
563#define DEFINE_EVENT_NOP(template, name, proto, args) \
564 DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))
565
551#endif /* ifdef TRACE_EVENT (see note above) */ 566#endif /* ifdef TRACE_EVENT (see note above) */
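[Editor's note on the *_NOP macros added above: they give compile-time stubs, so trace_<name>() becomes an empty inline and trace_<name>_enabled() returns false, and call sites need no #ifdefs. A minimal hypothetical use follows; foo_sample and CONFIG_FOO_TRACE are made up for illustration, the real users are the TRACE_EVENT_RCU and DEFINE_EVENT_SCHEDSTAT wrappers in the rcu.h and sched.h hunks further down.]

  /* Hypothetical example -- foo_sample and CONFIG_FOO_TRACE are not real. */
  #ifdef CONFIG_FOO_TRACE
  #define TRACE_EVENT_FOO         TRACE_EVENT
  #else
  #define TRACE_EVENT_FOO         TRACE_EVENT_NOP
  #endif

  TRACE_EVENT_FOO(foo_sample,

          TP_PROTO(int value),

          TP_ARGS(value),

          TP_STRUCT__entry(
                  __field(int, value)
          ),

          TP_fast_assign(
                  __entry->value = value;
          ),

          TP_printk("value=%d", __entry->value)
  );

  /* Call sites compile either way; with CONFIG_FOO_TRACE=n this is a no-op. */
  static inline void foo_do_work(int v)
  {
          if (trace_foo_sample_enabled())
                  trace_foo_sample(v);
  }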
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index cb30c5532144..bd75f97867b9 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -46,6 +46,12 @@
46 assign, print, reg, unreg) \ 46 assign, print, reg, unreg) \
47 DEFINE_TRACE_FN(name, reg, unreg) 47 DEFINE_TRACE_FN(name, reg, unreg)
48 48
49#undef TRACE_EVENT_NOP
50#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
51
52#undef DEFINE_EVENT_NOP
53#define DEFINE_EVENT_NOP(template, name, proto, args)
54
49#undef DEFINE_EVENT 55#undef DEFINE_EVENT
50#define DEFINE_EVENT(template, name, proto, args) \ 56#define DEFINE_EVENT(template, name, proto, args) \
51 DEFINE_TRACE(name) 57 DEFINE_TRACE(name)
@@ -102,6 +108,8 @@
102#undef TRACE_EVENT_FN 108#undef TRACE_EVENT_FN
103#undef TRACE_EVENT_FN_COND 109#undef TRACE_EVENT_FN_COND
104#undef TRACE_EVENT_CONDITION 110#undef TRACE_EVENT_CONDITION
111#undef TRACE_EVENT_NOP
112#undef DEFINE_EVENT_NOP
105#undef DECLARE_EVENT_CLASS 113#undef DECLARE_EVENT_CLASS
106#undef DEFINE_EVENT 114#undef DEFINE_EVENT
107#undef DEFINE_EVENT_FN 115#undef DEFINE_EVENT_FN
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 80339fd14c1c..02a3f78f7cd8 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -7,6 +7,12 @@
7 7
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10#ifdef CONFIG_RCU_TRACE
11#define TRACE_EVENT_RCU TRACE_EVENT
12#else
13#define TRACE_EVENT_RCU TRACE_EVENT_NOP
14#endif
15
10/* 16/*
11 * Tracepoint for start/end markers used for utilization calculations. 17 * Tracepoint for start/end markers used for utilization calculations.
12 * By convention, the string is of the following forms: 18 * By convention, the string is of the following forms:
@@ -35,8 +41,6 @@ TRACE_EVENT(rcu_utilization,
35 TP_printk("%s", __entry->s) 41 TP_printk("%s", __entry->s)
36); 42);
37 43
38#ifdef CONFIG_RCU_TRACE
39
40#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) 44#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
41 45
42/* 46/*
@@ -62,7 +66,7 @@ TRACE_EVENT(rcu_utilization,
62 * "end": End a grace period. 66 * "end": End a grace period.
63 * "cpuend": CPU first notices a grace-period end. 67 * "cpuend": CPU first notices a grace-period end.
64 */ 68 */
65TRACE_EVENT(rcu_grace_period, 69TRACE_EVENT_RCU(rcu_grace_period,
66 70
67 TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent), 71 TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
68 72
@@ -101,7 +105,7 @@ TRACE_EVENT(rcu_grace_period,
101 * "Cleanup": Clean up rcu_node structure after previous GP. 105 * "Cleanup": Clean up rcu_node structure after previous GP.
102 * "CleanupMore": Clean up, and another GP is needed. 106 * "CleanupMore": Clean up, and another GP is needed.
103 */ 107 */
104TRACE_EVENT(rcu_future_grace_period, 108TRACE_EVENT_RCU(rcu_future_grace_period,
105 109
106 TP_PROTO(const char *rcuname, unsigned long gp_seq, 110 TP_PROTO(const char *rcuname, unsigned long gp_seq,
107 unsigned long gp_seq_req, u8 level, int grplo, int grphi, 111 unsigned long gp_seq_req, u8 level, int grplo, int grphi,
@@ -141,7 +145,7 @@ TRACE_EVENT(rcu_future_grace_period,
141 * rcu_node structure, and the mask of CPUs that will be waited for. 145 * rcu_node structure, and the mask of CPUs that will be waited for.
142 * All but the type of RCU are extracted from the rcu_node structure. 146 * All but the type of RCU are extracted from the rcu_node structure.
143 */ 147 */
144TRACE_EVENT(rcu_grace_period_init, 148TRACE_EVENT_RCU(rcu_grace_period_init,
145 149
146 TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level, 150 TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
147 int grplo, int grphi, unsigned long qsmask), 151 int grplo, int grphi, unsigned long qsmask),
@@ -186,7 +190,7 @@ TRACE_EVENT(rcu_grace_period_init,
186 * "endwake": Woke piggybackers up. 190 * "endwake": Woke piggybackers up.
187 * "done": Someone else did the expedited grace period for us. 191 * "done": Someone else did the expedited grace period for us.
188 */ 192 */
189TRACE_EVENT(rcu_exp_grace_period, 193TRACE_EVENT_RCU(rcu_exp_grace_period,
190 194
191 TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent), 195 TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
192 196
@@ -218,7 +222,7 @@ TRACE_EVENT(rcu_exp_grace_period,
218 * "nxtlvl": Advance to next level of rcu_node funnel 222 * "nxtlvl": Advance to next level of rcu_node funnel
219 * "wait": Wait for someone else to do expedited GP 223 * "wait": Wait for someone else to do expedited GP
220 */ 224 */
221TRACE_EVENT(rcu_exp_funnel_lock, 225TRACE_EVENT_RCU(rcu_exp_funnel_lock,
222 226
223 TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi, 227 TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
224 const char *gpevent), 228 const char *gpevent),
@@ -269,7 +273,7 @@ TRACE_EVENT(rcu_exp_funnel_lock,
269 * "WaitQueue": Enqueue partially done, timed wait for it to complete. 273 * "WaitQueue": Enqueue partially done, timed wait for it to complete.
270 * "WokeQueue": Partial enqueue now complete. 274 * "WokeQueue": Partial enqueue now complete.
271 */ 275 */
272TRACE_EVENT(rcu_nocb_wake, 276TRACE_EVENT_RCU(rcu_nocb_wake,
273 277
274 TP_PROTO(const char *rcuname, int cpu, const char *reason), 278 TP_PROTO(const char *rcuname, int cpu, const char *reason),
275 279
@@ -297,7 +301,7 @@ TRACE_EVENT(rcu_nocb_wake,
297 * include SRCU), the grace-period number that the task is blocking 301 * include SRCU), the grace-period number that the task is blocking
298 * (the current or the next), and the task's PID. 302 * (the current or the next), and the task's PID.
299 */ 303 */
300TRACE_EVENT(rcu_preempt_task, 304TRACE_EVENT_RCU(rcu_preempt_task,
301 305
302 TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq), 306 TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
303 307
@@ -324,7 +328,7 @@ TRACE_EVENT(rcu_preempt_task,
324 * read-side critical section exiting that critical section. Track the 328 * read-side critical section exiting that critical section. Track the
325 * type of RCU (which one day might include SRCU) and the task's PID. 329 * type of RCU (which one day might include SRCU) and the task's PID.
326 */ 330 */
327TRACE_EVENT(rcu_unlock_preempted_task, 331TRACE_EVENT_RCU(rcu_unlock_preempted_task,
328 332
329 TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid), 333 TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
330 334
@@ -353,7 +357,7 @@ TRACE_EVENT(rcu_unlock_preempted_task,
353 * whether there are any blocked tasks blocking the current grace period. 357 * whether there are any blocked tasks blocking the current grace period.
354 * All but the type of RCU are extracted from the rcu_node structure. 358 * All but the type of RCU are extracted from the rcu_node structure.
355 */ 359 */
356TRACE_EVENT(rcu_quiescent_state_report, 360TRACE_EVENT_RCU(rcu_quiescent_state_report,
357 361
358 TP_PROTO(const char *rcuname, unsigned long gp_seq, 362 TP_PROTO(const char *rcuname, unsigned long gp_seq,
359 unsigned long mask, unsigned long qsmask, 363 unsigned long mask, unsigned long qsmask,
@@ -396,7 +400,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
396 * state, which can be "dti" for dyntick-idle mode or "kick" when kicking 400 * state, which can be "dti" for dyntick-idle mode or "kick" when kicking
397 * a CPU that has been in dyntick-idle mode for too long. 401 * a CPU that has been in dyntick-idle mode for too long.
398 */ 402 */
399TRACE_EVENT(rcu_fqs, 403TRACE_EVENT_RCU(rcu_fqs,
400 404
401 TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent), 405 TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
402 406
@@ -436,7 +440,7 @@ TRACE_EVENT(rcu_fqs,
436 * events use two separate counters, and that the "++=" and "--=" events 440 * events use two separate counters, and that the "++=" and "--=" events
437 * for irq/NMI will change the counter by two, otherwise by one. 441 * for irq/NMI will change the counter by two, otherwise by one.
438 */ 442 */
439TRACE_EVENT(rcu_dyntick, 443TRACE_EVENT_RCU(rcu_dyntick,
440 444
441 TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks), 445 TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
442 446
@@ -468,7 +472,7 @@ TRACE_EVENT(rcu_dyntick,
468 * number of lazy callbacks queued, and the fourth element is the 472 * number of lazy callbacks queued, and the fourth element is the
469 * total number of callbacks queued. 473 * total number of callbacks queued.
470 */ 474 */
471TRACE_EVENT(rcu_callback, 475TRACE_EVENT_RCU(rcu_callback,
472 476
473 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy, 477 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
474 long qlen), 478 long qlen),
@@ -504,7 +508,7 @@ TRACE_EVENT(rcu_callback,
504 * the fourth argument is the number of lazy callbacks queued, and the 508 * the fourth argument is the number of lazy callbacks queued, and the
505 * fifth argument is the total number of callbacks queued. 509 * fifth argument is the total number of callbacks queued.
506 */ 510 */
507TRACE_EVENT(rcu_kfree_callback, 511TRACE_EVENT_RCU(rcu_kfree_callback,
508 512
509 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, 513 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
510 long qlen_lazy, long qlen), 514 long qlen_lazy, long qlen),
@@ -539,7 +543,7 @@ TRACE_EVENT(rcu_kfree_callback,
539 * the total number of callbacks queued, and the fourth argument is 543 * the total number of callbacks queued, and the fourth argument is
540 * the current RCU-callback batch limit. 544 * the current RCU-callback batch limit.
541 */ 545 */
542TRACE_EVENT(rcu_batch_start, 546TRACE_EVENT_RCU(rcu_batch_start,
543 547
544 TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit), 548 TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
545 549
@@ -569,7 +573,7 @@ TRACE_EVENT(rcu_batch_start,
569 * The first argument is the type of RCU, and the second argument is 573 * The first argument is the type of RCU, and the second argument is
570 * a pointer to the RCU callback itself. 574 * a pointer to the RCU callback itself.
571 */ 575 */
572TRACE_EVENT(rcu_invoke_callback, 576TRACE_EVENT_RCU(rcu_invoke_callback,
573 577
574 TP_PROTO(const char *rcuname, struct rcu_head *rhp), 578 TP_PROTO(const char *rcuname, struct rcu_head *rhp),
575 579
@@ -598,7 +602,7 @@ TRACE_EVENT(rcu_invoke_callback,
598 * is the offset of the callback within the enclosing RCU-protected 602 * is the offset of the callback within the enclosing RCU-protected
599 * data structure. 603 * data structure.
600 */ 604 */
601TRACE_EVENT(rcu_invoke_kfree_callback, 605TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
602 606
603 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), 607 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
604 608
@@ -631,7 +635,7 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
631 * and the sixth argument (risk) is the return value from 635 * and the sixth argument (risk) is the return value from
632 * rcu_is_callbacks_kthread(). 636 * rcu_is_callbacks_kthread().
633 */ 637 */
634TRACE_EVENT(rcu_batch_end, 638TRACE_EVENT_RCU(rcu_batch_end,
635 639
636 TP_PROTO(const char *rcuname, int callbacks_invoked, 640 TP_PROTO(const char *rcuname, int callbacks_invoked,
637 char cb, char nr, char iit, char risk), 641 char cb, char nr, char iit, char risk),
@@ -673,7 +677,7 @@ TRACE_EVENT(rcu_batch_end,
673 * callback address can be NULL. 677 * callback address can be NULL.
674 */ 678 */
675#define RCUTORTURENAME_LEN 8 679#define RCUTORTURENAME_LEN 8
676TRACE_EVENT(rcu_torture_read, 680TRACE_EVENT_RCU(rcu_torture_read,
677 681
678 TP_PROTO(const char *rcutorturename, struct rcu_head *rhp, 682 TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
679 unsigned long secs, unsigned long c_old, unsigned long c), 683 unsigned long secs, unsigned long c_old, unsigned long c),
@@ -721,7 +725,7 @@ TRACE_EVENT(rcu_torture_read,
721 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument 725 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
722 * is the count of remaining callbacks, and "done" is the piggybacking count. 726 * is the count of remaining callbacks, and "done" is the piggybacking count.
723 */ 727 */
724TRACE_EVENT(rcu_barrier, 728TRACE_EVENT_RCU(rcu_barrier,
725 729
726 TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done), 730 TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
727 731
@@ -748,41 +752,6 @@ TRACE_EVENT(rcu_barrier,
748 __entry->done) 752 __entry->done)
749); 753);
750 754
751#else /* #ifdef CONFIG_RCU_TRACE */
752
753#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
754#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
755 level, grplo, grphi, event) \
756 do { } while (0)
757#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
758 qsmask) do { } while (0)
759#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
760 do { } while (0)
761#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
762 do { } while (0)
763#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
764#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
765#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
766#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
767 grplo, grphi, gp_tasks) do { } \
768 while (0)
769#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
770#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
771#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
772#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
773 do { } while (0)
774#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
775 do { } while (0)
776#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
777#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
778#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
779 do { } while (0)
780#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
781 do { } while (0)
782#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
783
784#endif /* #else #ifdef CONFIG_RCU_TRACE */
785
786#endif /* _TRACE_RCU_H */ 755#endif /* _TRACE_RCU_H */
787 756
788/* This part must be outside protection */ 757/* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9a4bdfadab07..c8c7c7efb487 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -241,7 +241,6 @@ DECLARE_EVENT_CLASS(sched_process_template,
241DEFINE_EVENT(sched_process_template, sched_process_free, 241DEFINE_EVENT(sched_process_template, sched_process_free,
242 TP_PROTO(struct task_struct *p), 242 TP_PROTO(struct task_struct *p),
243 TP_ARGS(p)); 243 TP_ARGS(p));
244
245 244
246/* 245/*
247 * Tracepoint for a task exiting: 246 * Tracepoint for a task exiting:
@@ -336,11 +335,20 @@ TRACE_EVENT(sched_process_exec,
336 __entry->pid, __entry->old_pid) 335 __entry->pid, __entry->old_pid)
337); 336);
338 337
338
339#ifdef CONFIG_SCHEDSTATS
340#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
341#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
342#else
343#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
344#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
345#endif
346
339/* 347/*
340 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE 348 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
341 * adding sched_stat support to SCHED_FIFO/RR would be welcome. 349 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
342 */ 350 */
343DECLARE_EVENT_CLASS(sched_stat_template, 351DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
344 352
345 TP_PROTO(struct task_struct *tsk, u64 delay), 353 TP_PROTO(struct task_struct *tsk, u64 delay),
346 354
@@ -363,12 +371,11 @@ DECLARE_EVENT_CLASS(sched_stat_template,
363 (unsigned long long)__entry->delay) 371 (unsigned long long)__entry->delay)
364); 372);
365 373
366
367/* 374/*
368 * Tracepoint for accounting wait time (time the task is runnable 375 * Tracepoint for accounting wait time (time the task is runnable
369 * but not actually running due to scheduler contention). 376 * but not actually running due to scheduler contention).
370 */ 377 */
371DEFINE_EVENT(sched_stat_template, sched_stat_wait, 378DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
372 TP_PROTO(struct task_struct *tsk, u64 delay), 379 TP_PROTO(struct task_struct *tsk, u64 delay),
373 TP_ARGS(tsk, delay)); 380 TP_ARGS(tsk, delay));
374 381
@@ -376,7 +383,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_wait,
376 * Tracepoint for accounting sleep time (time the task is not runnable, 383 * Tracepoint for accounting sleep time (time the task is not runnable,
377 * including iowait, see below). 384 * including iowait, see below).
378 */ 385 */
379DEFINE_EVENT(sched_stat_template, sched_stat_sleep, 386DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
380 TP_PROTO(struct task_struct *tsk, u64 delay), 387 TP_PROTO(struct task_struct *tsk, u64 delay),
381 TP_ARGS(tsk, delay)); 388 TP_ARGS(tsk, delay));
382 389
@@ -384,14 +391,14 @@ DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
384 * Tracepoint for accounting iowait time (time the task is not runnable 391 * Tracepoint for accounting iowait time (time the task is not runnable
385 * due to waiting on IO to complete). 392 * due to waiting on IO to complete).
386 */ 393 */
387DEFINE_EVENT(sched_stat_template, sched_stat_iowait, 394DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
388 TP_PROTO(struct task_struct *tsk, u64 delay), 395 TP_PROTO(struct task_struct *tsk, u64 delay),
389 TP_ARGS(tsk, delay)); 396 TP_ARGS(tsk, delay));
390 397
391/* 398/*
392 * Tracepoint for accounting blocked time (time the task is in uninterruptible). 399 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
393 */ 400 */
394DEFINE_EVENT(sched_stat_template, sched_stat_blocked, 401DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
395 TP_PROTO(struct task_struct *tsk, u64 delay), 402 TP_PROTO(struct task_struct *tsk, u64 delay),
396 TP_ARGS(tsk, delay)); 403 TP_ARGS(tsk, delay));
397 404
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index f6fbaff10e71..91cd519756d3 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1208,14 +1208,6 @@ void klp_module_going(struct module *mod)
1208 1208
1209static int __init klp_init(void) 1209static int __init klp_init(void)
1210{ 1210{
1211 int ret;
1212
1213 ret = klp_check_compiler_support();
1214 if (ret) {
1215 pr_info("Your compiler is too old; turning off.\n");
1216 return -EINVAL;
1217 }
1218
1219 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj); 1211 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1220 if (!klp_root_kobj) 1212 if (!klp_root_kobj)
1221 return -ENOMEM; 1213 return -ENOMEM;
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 4b58c907b4b7..390aab20115e 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -11,11 +11,6 @@
11#define __LINUX_RCU_H 11#define __LINUX_RCU_H
12 12
13#include <trace/events/rcu.h> 13#include <trace/events/rcu.h>
14#ifdef CONFIG_RCU_TRACE
15#define RCU_TRACE(stmt) stmt
16#else /* #ifdef CONFIG_RCU_TRACE */
17#define RCU_TRACE(stmt)
18#endif /* #else #ifdef CONFIG_RCU_TRACE */
19 14
20/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */ 15/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
21#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1) 16#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
@@ -216,12 +211,12 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
216 211
217 rcu_lock_acquire(&rcu_callback_map); 212 rcu_lock_acquire(&rcu_callback_map);
218 if (__is_kfree_rcu_offset(offset)) { 213 if (__is_kfree_rcu_offset(offset)) {
219 RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);) 214 trace_rcu_invoke_kfree_callback(rn, head, offset);
220 kfree((void *)head - offset); 215 kfree((void *)head - offset);
221 rcu_lock_release(&rcu_callback_map); 216 rcu_lock_release(&rcu_callback_map);
222 return true; 217 return true;
223 } else { 218 } else {
224 RCU_TRACE(trace_rcu_invoke_callback(rn, head);) 219 trace_rcu_invoke_callback(rn, head);
225 f = head->func; 220 f = head->func;
226 WRITE_ONCE(head->func, (rcu_callback_t)0L); 221 WRITE_ONCE(head->func, (rcu_callback_t)0L);
227 f(head); 222 f(head);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b4d88a594785..980ca3ca643f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1969,14 +1969,14 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
1969 */ 1969 */
1970int rcutree_dying_cpu(unsigned int cpu) 1970int rcutree_dying_cpu(unsigned int cpu)
1971{ 1971{
1972 RCU_TRACE(bool blkd;) 1972 bool blkd;
1973 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);) 1973 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1974 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;) 1974 struct rcu_node *rnp = rdp->mynode;
1975 1975
1976 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 1976 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
1977 return 0; 1977 return 0;
1978 1978
1979 RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);) 1979 blkd = !!(rnp->qsmask & rdp->grpmask);
1980 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, 1980 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
1981 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 1981 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
1982 return 0; 1982 return 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b920358dd8f7..a12aff849c04 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -70,12 +70,8 @@
70#define INIT_OPS_HASH(opsname) \ 70#define INIT_OPS_HASH(opsname) \
71 .func_hash = &opsname.local_hash, \ 71 .func_hash = &opsname.local_hash, \
72 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 72 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
73#define ASSIGN_OPS_HASH(opsname, val) \
74 .func_hash = val, \
75 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
76#else 73#else
77#define INIT_OPS_HASH(opsname) 74#define INIT_OPS_HASH(opsname)
78#define ASSIGN_OPS_HASH(opsname, val)
79#endif 75#endif
80 76
81enum { 77enum {
@@ -3880,7 +3876,7 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3880static bool module_exists(const char *module) 3876static bool module_exists(const char *module)
3881{ 3877{
3882 /* All modules have the symbol __this_module */ 3878 /* All modules have the symbol __this_module */
3883 const char this_mod[] = "__this_module"; 3879 static const char this_mod[] = "__this_module";
3884 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 3880 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
3885 unsigned long val; 3881 unsigned long val;
3886 int n; 3882 int n;
@@ -6265,6 +6261,9 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6265 preempt_disable_notrace(); 6261 preempt_disable_notrace();
6266 6262
6267 do_for_each_ftrace_op(op, ftrace_ops_list) { 6263 do_for_each_ftrace_op(op, ftrace_ops_list) {
6264 /* Stub functions don't need to be called nor tested */
6265 if (op->flags & FTRACE_OPS_FL_STUB)
6266 continue;
6268 /* 6267 /*
6269 * Check the following for each ops before calling their func: 6268 * Check the following for each ops before calling their func:
6270 * if RCU flag is set, then rcu_is_watching() must be true 6269 * if RCU flag is set, then rcu_is_watching() must be true
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 4ee8d8aa3d0f..05b0b3139ebc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4979,7 +4979,7 @@ static __init int rb_write_something(struct rb_test_data *data, bool nested)
4979 cnt = data->cnt + (nested ? 27 : 0); 4979 cnt = data->cnt + (nested ? 27 : 0);
4980 4980
4981 /* Multiply cnt by ~e, to make some unique increment */ 4981 /* Multiply cnt by ~e, to make some unique increment */
4982 size = (data->cnt * 68 / 25) % (sizeof(rb_string) - 1); 4982 size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4983 4983
4984 len = size + sizeof(struct rb_item); 4984 len = size + sizeof(struct rb_item);
4985 4985
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index ffba6789c0e2..0564f6db0561 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -362,7 +362,7 @@ static void ring_buffer_producer(void)
362 hit--; /* make it non zero */ 362 hit--; /* make it non zero */
363 } 363 }
364 364
365 /* Caculate the average time in nanosecs */ 365 /* Calculate the average time in nanosecs */
366 avg = NSEC_PER_MSEC / (hit + missed); 366 avg = NSEC_PER_MSEC / (hit + missed);
367 trace_printk("%ld ns per entry\n", avg); 367 trace_printk("%ld ns per entry\n", avg);
368 } 368 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ec439999f387..2c92b3d9ea30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1727,6 +1727,10 @@ static __init int init_trace_selftests(void)
1727 pr_info("Running postponed tracer tests:\n"); 1727 pr_info("Running postponed tracer tests:\n");
1728 1728
1729 list_for_each_entry_safe(p, n, &postponed_selftests, list) { 1729 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1730 /* This loop can take minutes when sanitizers are enabled, so
1731 * lets make sure we allow RCU processing.
1732 */
1733 cond_resched();
1730 ret = run_tracer_selftest(p->type); 1734 ret = run_tracer_selftest(p->type);
1731 /* If the test fails, then warn and remove from available_tracers */ 1735 /* If the test fails, then warn and remove from available_tracers */
1732 if (ret < 0) { 1736 if (ret < 0) {
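The cond_resched() added above is the usual remedy for a long kernel loop that would otherwise stall the scheduler and RCU grace periods, which is exactly what happens when sanitizers make each postponed selftest take minutes. The general shape of the pattern, sketched with an illustrative do_one_unit() helper:

#include <linux/sched.h>

static int run_many_units(int nr)
{
	int i, ret;

	for (i = 0; i < nr; i++) {
		/* Each unit may run for a long time; let other tasks and
		 * RCU make progress between iterations. */
		cond_resched();

		ret = do_one_unit(i);	/* illustrative helper, not a real API */
		if (ret < 0)
			return ret;
	}
	return 0;
}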
@@ -3045,6 +3049,7 @@ void trace_printk_init_buffers(void)
3045 if (global_trace.trace_buffer.buffer) 3049 if (global_trace.trace_buffer.buffer)
3046 tracing_start_cmdline_record(); 3050 tracing_start_cmdline_record();
3047} 3051}
3052EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3048 3053
3049void trace_printk_start_comm(void) 3054void trace_printk_start_comm(void)
3050{ 3055{
@@ -3205,6 +3210,7 @@ int trace_array_printk(struct trace_array *tr,
3205 va_end(ap); 3210 va_end(ap);
3206 return ret; 3211 return ret;
3207} 3212}
3213EXPORT_SYMBOL_GPL(trace_array_printk);
3208 3214
3209__printf(3, 4) 3215__printf(3, 4)
3210int trace_array_printk_buf(struct ring_buffer *buffer, 3216int trace_array_printk_buf(struct ring_buffer *buffer,
@@ -3483,33 +3489,68 @@ static void s_stop(struct seq_file *m, void *p)
3483} 3489}
3484 3490
3485static void 3491static void
3492get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3493 unsigned long *entries, int cpu)
3494{
3495 unsigned long count;
3496
3497 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3498 /*
3499 * If this buffer has skipped entries, then we hold all
3500 * entries for the trace and we need to ignore the
3501 * ones before the time stamp.
3502 */
3503 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3504 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3505 /* total is the same as the entries */
3506 *total = count;
3507 } else
3508 *total = count +
3509 ring_buffer_overrun_cpu(buf->buffer, cpu);
3510 *entries = count;
3511}
3512
3513static void
3486get_total_entries(struct trace_buffer *buf, 3514get_total_entries(struct trace_buffer *buf,
3487 unsigned long *total, unsigned long *entries) 3515 unsigned long *total, unsigned long *entries)
3488{ 3516{
3489 unsigned long count; 3517 unsigned long t, e;
3490 int cpu; 3518 int cpu;
3491 3519
3492 *total = 0; 3520 *total = 0;
3493 *entries = 0; 3521 *entries = 0;
3494 3522
3495 for_each_tracing_cpu(cpu) { 3523 for_each_tracing_cpu(cpu) {
3496 count = ring_buffer_entries_cpu(buf->buffer, cpu); 3524 get_total_entries_cpu(buf, &t, &e, cpu);
3497 /* 3525 *total += t;
3498 * If this buffer has skipped entries, then we hold all 3526 *entries += e;
3499 * entries for the trace and we need to ignore the
3500 * ones before the time stamp.
3501 */
3502 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3503 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3504 /* total is the same as the entries */
3505 *total += count;
3506 } else
3507 *total += count +
3508 ring_buffer_overrun_cpu(buf->buffer, cpu);
3509 *entries += count;
3510 } 3527 }
3511} 3528}
3512 3529
3530unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3531{
3532 unsigned long total, entries;
3533
3534 if (!tr)
3535 tr = &global_trace;
3536
3537 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3538
3539 return entries;
3540}
3541
3542unsigned long trace_total_entries(struct trace_array *tr)
3543{
3544 unsigned long total, entries;
3545
3546 if (!tr)
3547 tr = &global_trace;
3548
3549 get_total_entries(&tr->trace_buffer, &total, &entries);
3550
3551 return entries;
3552}
3553
3513static void print_lat_help_header(struct seq_file *m) 3554static void print_lat_help_header(struct seq_file *m)
3514{ 3555{
3515 seq_puts(m, "# _------=> CPU# \n" 3556 seq_puts(m, "# _------=> CPU# \n"
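The two helpers above, declared in trace.h later in this diff, let in-kernel callers size a dump before iterating, which is what the kdb "skip all but the last few entries" change in this series needs. A hedged sketch of such a consumer; everything except the two helpers is illustrative:

static void dump_last_entries(struct trace_array *tr, int cpu, long last_n)
{
	unsigned long total;
	unsigned long skip = 0;

	if (cpu < 0)					/* dump all CPUs */
		total = trace_total_entries(tr);	/* NULL tr means the top-level array */
	else
		total = trace_total_entries_cpu(tr, cpu);

	if (last_n > 0 && total > (unsigned long)last_n)
		skip = total - last_n;

	/* ... iterate the buffer, silently discarding the first 'skip' entries ... */
}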
@@ -3548,25 +3589,18 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
3548 unsigned int flags) 3589 unsigned int flags)
3549{ 3590{
3550 bool tgid = flags & TRACE_ITER_RECORD_TGID; 3591 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3551 const char tgid_space[] = " "; 3592 const char *space = " ";
3552 const char space[] = " "; 3593 int prec = tgid ? 10 : 2;
3553 3594
3554 print_event_info(buf, m); 3595 print_event_info(buf, m);
3555 3596
3556 seq_printf(m, "# %s _-----=> irqs-off\n", 3597 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3557 tgid ? tgid_space : space); 3598 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3558 seq_printf(m, "# %s / _----=> need-resched\n", 3599 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3559 tgid ? tgid_space : space); 3600 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3560 seq_printf(m, "# %s| / _---=> hardirq/softirq\n", 3601 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3561 tgid ? tgid_space : space); 3602 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3562 seq_printf(m, "# %s|| / _--=> preempt-depth\n", 3603 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3563 tgid ? tgid_space : space);
3564 seq_printf(m, "# %s||| / delay\n",
3565 tgid ? tgid_space : space);
3566 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
3567 tgid ? " TGID " : space);
3568 seq_printf(m, "# | | %s | |||| | |\n",
3569 tgid ? " | " : space);
3570} 3604}
3571 3605
3572void 3606void
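The rewrite above leans on printf's precision-for-strings behaviour: "%.*s" prints at most prec characters of its argument, so one long literal can be trimmed to either 2 or 10 columns instead of selecting between two padding strings on every line. A small stand-alone illustration:

#include <stdio.h>

int main(void)
{
	const char *space = "            ";	/* longer than any precision used */
	int prec = 10;				/* 10 when TGID is shown, 2 otherwise */

	printf("# %.*s _-----=> irqs-off\n", prec, space);
	printf("# TASK-PID %.*sCPU#  |||| TIMESTAMP\n", prec, "   TGID   ");
	return 0;
}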
@@ -4692,6 +4726,7 @@ static const char readme_msg[] =
4692 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 4726 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4693 " current_tracer\t- function and latency tracers\n" 4727 " current_tracer\t- function and latency tracers\n"
4694 " available_tracers\t- list of configured tracers for current_tracer\n" 4728 " available_tracers\t- list of configured tracers for current_tracer\n"
4729 " error_log\t- error log for failed commands (that support it)\n"
4695 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 4730 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4696 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 4731 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4697 " trace_clock\t\t-change the clock used to order events\n" 4732 " trace_clock\t\t-change the clock used to order events\n"
@@ -4712,7 +4747,7 @@ static const char readme_msg[] =
4712 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 4747 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4713 "\t\t\t Remove sub-buffer with rmdir\n" 4748 "\t\t\t Remove sub-buffer with rmdir\n"
4714 " trace_options\t\t- Set format or modify how tracing happens\n" 4749 " trace_options\t\t- Set format or modify how tracing happens\n"
4715 "\t\t\t Disable an option by adding a suffix 'no' to the\n" 4750 "\t\t\t Disable an option by prefixing 'no' to the\n"
4716 "\t\t\t option name\n" 4751 "\t\t\t option name\n"
4717 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" 4752 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4718#ifdef CONFIG_DYNAMIC_FTRACE 4753#ifdef CONFIG_DYNAMIC_FTRACE
@@ -6296,13 +6331,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
6296 struct ring_buffer *buffer; 6331 struct ring_buffer *buffer;
6297 struct print_entry *entry; 6332 struct print_entry *entry;
6298 unsigned long irq_flags; 6333 unsigned long irq_flags;
6299 const char faulted[] = "<faulted>";
6300 ssize_t written; 6334 ssize_t written;
6301 int size; 6335 int size;
6302 int len; 6336 int len;
6303 6337
6304/* Used in tracing_mark_raw_write() as well */ 6338/* Used in tracing_mark_raw_write() as well */
6305#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ 6339#define FAULTED_STR "<faulted>"
6340#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6306 6341
6307 if (tracing_disabled) 6342 if (tracing_disabled)
6308 return -EINVAL; 6343 return -EINVAL;
@@ -6334,7 +6369,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
6334 6369
6335 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); 6370 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6336 if (len) { 6371 if (len) {
6337 memcpy(&entry->buf, faulted, FAULTED_SIZE); 6372 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6338 cnt = FAULTED_SIZE; 6373 cnt = FAULTED_SIZE;
6339 written = -EFAULT; 6374 written = -EFAULT;
6340 } else 6375 } else
@@ -6375,7 +6410,6 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6375 struct ring_buffer_event *event; 6410 struct ring_buffer_event *event;
6376 struct ring_buffer *buffer; 6411 struct ring_buffer *buffer;
6377 struct raw_data_entry *entry; 6412 struct raw_data_entry *entry;
6378 const char faulted[] = "<faulted>";
6379 unsigned long irq_flags; 6413 unsigned long irq_flags;
6380 ssize_t written; 6414 ssize_t written;
6381 int size; 6415 int size;
@@ -6415,7 +6449,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6415 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); 6449 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6416 if (len) { 6450 if (len) {
6417 entry->id = -1; 6451 entry->id = -1;
6418 memcpy(&entry->buf, faulted, FAULTED_SIZE); 6452 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6419 written = -EFAULT; 6453 written = -EFAULT;
6420 } else 6454 } else
6421 written = cnt; 6455 written = cnt;
@@ -6868,6 +6902,238 @@ static const struct file_operations snapshot_raw_fops = {
6868 6902
6869#endif /* CONFIG_TRACER_SNAPSHOT */ 6903#endif /* CONFIG_TRACER_SNAPSHOT */
6870 6904
6905#define TRACING_LOG_ERRS_MAX 8
6906#define TRACING_LOG_LOC_MAX 128
6907
6908#define CMD_PREFIX " Command: "
6909
6910struct err_info {
6911 const char **errs; /* ptr to loc-specific array of err strings */
6912 u8 type; /* index into errs -> specific err string */
6913 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
6914 u64 ts;
6915};
6916
6917struct tracing_log_err {
6918 struct list_head list;
6919 struct err_info info;
6920 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6921 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6922};
6923
6924static DEFINE_MUTEX(tracing_err_log_lock);
6925
6926struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
6927{
6928 struct tracing_log_err *err;
6929
6930 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
6931 err = kzalloc(sizeof(*err), GFP_KERNEL);
6932 if (!err)
6933 err = ERR_PTR(-ENOMEM);
6934 tr->n_err_log_entries++;
6935
6936 return err;
6937 }
6938
6939 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
6940 list_del(&err->list);
6941
6942 return err;
6943}
6944
6945/**
6946 * err_pos - find the position of a string within a command for error careting
6947 * @cmd: The tracing command that caused the error
6948 * @str: The string to position the caret at within @cmd
6949 *
 6950 * Finds the position of the first occurrence of @str within @cmd. The
6951 * return value can be passed to tracing_log_err() for caret placement
6952 * within @cmd.
6953 *
 6954 * Returns the index within @cmd of the first occurrence of @str or 0
6955 * if @str was not found.
6956 */
6957unsigned int err_pos(char *cmd, const char *str)
6958{
6959 char *found;
6960
6961 if (WARN_ON(!strlen(cmd)))
6962 return 0;
6963
6964 found = strstr(cmd, str);
6965 if (found)
6966 return found - cmd;
6967
6968 return 0;
6969}
6970
6971/**
6972 * tracing_log_err - write an error to the tracing error log
6973 * @tr: The associated trace array for the error (NULL for top level array)
6974 * @loc: A string describing where the error occurred
6975 * @cmd: The tracing command that caused the error
6976 * @errs: The array of loc-specific static error strings
6977 * @type: The index into errs[], which produces the specific static err string
6978 * @pos: The position the caret should be placed in the cmd
6979 *
6980 * Writes an error into tracing/error_log of the form:
6981 *
6982 * <loc>: error: <text>
6983 * Command: <cmd>
6984 * ^
6985 *
6986 * tracing/error_log is a small log file containing the last
6987 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
6988 * unless there has been a tracing error, and the error log can be
6989 * cleared and have its memory freed by writing the empty string in
6990 * truncation mode to it i.e. echo > tracing/error_log.
6991 *
6992 * NOTE: the @errs array along with the @type param are used to
6993 * produce a static error string - this string is not copied and saved
6994 * when the error is logged - only a pointer to it is saved. See
6995 * existing callers for examples of how static strings are typically
6996 * defined for use with tracing_log_err().
6997 */
6998void tracing_log_err(struct trace_array *tr,
6999 const char *loc, const char *cmd,
7000 const char **errs, u8 type, u8 pos)
7001{
7002 struct tracing_log_err *err;
7003
7004 if (!tr)
7005 tr = &global_trace;
7006
7007 mutex_lock(&tracing_err_log_lock);
7008 err = get_tracing_log_err(tr);
7009 if (PTR_ERR(err) == -ENOMEM) {
7010 mutex_unlock(&tracing_err_log_lock);
7011 return;
7012 }
7013
7014 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
 7015 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7016
7017 err->info.errs = errs;
7018 err->info.type = type;
7019 err->info.pos = pos;
7020 err->info.ts = local_clock();
7021
7022 list_add_tail(&err->list, &tr->err_log);
7023 mutex_unlock(&tracing_err_log_lock);
7024}
7025
7026static void clear_tracing_err_log(struct trace_array *tr)
7027{
7028 struct tracing_log_err *err, *next;
7029
7030 mutex_lock(&tracing_err_log_lock);
7031 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7032 list_del(&err->list);
7033 kfree(err);
7034 }
7035
7036 tr->n_err_log_entries = 0;
7037 mutex_unlock(&tracing_err_log_lock);
7038}
7039
7040static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7041{
7042 struct trace_array *tr = m->private;
7043
7044 mutex_lock(&tracing_err_log_lock);
7045
7046 return seq_list_start(&tr->err_log, *pos);
7047}
7048
7049static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7050{
7051 struct trace_array *tr = m->private;
7052
7053 return seq_list_next(v, &tr->err_log, pos);
7054}
7055
7056static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7057{
7058 mutex_unlock(&tracing_err_log_lock);
7059}
7060
7061static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7062{
7063 u8 i;
7064
7065 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7066 seq_putc(m, ' ');
7067 for (i = 0; i < pos; i++)
7068 seq_putc(m, ' ');
7069 seq_puts(m, "^\n");
7070}
7071
7072static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7073{
7074 struct tracing_log_err *err = v;
7075
7076 if (err) {
7077 const char *err_text = err->info.errs[err->info.type];
7078 u64 sec = err->info.ts;
7079 u32 nsec;
7080
7081 nsec = do_div(sec, NSEC_PER_SEC);
7082 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7083 err->loc, err_text);
7084 seq_printf(m, "%s", err->cmd);
7085 tracing_err_log_show_pos(m, err->info.pos);
7086 }
7087
7088 return 0;
7089}
7090
7091static const struct seq_operations tracing_err_log_seq_ops = {
7092 .start = tracing_err_log_seq_start,
7093 .next = tracing_err_log_seq_next,
7094 .stop = tracing_err_log_seq_stop,
7095 .show = tracing_err_log_seq_show
7096};
7097
7098static int tracing_err_log_open(struct inode *inode, struct file *file)
7099{
7100 struct trace_array *tr = inode->i_private;
7101 int ret = 0;
7102
7103 if (trace_array_get(tr) < 0)
7104 return -ENODEV;
7105
7106 /* If this file was opened for write, then erase contents */
7107 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7108 clear_tracing_err_log(tr);
7109
7110 if (file->f_mode & FMODE_READ) {
7111 ret = seq_open(file, &tracing_err_log_seq_ops);
7112 if (!ret) {
7113 struct seq_file *m = file->private_data;
7114 m->private = tr;
7115 } else {
7116 trace_array_put(tr);
7117 }
7118 }
7119 return ret;
7120}
7121
7122static ssize_t tracing_err_log_write(struct file *file,
7123 const char __user *buffer,
7124 size_t count, loff_t *ppos)
7125{
7126 return count;
7127}
7128
7129static const struct file_operations tracing_err_log_fops = {
7130 .open = tracing_err_log_open,
7131 .write = tracing_err_log_write,
7132 .read = seq_read,
7133 .llseek = seq_lseek,
7134 .release = tracing_release_generic_tr,
7135};
7136
6871static int tracing_buffers_open(struct inode *inode, struct file *filp) 7137static int tracing_buffers_open(struct inode *inode, struct file *filp)
6872{ 7138{
6873 struct trace_array *tr = inode->i_private; 7139 struct trace_array *tr = inode->i_private;
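To make the kernel-doc above concrete: a subsystem keeps an array of static error strings, passes the array plus an index into it, and uses err_pos() to position the caret. A hedged sketch following the pattern the filter and hist code adopt later in this diff; the MY_ERR_* names, my_errs[], my_parse() and keyword_is_known() are illustrative, not kernel APIs:

enum { MY_ERR_NONE, MY_ERR_BAD_KEYWORD };

static const char *my_errs[] = {
	"No error",		/* MY_ERR_NONE */
	"Unknown keyword",	/* MY_ERR_BAD_KEYWORD */
};

static int my_parse(struct trace_array *tr, char *cmd, const char *keyword)
{
	if (!keyword_is_known(keyword)) {	/* illustrative check */
		tracing_log_err(tr, "my_subsys: parse", cmd, my_errs,
				MY_ERR_BAD_KEYWORD, err_pos(cmd, keyword));
		return -EINVAL;
	}
	return 0;
}

Reading tracing/error_log afterwards shows the "<loc>: error: <text>" line, the offending command, and a caret under the keyword; writing an empty string to the file in truncation mode frees and clears the log.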
@@ -8033,7 +8299,7 @@ static void update_tracer_options(struct trace_array *tr)
8033 mutex_unlock(&trace_types_lock); 8299 mutex_unlock(&trace_types_lock);
8034} 8300}
8035 8301
8036static int instance_mkdir(const char *name) 8302struct trace_array *trace_array_create(const char *name)
8037{ 8303{
8038 struct trace_array *tr; 8304 struct trace_array *tr;
8039 int ret; 8305 int ret;
@@ -8072,6 +8338,7 @@ static int instance_mkdir(const char *name)
8072 INIT_LIST_HEAD(&tr->systems); 8338 INIT_LIST_HEAD(&tr->systems);
8073 INIT_LIST_HEAD(&tr->events); 8339 INIT_LIST_HEAD(&tr->events);
8074 INIT_LIST_HEAD(&tr->hist_vars); 8340 INIT_LIST_HEAD(&tr->hist_vars);
8341 INIT_LIST_HEAD(&tr->err_log);
8075 8342
8076 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 8343 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8077 goto out_free_tr; 8344 goto out_free_tr;
@@ -8097,7 +8364,7 @@ static int instance_mkdir(const char *name)
8097 mutex_unlock(&trace_types_lock); 8364 mutex_unlock(&trace_types_lock);
8098 mutex_unlock(&event_mutex); 8365 mutex_unlock(&event_mutex);
8099 8366
8100 return 0; 8367 return tr;
8101 8368
8102 out_free_tr: 8369 out_free_tr:
8103 free_trace_buffers(tr); 8370 free_trace_buffers(tr);
@@ -8109,33 +8376,21 @@ static int instance_mkdir(const char *name)
8109 mutex_unlock(&trace_types_lock); 8376 mutex_unlock(&trace_types_lock);
8110 mutex_unlock(&event_mutex); 8377 mutex_unlock(&event_mutex);
8111 8378
8112 return ret; 8379 return ERR_PTR(ret);
8380}
8381EXPORT_SYMBOL_GPL(trace_array_create);
8113 8382
8383static int instance_mkdir(const char *name)
8384{
8385 return PTR_ERR_OR_ZERO(trace_array_create(name));
8114} 8386}
8115 8387
8116static int instance_rmdir(const char *name) 8388static int __remove_instance(struct trace_array *tr)
8117{ 8389{
8118 struct trace_array *tr;
8119 int found = 0;
8120 int ret;
8121 int i; 8390 int i;
8122 8391
8123 mutex_lock(&event_mutex);
8124 mutex_lock(&trace_types_lock);
8125
8126 ret = -ENODEV;
8127 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8128 if (tr->name && strcmp(tr->name, name) == 0) {
8129 found = 1;
8130 break;
8131 }
8132 }
8133 if (!found)
8134 goto out_unlock;
8135
8136 ret = -EBUSY;
8137 if (tr->ref || (tr->current_trace && tr->current_trace->ref)) 8392 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
8138 goto out_unlock; 8393 return -EBUSY;
8139 8394
8140 list_del(&tr->list); 8395 list_del(&tr->list);
8141 8396
@@ -8161,10 +8416,46 @@ static int instance_rmdir(const char *name)
8161 free_cpumask_var(tr->tracing_cpumask); 8416 free_cpumask_var(tr->tracing_cpumask);
8162 kfree(tr->name); 8417 kfree(tr->name);
8163 kfree(tr); 8418 kfree(tr);
8419 tr = NULL;
8164 8420
8165 ret = 0; 8421 return 0;
8422}
8423
8424int trace_array_destroy(struct trace_array *tr)
8425{
8426 int ret;
8427
8428 if (!tr)
8429 return -EINVAL;
8430
8431 mutex_lock(&event_mutex);
8432 mutex_lock(&trace_types_lock);
8433
8434 ret = __remove_instance(tr);
8435
8436 mutex_unlock(&trace_types_lock);
8437 mutex_unlock(&event_mutex);
8438
8439 return ret;
8440}
8441EXPORT_SYMBOL_GPL(trace_array_destroy);
8442
8443static int instance_rmdir(const char *name)
8444{
8445 struct trace_array *tr;
8446 int ret;
8447
8448 mutex_lock(&event_mutex);
8449 mutex_lock(&trace_types_lock);
8450
8451 ret = -ENODEV;
8452 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8453 if (tr->name && strcmp(tr->name, name) == 0) {
8454 ret = __remove_instance(tr);
8455 break;
8456 }
8457 }
8166 8458
8167 out_unlock:
8168 mutex_unlock(&trace_types_lock); 8459 mutex_unlock(&trace_types_lock);
8169 mutex_unlock(&event_mutex); 8460 mutex_unlock(&event_mutex);
8170 8461
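trace_array_create() and trace_array_destroy() are exported so that kernel code, not just mkdir/rmdir under instances/, can own a private trace instance. A hedged module sketch combining them with the also-exported ftrace_set_clr_event() and trace_array_printk(); the instance name and event string are examples, and the prototypes for these exports are assumed to be visible to the module:

#include <linux/module.h>
#include <linux/kernel.h>

static struct trace_array *my_tr;

static int __init my_init(void)
{
	char event[] = "sched:sched_switch";	/* must be writable: it is tokenized in place */

	my_tr = trace_array_create("my_instance");
	if (IS_ERR(my_tr))
		return PTR_ERR(my_tr);

	ftrace_set_clr_event(my_tr, event, 1);	/* enable one event in this instance only */
	trace_array_printk(my_tr, _THIS_IP_, "instance up\n");
	return 0;
}

static void __exit my_exit(void)
{
	trace_array_destroy(my_tr);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");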
@@ -8254,6 +8545,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8254 tr, &snapshot_fops); 8545 tr, &snapshot_fops);
8255#endif 8546#endif
8256 8547
8548 trace_create_file("error_log", 0644, d_tracer,
8549 tr, &tracing_err_log_fops);
8550
8257 for_each_tracing_cpu(cpu) 8551 for_each_tracing_cpu(cpu)
8258 tracing_init_tracefs_percpu(tr, cpu); 8552 tracing_init_tracefs_percpu(tr, cpu);
8259 8553
@@ -8839,6 +9133,7 @@ __init static int tracer_alloc_buffers(void)
8839 INIT_LIST_HEAD(&global_trace.systems); 9133 INIT_LIST_HEAD(&global_trace.systems);
8840 INIT_LIST_HEAD(&global_trace.events); 9134 INIT_LIST_HEAD(&global_trace.events);
8841 INIT_LIST_HEAD(&global_trace.hist_vars); 9135 INIT_LIST_HEAD(&global_trace.hist_vars);
9136 INIT_LIST_HEAD(&global_trace.err_log);
8842 list_add(&global_trace.list, &ftrace_trace_arrays); 9137 list_add(&global_trace.list, &ftrace_trace_arrays);
8843 9138
8844 apply_trace_boot_options(); 9139 apply_trace_boot_options();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 639047b259d7..1974ce818ddb 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -293,11 +293,13 @@ struct trace_array {
293 int nr_topts; 293 int nr_topts;
294 bool clear_trace; 294 bool clear_trace;
295 int buffer_percent; 295 int buffer_percent;
296 unsigned int n_err_log_entries;
296 struct tracer *current_trace; 297 struct tracer *current_trace;
297 unsigned int trace_flags; 298 unsigned int trace_flags;
298 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE]; 299 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
299 unsigned int flags; 300 unsigned int flags;
300 raw_spinlock_t start_lock; 301 raw_spinlock_t start_lock;
302 struct list_head err_log;
301 struct dentry *dir; 303 struct dentry *dir;
302 struct dentry *options; 304 struct dentry *options;
303 struct dentry *percpu_dir; 305 struct dentry *percpu_dir;
@@ -719,6 +721,9 @@ void trace_init_global_iter(struct trace_iterator *iter);
719 721
720void tracing_iter_reset(struct trace_iterator *iter, int cpu); 722void tracing_iter_reset(struct trace_iterator *iter, int cpu);
721 723
724unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
725unsigned long trace_total_entries(struct trace_array *tr);
726
722void trace_function(struct trace_array *tr, 727void trace_function(struct trace_array *tr,
723 unsigned long ip, 728 unsigned long ip,
724 unsigned long parent_ip, 729 unsigned long parent_ip,
@@ -1545,7 +1550,8 @@ extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1545extern void print_subsystem_event_filter(struct event_subsystem *system, 1550extern void print_subsystem_event_filter(struct event_subsystem *system,
1546 struct trace_seq *s); 1551 struct trace_seq *s);
1547extern int filter_assign_type(const char *type); 1552extern int filter_assign_type(const char *type);
1548extern int create_event_filter(struct trace_event_call *call, 1553extern int create_event_filter(struct trace_array *tr,
1554 struct trace_event_call *call,
1549 char *filter_str, bool set_str, 1555 char *filter_str, bool set_str,
1550 struct event_filter **filterp); 1556 struct event_filter **filterp);
1551extern void free_event_filter(struct event_filter *filter); 1557extern void free_event_filter(struct event_filter *filter);
@@ -1876,6 +1882,11 @@ extern ssize_t trace_parse_run_command(struct file *file,
1876 const char __user *buffer, size_t count, loff_t *ppos, 1882 const char __user *buffer, size_t count, loff_t *ppos,
1877 int (*createfn)(int, char**)); 1883 int (*createfn)(int, char**));
1878 1884
1885extern unsigned int err_pos(char *cmd, const char *str);
1886extern void tracing_log_err(struct trace_array *tr,
1887 const char *loc, const char *cmd,
1888 const char **errs, u8 type, u8 pos);
1889
1879/* 1890/*
1880 * Normal trace_printk() and friends allocates special buffers 1891 * Normal trace_printk() and friends allocates special buffers
1881 * to do the manipulation, as well as saves the print formats 1892 * to do the manipulation, as well as saves the print formats
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 5b3b0c3c8a47..0ce3db67f556 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -832,6 +832,7 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
832 832
833 return ret; 833 return ret;
834} 834}
835EXPORT_SYMBOL_GPL(ftrace_set_clr_event);
835 836
836/** 837/**
837 * trace_set_clr_event - enable or disable an event 838 * trace_set_clr_event - enable or disable an event
@@ -1318,9 +1319,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1318 char buf[32]; 1319 char buf[32];
1319 int len; 1320 int len;
1320 1321
1321 if (*ppos)
1322 return 0;
1323
1324 if (unlikely(!id)) 1322 if (unlikely(!id))
1325 return -ENODEV; 1323 return -ENODEV;
1326 1324
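The removed check above short-circuited any read whose file offset was non-zero, so reading the id file in chunks smaller than the whole string only ever returned the first chunk. The usual pattern is to let simple_read_from_buffer() clamp and advance *ppos itself, roughly as sketched here; my_id_read() and the constant it prints are illustrative:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t my_id_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[32];
	int len;

	len = sprintf(buf, "%d\n", 42);		/* illustrative value */

	/* No "if (*ppos) return 0;" here: the helper handles the offset,
	 * so byte-at-a-time reads eventually see the whole string. */
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}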
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 05a66493a164..d3e59312ef40 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -66,7 +66,8 @@ static const char * ops[] = { OPS };
66 C(INVALID_FILTER, "Meaningless filter expression"), \ 66 C(INVALID_FILTER, "Meaningless filter expression"), \
67 C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \ 67 C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \
68 C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \ 68 C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \
69 C(NO_FILTER, "No filter found"), 69 C(ERRNO, "Error"), \
70 C(NO_FILTER, "No filter found")
70 71
71#undef C 72#undef C
72#define C(a, b) FILT_ERR_##a 73#define C(a, b) FILT_ERR_##a
@@ -76,7 +77,7 @@ enum { ERRORS };
76#undef C 77#undef C
77#define C(a, b) b 78#define C(a, b) b
78 79
79static char *err_text[] = { ERRORS }; 80static const char *err_text[] = { ERRORS };
80 81
81/* Called after a '!' character but "!=" and "!~" are not "not"s */ 82/* Called after a '!' character but "!=" and "!~" are not "not"s */
82static bool is_not(const char *str) 83static bool is_not(const char *str)
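Both this table and the hist one further down use the same X-macro trick: the ERRORS list is expanded twice, once with C(a, b) producing the enum name and once producing the string, so the enum index and the err_text[] entry can never drift apart. A stand-alone illustration of the pattern; MY_ERRORS and its two entries are made up:

#include <stdio.h>

#define MY_ERRORS			\
	C(NONE, "No error"),		\
	C(BAD_INPUT, "Bad input"),

#undef C
#define C(a, b)	MY_ERR_##a
enum { MY_ERRORS };			/* MY_ERR_NONE = 0, MY_ERR_BAD_INPUT = 1 */

#undef C
#define C(a, b)	b
static const char *my_err_text[] = { MY_ERRORS };

int main(void)
{
	printf("%d -> %s\n", MY_ERR_BAD_INPUT, my_err_text[MY_ERR_BAD_INPUT]);
	return 0;
}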
@@ -919,7 +920,8 @@ static void remove_filter_string(struct event_filter *filter)
919 filter->filter_string = NULL; 920 filter->filter_string = NULL;
920} 921}
921 922
922static void append_filter_err(struct filter_parse_error *pe, 923static void append_filter_err(struct trace_array *tr,
924 struct filter_parse_error *pe,
923 struct event_filter *filter) 925 struct event_filter *filter)
924{ 926{
925 struct trace_seq *s; 927 struct trace_seq *s;
@@ -947,8 +949,14 @@ static void append_filter_err(struct filter_parse_error *pe,
947 if (pe->lasterr > 0) { 949 if (pe->lasterr > 0) {
948 trace_seq_printf(s, "\n%*s", pos, "^"); 950 trace_seq_printf(s, "\n%*s", pos, "^");
949 trace_seq_printf(s, "\nparse_error: %s\n", err_text[pe->lasterr]); 951 trace_seq_printf(s, "\nparse_error: %s\n", err_text[pe->lasterr]);
952 tracing_log_err(tr, "event filter parse error",
953 filter->filter_string, err_text,
954 pe->lasterr, pe->lasterr_pos);
950 } else { 955 } else {
951 trace_seq_printf(s, "\nError: (%d)\n", pe->lasterr); 956 trace_seq_printf(s, "\nError: (%d)\n", pe->lasterr);
957 tracing_log_err(tr, "event filter parse error",
958 filter->filter_string, err_text,
959 FILT_ERR_ERRNO, 0);
952 } 960 }
953 trace_seq_putc(s, 0); 961 trace_seq_putc(s, 0);
954 buf = kmemdup_nul(s->buffer, s->seq.len, GFP_KERNEL); 962 buf = kmemdup_nul(s->buffer, s->seq.len, GFP_KERNEL);
@@ -1214,30 +1222,30 @@ static int parse_pred(const char *str, void *data,
1214 * (perf doesn't use it) and grab everything. 1222 * (perf doesn't use it) and grab everything.
1215 */ 1223 */
1216 if (strcmp(field->name, "ip") != 0) { 1224 if (strcmp(field->name, "ip") != 0) {
1217 parse_error(pe, FILT_ERR_IP_FIELD_ONLY, pos + i); 1225 parse_error(pe, FILT_ERR_IP_FIELD_ONLY, pos + i);
1218 goto err_free; 1226 goto err_free;
1219 } 1227 }
1220 pred->fn = filter_pred_none; 1228 pred->fn = filter_pred_none;
1221 1229
1222 /* 1230 /*
1223 * Quotes are not required, but if they exist then we need 1231 * Quotes are not required, but if they exist then we need
1224 * to read them till we hit a matching one. 1232 * to read them till we hit a matching one.
1225 */ 1233 */
1226 if (str[i] == '\'' || str[i] == '"') 1234 if (str[i] == '\'' || str[i] == '"')
1227 q = str[i]; 1235 q = str[i];
1228 else 1236 else
1229 q = 0; 1237 q = 0;
1230 1238
1231 for (i++; str[i]; i++) { 1239 for (i++; str[i]; i++) {
1232 if (q && str[i] == q) 1240 if (q && str[i] == q)
1233 break; 1241 break;
1234 if (!q && (str[i] == ')' || str[i] == '&' || 1242 if (!q && (str[i] == ')' || str[i] == '&' ||
1235 str[i] == '|')) 1243 str[i] == '|'))
1236 break; 1244 break;
1237 } 1245 }
1238 /* Skip quotes */ 1246 /* Skip quotes */
1239 if (q) 1247 if (q)
1240 s++; 1248 s++;
1241 len = i - s; 1249 len = i - s;
1242 if (len >= MAX_FILTER_STR_VAL) { 1250 if (len >= MAX_FILTER_STR_VAL) {
1243 parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i); 1251 parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i);
@@ -1600,7 +1608,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
1600 if (err) { 1608 if (err) {
1601 filter_disable(file); 1609 filter_disable(file);
1602 parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); 1610 parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1603 append_filter_err(pe, filter); 1611 append_filter_err(tr, pe, filter);
1604 } else 1612 } else
1605 event_set_filtered_flag(file); 1613 event_set_filtered_flag(file);
1606 1614
@@ -1712,7 +1720,8 @@ static void create_filter_finish(struct filter_parse_error *pe)
1712 * information if @set_str is %true and the caller is responsible for 1720 * information if @set_str is %true and the caller is responsible for
1713 * freeing it. 1721 * freeing it.
1714 */ 1722 */
1715static int create_filter(struct trace_event_call *call, 1723static int create_filter(struct trace_array *tr,
1724 struct trace_event_call *call,
1716 char *filter_string, bool set_str, 1725 char *filter_string, bool set_str,
1717 struct event_filter **filterp) 1726 struct event_filter **filterp)
1718{ 1727{
@@ -1729,17 +1738,18 @@ static int create_filter(struct trace_event_call *call,
1729 1738
1730 err = process_preds(call, filter_string, *filterp, pe); 1739 err = process_preds(call, filter_string, *filterp, pe);
1731 if (err && set_str) 1740 if (err && set_str)
1732 append_filter_err(pe, *filterp); 1741 append_filter_err(tr, pe, *filterp);
1733 create_filter_finish(pe); 1742 create_filter_finish(pe);
1734 1743
1735 return err; 1744 return err;
1736} 1745}
1737 1746
1738int create_event_filter(struct trace_event_call *call, 1747int create_event_filter(struct trace_array *tr,
1748 struct trace_event_call *call,
1739 char *filter_str, bool set_str, 1749 char *filter_str, bool set_str,
1740 struct event_filter **filterp) 1750 struct event_filter **filterp)
1741{ 1751{
1742 return create_filter(call, filter_str, set_str, filterp); 1752 return create_filter(tr, call, filter_str, set_str, filterp);
1743} 1753}
1744 1754
1745/** 1755/**
@@ -1766,7 +1776,7 @@ static int create_system_filter(struct trace_subsystem_dir *dir,
1766 kfree((*filterp)->filter_string); 1776 kfree((*filterp)->filter_string);
1767 (*filterp)->filter_string = NULL; 1777 (*filterp)->filter_string = NULL;
1768 } else { 1778 } else {
1769 append_filter_err(pe, *filterp); 1779 append_filter_err(tr, pe, *filterp);
1770 } 1780 }
1771 } 1781 }
1772 create_filter_finish(pe); 1782 create_filter_finish(pe);
@@ -1797,7 +1807,7 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
1797 return 0; 1807 return 0;
1798 } 1808 }
1799 1809
1800 err = create_filter(call, filter_string, true, &filter); 1810 err = create_filter(file->tr, call, filter_string, true, &filter);
1801 1811
1802 /* 1812 /*
1803 * Always swap the call filter with the new filter 1813 * Always swap the call filter with the new filter
@@ -2053,7 +2063,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
2053 if (event->filter) 2063 if (event->filter)
2054 goto out_unlock; 2064 goto out_unlock;
2055 2065
2056 err = create_filter(call, filter_str, false, &filter); 2066 err = create_filter(NULL, call, filter_str, false, &filter);
2057 if (err) 2067 if (err)
2058 goto free_filter; 2068 goto free_filter;
2059 2069
@@ -2202,8 +2212,8 @@ static __init int ftrace_test_event_filter(void)
2202 struct test_filter_data_t *d = &test_filter_data[i]; 2212 struct test_filter_data_t *d = &test_filter_data[i];
2203 int err; 2213 int err;
2204 2214
2205 err = create_filter(&event_ftrace_test_filter, d->filter, 2215 err = create_filter(NULL, &event_ftrace_test_filter,
2206 false, &filter); 2216 d->filter, false, &filter);
2207 if (err) { 2217 if (err) {
2208 printk(KERN_INFO 2218 printk(KERN_INFO
2209 "Failed to get filter for '%s', err %d\n", 2219 "Failed to get filter for '%s', err %d\n",
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index a1d20421f4b0..7fca3457c705 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -22,6 +22,57 @@
22 22
23#define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */ 23#define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
24 24
25#define ERRORS \
26 C(NONE, "No error"), \
27 C(DUPLICATE_VAR, "Variable already defined"), \
28 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
29 C(TOO_MANY_VARS, "Too many variables defined"), \
30 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
31 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
32 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
33 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
34 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
35 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
36 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
37 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
38 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
39 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
40 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
41 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
42 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
43 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
44 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
45 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
46 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
47 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
48 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
49 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
50 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
51 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
52 C(TOO_MANY_PARAMS, "Too many action params"), \
53 C(PARAM_NOT_FOUND, "Couldn't find param"), \
54 C(INVALID_PARAM, "Invalid action param"), \
55 C(ACTION_NOT_FOUND, "No action found"), \
56 C(NO_SAVE_PARAMS, "No params found for save()"), \
57 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
58 C(ACTION_MISMATCH, "Handler doesn't support action"), \
59 C(NO_CLOSING_PAREN, "No closing paren found"), \
60 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
61 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
62 C(INVALID_REF_KEY, "Using variable references as keys not supported"), \
63 C(VAR_NOT_FOUND, "Couldn't find variable"), \
64 C(FIELD_NOT_FOUND, "Couldn't find field"),
65
66#undef C
67#define C(a, b) HIST_ERR_##a
68
69enum { ERRORS };
70
71#undef C
72#define C(a, b) b
73
74static const char *err_text[] = { ERRORS };
75
25struct hist_field; 76struct hist_field;
26 77
27typedef u64 (*hist_field_fn_t) (struct hist_field *field, 78typedef u64 (*hist_field_fn_t) (struct hist_field *field,
@@ -535,62 +586,49 @@ static struct track_data *track_data_alloc(unsigned int key_len,
535 return data; 586 return data;
536} 587}
537 588
538static char last_hist_cmd[MAX_FILTER_STR_VAL]; 589static char last_cmd[MAX_FILTER_STR_VAL];
539static char hist_err_str[MAX_FILTER_STR_VAL]; 590static char last_cmd_loc[MAX_FILTER_STR_VAL];
540 591
541static void last_cmd_set(char *str) 592static int errpos(char *str)
542{ 593{
543 if (!str) 594 return err_pos(last_cmd, str);
544 return;
545
546 strncpy(last_hist_cmd, str, MAX_FILTER_STR_VAL - 1);
547} 595}
548 596
549static void hist_err(char *str, char *var) 597static void last_cmd_set(struct trace_event_file *file, char *str)
550{ 598{
551 int maxlen = MAX_FILTER_STR_VAL - 1; 599 const char *system = NULL, *name = NULL;
600 struct trace_event_call *call;
552 601
553 if (!str) 602 if (!str)
554 return; 603 return;
555 604
556 if (strlen(hist_err_str)) 605 strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
557 return;
558 606
559 if (!var) 607 if (file) {
560 var = ""; 608 call = file->event_call;
561 609
562 if (strlen(hist_err_str) + strlen(str) + strlen(var) > maxlen) 610 system = call->class->system;
563 return; 611 if (system) {
612 name = trace_event_name(call);
613 if (!name)
614 system = NULL;
615 }
616 }
564 617
565 strcat(hist_err_str, str); 618 if (system)
566 strcat(hist_err_str, var); 619 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name);
567} 620}
568 621
569static void hist_err_event(char *str, char *system, char *event, char *var) 622static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos)
570{ 623{
571 char err[MAX_FILTER_STR_VAL]; 624 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
572 625 err_type, err_pos);
573 if (system && var)
574 snprintf(err, MAX_FILTER_STR_VAL, "%s.%s.%s", system, event, var);
575 else if (system)
576 snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event);
577 else
578 strscpy(err, var, MAX_FILTER_STR_VAL);
579
580 hist_err(str, err);
581} 626}
582 627
583static void hist_err_clear(void) 628static void hist_err_clear(void)
584{ 629{
585 hist_err_str[0] = '\0'; 630 last_cmd[0] = '\0';
586} 631 last_cmd_loc[0] = '\0';
587
588static bool have_hist_err(void)
589{
590 if (strlen(hist_err_str))
591 return true;
592
593 return false;
594} 632}
595 633
596struct synth_trace_event { 634struct synth_trace_event {
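After this rework the division of labour is: last_cmd_set() records the trigger string plus a "hist:system:event" location once per command, errpos() converts a token back into a caret offset inside that string, and hist_err() hands both to tracing_log_err(). A hedged sketch of a typical failure site; check_modifier() and its validity test are illustrative:

/* Assume last_cmd_set(file, cmd) already ran when the command was accepted. */
static int check_modifier(struct trace_array *tr, char *modifier)
{
	if (strcmp(modifier, "usecs") != 0) {	/* illustrative validity test */
		hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
		return -EINVAL;
	}
	return 0;
}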
@@ -1719,7 +1757,7 @@ static struct trace_event_file *find_var_file(struct trace_array *tr,
1719 1757
1720 if (find_var_field(var_hist_data, var_name)) { 1758 if (find_var_field(var_hist_data, var_name)) {
1721 if (found) { 1759 if (found) {
1722 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name); 1760 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
1723 return NULL; 1761 return NULL;
1724 } 1762 }
1725 1763
@@ -1770,7 +1808,8 @@ find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1770 hist_field = find_file_var(file, var_name); 1808 hist_field = find_file_var(file, var_name);
1771 if (hist_field) { 1809 if (hist_field) {
1772 if (found) { 1810 if (found) {
1773 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name); 1811 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
1812 errpos(var_name));
1774 return ERR_PTR(-EINVAL); 1813 return ERR_PTR(-EINVAL);
1775 } 1814 }
1776 1815
@@ -2002,11 +2041,11 @@ static int parse_action(char *str, struct hist_trigger_attrs *attrs)
2002 attrs->n_actions++; 2041 attrs->n_actions++;
2003 ret = 0; 2042 ret = 0;
2004 } 2043 }
2005
2006 return ret; 2044 return ret;
2007} 2045}
2008 2046
2009static int parse_assignment(char *str, struct hist_trigger_attrs *attrs) 2047static int parse_assignment(struct trace_array *tr,
2048 char *str, struct hist_trigger_attrs *attrs)
2010{ 2049{
2011 int ret = 0; 2050 int ret = 0;
2012 2051
@@ -2062,7 +2101,7 @@ static int parse_assignment(char *str, struct hist_trigger_attrs *attrs)
2062 char *assignment; 2101 char *assignment;
2063 2102
2064 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) { 2103 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
2065 hist_err("Too many variables defined: ", str); 2104 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
2066 ret = -EINVAL; 2105 ret = -EINVAL;
2067 goto out; 2106 goto out;
2068 } 2107 }
@@ -2079,7 +2118,8 @@ static int parse_assignment(char *str, struct hist_trigger_attrs *attrs)
2079 return ret; 2118 return ret;
2080} 2119}
2081 2120
2082static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str) 2121static struct hist_trigger_attrs *
2122parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
2083{ 2123{
2084 struct hist_trigger_attrs *attrs; 2124 struct hist_trigger_attrs *attrs;
2085 int ret = 0; 2125 int ret = 0;
@@ -2092,7 +2132,7 @@ static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
2092 char *str = strsep(&trigger_str, ":"); 2132 char *str = strsep(&trigger_str, ":");
2093 2133
2094 if (strchr(str, '=')) { 2134 if (strchr(str, '=')) {
2095 ret = parse_assignment(str, attrs); 2135 ret = parse_assignment(tr, str, attrs);
2096 if (ret) 2136 if (ret)
2097 goto free; 2137 goto free;
2098 } else if (strcmp(str, "pause") == 0) 2138 } else if (strcmp(str, "pause") == 0)
@@ -2648,6 +2688,7 @@ static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2648 char *var_name) 2688 char *var_name)
2649{ 2689{
2650 struct hist_field *var_field = NULL, *ref_field = NULL; 2690 struct hist_field *var_field = NULL, *ref_field = NULL;
2691 struct trace_array *tr = hist_data->event_file->tr;
2651 2692
2652 if (!is_var_ref(var_name)) 2693 if (!is_var_ref(var_name))
2653 return NULL; 2694 return NULL;
@@ -2660,8 +2701,7 @@ static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2660 system, event_name); 2701 system, event_name);
2661 2702
2662 if (!ref_field) 2703 if (!ref_field)
2663 hist_err_event("Couldn't find variable: $", 2704 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));
2664 system, event_name, var_name);
2665 2705
2666 return ref_field; 2706 return ref_field;
2667} 2707}
@@ -2672,6 +2712,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2672{ 2712{
2673 struct ftrace_event_field *field = NULL; 2713 struct ftrace_event_field *field = NULL;
2674 char *field_name, *modifier, *str; 2714 char *field_name, *modifier, *str;
2715 struct trace_array *tr = file->tr;
2675 2716
2676 modifier = str = kstrdup(field_str, GFP_KERNEL); 2717 modifier = str = kstrdup(field_str, GFP_KERNEL);
2677 if (!modifier) 2718 if (!modifier)
@@ -2695,7 +2736,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2695 else if (strcmp(modifier, "usecs") == 0) 2736 else if (strcmp(modifier, "usecs") == 0)
2696 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 2737 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2697 else { 2738 else {
2698 hist_err("Invalid field modifier: ", modifier); 2739 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
2699 field = ERR_PTR(-EINVAL); 2740 field = ERR_PTR(-EINVAL);
2700 goto out; 2741 goto out;
2701 } 2742 }
@@ -2711,7 +2752,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2711 else { 2752 else {
2712 field = trace_find_event_field(file->event_call, field_name); 2753 field = trace_find_event_field(file->event_call, field_name);
2713 if (!field || !field->size) { 2754 if (!field || !field->size) {
2714 hist_err("Couldn't find field: ", field_name); 2755 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
2715 field = ERR_PTR(-EINVAL); 2756 field = ERR_PTR(-EINVAL);
2716 goto out; 2757 goto out;
2717 } 2758 }
@@ -2773,7 +2814,8 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2773 2814
2774 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var); 2815 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2775 if (!s) { 2816 if (!s) {
2776 hist_field = parse_var_ref(hist_data, ref_system, ref_event, ref_var); 2817 hist_field = parse_var_ref(hist_data, ref_system,
2818 ref_event, ref_var);
2777 if (hist_field) { 2819 if (hist_field) {
2778 if (var_name) { 2820 if (var_name) {
2779 hist_field = create_alias(hist_data, hist_field, var_name); 2821 hist_field = create_alias(hist_data, hist_field, var_name);
@@ -2822,7 +2864,7 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2822 /* we support only -(xxx) i.e. explicit parens required */ 2864 /* we support only -(xxx) i.e. explicit parens required */
2823 2865
2824 if (level > 3) { 2866 if (level > 3) {
2825 hist_err("Too many subexpressions (3 max): ", str); 2867 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2826 ret = -EINVAL; 2868 ret = -EINVAL;
2827 goto free; 2869 goto free;
2828 } 2870 }
@@ -2877,7 +2919,8 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2877 return ERR_PTR(ret); 2919 return ERR_PTR(ret);
2878} 2920}
2879 2921
2880static int check_expr_operands(struct hist_field *operand1, 2922static int check_expr_operands(struct trace_array *tr,
2923 struct hist_field *operand1,
2881 struct hist_field *operand2) 2924 struct hist_field *operand2)
2882{ 2925{
2883 unsigned long operand1_flags = operand1->flags; 2926 unsigned long operand1_flags = operand1->flags;
@@ -2905,7 +2948,7 @@ static int check_expr_operands(struct hist_field *operand1,
2905 2948
2906 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 2949 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2907 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 2950 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2908 hist_err("Timestamp units in expression don't match", NULL); 2951 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
2909 return -EINVAL; 2952 return -EINVAL;
2910 } 2953 }
2911 2954
@@ -2923,7 +2966,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2923 char *sep, *operand1_str; 2966 char *sep, *operand1_str;
2924 2967
2925 if (level > 3) { 2968 if (level > 3) {
2926 hist_err("Too many subexpressions (3 max): ", str); 2969 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
2927 return ERR_PTR(-EINVAL); 2970 return ERR_PTR(-EINVAL);
2928 } 2971 }
2929 2972
@@ -2968,7 +3011,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2968 goto free; 3011 goto free;
2969 } 3012 }
2970 3013
2971 ret = check_expr_operands(operand1, operand2); 3014 ret = check_expr_operands(file->tr, operand1, operand2);
2972 if (ret) 3015 if (ret)
2973 goto free; 3016 goto free;
2974 3017
@@ -3161,16 +3204,14 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
3161 int ret; 3204 int ret;
3162 3205
3163 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) { 3206 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
3164 hist_err_event("trace action: Too many field variables defined: ", 3207 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3165 subsys_name, event_name, field_name);
3166 return ERR_PTR(-EINVAL); 3208 return ERR_PTR(-EINVAL);
3167 } 3209 }
3168 3210
3169 file = event_file(tr, subsys_name, event_name); 3211 file = event_file(tr, subsys_name, event_name);
3170 3212
3171 if (IS_ERR(file)) { 3213 if (IS_ERR(file)) {
3172 hist_err_event("trace action: Event file not found: ", 3214 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
3173 subsys_name, event_name, field_name);
3174 ret = PTR_ERR(file); 3215 ret = PTR_ERR(file);
3175 return ERR_PTR(ret); 3216 return ERR_PTR(ret);
3176 } 3217 }
@@ -3183,8 +3224,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
3183 */ 3224 */
3184 hist_data = find_compatible_hist(target_hist_data, file); 3225 hist_data = find_compatible_hist(target_hist_data, file);
3185 if (!hist_data) { 3226 if (!hist_data) {
3186 hist_err_event("trace action: Matching event histogram not found: ", 3227 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
3187 subsys_name, event_name, field_name);
3188 return ERR_PTR(-EINVAL); 3228 return ERR_PTR(-EINVAL);
3189 } 3229 }
3190 3230
@@ -3245,8 +3285,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
3245 kfree(cmd); 3285 kfree(cmd);
3246 kfree(var_hist->cmd); 3286 kfree(var_hist->cmd);
3247 kfree(var_hist); 3287 kfree(var_hist);
3248 hist_err_event("trace action: Couldn't create histogram for field: ", 3288 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
3249 subsys_name, event_name, field_name);
3250 return ERR_PTR(ret); 3289 return ERR_PTR(ret);
3251 } 3290 }
3252 3291
@@ -3258,8 +3297,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
3258 if (IS_ERR_OR_NULL(event_var)) { 3297 if (IS_ERR_OR_NULL(event_var)) {
3259 kfree(var_hist->cmd); 3298 kfree(var_hist->cmd);
3260 kfree(var_hist); 3299 kfree(var_hist);
3261 hist_err_event("trace action: Couldn't find synthetic variable: ", 3300 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
3262 subsys_name, event_name, field_name);
3263 return ERR_PTR(-EINVAL); 3301 return ERR_PTR(-EINVAL);
3264 } 3302 }
3265 3303
@@ -3392,25 +3430,26 @@ static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3392{ 3430{
3393 struct hist_field *val = NULL, *var = NULL; 3431 struct hist_field *val = NULL, *var = NULL;
3394 unsigned long flags = HIST_FIELD_FL_VAR; 3432 unsigned long flags = HIST_FIELD_FL_VAR;
3433 struct trace_array *tr = file->tr;
3395 struct field_var *field_var; 3434 struct field_var *field_var;
3396 int ret = 0; 3435 int ret = 0;
3397 3436
3398 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) { 3437 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3399 hist_err("Too many field variables defined: ", field_name); 3438 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
3400 ret = -EINVAL; 3439 ret = -EINVAL;
3401 goto err; 3440 goto err;
3402 } 3441 }
3403 3442
3404 val = parse_atom(hist_data, file, field_name, &flags, NULL); 3443 val = parse_atom(hist_data, file, field_name, &flags, NULL);
3405 if (IS_ERR(val)) { 3444 if (IS_ERR(val)) {
3406 hist_err("Couldn't parse field variable: ", field_name); 3445 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
3407 ret = PTR_ERR(val); 3446 ret = PTR_ERR(val);
3408 goto err; 3447 goto err;
3409 } 3448 }
3410 3449
3411 var = create_var(hist_data, file, field_name, val->size, val->type); 3450 var = create_var(hist_data, file, field_name, val->size, val->type);
3412 if (IS_ERR(var)) { 3451 if (IS_ERR(var)) {
3413 hist_err("Couldn't create or find variable: ", field_name); 3452 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
3414 kfree(val); 3453 kfree(val);
3415 ret = PTR_ERR(var); 3454 ret = PTR_ERR(var);
3416 goto err; 3455 goto err;
@@ -3737,19 +3776,20 @@ static int track_data_create(struct hist_trigger_data *hist_data,
3737{ 3776{
3738 struct hist_field *var_field, *ref_field, *track_var = NULL; 3777 struct hist_field *var_field, *ref_field, *track_var = NULL;
3739 struct trace_event_file *file = hist_data->event_file; 3778 struct trace_event_file *file = hist_data->event_file;
3779 struct trace_array *tr = file->tr;
3740 char *track_data_var_str; 3780 char *track_data_var_str;
3741 int ret = 0; 3781 int ret = 0;
3742 3782
3743 track_data_var_str = data->track_data.var_str; 3783 track_data_var_str = data->track_data.var_str;
3744 if (track_data_var_str[0] != '$') { 3784 if (track_data_var_str[0] != '$') {
3745 hist_err("For onmax(x) or onchange(x), x must be a variable: ", track_data_var_str); 3785 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
3746 return -EINVAL; 3786 return -EINVAL;
3747 } 3787 }
3748 track_data_var_str++; 3788 track_data_var_str++;
3749 3789
3750 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str); 3790 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
3751 if (!var_field) { 3791 if (!var_field) {
3752 hist_err("Couldn't find onmax or onchange variable: ", track_data_var_str); 3792 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
3753 return -EINVAL; 3793 return -EINVAL;
3754 } 3794 }
3755 3795
@@ -3762,7 +3802,7 @@ static int track_data_create(struct hist_trigger_data *hist_data,
3762 if (data->handler == HANDLER_ONMAX) 3802 if (data->handler == HANDLER_ONMAX)
3763 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64"); 3803 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
3764 if (IS_ERR(track_var)) { 3804 if (IS_ERR(track_var)) {
3765 hist_err("Couldn't create onmax variable: ", "__max"); 3805 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3766 ret = PTR_ERR(track_var); 3806 ret = PTR_ERR(track_var);
3767 goto out; 3807 goto out;
3768 } 3808 }
@@ -3770,7 +3810,7 @@ static int track_data_create(struct hist_trigger_data *hist_data,
3770 if (data->handler == HANDLER_ONCHANGE) 3810 if (data->handler == HANDLER_ONCHANGE)
3771 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64"); 3811 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
3772 if (IS_ERR(track_var)) { 3812 if (IS_ERR(track_var)) {
3773 hist_err("Couldn't create onchange variable: ", "__change"); 3813 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
3774 ret = PTR_ERR(track_var); 3814 ret = PTR_ERR(track_var);
3775 goto out; 3815 goto out;
3776 } 3816 }
@@ -3781,7 +3821,8 @@ static int track_data_create(struct hist_trigger_data *hist_data,
3781 return ret; 3821 return ret;
3782} 3822}
3783 3823
3784static int parse_action_params(char *params, struct action_data *data) 3824static int parse_action_params(struct trace_array *tr, char *params,
3825 struct action_data *data)
3785{ 3826{
3786 char *param, *saved_param; 3827 char *param, *saved_param;
3787 bool first_param = true; 3828 bool first_param = true;
@@ -3789,20 +3830,20 @@ static int parse_action_params(char *params, struct action_data *data)
3789 3830
3790 while (params) { 3831 while (params) {
3791 if (data->n_params >= SYNTH_FIELDS_MAX) { 3832 if (data->n_params >= SYNTH_FIELDS_MAX) {
3792 hist_err("Too many action params", ""); 3833 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
3793 goto out; 3834 goto out;
3794 } 3835 }
3795 3836
3796 param = strsep(&params, ","); 3837 param = strsep(&params, ",");
3797 if (!param) { 3838 if (!param) {
3798 hist_err("No action param found", ""); 3839 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
3799 ret = -EINVAL; 3840 ret = -EINVAL;
3800 goto out; 3841 goto out;
3801 } 3842 }
3802 3843
3803 param = strstrip(param); 3844 param = strstrip(param);
3804 if (strlen(param) < 2) { 3845 if (strlen(param) < 2) {
3805 hist_err("Invalid action param: ", param); 3846 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
3806 ret = -EINVAL; 3847 ret = -EINVAL;
3807 goto out; 3848 goto out;
3808 } 3849 }
@@ -3826,7 +3867,7 @@ static int parse_action_params(char *params, struct action_data *data)
3826 return ret; 3867 return ret;
3827} 3868}
3828 3869
3829static int action_parse(char *str, struct action_data *data, 3870static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
3830 enum handler_id handler) 3871 enum handler_id handler)
3831{ 3872{
3832 char *action_name; 3873 char *action_name;
@@ -3834,14 +3875,14 @@ static int action_parse(char *str, struct action_data *data,
3834 3875
3835 strsep(&str, "."); 3876 strsep(&str, ".");
3836 if (!str) { 3877 if (!str) {
3837 hist_err("action parsing: No action found", ""); 3878 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3838 ret = -EINVAL; 3879 ret = -EINVAL;
3839 goto out; 3880 goto out;
3840 } 3881 }
3841 3882
3842 action_name = strsep(&str, "("); 3883 action_name = strsep(&str, "(");
3843 if (!action_name || !str) { 3884 if (!action_name || !str) {
3844 hist_err("action parsing: No action found", ""); 3885 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
3845 ret = -EINVAL; 3886 ret = -EINVAL;
3846 goto out; 3887 goto out;
3847 } 3888 }
@@ -3850,12 +3891,12 @@ static int action_parse(char *str, struct action_data *data,
3850 char *params = strsep(&str, ")"); 3891 char *params = strsep(&str, ")");
3851 3892
3852 if (!params) { 3893 if (!params) {
3853 hist_err("action parsing: No params found for %s", "save"); 3894 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
3854 ret = -EINVAL; 3895 ret = -EINVAL;
3855 goto out; 3896 goto out;
3856 } 3897 }
3857 3898
3858 ret = parse_action_params(params, data); 3899 ret = parse_action_params(tr, params, data);
3859 if (ret) 3900 if (ret)
3860 goto out; 3901 goto out;
3861 3902
@@ -3864,7 +3905,7 @@ static int action_parse(char *str, struct action_data *data,
3864 else if (handler == HANDLER_ONCHANGE) 3905 else if (handler == HANDLER_ONCHANGE)
3865 data->track_data.check_val = check_track_val_changed; 3906 data->track_data.check_val = check_track_val_changed;
3866 else { 3907 else {
3867 hist_err("action parsing: Handler doesn't support action: ", action_name); 3908 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3868 ret = -EINVAL; 3909 ret = -EINVAL;
3869 goto out; 3910 goto out;
3870 } 3911 }
@@ -3876,7 +3917,7 @@ static int action_parse(char *str, struct action_data *data,
3876 char *params = strsep(&str, ")"); 3917 char *params = strsep(&str, ")");
3877 3918
3878 if (!str) { 3919 if (!str) {
3879 hist_err("action parsing: No closing paren found: %s", params); 3920 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
3880 ret = -EINVAL; 3921 ret = -EINVAL;
3881 goto out; 3922 goto out;
3882 } 3923 }
@@ -3886,7 +3927,7 @@ static int action_parse(char *str, struct action_data *data,
3886 else if (handler == HANDLER_ONCHANGE) 3927 else if (handler == HANDLER_ONCHANGE)
3887 data->track_data.check_val = check_track_val_changed; 3928 data->track_data.check_val = check_track_val_changed;
3888 else { 3929 else {
3889 hist_err("action parsing: Handler doesn't support action: ", action_name); 3930 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
3890 ret = -EINVAL; 3931 ret = -EINVAL;
3891 goto out; 3932 goto out;
3892 } 3933 }
@@ -3901,7 +3942,7 @@ static int action_parse(char *str, struct action_data *data,
3901 data->use_trace_keyword = true; 3942 data->use_trace_keyword = true;
3902 3943
3903 if (params) { 3944 if (params) {
3904 ret = parse_action_params(params, data); 3945 ret = parse_action_params(tr, params, data);
3905 if (ret) 3946 if (ret)
3906 goto out; 3947 goto out;
3907 } 3948 }
@@ -3954,7 +3995,7 @@ static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
3954 goto free; 3995 goto free;
3955 } 3996 }
3956 3997
3957 ret = action_parse(str, data, handler); 3998 ret = action_parse(hist_data->event_file->tr, str, data, handler);
3958 if (ret) 3999 if (ret)
3959 goto free; 4000 goto free;
3960 out: 4001 out:
@@ -4024,6 +4065,7 @@ trace_action_find_var(struct hist_trigger_data *hist_data,
4024 struct action_data *data, 4065 struct action_data *data,
4025 char *system, char *event, char *var) 4066 char *system, char *event, char *var)
4026{ 4067{
4068 struct trace_array *tr = hist_data->event_file->tr;
4027 struct hist_field *hist_field; 4069 struct hist_field *hist_field;
4028 4070
4029 var++; /* skip '$' */ 4071 var++; /* skip '$' */
@@ -4039,7 +4081,7 @@ trace_action_find_var(struct hist_trigger_data *hist_data,
4039 } 4081 }
4040 4082
4041 if (!hist_field) 4083 if (!hist_field)
4042 hist_err_event("trace action: Couldn't find param: $", system, event, var); 4084 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));
4043 4085
4044 return hist_field; 4086 return hist_field;
4045} 4087}
@@ -4097,6 +4139,7 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
4097static int trace_action_create(struct hist_trigger_data *hist_data, 4139static int trace_action_create(struct hist_trigger_data *hist_data,
4098 struct action_data *data) 4140 struct action_data *data)
4099{ 4141{
4142 struct trace_array *tr = hist_data->event_file->tr;
4100 char *event_name, *param, *system = NULL; 4143 char *event_name, *param, *system = NULL;
4101 struct hist_field *hist_field, *var_ref; 4144 struct hist_field *hist_field, *var_ref;
4102 unsigned int i, var_ref_idx; 4145 unsigned int i, var_ref_idx;
@@ -4114,7 +4157,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
4114 4157
4115 event = find_synth_event(synth_event_name); 4158 event = find_synth_event(synth_event_name);
4116 if (!event) { 4159 if (!event) {
4117 hist_err("trace action: Couldn't find synthetic event: ", synth_event_name); 4160 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
4118 return -EINVAL; 4161 return -EINVAL;
4119 } 4162 }
4120 4163
@@ -4175,15 +4218,14 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
4175 continue; 4218 continue;
4176 } 4219 }
4177 4220
4178 hist_err_event("trace action: Param type doesn't match synthetic event field type: ", 4221 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
4179 system, event_name, param);
4180 kfree(p); 4222 kfree(p);
4181 ret = -EINVAL; 4223 ret = -EINVAL;
4182 goto err; 4224 goto err;
4183 } 4225 }
4184 4226
4185 if (field_pos != event->n_fields) { 4227 if (field_pos != event->n_fields) {
4186 hist_err("trace action: Param count doesn't match synthetic event field count: ", event->name); 4228 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
4187 ret = -EINVAL; 4229 ret = -EINVAL;
4188 goto err; 4230 goto err;
4189 } 4231 }
@@ -4202,6 +4244,7 @@ static int action_create(struct hist_trigger_data *hist_data,
4202 struct action_data *data) 4244 struct action_data *data)
4203{ 4245{
4204 struct trace_event_file *file = hist_data->event_file; 4246 struct trace_event_file *file = hist_data->event_file;
4247 struct trace_array *tr = file->tr;
4205 struct track_data *track_data; 4248 struct track_data *track_data;
4206 struct field_var *field_var; 4249 struct field_var *field_var;
4207 unsigned int i; 4250 unsigned int i;
@@ -4229,7 +4272,7 @@ static int action_create(struct hist_trigger_data *hist_data,
4229 if (data->action == ACTION_SAVE) { 4272 if (data->action == ACTION_SAVE) {
4230 if (hist_data->n_save_vars) { 4273 if (hist_data->n_save_vars) {
4231 ret = -EEXIST; 4274 ret = -EEXIST;
4232 hist_err("save action: Can't have more than one save() action per hist", ""); 4275 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
4233 goto out; 4276 goto out;
4234 } 4277 }
4235 4278
@@ -4242,7 +4285,8 @@ static int action_create(struct hist_trigger_data *hist_data,
4242 4285
4243 field_var = create_target_field_var(hist_data, NULL, NULL, param); 4286 field_var = create_target_field_var(hist_data, NULL, NULL, param);
4244 if (IS_ERR(field_var)) { 4287 if (IS_ERR(field_var)) {
4245 hist_err("save action: Couldn't create field variable: ", param); 4288 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
4289 errpos(param));
4246 ret = PTR_ERR(field_var); 4290 ret = PTR_ERR(field_var);
4247 kfree(param); 4291 kfree(param);
4248 goto out; 4292 goto out;
@@ -4276,19 +4320,18 @@ static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4276 4320
4277 match_event = strsep(&str, ")"); 4321 match_event = strsep(&str, ")");
4278 if (!match_event || !str) { 4322 if (!match_event || !str) {
4279 hist_err("onmatch: Missing closing paren: ", match_event); 4323 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
4280 goto free; 4324 goto free;
4281 } 4325 }
4282 4326
4283 match_event_system = strsep(&match_event, "."); 4327 match_event_system = strsep(&match_event, ".");
4284 if (!match_event) { 4328 if (!match_event) {
4285 hist_err("onmatch: Missing subsystem for match event: ", match_event_system); 4329 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
4286 goto free; 4330 goto free;
4287 } 4331 }
4288 4332
4289 if (IS_ERR(event_file(tr, match_event_system, match_event))) { 4333 if (IS_ERR(event_file(tr, match_event_system, match_event))) {
4290 hist_err_event("onmatch: Invalid subsystem or event name: ", 4334 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
4291 match_event_system, match_event, NULL);
4292 goto free; 4335 goto free;
4293 } 4336 }
4294 4337
@@ -4304,7 +4347,7 @@ static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
4304 goto free; 4347 goto free;
4305 } 4348 }
4306 4349
4307 ret = action_parse(str, data, HANDLER_ONMATCH); 4350 ret = action_parse(tr, str, data, HANDLER_ONMATCH);
4308 if (ret) 4351 if (ret)
4309 goto free; 4352 goto free;
4310 out: 4353 out:
@@ -4373,13 +4416,14 @@ static int create_var_field(struct hist_trigger_data *hist_data,
4373 struct trace_event_file *file, 4416 struct trace_event_file *file,
4374 char *var_name, char *expr_str) 4417 char *var_name, char *expr_str)
4375{ 4418{
4419 struct trace_array *tr = hist_data->event_file->tr;
4376 unsigned long flags = 0; 4420 unsigned long flags = 0;
4377 4421
4378 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 4422 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
4379 return -EINVAL; 4423 return -EINVAL;
4380 4424
4381 if (find_var(hist_data, file, var_name) && !hist_data->remove) { 4425 if (find_var(hist_data, file, var_name) && !hist_data->remove) {
4382 hist_err("Variable already defined: ", var_name); 4426 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
4383 return -EINVAL; 4427 return -EINVAL;
4384 } 4428 }
4385 4429
@@ -4436,8 +4480,8 @@ static int create_key_field(struct hist_trigger_data *hist_data,
4436 struct trace_event_file *file, 4480 struct trace_event_file *file,
4437 char *field_str) 4481 char *field_str)
4438{ 4482{
4483 struct trace_array *tr = hist_data->event_file->tr;
4439 struct hist_field *hist_field = NULL; 4484 struct hist_field *hist_field = NULL;
4440
4441 unsigned long flags = 0; 4485 unsigned long flags = 0;
4442 unsigned int key_size; 4486 unsigned int key_size;
4443 int ret = 0; 4487 int ret = 0;
@@ -4460,7 +4504,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
4460 } 4504 }
4461 4505
4462 if (hist_field->flags & HIST_FIELD_FL_VAR_REF) { 4506 if (hist_field->flags & HIST_FIELD_FL_VAR_REF) {
4463 hist_err("Using variable references as keys not supported: ", field_str); 4507 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
4464 destroy_hist_field(hist_field, 0); 4508 destroy_hist_field(hist_field, 0);
4465 ret = -EINVAL; 4509 ret = -EINVAL;
4466 goto out; 4510 goto out;
@@ -4561,6 +4605,7 @@ static void free_var_defs(struct hist_trigger_data *hist_data)
4561 4605
4562static int parse_var_defs(struct hist_trigger_data *hist_data) 4606static int parse_var_defs(struct hist_trigger_data *hist_data)
4563{ 4607{
4608 struct trace_array *tr = hist_data->event_file->tr;
4564 char *s, *str, *var_name, *field_str; 4609 char *s, *str, *var_name, *field_str;
4565 unsigned int i, j, n_vars = 0; 4610 unsigned int i, j, n_vars = 0;
4566 int ret = 0; 4611 int ret = 0;
@@ -4574,13 +4619,14 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
4574 4619
4575 var_name = strsep(&field_str, "="); 4620 var_name = strsep(&field_str, "=");
4576 if (!var_name || !field_str) { 4621 if (!var_name || !field_str) {
4577 hist_err("Malformed assignment: ", var_name); 4622 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
4623 errpos(var_name));
4578 ret = -EINVAL; 4624 ret = -EINVAL;
4579 goto free; 4625 goto free;
4580 } 4626 }
4581 4627
4582 if (n_vars == TRACING_MAP_VARS_MAX) { 4628 if (n_vars == TRACING_MAP_VARS_MAX) {
4583 hist_err("Too many variables defined: ", var_name); 4629 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
4584 ret = -EINVAL; 4630 ret = -EINVAL;
4585 goto free; 4631 goto free;
4586 } 4632 }
@@ -5431,11 +5477,6 @@ static int hist_show(struct seq_file *m, void *v)
5431 hist_trigger_show(m, data, n++); 5477 hist_trigger_show(m, data, n++);
5432 } 5478 }
5433 5479
5434 if (have_hist_err()) {
5435 seq_printf(m, "\nERROR: %s\n", hist_err_str);
5436 seq_printf(m, " Last command: %s\n", last_hist_cmd);
5437 }
5438
5439 out_unlock: 5480 out_unlock:
5440 mutex_unlock(&event_mutex); 5481 mutex_unlock(&event_mutex);
5441 5482
@@ -5800,6 +5841,7 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5800{ 5841{
5801 struct hist_trigger_data *hist_data = data->private_data; 5842 struct hist_trigger_data *hist_data = data->private_data;
5802 struct event_trigger_data *test, *named_data = NULL; 5843 struct event_trigger_data *test, *named_data = NULL;
5844 struct trace_array *tr = file->tr;
5803 int ret = 0; 5845 int ret = 0;
5804 5846
5805 if (hist_data->attrs->name) { 5847 if (hist_data->attrs->name) {
@@ -5807,7 +5849,7 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5807 if (named_data) { 5849 if (named_data) {
5808 if (!hist_trigger_match(data, named_data, named_data, 5850 if (!hist_trigger_match(data, named_data, named_data,
5809 true)) { 5851 true)) {
5810 hist_err("Named hist trigger doesn't match existing named trigger (includes variables): ", hist_data->attrs->name); 5852 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
5811 ret = -EINVAL; 5853 ret = -EINVAL;
5812 goto out; 5854 goto out;
5813 } 5855 }
@@ -5828,7 +5870,7 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5828 else if (hist_data->attrs->clear) 5870 else if (hist_data->attrs->clear)
5829 hist_clear(test); 5871 hist_clear(test);
5830 else { 5872 else {
5831 hist_err("Hist trigger already exists", NULL); 5873 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
5832 ret = -EEXIST; 5874 ret = -EEXIST;
5833 } 5875 }
5834 goto out; 5876 goto out;
@@ -5836,7 +5878,7 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5836 } 5878 }
5837 new: 5879 new:
5838 if (hist_data->attrs->cont || hist_data->attrs->clear) { 5880 if (hist_data->attrs->cont || hist_data->attrs->clear) {
5839 hist_err("Can't clear or continue a nonexistent hist trigger", NULL); 5881 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
5840 ret = -ENOENT; 5882 ret = -ENOENT;
5841 goto out; 5883 goto out;
5842 } 5884 }
@@ -5861,7 +5903,7 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5861 5903
5862 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 5904 ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
5863 if (ret) { 5905 if (ret) {
5864 hist_err("Couldn't set trace_clock: ", clock); 5906 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
5865 goto out; 5907 goto out;
5866 } 5908 }
5867 5909
@@ -6037,8 +6079,8 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
6037 lockdep_assert_held(&event_mutex); 6079 lockdep_assert_held(&event_mutex);
6038 6080
6039 if (glob && strlen(glob)) { 6081 if (glob && strlen(glob)) {
6040 last_cmd_set(param);
6041 hist_err_clear(); 6082 hist_err_clear();
6083 last_cmd_set(file, param);
6042 } 6084 }
6043 6085
6044 if (!param) 6086 if (!param)
@@ -6079,7 +6121,7 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
6079 trigger = strstrip(trigger); 6121 trigger = strstrip(trigger);
6080 } 6122 }
6081 6123
6082 attrs = parse_hist_trigger_attrs(trigger); 6124 attrs = parse_hist_trigger_attrs(file->tr, trigger);
6083 if (IS_ERR(attrs)) 6125 if (IS_ERR(attrs))
6084 return PTR_ERR(attrs); 6126 return PTR_ERR(attrs);
6085 6127
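
Note on the trace_events_hist.c changes above: hist trigger error reporting moves from the old global hist_err()/hist_err_event() string buffer (previously dumped at the end of the hist file) to per-instance tracing error-log records. Callers now pass the trace_array, a HIST_ERR_* identifier, and errpos() to mark where the offending token starts in the last command. A condensed sketch of the new calling convention, restating the create_var_field() check shown above (not a verbatim excerpt):

	static int check_var_name(struct hist_trigger_data *hist_data,
				  struct trace_event_file *file, char *var_name)
	{
		struct trace_array *tr = hist_data->event_file->tr;

		if (find_var(hist_data, file, var_name) && !hist_data->remove) {
			/* text is looked up from the HIST_ERR_* table; errpos() records the column */
			hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
			return -EINVAL;
		}
		return 0;
	}
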
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index cd12ecb66eb9..2a2912cb4533 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -731,7 +731,8 @@ int set_trigger_filter(char *filter_str,
731 goto out; 731 goto out;
732 732
733 /* The filter is for the 'trigger' event, not the triggered event */ 733 /* The filter is for the 'trigger' event, not the triggered event */
734 ret = create_event_filter(file->event_call, filter_str, false, &filter); 734 ret = create_event_filter(file->tr, file->event_call,
735 filter_str, false, &filter);
735 /* 736 /*
736 * If create_event_filter() fails, filter still needs to be freed. 737 * If create_event_filter() fails, filter still needs to be freed.
737 * Which the calling code will do with data->filter. 738 * Which the calling code will do with data->filter.
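
The set_trigger_filter() hunk above only threads the trace_array into create_event_filter() so filter parse errors can be attributed to the right tracing instance; the surrounding logic is unchanged. Side by side, the call site before and after (condensed from the hunk):

	/* before */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);

	/* after: the owning instance comes first */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, false, &filter);
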
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 810d78a8d14c..6c1ae6b752d1 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -17,29 +17,25 @@
17#include "trace.h" 17#include "trace.h"
18#include "trace_output.h" 18#include "trace_output.h"
19 19
20static void ftrace_dump_buf(int skip_lines, long cpu_file) 20static struct trace_iterator iter;
21static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
22
23static void ftrace_dump_buf(int skip_entries, long cpu_file)
21{ 24{
22 /* use static because iter can be a bit big for the stack */
23 static struct trace_iterator iter;
24 static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
25 struct trace_array *tr; 25 struct trace_array *tr;
26 unsigned int old_userobj; 26 unsigned int old_userobj;
27 int cnt = 0, cpu; 27 int cnt = 0, cpu;
28 28
29 trace_init_global_iter(&iter);
30 iter.buffer_iter = buffer_iter;
31 tr = iter.tr; 29 tr = iter.tr;
32 30
33 for_each_tracing_cpu(cpu) {
34 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
35 }
36
37 old_userobj = tr->trace_flags; 31 old_userobj = tr->trace_flags;
38 32
39 /* don't look at user memory in panic mode */ 33 /* don't look at user memory in panic mode */
40 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 34 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
41 35
42 kdb_printf("Dumping ftrace buffer:\n"); 36 kdb_printf("Dumping ftrace buffer:\n");
37 if (skip_entries)
38 kdb_printf("(skipping %d entries)\n", skip_entries);
43 39
44 /* reset all but tr, trace, and overruns */ 40 /* reset all but tr, trace, and overruns */
45 memset(&iter.seq, 0, 41 memset(&iter.seq, 0,
@@ -70,11 +66,11 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
70 kdb_printf("---------------------------------\n"); 66 kdb_printf("---------------------------------\n");
71 cnt++; 67 cnt++;
72 68
73 if (!skip_lines) { 69 if (!skip_entries) {
74 print_trace_line(&iter); 70 print_trace_line(&iter);
75 trace_printk_seq(&iter.seq); 71 trace_printk_seq(&iter.seq);
76 } else { 72 } else {
77 skip_lines--; 73 skip_entries--;
78 } 74 }
79 75
80 if (KDB_FLAG(CMD_INTERRUPT)) 76 if (KDB_FLAG(CMD_INTERRUPT))
@@ -90,10 +86,6 @@ out:
90 tr->trace_flags = old_userobj; 86 tr->trace_flags = old_userobj;
91 87
92 for_each_tracing_cpu(cpu) { 88 for_each_tracing_cpu(cpu) {
93 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
94 }
95
96 for_each_tracing_cpu(cpu) {
97 if (iter.buffer_iter[cpu]) { 89 if (iter.buffer_iter[cpu]) {
98 ring_buffer_read_finish(iter.buffer_iter[cpu]); 90 ring_buffer_read_finish(iter.buffer_iter[cpu]);
99 iter.buffer_iter[cpu] = NULL; 91 iter.buffer_iter[cpu] = NULL;
@@ -106,17 +98,19 @@ out:
106 */ 98 */
107static int kdb_ftdump(int argc, const char **argv) 99static int kdb_ftdump(int argc, const char **argv)
108{ 100{
109 int skip_lines = 0; 101 int skip_entries = 0;
110 long cpu_file; 102 long cpu_file;
111 char *cp; 103 char *cp;
104 int cnt;
105 int cpu;
112 106
113 if (argc > 2) 107 if (argc > 2)
114 return KDB_ARGCOUNT; 108 return KDB_ARGCOUNT;
115 109
116 if (argc) { 110 if (argc) {
117 skip_lines = simple_strtol(argv[1], &cp, 0); 111 skip_entries = simple_strtol(argv[1], &cp, 0);
118 if (*cp) 112 if (*cp)
119 skip_lines = 0; 113 skip_entries = 0;
120 } 114 }
121 115
122 if (argc == 2) { 116 if (argc == 2) {
@@ -129,7 +123,29 @@ static int kdb_ftdump(int argc, const char **argv)
129 } 123 }
130 124
131 kdb_trap_printk++; 125 kdb_trap_printk++;
132 ftrace_dump_buf(skip_lines, cpu_file); 126
127 trace_init_global_iter(&iter);
128 iter.buffer_iter = buffer_iter;
129
130 for_each_tracing_cpu(cpu) {
131 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
132 }
133
134 /* A negative skip_entries means skip all but the last entries */
135 if (skip_entries < 0) {
136 if (cpu_file == RING_BUFFER_ALL_CPUS)
137 cnt = trace_total_entries(NULL);
138 else
139 cnt = trace_total_entries_cpu(NULL, cpu_file);
140 skip_entries = max(cnt + skip_entries, 0);
141 }
142
143 ftrace_dump_buf(skip_entries, cpu_file);
144
145 for_each_tracing_cpu(cpu) {
146 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
147 }
148
133 kdb_trap_printk--; 149 kdb_trap_printk--;
134 150
135 return 0; 151 return 0;
@@ -137,8 +153,9 @@ static int kdb_ftdump(int argc, const char **argv)
137 153
138static __init int kdb_ftrace_register(void) 154static __init int kdb_ftrace_register(void)
139{ 155{
140 kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", 156 kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
141 "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); 157 "Dump ftrace log; -skip dumps last #entries", 0,
158 KDB_ENABLE_ALWAYS_SAFE);
142 return 0; 159 return 0;
143} 160}
144 161
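
With the trace_kdb.c changes above, ftdump treats a negative count as "skip everything except the last N entries": the total entry count comes from trace_total_entries() or trace_total_entries_cpu() and the result is clamped at zero. A small worked sketch of that arithmetic, using hypothetical numbers:

	/* say the buffer holds 10000 entries and the user runs: ftdump -25 */
	int skip_entries = -25;
	int cnt = trace_total_entries(NULL);		/* -> 10000 */

	if (skip_entries < 0)
		skip_entries = max(cnt + skip_entries, 0);	/* -> 9975 */

	/* ftrace_dump_buf() then skips 9975 entries and prints only the last 25 */
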
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5d5129b05df7..7d736248a070 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -441,13 +441,8 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
441 else 441 else
442 ret = register_kprobe(&tk->rp.kp); 442 ret = register_kprobe(&tk->rp.kp);
443 443
444 if (ret == 0) { 444 if (ret == 0)
445 tk->tp.flags |= TP_FLAG_REGISTERED; 445 tk->tp.flags |= TP_FLAG_REGISTERED;
446 } else if (ret == -EILSEQ) {
447 pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
448 tk->rp.kp.addr);
449 ret = -EINVAL;
450 }
451 return ret; 446 return ret;
452} 447}
453 448
@@ -591,7 +586,7 @@ static int trace_kprobe_create(int argc, const char *argv[])
591 * Type of args: 586 * Type of args:
592 * FETCHARG:TYPE : use TYPE instead of unsigned long. 587 * FETCHARG:TYPE : use TYPE instead of unsigned long.
593 */ 588 */
594 struct trace_kprobe *tk; 589 struct trace_kprobe *tk = NULL;
595 int i, len, ret = 0; 590 int i, len, ret = 0;
596 bool is_return = false; 591 bool is_return = false;
597 char *symbol = NULL, *tmp = NULL; 592 char *symbol = NULL, *tmp = NULL;
@@ -615,44 +610,50 @@ static int trace_kprobe_create(int argc, const char *argv[])
615 if (argc < 2) 610 if (argc < 2)
616 return -ECANCELED; 611 return -ECANCELED;
617 612
613 trace_probe_log_init("trace_kprobe", argc, argv);
614
618 event = strchr(&argv[0][1], ':'); 615 event = strchr(&argv[0][1], ':');
619 if (event) 616 if (event)
620 event++; 617 event++;
621 618
622 if (isdigit(argv[0][1])) { 619 if (isdigit(argv[0][1])) {
623 if (!is_return) { 620 if (!is_return) {
624 pr_info("Maxactive is not for kprobe"); 621 trace_probe_log_err(1, MAXACT_NO_KPROBE);
625 return -EINVAL; 622 goto parse_error;
626 } 623 }
627 if (event) 624 if (event)
628 len = event - &argv[0][1] - 1; 625 len = event - &argv[0][1] - 1;
629 else 626 else
630 len = strlen(&argv[0][1]); 627 len = strlen(&argv[0][1]);
631 if (len > MAX_EVENT_NAME_LEN - 1) 628 if (len > MAX_EVENT_NAME_LEN - 1) {
632 return -E2BIG; 629 trace_probe_log_err(1, BAD_MAXACT);
630 goto parse_error;
631 }
633 memcpy(buf, &argv[0][1], len); 632 memcpy(buf, &argv[0][1], len);
634 buf[len] = '\0'; 633 buf[len] = '\0';
635 ret = kstrtouint(buf, 0, &maxactive); 634 ret = kstrtouint(buf, 0, &maxactive);
636 if (ret || !maxactive) { 635 if (ret || !maxactive) {
637 pr_info("Invalid maxactive number\n"); 636 trace_probe_log_err(1, BAD_MAXACT);
638 return ret; 637 goto parse_error;
639 } 638 }
640 /* kretprobes instances are iterated over via a list. The 639 /* kretprobes instances are iterated over via a list. The
641 * maximum should stay reasonable. 640 * maximum should stay reasonable.
642 */ 641 */
643 if (maxactive > KRETPROBE_MAXACTIVE_MAX) { 642 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
644 pr_info("Maxactive is too big (%d > %d).\n", 643 trace_probe_log_err(1, MAXACT_TOO_BIG);
645 maxactive, KRETPROBE_MAXACTIVE_MAX); 644 goto parse_error;
646 return -E2BIG;
647 } 645 }
648 } 646 }
649 647
650 /* try to parse an address. if that fails, try to read the 648 /* try to parse an address. if that fails, try to read the
651 * input as a symbol. */ 649 * input as a symbol. */
652 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) { 650 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
651 trace_probe_log_set_index(1);
653 /* Check whether uprobe event specified */ 652 /* Check whether uprobe event specified */
654 if (strchr(argv[1], '/') && strchr(argv[1], ':')) 653 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
655 return -ECANCELED; 654 ret = -ECANCELED;
655 goto error;
656 }
656 /* a symbol specified */ 657 /* a symbol specified */
657 symbol = kstrdup(argv[1], GFP_KERNEL); 658 symbol = kstrdup(argv[1], GFP_KERNEL);
658 if (!symbol) 659 if (!symbol)
@@ -660,23 +661,23 @@ static int trace_kprobe_create(int argc, const char *argv[])
660 /* TODO: support .init module functions */ 661 /* TODO: support .init module functions */
661 ret = traceprobe_split_symbol_offset(symbol, &offset); 662 ret = traceprobe_split_symbol_offset(symbol, &offset);
662 if (ret || offset < 0 || offset > UINT_MAX) { 663 if (ret || offset < 0 || offset > UINT_MAX) {
663 pr_info("Failed to parse either an address or a symbol.\n"); 664 trace_probe_log_err(0, BAD_PROBE_ADDR);
664 goto out; 665 goto parse_error;
665 } 666 }
666 if (kprobe_on_func_entry(NULL, symbol, offset)) 667 if (kprobe_on_func_entry(NULL, symbol, offset))
667 flags |= TPARG_FL_FENTRY; 668 flags |= TPARG_FL_FENTRY;
668 if (offset && is_return && !(flags & TPARG_FL_FENTRY)) { 669 if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
669 pr_info("Given offset is not valid for return probe.\n"); 670 trace_probe_log_err(0, BAD_RETPROBE);
670 ret = -EINVAL; 671 goto parse_error;
671 goto out;
672 } 672 }
673 } 673 }
674 argc -= 2; argv += 2;
675 674
675 trace_probe_log_set_index(0);
676 if (event) { 676 if (event) {
677 ret = traceprobe_parse_event_name(&event, &group, buf); 677 ret = traceprobe_parse_event_name(&event, &group, buf,
678 event - argv[0]);
678 if (ret) 679 if (ret)
679 goto out; 680 goto parse_error;
680 } else { 681 } else {
681 /* Make a new event name */ 682 /* Make a new event name */
682 if (symbol) 683 if (symbol)
@@ -691,13 +692,14 @@ static int trace_kprobe_create(int argc, const char *argv[])
691 692
692 /* setup a probe */ 693 /* setup a probe */
693 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive, 694 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
694 argc, is_return); 695 argc - 2, is_return);
695 if (IS_ERR(tk)) { 696 if (IS_ERR(tk)) {
696 ret = PTR_ERR(tk); 697 ret = PTR_ERR(tk);
697 /* This must return -ENOMEM otherwise there is a bug */ 698 /* This must return -ENOMEM, else there is a bug */
698 WARN_ON_ONCE(ret != -ENOMEM); 699 WARN_ON_ONCE(ret != -ENOMEM);
699 goto out; 700 goto out; /* We know tk is not allocated */
700 } 701 }
702 argc -= 2; argv += 2;
701 703
702 /* parse arguments */ 704 /* parse arguments */
703 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { 705 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
@@ -707,19 +709,32 @@ static int trace_kprobe_create(int argc, const char *argv[])
707 goto error; 709 goto error;
708 } 710 }
709 711
712 trace_probe_log_set_index(i + 2);
710 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags); 713 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
711 kfree(tmp); 714 kfree(tmp);
712 if (ret) 715 if (ret)
713 goto error; 716 goto error; /* This can be -ENOMEM */
714 } 717 }
715 718
716 ret = register_trace_kprobe(tk); 719 ret = register_trace_kprobe(tk);
717 if (ret) 720 if (ret) {
721 trace_probe_log_set_index(1);
722 if (ret == -EILSEQ)
723 trace_probe_log_err(0, BAD_INSN_BNDRY);
724 else if (ret == -ENOENT)
725 trace_probe_log_err(0, BAD_PROBE_ADDR);
726 else if (ret != -ENOMEM)
727 trace_probe_log_err(0, FAIL_REG_PROBE);
718 goto error; 728 goto error;
729 }
730
719out: 731out:
732 trace_probe_log_clear();
720 kfree(symbol); 733 kfree(symbol);
721 return ret; 734 return ret;
722 735
736parse_error:
737 ret = -EINVAL;
723error: 738error:
724 free_trace_kprobe(tk); 739 free_trace_kprobe(tk);
725 goto out; 740 goto out;
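
The trace_kprobe.c rework above replaces the scattered pr_info() messages with the shared trace_probe_log helpers, which keep the whole argv around so an error can be reported against the exact argument (and offset within it) that failed to parse. A minimal sketch of the flow; check_probe_point() stands in for the real parsing steps and is hypothetical:

	static int example_create(int argc, const char **argv)
	{
		int ret;

		trace_probe_log_init("trace_kprobe", argc, argv);

		trace_probe_log_set_index(1);		/* errors now point at argv[1] */
		ret = check_probe_point(argv[1]);	/* hypothetical parse step */
		if (ret)
			trace_probe_log_err(0, BAD_PROBE_ADDR);

		trace_probe_log_clear();
		return ret;
	}
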
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 8f8411e7835f..a347faced959 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -13,6 +13,11 @@
13 13
14#include "trace_probe.h" 14#include "trace_probe.h"
15 15
16#undef C
17#define C(a, b) b
18
19static const char *trace_probe_err_text[] = { ERRORS };
20
16static const char *reserved_field_names[] = { 21static const char *reserved_field_names[] = {
17 "common_type", 22 "common_type",
18 "common_flags", 23 "common_flags",
@@ -133,6 +138,60 @@ fail:
133 return NULL; 138 return NULL;
134} 139}
135 140
141static struct trace_probe_log trace_probe_log;
142
143void trace_probe_log_init(const char *subsystem, int argc, const char **argv)
144{
145 trace_probe_log.subsystem = subsystem;
146 trace_probe_log.argc = argc;
147 trace_probe_log.argv = argv;
148 trace_probe_log.index = 0;
149}
150
151void trace_probe_log_clear(void)
152{
153 memset(&trace_probe_log, 0, sizeof(trace_probe_log));
154}
155
156void trace_probe_log_set_index(int index)
157{
158 trace_probe_log.index = index;
159}
160
161void __trace_probe_log_err(int offset, int err_type)
162{
163 char *command, *p;
164 int i, len = 0, pos = 0;
165
166 if (!trace_probe_log.argv)
167 return;
168
 169	/* Recalculate the length and allocate buffer */
170 for (i = 0; i < trace_probe_log.argc; i++) {
171 if (i == trace_probe_log.index)
172 pos = len;
173 len += strlen(trace_probe_log.argv[i]) + 1;
174 }
175 command = kzalloc(len, GFP_KERNEL);
176 if (!command)
177 return;
178
179 /* And make a command string from argv array */
180 p = command;
181 for (i = 0; i < trace_probe_log.argc; i++) {
182 len = strlen(trace_probe_log.argv[i]);
183 strcpy(p, trace_probe_log.argv[i]);
184 p[len] = ' ';
185 p += len + 1;
186 }
187 *(p - 1) = '\0';
188
189 tracing_log_err(NULL, trace_probe_log.subsystem, command,
190 trace_probe_err_text, err_type, pos + offset);
191
192 kfree(command);
193}
194
136/* Split symbol and offset. */ 195/* Split symbol and offset. */
137int traceprobe_split_symbol_offset(char *symbol, long *offset) 196int traceprobe_split_symbol_offset(char *symbol, long *offset)
138{ 197{
@@ -156,7 +215,7 @@ int traceprobe_split_symbol_offset(char *symbol, long *offset)
156 215
157/* @buf must has MAX_EVENT_NAME_LEN size */ 216/* @buf must has MAX_EVENT_NAME_LEN size */
158int traceprobe_parse_event_name(const char **pevent, const char **pgroup, 217int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
159 char *buf) 218 char *buf, int offset)
160{ 219{
161 const char *slash, *event = *pevent; 220 const char *slash, *event = *pevent;
162 int len; 221 int len;
@@ -164,32 +223,33 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
164 slash = strchr(event, '/'); 223 slash = strchr(event, '/');
165 if (slash) { 224 if (slash) {
166 if (slash == event) { 225 if (slash == event) {
167 pr_info("Group name is not specified\n"); 226 trace_probe_log_err(offset, NO_GROUP_NAME);
168 return -EINVAL; 227 return -EINVAL;
169 } 228 }
170 if (slash - event + 1 > MAX_EVENT_NAME_LEN) { 229 if (slash - event + 1 > MAX_EVENT_NAME_LEN) {
171 pr_info("Group name is too long\n"); 230 trace_probe_log_err(offset, GROUP_TOO_LONG);
172 return -E2BIG; 231 return -EINVAL;
173 } 232 }
174 strlcpy(buf, event, slash - event + 1); 233 strlcpy(buf, event, slash - event + 1);
175 if (!is_good_name(buf)) { 234 if (!is_good_name(buf)) {
176 pr_info("Group name must follow the same rules as C identifiers\n"); 235 trace_probe_log_err(offset, BAD_GROUP_NAME);
177 return -EINVAL; 236 return -EINVAL;
178 } 237 }
179 *pgroup = buf; 238 *pgroup = buf;
180 *pevent = slash + 1; 239 *pevent = slash + 1;
240 offset += slash - event + 1;
181 event = *pevent; 241 event = *pevent;
182 } 242 }
183 len = strlen(event); 243 len = strlen(event);
184 if (len == 0) { 244 if (len == 0) {
185 pr_info("Event name is not specified\n"); 245 trace_probe_log_err(offset, NO_EVENT_NAME);
186 return -EINVAL; 246 return -EINVAL;
187 } else if (len > MAX_EVENT_NAME_LEN) { 247 } else if (len > MAX_EVENT_NAME_LEN) {
188 pr_info("Event name is too long\n"); 248 trace_probe_log_err(offset, EVENT_TOO_LONG);
189 return -E2BIG; 249 return -EINVAL;
190 } 250 }
191 if (!is_good_name(event)) { 251 if (!is_good_name(event)) {
192 pr_info("Event name must follow the same rules as C identifiers\n"); 252 trace_probe_log_err(offset, BAD_EVENT_NAME);
193 return -EINVAL; 253 return -EINVAL;
194 } 254 }
195 return 0; 255 return 0;
@@ -198,56 +258,67 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
198#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) 258#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
199 259
200static int parse_probe_vars(char *arg, const struct fetch_type *t, 260static int parse_probe_vars(char *arg, const struct fetch_type *t,
201 struct fetch_insn *code, unsigned int flags) 261 struct fetch_insn *code, unsigned int flags, int offs)
202{ 262{
203 unsigned long param; 263 unsigned long param;
204 int ret = 0; 264 int ret = 0;
205 int len; 265 int len;
206 266
207 if (strcmp(arg, "retval") == 0) { 267 if (strcmp(arg, "retval") == 0) {
208 if (flags & TPARG_FL_RETURN) 268 if (flags & TPARG_FL_RETURN) {
209 code->op = FETCH_OP_RETVAL; 269 code->op = FETCH_OP_RETVAL;
210 else 270 } else {
271 trace_probe_log_err(offs, RETVAL_ON_PROBE);
211 ret = -EINVAL; 272 ret = -EINVAL;
273 }
212 } else if ((len = str_has_prefix(arg, "stack"))) { 274 } else if ((len = str_has_prefix(arg, "stack"))) {
213 if (arg[len] == '\0') { 275 if (arg[len] == '\0') {
214 code->op = FETCH_OP_STACKP; 276 code->op = FETCH_OP_STACKP;
215 } else if (isdigit(arg[len])) { 277 } else if (isdigit(arg[len])) {
216 ret = kstrtoul(arg + len, 10, &param); 278 ret = kstrtoul(arg + len, 10, &param);
217 if (ret || ((flags & TPARG_FL_KERNEL) && 279 if (ret) {
218 param > PARAM_MAX_STACK)) 280 goto inval_var;
281 } else if ((flags & TPARG_FL_KERNEL) &&
282 param > PARAM_MAX_STACK) {
283 trace_probe_log_err(offs, BAD_STACK_NUM);
219 ret = -EINVAL; 284 ret = -EINVAL;
220 else { 285 } else {
221 code->op = FETCH_OP_STACK; 286 code->op = FETCH_OP_STACK;
222 code->param = (unsigned int)param; 287 code->param = (unsigned int)param;
223 } 288 }
224 } else 289 } else
225 ret = -EINVAL; 290 goto inval_var;
226 } else if (strcmp(arg, "comm") == 0) { 291 } else if (strcmp(arg, "comm") == 0) {
227 code->op = FETCH_OP_COMM; 292 code->op = FETCH_OP_COMM;
228#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 293#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
229 } else if (((flags & TPARG_FL_MASK) == 294 } else if (((flags & TPARG_FL_MASK) ==
230 (TPARG_FL_KERNEL | TPARG_FL_FENTRY)) && 295 (TPARG_FL_KERNEL | TPARG_FL_FENTRY)) &&
231 (len = str_has_prefix(arg, "arg"))) { 296 (len = str_has_prefix(arg, "arg"))) {
232 if (!isdigit(arg[len]))
233 return -EINVAL;
234 ret = kstrtoul(arg + len, 10, &param); 297 ret = kstrtoul(arg + len, 10, &param);
235 if (ret || !param || param > PARAM_MAX_STACK) 298 if (ret) {
299 goto inval_var;
300 } else if (!param || param > PARAM_MAX_STACK) {
301 trace_probe_log_err(offs, BAD_ARG_NUM);
236 return -EINVAL; 302 return -EINVAL;
303 }
237 code->op = FETCH_OP_ARG; 304 code->op = FETCH_OP_ARG;
238 code->param = (unsigned int)param - 1; 305 code->param = (unsigned int)param - 1;
239#endif 306#endif
240 } else 307 } else
241 ret = -EINVAL; 308 goto inval_var;
242 309
243 return ret; 310 return ret;
311
312inval_var:
313 trace_probe_log_err(offs, BAD_VAR);
314 return -EINVAL;
244} 315}
245 316
246/* Recursive argument parser */ 317/* Recursive argument parser */
247static int 318static int
248parse_probe_arg(char *arg, const struct fetch_type *type, 319parse_probe_arg(char *arg, const struct fetch_type *type,
249 struct fetch_insn **pcode, struct fetch_insn *end, 320 struct fetch_insn **pcode, struct fetch_insn *end,
250 unsigned int flags) 321 unsigned int flags, int offs)
251{ 322{
252 struct fetch_insn *code = *pcode; 323 struct fetch_insn *code = *pcode;
253 unsigned long param; 324 unsigned long param;
@@ -257,7 +328,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
257 328
258 switch (arg[0]) { 329 switch (arg[0]) {
259 case '$': 330 case '$':
260 ret = parse_probe_vars(arg + 1, type, code, flags); 331 ret = parse_probe_vars(arg + 1, type, code, flags, offs);
261 break; 332 break;
262 333
263 case '%': /* named register */ 334 case '%': /* named register */
@@ -266,47 +337,57 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
266 code->op = FETCH_OP_REG; 337 code->op = FETCH_OP_REG;
267 code->param = (unsigned int)ret; 338 code->param = (unsigned int)ret;
268 ret = 0; 339 ret = 0;
269 } 340 } else
341 trace_probe_log_err(offs, BAD_REG_NAME);
270 break; 342 break;
271 343
272 case '@': /* memory, file-offset or symbol */ 344 case '@': /* memory, file-offset or symbol */
273 if (isdigit(arg[1])) { 345 if (isdigit(arg[1])) {
274 ret = kstrtoul(arg + 1, 0, &param); 346 ret = kstrtoul(arg + 1, 0, &param);
275 if (ret) 347 if (ret) {
348 trace_probe_log_err(offs, BAD_MEM_ADDR);
276 break; 349 break;
350 }
277 /* load address */ 351 /* load address */
278 code->op = FETCH_OP_IMM; 352 code->op = FETCH_OP_IMM;
279 code->immediate = param; 353 code->immediate = param;
280 } else if (arg[1] == '+') { 354 } else if (arg[1] == '+') {
281 /* kprobes don't support file offsets */ 355 /* kprobes don't support file offsets */
282 if (flags & TPARG_FL_KERNEL) 356 if (flags & TPARG_FL_KERNEL) {
357 trace_probe_log_err(offs, FILE_ON_KPROBE);
283 return -EINVAL; 358 return -EINVAL;
284 359 }
285 ret = kstrtol(arg + 2, 0, &offset); 360 ret = kstrtol(arg + 2, 0, &offset);
286 if (ret) 361 if (ret) {
362 trace_probe_log_err(offs, BAD_FILE_OFFS);
287 break; 363 break;
364 }
288 365
289 code->op = FETCH_OP_FOFFS; 366 code->op = FETCH_OP_FOFFS;
290 code->immediate = (unsigned long)offset; // imm64? 367 code->immediate = (unsigned long)offset; // imm64?
291 } else { 368 } else {
292 /* uprobes don't support symbols */ 369 /* uprobes don't support symbols */
293 if (!(flags & TPARG_FL_KERNEL)) 370 if (!(flags & TPARG_FL_KERNEL)) {
371 trace_probe_log_err(offs, SYM_ON_UPROBE);
294 return -EINVAL; 372 return -EINVAL;
295 373 }
296 /* Preserve symbol for updating */ 374 /* Preserve symbol for updating */
297 code->op = FETCH_NOP_SYMBOL; 375 code->op = FETCH_NOP_SYMBOL;
298 code->data = kstrdup(arg + 1, GFP_KERNEL); 376 code->data = kstrdup(arg + 1, GFP_KERNEL);
299 if (!code->data) 377 if (!code->data)
300 return -ENOMEM; 378 return -ENOMEM;
301 if (++code == end) 379 if (++code == end) {
302 return -E2BIG; 380 trace_probe_log_err(offs, TOO_MANY_OPS);
303 381 return -EINVAL;
382 }
304 code->op = FETCH_OP_IMM; 383 code->op = FETCH_OP_IMM;
305 code->immediate = 0; 384 code->immediate = 0;
306 } 385 }
307 /* These are fetching from memory */ 386 /* These are fetching from memory */
308 if (++code == end) 387 if (++code == end) {
309 return -E2BIG; 388 trace_probe_log_err(offs, TOO_MANY_OPS);
389 return -EINVAL;
390 }
310 *pcode = code; 391 *pcode = code;
311 code->op = FETCH_OP_DEREF; 392 code->op = FETCH_OP_DEREF;
312 code->offset = offset; 393 code->offset = offset;
@@ -317,28 +398,38 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
317 /* fall through */ 398 /* fall through */
318 case '-': 399 case '-':
319 tmp = strchr(arg, '('); 400 tmp = strchr(arg, '(');
320 if (!tmp) 401 if (!tmp) {
402 trace_probe_log_err(offs, DEREF_NEED_BRACE);
321 return -EINVAL; 403 return -EINVAL;
322 404 }
323 *tmp = '\0'; 405 *tmp = '\0';
324 ret = kstrtol(arg, 0, &offset); 406 ret = kstrtol(arg, 0, &offset);
325 if (ret) 407 if (ret) {
408 trace_probe_log_err(offs, BAD_DEREF_OFFS);
326 break; 409 break;
327 410 }
411 offs += (tmp + 1 - arg) + (arg[0] != '-' ? 1 : 0);
328 arg = tmp + 1; 412 arg = tmp + 1;
329 tmp = strrchr(arg, ')'); 413 tmp = strrchr(arg, ')');
330 414 if (!tmp) {
331 if (tmp) { 415 trace_probe_log_err(offs + strlen(arg),
416 DEREF_OPEN_BRACE);
417 return -EINVAL;
418 } else {
332 const struct fetch_type *t2 = find_fetch_type(NULL); 419 const struct fetch_type *t2 = find_fetch_type(NULL);
333 420
334 *tmp = '\0'; 421 *tmp = '\0';
335 ret = parse_probe_arg(arg, t2, &code, end, flags); 422 ret = parse_probe_arg(arg, t2, &code, end, flags, offs);
336 if (ret) 423 if (ret)
337 break; 424 break;
338 if (code->op == FETCH_OP_COMM) 425 if (code->op == FETCH_OP_COMM) {
426 trace_probe_log_err(offs, COMM_CANT_DEREF);
339 return -EINVAL; 427 return -EINVAL;
340 if (++code == end) 428 }
341 return -E2BIG; 429 if (++code == end) {
430 trace_probe_log_err(offs, TOO_MANY_OPS);
431 return -EINVAL;
432 }
342 *pcode = code; 433 *pcode = code;
343 434
344 code->op = FETCH_OP_DEREF; 435 code->op = FETCH_OP_DEREF;
@@ -348,6 +439,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
348 } 439 }
349 if (!ret && code->op == FETCH_OP_NOP) { 440 if (!ret && code->op == FETCH_OP_NOP) {
350 /* Parsed, but do not find fetch method */ 441 /* Parsed, but do not find fetch method */
442 trace_probe_log_err(offs, BAD_FETCH_ARG);
351 ret = -EINVAL; 443 ret = -EINVAL;
352 } 444 }
353 return ret; 445 return ret;
@@ -379,7 +471,7 @@ static int __parse_bitfield_probe_arg(const char *bf,
379 return -EINVAL; 471 return -EINVAL;
380 code++; 472 code++;
381 if (code->op != FETCH_OP_NOP) 473 if (code->op != FETCH_OP_NOP)
382 return -E2BIG; 474 return -EINVAL;
383 *pcode = code; 475 *pcode = code;
384 476
385 code->op = FETCH_OP_MOD_BF; 477 code->op = FETCH_OP_MOD_BF;
@@ -392,44 +484,66 @@ static int __parse_bitfield_probe_arg(const char *bf,
392 484
393/* String length checking wrapper */ 485/* String length checking wrapper */
394static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, 486static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
395 struct probe_arg *parg, unsigned int flags) 487 struct probe_arg *parg, unsigned int flags, int offset)
396{ 488{
397 struct fetch_insn *code, *scode, *tmp = NULL; 489 struct fetch_insn *code, *scode, *tmp = NULL;
398 char *t, *t2; 490 char *t, *t2, *t3;
399 int ret, len; 491 int ret, len;
400 492
401 if (strlen(arg) > MAX_ARGSTR_LEN) { 493 len = strlen(arg);
402 pr_info("Argument is too long.: %s\n", arg); 494 if (len > MAX_ARGSTR_LEN) {
403 return -ENOSPC; 495 trace_probe_log_err(offset, ARG_TOO_LONG);
496 return -EINVAL;
497 } else if (len == 0) {
498 trace_probe_log_err(offset, NO_ARG_BODY);
499 return -EINVAL;
404 } 500 }
501
405 parg->comm = kstrdup(arg, GFP_KERNEL); 502 parg->comm = kstrdup(arg, GFP_KERNEL);
406 if (!parg->comm) { 503 if (!parg->comm)
407 pr_info("Failed to allocate memory for command '%s'.\n", arg);
408 return -ENOMEM; 504 return -ENOMEM;
409 } 505
410 t = strchr(arg, ':'); 506 t = strchr(arg, ':');
411 if (t) { 507 if (t) {
412 *t = '\0'; 508 *t = '\0';
413 t2 = strchr(++t, '['); 509 t2 = strchr(++t, '[');
414 if (t2) { 510 if (t2) {
415 *t2 = '\0'; 511 *t2++ = '\0';
416 parg->count = simple_strtoul(t2 + 1, &t2, 0); 512 t3 = strchr(t2, ']');
417 if (strcmp(t2, "]") || parg->count == 0) 513 if (!t3) {
514 offset += t2 + strlen(t2) - arg;
515 trace_probe_log_err(offset,
516 ARRAY_NO_CLOSE);
517 return -EINVAL;
518 } else if (t3[1] != '\0') {
519 trace_probe_log_err(offset + t3 + 1 - arg,
520 BAD_ARRAY_SUFFIX);
418 return -EINVAL; 521 return -EINVAL;
419 if (parg->count > MAX_ARRAY_LEN) 522 }
420 return -E2BIG; 523 *t3 = '\0';
524 if (kstrtouint(t2, 0, &parg->count) || !parg->count) {
525 trace_probe_log_err(offset + t2 - arg,
526 BAD_ARRAY_NUM);
527 return -EINVAL;
528 }
529 if (parg->count > MAX_ARRAY_LEN) {
530 trace_probe_log_err(offset + t2 - arg,
531 ARRAY_TOO_BIG);
532 return -EINVAL;
533 }
421 } 534 }
422 } 535 }
423 /* 536
424 * The default type of $comm should be "string", and it can't be 537 /* Since $comm can not be dereferred, we can find $comm by strcmp */
425 * dereferenced. 538 if (strcmp(arg, "$comm") == 0) {
426 */ 539 /* The type of $comm must be "string", and not an array. */
427 if (!t && strcmp(arg, "$comm") == 0) 540 if (parg->count || (t && strcmp(t, "string")))
541 return -EINVAL;
428 parg->type = find_fetch_type("string"); 542 parg->type = find_fetch_type("string");
429 else 543 } else
430 parg->type = find_fetch_type(t); 544 parg->type = find_fetch_type(t);
431 if (!parg->type) { 545 if (!parg->type) {
432 pr_info("Unsupported type: %s\n", t); 546 trace_probe_log_err(offset + (t ? (t - arg) : 0), BAD_TYPE);
433 return -EINVAL; 547 return -EINVAL;
434 } 548 }
435 parg->offset = *size; 549 parg->offset = *size;
@@ -444,13 +558,13 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
444 parg->count); 558 parg->count);
445 } 559 }
446 560
447 code = tmp = kzalloc(sizeof(*code) * FETCH_INSN_MAX, GFP_KERNEL); 561 code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL);
448 if (!code) 562 if (!code)
449 return -ENOMEM; 563 return -ENOMEM;
450 code[FETCH_INSN_MAX - 1].op = FETCH_OP_END; 564 code[FETCH_INSN_MAX - 1].op = FETCH_OP_END;
451 565
452 ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1], 566 ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1],
453 flags); 567 flags, offset);
454 if (ret) 568 if (ret)
455 goto fail; 569 goto fail;
456 570
@@ -458,7 +572,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
458 if (!strcmp(parg->type->name, "string")) { 572 if (!strcmp(parg->type->name, "string")) {
459 if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_IMM && 573 if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_IMM &&
460 code->op != FETCH_OP_COMM) { 574 code->op != FETCH_OP_COMM) {
461 pr_info("string only accepts memory or address.\n"); 575 trace_probe_log_err(offset + (t ? (t - arg) : 0),
576 BAD_STRING);
462 ret = -EINVAL; 577 ret = -EINVAL;
463 goto fail; 578 goto fail;
464 } 579 }
@@ -470,7 +585,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
470 */ 585 */
471 code++; 586 code++;
472 if (code->op != FETCH_OP_NOP) { 587 if (code->op != FETCH_OP_NOP) {
473 ret = -E2BIG; 588 trace_probe_log_err(offset, TOO_MANY_OPS);
589 ret = -EINVAL;
474 goto fail; 590 goto fail;
475 } 591 }
476 } 592 }
@@ -483,7 +599,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
483 } else { 599 } else {
484 code++; 600 code++;
485 if (code->op != FETCH_OP_NOP) { 601 if (code->op != FETCH_OP_NOP) {
486 ret = -E2BIG; 602 trace_probe_log_err(offset, TOO_MANY_OPS);
603 ret = -EINVAL;
487 goto fail; 604 goto fail;
488 } 605 }
489 code->op = FETCH_OP_ST_RAW; 606 code->op = FETCH_OP_ST_RAW;
@@ -493,20 +610,24 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
493 /* Modify operation */ 610 /* Modify operation */
494 if (t != NULL) { 611 if (t != NULL) {
495 ret = __parse_bitfield_probe_arg(t, parg->type, &code); 612 ret = __parse_bitfield_probe_arg(t, parg->type, &code);
496 if (ret) 613 if (ret) {
614 trace_probe_log_err(offset + t - arg, BAD_BITFIELD);
497 goto fail; 615 goto fail;
616 }
498 } 617 }
499 /* Loop(Array) operation */ 618 /* Loop(Array) operation */
500 if (parg->count) { 619 if (parg->count) {
501 if (scode->op != FETCH_OP_ST_MEM && 620 if (scode->op != FETCH_OP_ST_MEM &&
502 scode->op != FETCH_OP_ST_STRING) { 621 scode->op != FETCH_OP_ST_STRING) {
503 pr_info("array only accepts memory or address\n"); 622 trace_probe_log_err(offset + (t ? (t - arg) : 0),
623 BAD_STRING);
504 ret = -EINVAL; 624 ret = -EINVAL;
505 goto fail; 625 goto fail;
506 } 626 }
507 code++; 627 code++;
508 if (code->op != FETCH_OP_NOP) { 628 if (code->op != FETCH_OP_NOP) {
509 ret = -E2BIG; 629 trace_probe_log_err(offset, TOO_MANY_OPS);
630 ret = -EINVAL;
510 goto fail; 631 goto fail;
511 } 632 }
512 code->op = FETCH_OP_LP_ARRAY; 633 code->op = FETCH_OP_LP_ARRAY;
@@ -516,7 +637,7 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
516 code->op = FETCH_OP_END; 637 code->op = FETCH_OP_END;
517 638
518 /* Shrink down the code buffer */ 639 /* Shrink down the code buffer */
519 parg->code = kzalloc(sizeof(*code) * (code - tmp + 1), GFP_KERNEL); 640 parg->code = kcalloc(code - tmp + 1, sizeof(*code), GFP_KERNEL);
520 if (!parg->code) 641 if (!parg->code)
521 ret = -ENOMEM; 642 ret = -ENOMEM;
522 else 643 else
@@ -555,15 +676,19 @@ int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg,
555{ 676{
556 struct probe_arg *parg = &tp->args[i]; 677 struct probe_arg *parg = &tp->args[i];
557 char *body; 678 char *body;
558 int ret;
559 679
560 /* Increment count for freeing args in error case */ 680 /* Increment count for freeing args in error case */
561 tp->nr_args++; 681 tp->nr_args++;
562 682
563 body = strchr(arg, '='); 683 body = strchr(arg, '=');
564 if (body) { 684 if (body) {
565 if (body - arg > MAX_ARG_NAME_LEN || body == arg) 685 if (body - arg > MAX_ARG_NAME_LEN) {
686 trace_probe_log_err(0, ARG_NAME_TOO_LONG);
687 return -EINVAL;
688 } else if (body == arg) {
689 trace_probe_log_err(0, NO_ARG_NAME);
566 return -EINVAL; 690 return -EINVAL;
691 }
567 parg->name = kmemdup_nul(arg, body - arg, GFP_KERNEL); 692 parg->name = kmemdup_nul(arg, body - arg, GFP_KERNEL);
568 body++; 693 body++;
569 } else { 694 } else {
@@ -575,22 +700,16 @@ int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg,
575 return -ENOMEM; 700 return -ENOMEM;
576 701
577 if (!is_good_name(parg->name)) { 702 if (!is_good_name(parg->name)) {
578 pr_info("Invalid argument[%d] name: %s\n", 703 trace_probe_log_err(0, BAD_ARG_NAME);
579 i, parg->name);
580 return -EINVAL; 704 return -EINVAL;
581 } 705 }
582
583 if (traceprobe_conflict_field_name(parg->name, tp->args, i)) { 706 if (traceprobe_conflict_field_name(parg->name, tp->args, i)) {
584 pr_info("Argument[%d]: '%s' conflicts with another field.\n", 707 trace_probe_log_err(0, USED_ARG_NAME);
585 i, parg->name);
586 return -EINVAL; 708 return -EINVAL;
587 } 709 }
588
589 /* Parse fetch argument */ 710 /* Parse fetch argument */
590 ret = traceprobe_parse_probe_arg_body(body, &tp->size, parg, flags); 711 return traceprobe_parse_probe_arg_body(body, &tp->size, parg, flags,
591 if (ret) 712 body - arg);
592 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
593 return ret;
594} 713}
595 714
596void traceprobe_free_probe_arg(struct probe_arg *arg) 715void traceprobe_free_probe_arg(struct probe_arg *arg)
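
For reference, the position that __trace_probe_log_err() (earlier in this file's diff) hands to tracing_log_err() is the byte offset of the selected argv element within the rebuilt command string, plus the caller-supplied offset inside that element. A worked example with a hypothetical command:

	/*
	 * argv = { "p:myprobe", "do_sys_open", "$retval" }, index = 2, offset = 1
	 *
	 * The loop rebuilds "p:myprobe do_sys_open $retval" and records
	 * pos = 22 (the start of argv[2]); tracing_log_err() is then given
	 * pos + offset = 23, i.e. the character just after the '$'.
	 */
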
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 2177c206de15..f9a8c632188b 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -124,6 +124,7 @@ struct fetch_insn {
124 124
125/* fetch + deref*N + store + mod + end <= 16, this allows N=12, enough */ 125/* fetch + deref*N + store + mod + end <= 16, this allows N=12, enough */
126#define FETCH_INSN_MAX 16 126#define FETCH_INSN_MAX 16
127#define FETCH_TOKEN_COMM (-ECOMM)
127 128
128/* Fetch type information table */ 129/* Fetch type information table */
129struct fetch_type { 130struct fetch_type {
@@ -280,8 +281,8 @@ extern int traceprobe_update_arg(struct probe_arg *arg);
280extern void traceprobe_free_probe_arg(struct probe_arg *arg); 281extern void traceprobe_free_probe_arg(struct probe_arg *arg);
281 282
282extern int traceprobe_split_symbol_offset(char *symbol, long *offset); 283extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
283extern int traceprobe_parse_event_name(const char **pevent, 284int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
284 const char **pgroup, char *buf); 285 char *buf, int offset);
285 286
286extern int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return); 287extern int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return);
287 288
@@ -298,3 +299,76 @@ extern void destroy_local_trace_uprobe(struct trace_event_call *event_call);
298#endif 299#endif
299extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, 300extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
300 size_t offset, struct trace_probe *tp); 301 size_t offset, struct trace_probe *tp);
302
303#undef ERRORS
304#define ERRORS \
305 C(FILE_NOT_FOUND, "Failed to find the given file"), \
306 C(NO_REGULAR_FILE, "Not a regular file"), \
307 C(BAD_REFCNT, "Invalid reference counter offset"), \
308 C(REFCNT_OPEN_BRACE, "Reference counter brace is not closed"), \
309 C(BAD_REFCNT_SUFFIX, "Reference counter has wrong suffix"), \
310 C(BAD_UPROBE_OFFS, "Invalid uprobe offset"), \
311 C(MAXACT_NO_KPROBE, "Maxactive is not for kprobe"), \
312 C(BAD_MAXACT, "Invalid maxactive number"), \
313 C(MAXACT_TOO_BIG, "Maxactive is too big"), \
314 C(BAD_PROBE_ADDR, "Invalid probed address or symbol"), \
 315	C(BAD_RETPROBE,		"Retprobe address must be a function entry"), \
316 C(NO_GROUP_NAME, "Group name is not specified"), \
317 C(GROUP_TOO_LONG, "Group name is too long"), \
318 C(BAD_GROUP_NAME, "Group name must follow the same rules as C identifiers"), \
319 C(NO_EVENT_NAME, "Event name is not specified"), \
320 C(EVENT_TOO_LONG, "Event name is too long"), \
321 C(BAD_EVENT_NAME, "Event name must follow the same rules as C identifiers"), \
322 C(RETVAL_ON_PROBE, "$retval is not available on probe"), \
323 C(BAD_STACK_NUM, "Invalid stack number"), \
324 C(BAD_ARG_NUM, "Invalid argument number"), \
 325	C(BAD_VAR,		"Invalid $-variable specified"), \
326 C(BAD_REG_NAME, "Invalid register name"), \
327 C(BAD_MEM_ADDR, "Invalid memory address"), \
328 C(FILE_ON_KPROBE, "File offset is not available with kprobe"), \
329 C(BAD_FILE_OFFS, "Invalid file offset value"), \
330 C(SYM_ON_UPROBE, "Symbol is not available with uprobe"), \
331 C(TOO_MANY_OPS, "Dereference is too much nested"), \
332 C(DEREF_NEED_BRACE, "Dereference needs a brace"), \
333 C(BAD_DEREF_OFFS, "Invalid dereference offset"), \
334 C(DEREF_OPEN_BRACE, "Dereference brace is not closed"), \
335 C(COMM_CANT_DEREF, "$comm can not be dereferenced"), \
336 C(BAD_FETCH_ARG, "Invalid fetch argument"), \
337 C(ARRAY_NO_CLOSE, "Array is not closed"), \
338 C(BAD_ARRAY_SUFFIX, "Array has wrong suffix"), \
339 C(BAD_ARRAY_NUM, "Invalid array size"), \
340 C(ARRAY_TOO_BIG, "Array number is too big"), \
341 C(BAD_TYPE, "Unknown type is specified"), \
342 C(BAD_STRING, "String accepts only memory argument"), \
343 C(BAD_BITFIELD, "Invalid bitfield"), \
344 C(ARG_NAME_TOO_LONG, "Argument name is too long"), \
345 C(NO_ARG_NAME, "Argument name is not specified"), \
346 C(BAD_ARG_NAME, "Argument name must follow the same rules as C identifiers"), \
347 C(USED_ARG_NAME, "This argument name is already used"), \
348 C(ARG_TOO_LONG, "Argument expression is too long"), \
349 C(NO_ARG_BODY, "No argument expression"), \
350 C(BAD_INSN_BNDRY, "Probe point is not an instruction boundary"),\
351 C(FAIL_REG_PROBE, "Failed to register probe event"),
352
353#undef C
354#define C(a, b) TP_ERR_##a
355
356/* Define TP_ERR_ */
357enum { ERRORS };
358
359/* Error text is defined in trace_probe.c */
360
361struct trace_probe_log {
362 const char *subsystem;
363 const char **argv;
364 int argc;
365 int index;
366};
367
368void trace_probe_log_init(const char *subsystem, int argc, const char **argv);
369void trace_probe_log_set_index(int index);
370void trace_probe_log_clear(void);
371void __trace_probe_log_err(int offset, int err);
372
373#define trace_probe_log_err(offs, err) \
374 __trace_probe_log_err(offs, TP_ERR_##err)
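
The ERRORS list above is an X-macro: this header expands it with C(a, b) defined as TP_ERR_##a to build the error enum, while trace_probe.c (earlier in this patch) expands it again with C(a, b) defined as b to build the matching trace_probe_err_text[] table, so the numbering and the strings cannot drift apart. A stand-alone sketch of the same pattern with a shortened, illustrative list:

	#define ERRORS \
		C(FILE_NOT_FOUND, "Failed to find the given file"), \
		C(BAD_REFCNT,     "Invalid reference counter offset"),

	#undef C
	#define C(a, b) TP_ERR_##a
	enum { ERRORS };		/* TP_ERR_FILE_NOT_FOUND, TP_ERR_BAD_REFCNT */

	#undef C
	#define C(a, b) b
	static const char *err_text[] = { ERRORS };	/* the matching message strings */
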
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
index 4737bb8c07a3..c30c61f12ddd 100644
--- a/kernel/trace/trace_probe_tmpl.h
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -88,7 +88,7 @@ stage3:
88 /* 3rd stage: store value to buffer */ 88 /* 3rd stage: store value to buffer */
89 if (unlikely(!dest)) { 89 if (unlikely(!dest)) {
90 if (code->op == FETCH_OP_ST_STRING) { 90 if (code->op == FETCH_OP_ST_STRING) {
91 ret += fetch_store_strlen(val + code->offset); 91 ret = fetch_store_strlen(val + code->offset);
92 code++; 92 code++;
93 goto array; 93 goto array;
94 } else 94 } else
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 9d402e7fc949..69ee8ef12cee 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -792,7 +792,10 @@ trace_selftest_startup_function_graph(struct tracer *trace,
792 /* check the trace buffer */ 792 /* check the trace buffer */
793 ret = trace_test_buffer(&tr->trace_buffer, &count); 793 ret = trace_test_buffer(&tr->trace_buffer, &count);
794 794
795 trace->reset(tr); 795 /* Need to also simulate the tr->reset to remove this fgraph_ops */
796 tracing_stop_cmdline_record();
797 unregister_ftrace_graph(&fgraph_ops);
798
796 tracing_start(); 799 tracing_start();
797 800
798 if (!ret && !count) { 801 if (!ret && !count) {
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index be78d99ee6bc..eb7e06b54741 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -156,7 +156,10 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
156 if (unlikely(!maxlen)) 156 if (unlikely(!maxlen))
157 return -ENOMEM; 157 return -ENOMEM;
158 158
159 ret = strncpy_from_user(dst, src, maxlen); 159 if (addr == FETCH_TOKEN_COMM)
160 ret = strlcpy(dst, current->comm, maxlen);
161 else
162 ret = strncpy_from_user(dst, src, maxlen);
160 if (ret >= 0) { 163 if (ret >= 0) {
161 if (ret == maxlen) 164 if (ret == maxlen)
162 dst[ret - 1] = '\0'; 165 dst[ret - 1] = '\0';
@@ -180,7 +183,10 @@ fetch_store_strlen(unsigned long addr)
180 int len; 183 int len;
181 void __user *vaddr = (void __force __user *) addr; 184 void __user *vaddr = (void __force __user *) addr;
182 185
183 len = strnlen_user(vaddr, MAX_STRING_SIZE); 186 if (addr == FETCH_TOKEN_COMM)
187 len = strlen(current->comm) + 1;
188 else
189 len = strnlen_user(vaddr, MAX_STRING_SIZE);
184 190
185 return (len > MAX_STRING_SIZE) ? 0 : len; 191 return (len > MAX_STRING_SIZE) ? 0 : len;
186} 192}
@@ -220,6 +226,9 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
220 case FETCH_OP_IMM: 226 case FETCH_OP_IMM:
221 val = code->immediate; 227 val = code->immediate;
222 break; 228 break;
229 case FETCH_OP_COMM:
230 val = FETCH_TOKEN_COMM;
231 break;
223 case FETCH_OP_FOFFS: 232 case FETCH_OP_FOFFS:
224 val = translate_user_vaddr(code->immediate); 233 val = translate_user_vaddr(code->immediate);
225 break; 234 break;
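Taken together, the three hunks above re-enable $comm for uprobe events by routing it through a sentinel token: FETCH_OP_COMM stores FETCH_TOKEN_COMM as the fetched "address", and the string fetchers test for that token before touching user memory, using current->comm instead. A minimal stand-alone C analogue of that dispatch (the sentinel value and the names here are assumptions for illustration; the token's real definition is not part of these hunks):

	#include <stdio.h>
	#include <string.h>

	#define FETCH_TOKEN_COMM ((unsigned long)-1)	/* assumed sentinel value */

	static const char comm[] = "bash";		/* stand-in for current->comm */

	/* Dispatch on the sentinel before dereferencing, as fetch_store_strlen() does above. */
	static size_t demo_store_strlen(unsigned long addr)
	{
		if (addr == FETCH_TOKEN_COMM)
			return strlen(comm) + 1;		/* kernel-side string, no user access */
		return strlen((const char *)addr) + 1;		/* stand-in for strnlen_user() */
	}

	int main(void)
	{
		const char path[] = "/bin/true";

		printf("%zu %zu\n", demo_store_strlen(FETCH_TOKEN_COMM),
		       demo_store_strlen((unsigned long)path));
		return 0;
	}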
@@ -457,13 +466,19 @@ static int trace_uprobe_create(int argc, const char **argv)
457 return -ECANCELED; 466 return -ECANCELED;
458 } 467 }
459 468
469 trace_probe_log_init("trace_uprobe", argc, argv);
470 trace_probe_log_set_index(1); /* filename is the 2nd argument */
471
460 *arg++ = '\0'; 472 *arg++ = '\0';
461 ret = kern_path(filename, LOOKUP_FOLLOW, &path); 473 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
462 if (ret) { 474 if (ret) {
475 trace_probe_log_err(0, FILE_NOT_FOUND);
463 kfree(filename); 476 kfree(filename);
477 trace_probe_log_clear();
464 return ret; 478 return ret;
465 } 479 }
466 if (!d_is_reg(path.dentry)) { 480 if (!d_is_reg(path.dentry)) {
481 trace_probe_log_err(0, NO_REGULAR_FILE);
467 ret = -EINVAL; 482 ret = -EINVAL;
468 goto fail_address_parse; 483 goto fail_address_parse;
469 } 484 }
@@ -472,9 +487,16 @@ static int trace_uprobe_create(int argc, const char **argv)
472 rctr = strchr(arg, '('); 487 rctr = strchr(arg, '(');
473 if (rctr) { 488 if (rctr) {
474 rctr_end = strchr(rctr, ')'); 489 rctr_end = strchr(rctr, ')');
475 if (rctr > rctr_end || *(rctr_end + 1) != 0) { 490 if (!rctr_end) {
491 ret = -EINVAL;
492 rctr_end = rctr + strlen(rctr);
493 trace_probe_log_err(rctr_end - filename,
494 REFCNT_OPEN_BRACE);
495 goto fail_address_parse;
496 } else if (rctr_end[1] != '\0') {
476 ret = -EINVAL; 497 ret = -EINVAL;
477 pr_info("Invalid reference counter offset.\n"); 498 trace_probe_log_err(rctr_end + 1 - filename,
499 BAD_REFCNT_SUFFIX);
478 goto fail_address_parse; 500 goto fail_address_parse;
479 } 501 }
480 502
@@ -482,22 +504,23 @@ static int trace_uprobe_create(int argc, const char **argv)
482 *rctr_end = '\0'; 504 *rctr_end = '\0';
483 ret = kstrtoul(rctr, 0, &ref_ctr_offset); 505 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
484 if (ret) { 506 if (ret) {
485 pr_info("Invalid reference counter offset.\n"); 507 trace_probe_log_err(rctr - filename, BAD_REFCNT);
486 goto fail_address_parse; 508 goto fail_address_parse;
487 } 509 }
488 } 510 }
489 511
490 /* Parse uprobe offset. */ 512 /* Parse uprobe offset. */
491 ret = kstrtoul(arg, 0, &offset); 513 ret = kstrtoul(arg, 0, &offset);
492 if (ret) 514 if (ret) {
515 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
493 goto fail_address_parse; 516 goto fail_address_parse;
494 517 }
495 argc -= 2;
496 argv += 2;
497 518
498 /* setup a probe */ 519 /* setup a probe */
520 trace_probe_log_set_index(0);
499 if (event) { 521 if (event) {
500 ret = traceprobe_parse_event_name(&event, &group, buf); 522 ret = traceprobe_parse_event_name(&event, &group, buf,
523 event - argv[0]);
501 if (ret) 524 if (ret)
502 goto fail_address_parse; 525 goto fail_address_parse;
503 } else { 526 } else {
@@ -519,6 +542,9 @@ static int trace_uprobe_create(int argc, const char **argv)
519 kfree(tail); 542 kfree(tail);
520 } 543 }
521 544
545 argc -= 2;
546 argv += 2;
547
522 tu = alloc_trace_uprobe(group, event, argc, is_return); 548 tu = alloc_trace_uprobe(group, event, argc, is_return);
523 if (IS_ERR(tu)) { 549 if (IS_ERR(tu)) {
524 ret = PTR_ERR(tu); 550 ret = PTR_ERR(tu);
@@ -539,6 +565,7 @@ static int trace_uprobe_create(int argc, const char **argv)
539 goto error; 565 goto error;
540 } 566 }
541 567
568 trace_probe_log_set_index(i + 2);
542 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp, 569 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
543 is_return ? TPARG_FL_RETURN : 0); 570 is_return ? TPARG_FL_RETURN : 0);
544 kfree(tmp); 571 kfree(tmp);
@@ -547,20 +574,20 @@ static int trace_uprobe_create(int argc, const char **argv)
547 } 574 }
548 575
549 ret = register_trace_uprobe(tu); 576 ret = register_trace_uprobe(tu);
550 if (ret) 577 if (!ret)
551 goto error; 578 goto out;
552 return 0;
553 579
554error: 580error:
555 free_trace_uprobe(tu); 581 free_trace_uprobe(tu);
582out:
583 trace_probe_log_clear();
556 return ret; 584 return ret;
557 585
558fail_address_parse: 586fail_address_parse:
587 trace_probe_log_clear();
559 path_put(&path); 588 path_put(&path);
560 kfree(filename); 589 kfree(filename);
561 590
562 pr_info("Failed to parse address or file.\n");
563
564 return ret; 591 return ret;
565} 592}
566 593
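For readability, the reworked exit paths of trace_uprobe_create() can be summarized as a simplified skeleton (reconstructed from the hunks above, details elided): early parse failures take fail_address_parse before any probe exists, later failures free the probe via error, and both success and failure now pass through out so the per-call probe log is always cleared.

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;			/* success still clears the log */
	error:
		free_trace_uprobe(tu);		/* a probe was allocated; free it */
	out:
		trace_probe_log_clear();	/* drop the per-call parse log */
		return ret;

	fail_address_parse:			/* parse errors before the probe exists */
		trace_probe_log_clear();
		path_put(&path);
		kfree(filename);
		return ret;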
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
new file mode 100644
index 000000000000..021c03fd885d
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
@@ -0,0 +1,19 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3# description: ftrace - test tracing error log support
4
5fail() { #msg
6 echo $1
7 exit_fail
8}
9
10# event tracing is currently the only ftrace tracer that uses the
11# tracing error_log, hence this check
12if [ ! -f set_event ]; then
13 echo "event tracing is not supported"
14 exit_unsupported
15fi
16
17ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter'
18
19exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index 7b96e80e6b8a..779ec11f61bd 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -109,3 +109,15 @@ LOCALHOST=127.0.0.1
109yield() { 109yield() {
110 ping $LOCALHOST -c 1 || sleep .001 || usleep 1 || sleep 1 110 ping $LOCALHOST -c 1 || sleep .001 || usleep 1 || sleep 1
111} 111}
112
113ftrace_errlog_check() { # err-prefix command-with-error-pos-by-^ command-file
114 pos=$(echo -n "${2%^*}" | wc -c) # error position
115 command=$(echo "$2" | tr -d ^)
116 echo "Test command: $command"
117 echo > error_log
118 (! echo "$command" > "$3" ) 2> /dev/null
119 grep "$1: error:" -A 3 error_log
120 N=$(tail -n 1 error_log | wc -c)
121 # " Command: " and "^\n" => 13
122 test $(expr 13 + $pos) -eq $N
123}
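As a worked example of the length check, take the first case from the uprobe test added below, check_error 'p ^/non_exist_file:100': the text before the caret is "p ", so pos=2. Per the comment above, the fixed "Command: " prefix plus the "^" and trailing newline account for 13 bytes, so the last error_log line is expected to be 13 + 2 = 15 bytes, i.e. the caret sits exactly under the offending byte of the echoed command.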
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
new file mode 100644
index 000000000000..29faaec942c6
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
@@ -0,0 +1,85 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3# description: Kprobe event parser error log check
4
5[ -f kprobe_events ] || exit_unsupported # this is configurable
6
7[ -f error_log ] || exit_unsupported
8
9check_error() { # command-with-error-pos-by-^
10 ftrace_errlog_check 'trace_kprobe' "$1" 'kprobe_events'
11}
12
13if grep -q 'r\[maxactive\]' README; then
14check_error 'p^100 vfs_read' # MAXACT_NO_KPROBE
15check_error 'r^1a111 vfs_read' # BAD_MAXACT
16check_error 'r^100000 vfs_read' # MAXACT_TOO_BIG
17fi
18
19check_error 'p ^non_exist_func' # BAD_PROBE_ADDR (enoent)
20check_error 'p ^hoge-fuga' # BAD_PROBE_ADDR (bad syntax)
21check_error 'p ^hoge+1000-1000' # BAD_PROBE_ADDR (bad syntax)
22check_error 'r ^vfs_read+10' # BAD_RETPROBE
23check_error 'p:^/bar vfs_read' # NO_GROUP_NAME
24check_error 'p:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG
25
26check_error 'p:^foo.1/bar vfs_read' # BAD_GROUP_NAME
27check_error 'p:foo/^ vfs_read' # NO_EVENT_NAME
28check_error 'p:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG
29check_error 'p:foo/^bar.1 vfs_read' # BAD_EVENT_NAME
30
31check_error 'p vfs_read ^$retval' # RETVAL_ON_PROBE
32check_error 'p vfs_read ^$stack10000' # BAD_STACK_NUM
33
34if grep -q '$arg<N>' README; then
35check_error 'p vfs_read ^$arg10000' # BAD_ARG_NUM
36fi
37
38check_error 'p vfs_read ^$none_var' # BAD_VAR
39
40check_error 'p vfs_read ^%none_reg' # BAD_REG_NAME
41check_error 'p vfs_read ^@12345678abcde' # BAD_MEM_ADDR
42check_error 'p vfs_read ^@+10' # FILE_ON_KPROBE
43
44check_error 'p vfs_read ^+0@0)' # DEREF_NEED_BRACE
45check_error 'p vfs_read ^+0ab1(@0)' # BAD_DEREF_OFFS
46check_error 'p vfs_read +0(+0(@0^)' # DEREF_OPEN_BRACE
47
48if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then
49check_error 'p vfs_read +0(^$comm)' # COMM_CANT_DEREF
50fi
51
52check_error 'p vfs_read ^&1' # BAD_FETCH_ARG
53
54
55# We've introduced this limitation with array support
56if grep -q ' <type>\\\[<array-size>\\\]' README; then
57check_error 'p vfs_read +0(^+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(@0))))))))))))))' # TOO_MANY_OPS?
58check_error 'p vfs_read +0(@11):u8[10^' # ARRAY_NO_CLOSE
59check_error 'p vfs_read +0(@11):u8[10]^a' # BAD_ARRAY_SUFFIX
60check_error 'p vfs_read +0(@11):u8[^10a]' # BAD_ARRAY_NUM
61check_error 'p vfs_read +0(@11):u8[^256]' # ARRAY_TOO_BIG
62fi
63
64check_error 'p vfs_read @11:^unknown_type' # BAD_TYPE
65check_error 'p vfs_read $stack0:^string' # BAD_STRING
66check_error 'p vfs_read @11:^b10@a/16' # BAD_BITFIELD
67
68check_error 'p vfs_read ^arg123456789012345678901234567890=@11'	# ARG_NAME_TOO_LONG
69check_error 'p vfs_read ^=@11' # NO_ARG_NAME
70check_error 'p vfs_read ^var.1=@11' # BAD_ARG_NAME
71check_error 'p vfs_read var1=@11 ^var1=@12' # USED_ARG_NAME
72check_error 'p vfs_read ^+1234567(+1234567(+1234567(+1234567(+1234567(+1234567(@1234))))))' # ARG_TOO_LONG
73check_error 'p vfs_read arg1=^' # NO_ARG_BODY
74
75# instruction boundary check is valid on x86 (at this moment)
76case $(uname -m) in
77 x86_64|i[3456]86)
78 echo 'p vfs_read' > kprobe_events
79 if grep -q FTRACE ../kprobes/list ; then
80 check_error 'p ^vfs_read+3' # BAD_INSN_BNDRY (only if function-tracer is enabled)
81 fi
82 ;;
83esac
84
85exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc
new file mode 100644
index 000000000000..14229d5778a0
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc
@@ -0,0 +1,23 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3# description: Uprobe event parser error log check
4
5[ -f uprobe_events ] || exit_unsupported # this is configurable
6
7[ -f error_log ] || exit_unsupported
8
9check_error() { # command-with-error-pos-by-^
10 ftrace_errlog_check 'trace_uprobe' "$1" 'uprobe_events'
11}
12
13check_error 'p ^/non_exist_file:100' # FILE_NOT_FOUND
14check_error 'p ^/sys:100' # NO_REGULAR_FILE
15check_error 'p /bin/sh:^10a' # BAD_UPROBE_OFFS
16check_error 'p /bin/sh:10(^1a)' # BAD_REFCNT
17check_error 'p /bin/sh:10(10^' # REFCNT_OPEN_BRACE
18check_error 'p /bin/sh:10(10)^a' # BAD_REFCNT_SUFFIX
19
20check_error 'p /bin/sh:10 ^@+ab' # BAD_FILE_OFFS
21check_error 'p /bin/sh:10 ^@symbol' # SYM_ON_UPROBE
22
23exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
deleted file mode 100644
index 9912616a8672..000000000000
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
+++ /dev/null
@@ -1,28 +0,0 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3# description: event trigger - test extended error support
4
5
6fail() { #msg
7 echo $1
8 exit_fail
9}
10
11if [ ! -f set_event ]; then
12 echo "event tracing is not supported"
13 exit_unsupported
14fi
15
16if [ ! -f synthetic_events ]; then
17 echo "synthetic event is not supported"
18 exit_unsupported
19fi
20
21echo "Test extended error support"
22echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger
23! echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger 2> /dev/null
24if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then
25 fail "Failed to generate extended error in histogram"
26fi
27
28exit 0