aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/dma-buf/dma-fence.c1
-rw-r--r--include/linux/ftrace.h113
-rw-r--r--include/linux/init.h4
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--include/linux/trace_events.h9
-rw-r--r--include/trace/events/dma_fence.h40
-rw-r--r--include/trace/events/preemptirq.h70
-rw-r--r--include/trace/events/thermal.h4
-rw-r--r--include/trace/events/vmscan.h4
-rw-r--r--include/trace/events/xen.h35
-rw-r--r--kernel/events/core.c13
-rw-r--r--kernel/kallsyms.c43
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/printk/printk_safe.c15
-rw-r--r--kernel/trace/Kconfig11
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/ftrace.c354
-rw-r--r--kernel/trace/ring_buffer.c64
-rw-r--r--kernel/trace/trace.c91
-rw-r--r--kernel/trace/trace.h9
-rw-r--r--kernel/trace/trace_event_perf.c82
-rw-r--r--kernel/trace/trace_events.c31
-rw-r--r--kernel/trace/trace_events_hist.c128
-rw-r--r--kernel/trace/trace_irqsoff.c133
-rw-r--r--kernel/trace/trace_kprobe.c22
-rw-r--r--kernel/trace/trace_probe.c86
-rw-r--r--kernel/trace/trace_probe.h7
-rw-r--r--kernel/trace/trace_selftest.c34
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--kernel/trace/trace_uprobe.c4
-rw-r--r--kernel/trace/tracing_map.c3
-rw-r--r--kernel/trace/tracing_map.h2
32 files changed, 903 insertions, 518 deletions
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 9a302799040e..5d101c4053e0 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -27,7 +27,6 @@
27#define CREATE_TRACE_POINTS 27#define CREATE_TRACE_POINTS
28#include <trace/events/dma_fence.h> 28#include <trace/events/dma_fence.h>
29 29
30EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
31EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); 30EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
32EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); 31EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
33 32
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index e54d257983f2..2bab81951ced 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -52,6 +52,30 @@ static inline void early_trace_init(void) { }
52struct module; 52struct module;
53struct ftrace_hash; 53struct ftrace_hash;
54 54
55#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
56 defined(CONFIG_DYNAMIC_FTRACE)
57const char *
58ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
59 unsigned long *off, char **modname, char *sym);
60int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
61 char *type, char *name,
62 char *module_name, int *exported);
63#else
64static inline const char *
65ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
66 unsigned long *off, char **modname, char *sym)
67{
68 return NULL;
69}
70static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
71 char *type, char *name,
72 char *module_name, int *exported)
73{
74 return -1;
75}
76#endif
77
78
55#ifdef CONFIG_FUNCTION_TRACER 79#ifdef CONFIG_FUNCTION_TRACER
56 80
57extern int ftrace_enabled; 81extern int ftrace_enabled;
@@ -79,10 +103,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
79 * ENABLED - set/unset when ftrace_ops is registered/unregistered 103 * ENABLED - set/unset when ftrace_ops is registered/unregistered
80 * DYNAMIC - set when ftrace_ops is registered to denote dynamically 104 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
81 * allocated ftrace_ops which need special care 105 * allocated ftrace_ops which need special care
82 * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
83 * could be controlled by following calls:
84 * ftrace_function_local_enable
85 * ftrace_function_local_disable
86 * SAVE_REGS - The ftrace_ops wants regs saved at each function called 106 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
87 * and passed to the callback. If this flag is set, but the 107 * and passed to the callback. If this flag is set, but the
88 * architecture does not support passing regs 108 * architecture does not support passing regs
@@ -126,21 +146,20 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
126enum { 146enum {
127 FTRACE_OPS_FL_ENABLED = 1 << 0, 147 FTRACE_OPS_FL_ENABLED = 1 << 0,
128 FTRACE_OPS_FL_DYNAMIC = 1 << 1, 148 FTRACE_OPS_FL_DYNAMIC = 1 << 1,
129 FTRACE_OPS_FL_PER_CPU = 1 << 2, 149 FTRACE_OPS_FL_SAVE_REGS = 1 << 2,
130 FTRACE_OPS_FL_SAVE_REGS = 1 << 3, 150 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 3,
131 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4, 151 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 4,
132 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5, 152 FTRACE_OPS_FL_STUB = 1 << 5,
133 FTRACE_OPS_FL_STUB = 1 << 6, 153 FTRACE_OPS_FL_INITIALIZED = 1 << 6,
134 FTRACE_OPS_FL_INITIALIZED = 1 << 7, 154 FTRACE_OPS_FL_DELETED = 1 << 7,
135 FTRACE_OPS_FL_DELETED = 1 << 8, 155 FTRACE_OPS_FL_ADDING = 1 << 8,
136 FTRACE_OPS_FL_ADDING = 1 << 9, 156 FTRACE_OPS_FL_REMOVING = 1 << 9,
137 FTRACE_OPS_FL_REMOVING = 1 << 10, 157 FTRACE_OPS_FL_MODIFYING = 1 << 10,
138 FTRACE_OPS_FL_MODIFYING = 1 << 11, 158 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 11,
139 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, 159 FTRACE_OPS_FL_IPMODIFY = 1 << 12,
140 FTRACE_OPS_FL_IPMODIFY = 1 << 13, 160 FTRACE_OPS_FL_PID = 1 << 13,
141 FTRACE_OPS_FL_PID = 1 << 14, 161 FTRACE_OPS_FL_RCU = 1 << 14,
142 FTRACE_OPS_FL_RCU = 1 << 15, 162 FTRACE_OPS_FL_TRACE_ARRAY = 1 << 15,
143 FTRACE_OPS_FL_TRACE_ARRAY = 1 << 16,
144}; 163};
145 164
146#ifdef CONFIG_DYNAMIC_FTRACE 165#ifdef CONFIG_DYNAMIC_FTRACE
@@ -152,8 +171,10 @@ struct ftrace_ops_hash {
152}; 171};
153 172
154void ftrace_free_init_mem(void); 173void ftrace_free_init_mem(void);
174void ftrace_free_mem(struct module *mod, void *start, void *end);
155#else 175#else
156static inline void ftrace_free_init_mem(void) { } 176static inline void ftrace_free_init_mem(void) { }
177static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
157#endif 178#endif
158 179
159/* 180/*
@@ -173,7 +194,6 @@ struct ftrace_ops {
173 unsigned long flags; 194 unsigned long flags;
174 void *private; 195 void *private;
175 ftrace_func_t saved_func; 196 ftrace_func_t saved_func;
176 int __percpu *disabled;
177#ifdef CONFIG_DYNAMIC_FTRACE 197#ifdef CONFIG_DYNAMIC_FTRACE
178 struct ftrace_ops_hash local_hash; 198 struct ftrace_ops_hash local_hash;
179 struct ftrace_ops_hash *func_hash; 199 struct ftrace_ops_hash *func_hash;
@@ -205,55 +225,6 @@ int register_ftrace_function(struct ftrace_ops *ops);
205int unregister_ftrace_function(struct ftrace_ops *ops); 225int unregister_ftrace_function(struct ftrace_ops *ops);
206void clear_ftrace_function(void); 226void clear_ftrace_function(void);
207 227
208/**
209 * ftrace_function_local_enable - enable ftrace_ops on current cpu
210 *
211 * This function enables tracing on current cpu by decreasing
212 * the per cpu control variable.
213 * It must be called with preemption disabled and only on ftrace_ops
214 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
215 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
216 */
217static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
218{
219 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
220 return;
221
222 (*this_cpu_ptr(ops->disabled))--;
223}
224
225/**
226 * ftrace_function_local_disable - disable ftrace_ops on current cpu
227 *
228 * This function disables tracing on current cpu by increasing
229 * the per cpu control variable.
230 * It must be called with preemption disabled and only on ftrace_ops
231 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
232 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
233 */
234static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
235{
236 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
237 return;
238
239 (*this_cpu_ptr(ops->disabled))++;
240}
241
242/**
243 * ftrace_function_local_disabled - returns ftrace_ops disabled value
244 * on current cpu
245 *
246 * This function returns value of ftrace_ops::disabled on current cpu.
247 * It must be called with preemption disabled and only on ftrace_ops
248 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
249 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
250 */
251static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
252{
253 WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
254 return *this_cpu_ptr(ops->disabled);
255}
256
257extern void ftrace_stub(unsigned long a0, unsigned long a1, 228extern void ftrace_stub(unsigned long a0, unsigned long a1,
258 struct ftrace_ops *op, struct pt_regs *regs); 229 struct ftrace_ops *op, struct pt_regs *regs);
259 230
@@ -271,6 +242,7 @@ static inline int ftrace_nr_registered_ops(void)
271static inline void clear_ftrace_function(void) { } 242static inline void clear_ftrace_function(void) { }
272static inline void ftrace_kill(void) { } 243static inline void ftrace_kill(void) { }
273static inline void ftrace_free_init_mem(void) { } 244static inline void ftrace_free_init_mem(void) { }
245static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
274#endif /* CONFIG_FUNCTION_TRACER */ 246#endif /* CONFIG_FUNCTION_TRACER */
275 247
276#ifdef CONFIG_STACK_TRACER 248#ifdef CONFIG_STACK_TRACER
@@ -743,7 +715,8 @@ static inline unsigned long get_lock_parent_ip(void)
743 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { } 715 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
744#endif 716#endif
745 717
746#ifdef CONFIG_PREEMPT_TRACER 718#if defined(CONFIG_PREEMPT_TRACER) || \
719 (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
747 extern void trace_preempt_on(unsigned long a0, unsigned long a1); 720 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
748 extern void trace_preempt_off(unsigned long a0, unsigned long a1); 721 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
749#else 722#else
diff --git a/include/linux/init.h b/include/linux/init.h
index f38b993edacb..ea1b31101d9e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -40,7 +40,7 @@
40 40
41/* These are for everybody (although not all archs will actually 41/* These are for everybody (although not all archs will actually
42 discard it in modules) */ 42 discard it in modules) */
43#define __init __section(.init.text) __cold __inittrace __latent_entropy 43#define __init __section(.init.text) __cold __latent_entropy
44#define __initdata __section(.init.data) 44#define __initdata __section(.init.data)
45#define __initconst __section(.init.rodata) 45#define __initconst __section(.init.rodata)
46#define __exitdata __section(.exit.data) 46#define __exitdata __section(.exit.data)
@@ -69,10 +69,8 @@
69 69
70#ifdef MODULE 70#ifdef MODULE
71#define __exitused 71#define __exitused
72#define __inittrace notrace
73#else 72#else
74#define __exitused __used 73#define __exitused __used
75#define __inittrace
76#endif 74#endif
77 75
78#define __exit __section(.exit.text) __exitused __cold notrace 76#define __exit __section(.exit.text) __exitused __cold notrace
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 874b71a70058..2c9c87d8a0c1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1169,7 +1169,7 @@ extern void perf_event_init(void);
1169extern void perf_tp_event(u16 event_type, u64 count, void *record, 1169extern void perf_tp_event(u16 event_type, u64 count, void *record,
1170 int entry_size, struct pt_regs *regs, 1170 int entry_size, struct pt_regs *regs,
1171 struct hlist_head *head, int rctx, 1171 struct hlist_head *head, int rctx,
1172 struct task_struct *task, struct perf_event *event); 1172 struct task_struct *task);
1173extern void perf_bp_event(struct perf_event *event, void *data); 1173extern void perf_bp_event(struct perf_event *event, void *data);
1174 1174
1175#ifndef perf_misc_flags 1175#ifndef perf_misc_flags
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 84014ecfa67f..af44e7c2d577 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -174,6 +174,11 @@ enum trace_reg {
174 TRACE_REG_PERF_UNREGISTER, 174 TRACE_REG_PERF_UNREGISTER,
175 TRACE_REG_PERF_OPEN, 175 TRACE_REG_PERF_OPEN,
176 TRACE_REG_PERF_CLOSE, 176 TRACE_REG_PERF_CLOSE,
177 /*
178 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
179 * custom action was taken and the default action is not to be
180 * performed.
181 */
177 TRACE_REG_PERF_ADD, 182 TRACE_REG_PERF_ADD,
178 TRACE_REG_PERF_DEL, 183 TRACE_REG_PERF_DEL,
179#endif 184#endif
@@ -542,9 +547,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
542static inline void 547static inline void
543perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, 548perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
544 u64 count, struct pt_regs *regs, void *head, 549 u64 count, struct pt_regs *regs, void *head,
545 struct task_struct *task, struct perf_event *event) 550 struct task_struct *task)
546{ 551{
547 perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event); 552 perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
548} 553}
549 554
550#endif 555#endif
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index d61bfddcc621..2212adda8f77 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -9,46 +9,6 @@
9 9
10struct dma_fence; 10struct dma_fence;
11 11
12TRACE_EVENT(dma_fence_annotate_wait_on,
13
14 /* fence: the fence waiting on f1, f1: the fence to be waited on. */
15 TP_PROTO(struct dma_fence *fence, struct dma_fence *f1),
16
17 TP_ARGS(fence, f1),
18
19 TP_STRUCT__entry(
20 __string(driver, fence->ops->get_driver_name(fence))
21 __string(timeline, fence->ops->get_timeline_name(fence))
22 __field(unsigned int, context)
23 __field(unsigned int, seqno)
24
25 __string(waiting_driver, f1->ops->get_driver_name(f1))
26 __string(waiting_timeline, f1->ops->get_timeline_name(f1))
27 __field(unsigned int, waiting_context)
28 __field(unsigned int, waiting_seqno)
29 ),
30
31 TP_fast_assign(
32 __assign_str(driver, fence->ops->get_driver_name(fence))
33 __assign_str(timeline, fence->ops->get_timeline_name(fence))
34 __entry->context = fence->context;
35 __entry->seqno = fence->seqno;
36
37 __assign_str(waiting_driver, f1->ops->get_driver_name(f1))
38 __assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
39 __entry->waiting_context = f1->context;
40 __entry->waiting_seqno = f1->seqno;
41
42 ),
43
44 TP_printk("driver=%s timeline=%s context=%u seqno=%u " \
45 "waits on driver=%s timeline=%s context=%u seqno=%u",
46 __get_str(driver), __get_str(timeline), __entry->context,
47 __entry->seqno,
48 __get_str(waiting_driver), __get_str(waiting_timeline),
49 __entry->waiting_context, __entry->waiting_seqno)
50);
51
52DECLARE_EVENT_CLASS(dma_fence, 12DECLARE_EVENT_CLASS(dma_fence,
53 13
54 TP_PROTO(struct dma_fence *fence), 14 TP_PROTO(struct dma_fence *fence),
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
new file mode 100644
index 000000000000..f5024c560d8f
--- /dev/null
+++ b/include/trace/events/preemptirq.h
@@ -0,0 +1,70 @@
1#ifdef CONFIG_PREEMPTIRQ_EVENTS
2
3#undef TRACE_SYSTEM
4#define TRACE_SYSTEM preemptirq
5
6#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
7#define _TRACE_PREEMPTIRQ_H
8
9#include <linux/ktime.h>
10#include <linux/tracepoint.h>
11#include <linux/string.h>
12#include <asm/sections.h>
13
14DECLARE_EVENT_CLASS(preemptirq_template,
15
16 TP_PROTO(unsigned long ip, unsigned long parent_ip),
17
18 TP_ARGS(ip, parent_ip),
19
20 TP_STRUCT__entry(
21 __field(u32, caller_offs)
22 __field(u32, parent_offs)
23 ),
24
25 TP_fast_assign(
26 __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
27 __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
28 ),
29
30 TP_printk("caller=%pF parent=%pF",
31 (void *)((unsigned long)(_stext) + __entry->caller_offs),
32 (void *)((unsigned long)(_stext) + __entry->parent_offs))
33);
34
35#ifndef CONFIG_PROVE_LOCKING
36DEFINE_EVENT(preemptirq_template, irq_disable,
37 TP_PROTO(unsigned long ip, unsigned long parent_ip),
38 TP_ARGS(ip, parent_ip));
39
40DEFINE_EVENT(preemptirq_template, irq_enable,
41 TP_PROTO(unsigned long ip, unsigned long parent_ip),
42 TP_ARGS(ip, parent_ip));
43#endif
44
45#ifdef CONFIG_DEBUG_PREEMPT
46DEFINE_EVENT(preemptirq_template, preempt_disable,
47 TP_PROTO(unsigned long ip, unsigned long parent_ip),
48 TP_ARGS(ip, parent_ip));
49
50DEFINE_EVENT(preemptirq_template, preempt_enable,
51 TP_PROTO(unsigned long ip, unsigned long parent_ip),
52 TP_ARGS(ip, parent_ip));
53#endif
54
55#endif /* _TRACE_PREEMPTIRQ_H */
56
57#include <trace/define_trace.h>
58
59#else /* !CONFIG_PREEMPTIRQ_EVENTS */
60
61#define trace_irq_enable(...)
62#define trace_irq_disable(...)
63#define trace_preempt_enable(...)
64#define trace_preempt_disable(...)
65#define trace_irq_enable_rcuidle(...)
66#define trace_irq_disable_rcuidle(...)
67#define trace_preempt_enable_rcuidle(...)
68#define trace_preempt_disable_rcuidle(...)
69
70#endif
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 466c09d882ad..78946640fe03 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -91,6 +91,7 @@ TRACE_EVENT(thermal_zone_trip,
91 show_tzt_type(__entry->trip_type)) 91 show_tzt_type(__entry->trip_type))
92); 92);
93 93
94#ifdef CONFIG_CPU_THERMAL
94TRACE_EVENT(thermal_power_cpu_get_power, 95TRACE_EVENT(thermal_power_cpu_get_power,
95 TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load, 96 TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
96 size_t load_len, u32 dynamic_power, u32 static_power), 97 size_t load_len, u32 dynamic_power, u32 static_power),
@@ -148,7 +149,9 @@ TRACE_EVENT(thermal_power_cpu_limit,
148 __get_bitmask(cpumask), __entry->freq, __entry->cdev_state, 149 __get_bitmask(cpumask), __entry->freq, __entry->cdev_state,
149 __entry->power) 150 __entry->power)
150); 151);
152#endif /* CONFIG_CPU_THERMAL */
151 153
154#ifdef CONFIG_DEVFREQ_THERMAL
152TRACE_EVENT(thermal_power_devfreq_get_power, 155TRACE_EVENT(thermal_power_devfreq_get_power,
153 TP_PROTO(struct thermal_cooling_device *cdev, 156 TP_PROTO(struct thermal_cooling_device *cdev,
154 struct devfreq_dev_status *status, unsigned long freq, 157 struct devfreq_dev_status *status, unsigned long freq,
@@ -204,6 +207,7 @@ TRACE_EVENT(thermal_power_devfreq_limit,
204 __get_str(type), __entry->freq, __entry->cdev_state, 207 __get_str(type), __entry->freq, __entry->cdev_state,
205 __entry->power) 208 __entry->power)
206); 209);
210#endif /* CONFIG_DEVFREQ_THERMAL */
207#endif /* _TRACE_THERMAL_H */ 211#endif /* _TRACE_THERMAL_H */
208 212
209/* This part must be outside protection */ 213/* This part must be outside protection */
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index dc23cf032403..d70b53e65f43 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -134,6 +134,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_b
134 TP_ARGS(order, may_writepage, gfp_flags, classzone_idx) 134 TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
135); 135);
136 136
137#ifdef CONFIG_MEMCG
137DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin, 138DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
138 139
139 TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx), 140 TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
@@ -147,6 +148,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_
147 148
148 TP_ARGS(order, may_writepage, gfp_flags, classzone_idx) 149 TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
149); 150);
151#endif /* CONFIG_MEMCG */
150 152
151DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template, 153DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
152 154
@@ -172,6 +174,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end
172 TP_ARGS(nr_reclaimed) 174 TP_ARGS(nr_reclaimed)
173); 175);
174 176
177#ifdef CONFIG_MEMCG
175DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end, 178DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
176 179
177 TP_PROTO(unsigned long nr_reclaimed), 180 TP_PROTO(unsigned long nr_reclaimed),
@@ -185,6 +188,7 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
185 188
186 TP_ARGS(nr_reclaimed) 189 TP_ARGS(nr_reclaimed)
187); 190);
191#endif /* CONFIG_MEMCG */
188 192
189TRACE_EVENT(mm_shrink_slab_start, 193TRACE_EVENT(mm_shrink_slab_start,
190 TP_PROTO(struct shrinker *shr, struct shrink_control *sc, 194 TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index a7c8b452aab9..b8adf05c534e 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -148,7 +148,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
148 TP_ARGS(ptep, pteval)) 148 TP_ARGS(ptep, pteval))
149 149
150DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte); 150DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
151DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
152 151
153TRACE_EVENT(xen_mmu_set_pte_at, 152TRACE_EVENT(xen_mmu_set_pte_at,
154 TP_PROTO(struct mm_struct *mm, unsigned long addr, 153 TP_PROTO(struct mm_struct *mm, unsigned long addr,
@@ -170,21 +169,6 @@ TRACE_EVENT(xen_mmu_set_pte_at,
170 (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) 169 (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
171 ); 170 );
172 171
173TRACE_EVENT(xen_mmu_pte_clear,
174 TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
175 TP_ARGS(mm, addr, ptep),
176 TP_STRUCT__entry(
177 __field(struct mm_struct *, mm)
178 __field(unsigned long, addr)
179 __field(pte_t *, ptep)
180 ),
181 TP_fast_assign(__entry->mm = mm;
182 __entry->addr = addr;
183 __entry->ptep = ptep),
184 TP_printk("mm %p addr %lx ptep %p",
185 __entry->mm, __entry->addr, __entry->ptep)
186 );
187
188TRACE_DEFINE_SIZEOF(pmdval_t); 172TRACE_DEFINE_SIZEOF(pmdval_t);
189 173
190TRACE_EVENT(xen_mmu_set_pmd, 174TRACE_EVENT(xen_mmu_set_pmd,
@@ -202,6 +186,24 @@ TRACE_EVENT(xen_mmu_set_pmd,
202 (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval) 186 (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
203 ); 187 );
204 188
189#ifdef CONFIG_X86_PAE
190DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
191
192TRACE_EVENT(xen_mmu_pte_clear,
193 TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
194 TP_ARGS(mm, addr, ptep),
195 TP_STRUCT__entry(
196 __field(struct mm_struct *, mm)
197 __field(unsigned long, addr)
198 __field(pte_t *, ptep)
199 ),
200 TP_fast_assign(__entry->mm = mm;
201 __entry->addr = addr;
202 __entry->ptep = ptep),
203 TP_printk("mm %p addr %lx ptep %p",
204 __entry->mm, __entry->addr, __entry->ptep)
205 );
206
205TRACE_EVENT(xen_mmu_pmd_clear, 207TRACE_EVENT(xen_mmu_pmd_clear,
206 TP_PROTO(pmd_t *pmdp), 208 TP_PROTO(pmd_t *pmdp),
207 TP_ARGS(pmdp), 209 TP_ARGS(pmdp),
@@ -211,6 +213,7 @@ TRACE_EVENT(xen_mmu_pmd_clear,
211 TP_fast_assign(__entry->pmdp = pmdp), 213 TP_fast_assign(__entry->pmdp = pmdp),
212 TP_printk("pmdp %p", __entry->pmdp) 214 TP_printk("pmdp %p", __entry->pmdp)
213 ); 215 );
216#endif
214 217
215#if CONFIG_PGTABLE_LEVELS >= 4 218#if CONFIG_PGTABLE_LEVELS >= 4
216 219
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3939a4674e0a..9404c631bd3f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7874,15 +7874,16 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7874 } 7874 }
7875 } 7875 }
7876 perf_tp_event(call->event.type, count, raw_data, size, regs, head, 7876 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
7877 rctx, task, NULL); 7877 rctx, task);
7878} 7878}
7879EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit); 7879EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7880 7880
7881void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, 7881void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
7882 struct pt_regs *regs, struct hlist_head *head, int rctx, 7882 struct pt_regs *regs, struct hlist_head *head, int rctx,
7883 struct task_struct *task, struct perf_event *event) 7883 struct task_struct *task)
7884{ 7884{
7885 struct perf_sample_data data; 7885 struct perf_sample_data data;
7886 struct perf_event *event;
7886 7887
7887 struct perf_raw_record raw = { 7888 struct perf_raw_record raw = {
7888 .frag = { 7889 .frag = {
@@ -7896,15 +7897,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
7896 7897
7897 perf_trace_buf_update(record, event_type); 7898 perf_trace_buf_update(record, event_type);
7898 7899
7899 /* Use the given event instead of the hlist */ 7900 hlist_for_each_entry_rcu(event, head, hlist_entry) {
7900 if (event) {
7901 if (perf_tp_event_match(event, &data, regs)) 7901 if (perf_tp_event_match(event, &data, regs))
7902 perf_swevent_event(event, count, &data, regs); 7902 perf_swevent_event(event, count, &data, regs);
7903 } else {
7904 hlist_for_each_entry_rcu(event, head, hlist_entry) {
7905 if (perf_tp_event_match(event, &data, regs))
7906 perf_swevent_event(event, count, &data, regs);
7907 }
7908 } 7903 }
7909 7904
7910 /* 7905 /*
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 1e6ae66c6244..531ffa984bc2 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -24,6 +24,7 @@
24#include <linux/ctype.h> 24#include <linux/ctype.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/filter.h> 26#include <linux/filter.h>
27#include <linux/ftrace.h>
27#include <linux/compiler.h> 28#include <linux/compiler.h>
28 29
29#include <asm/sections.h> 30#include <asm/sections.h>
@@ -337,6 +338,10 @@ const char *kallsyms_lookup(unsigned long addr,
337 if (!ret) 338 if (!ret)
338 ret = bpf_address_lookup(addr, symbolsize, 339 ret = bpf_address_lookup(addr, symbolsize,
339 offset, modname, namebuf); 340 offset, modname, namebuf);
341
342 if (!ret)
343 ret = ftrace_mod_address_lookup(addr, symbolsize,
344 offset, modname, namebuf);
340 return ret; 345 return ret;
341} 346}
342 347
@@ -474,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol);
474struct kallsym_iter { 479struct kallsym_iter {
475 loff_t pos; 480 loff_t pos;
476 loff_t pos_mod_end; 481 loff_t pos_mod_end;
482 loff_t pos_ftrace_mod_end;
477 unsigned long value; 483 unsigned long value;
478 unsigned int nameoff; /* If iterating in core kernel symbols. */ 484 unsigned int nameoff; /* If iterating in core kernel symbols. */
479 char type; 485 char type;
@@ -497,11 +503,25 @@ static int get_ksymbol_mod(struct kallsym_iter *iter)
497 return 1; 503 return 1;
498} 504}
499 505
506static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
507{
508 int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
509 &iter->value, &iter->type,
510 iter->name, iter->module_name,
511 &iter->exported);
512 if (ret < 0) {
513 iter->pos_ftrace_mod_end = iter->pos;
514 return 0;
515 }
516
517 return 1;
518}
519
500static int get_ksymbol_bpf(struct kallsym_iter *iter) 520static int get_ksymbol_bpf(struct kallsym_iter *iter)
501{ 521{
502 iter->module_name[0] = '\0'; 522 iter->module_name[0] = '\0';
503 iter->exported = 0; 523 iter->exported = 0;
504 return bpf_get_kallsym(iter->pos - iter->pos_mod_end, 524 return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
505 &iter->value, &iter->type, 525 &iter->value, &iter->type,
506 iter->name) < 0 ? 0 : 1; 526 iter->name) < 0 ? 0 : 1;
507} 527}
@@ -526,20 +546,31 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
526 iter->name[0] = '\0'; 546 iter->name[0] = '\0';
527 iter->nameoff = get_symbol_offset(new_pos); 547 iter->nameoff = get_symbol_offset(new_pos);
528 iter->pos = new_pos; 548 iter->pos = new_pos;
529 if (new_pos == 0) 549 if (new_pos == 0) {
530 iter->pos_mod_end = 0; 550 iter->pos_mod_end = 0;
551 iter->pos_ftrace_mod_end = 0;
552 }
531} 553}
532 554
533static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) 555static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
534{ 556{
535 iter->pos = pos; 557 iter->pos = pos;
536 558
537 if (iter->pos_mod_end > 0 && 559 if (iter->pos_ftrace_mod_end > 0 &&
538 iter->pos_mod_end < iter->pos) 560 iter->pos_ftrace_mod_end < iter->pos)
539 return get_ksymbol_bpf(iter); 561 return get_ksymbol_bpf(iter);
540 562
541 if (!get_ksymbol_mod(iter)) 563 if (iter->pos_mod_end > 0 &&
542 return get_ksymbol_bpf(iter); 564 iter->pos_mod_end < iter->pos) {
565 if (!get_ksymbol_ftrace_mod(iter))
566 return get_ksymbol_bpf(iter);
567 return 1;
568 }
569
570 if (!get_ksymbol_mod(iter)) {
571 if (!get_ksymbol_ftrace_mod(iter))
572 return get_ksymbol_bpf(iter);
573 }
543 574
544 return 1; 575 return 1;
545} 576}
diff --git a/kernel/module.c b/kernel/module.c
index 222aba4aa960..f0411a271765 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3481,6 +3481,8 @@ static noinline int do_init_module(struct module *mod)
3481 if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC)) 3481 if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
3482 async_synchronize_full(); 3482 async_synchronize_full();
3483 3483
3484 ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
3485 mod->init_layout.size);
3484 mutex_lock(&module_mutex); 3486 mutex_lock(&module_mutex);
3485 /* Drop initial reference. */ 3487 /* Drop initial reference. */
3486 module_put(mod); 3488 module_put(mod);
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 3cdaeaef9ce1..724d9292d4b9 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -39,7 +39,7 @@
39 * There are situations when we want to make sure that all buffers 39 * There are situations when we want to make sure that all buffers
40 * were handled or when IRQs are blocked. 40 * were handled or when IRQs are blocked.
41 */ 41 */
42static int printk_safe_irq_ready; 42static int printk_safe_irq_ready __read_mostly;
43 43
44#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \ 44#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
45 sizeof(atomic_t) - \ 45 sizeof(atomic_t) - \
@@ -63,11 +63,8 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
63/* Get flushed in a more safe context. */ 63/* Get flushed in a more safe context. */
64static void queue_flush_work(struct printk_safe_seq_buf *s) 64static void queue_flush_work(struct printk_safe_seq_buf *s)
65{ 65{
66 if (printk_safe_irq_ready) { 66 if (printk_safe_irq_ready)
67 /* Make sure that IRQ work is really initialized. */
68 smp_rmb();
69 irq_work_queue(&s->work); 67 irq_work_queue(&s->work);
70 }
71} 68}
72 69
73/* 70/*
@@ -398,8 +395,12 @@ void __init printk_safe_init(void)
398#endif 395#endif
399 } 396 }
400 397
401 /* Make sure that IRQ works are initialized before enabling. */ 398 /*
402 smp_wmb(); 399 * In the highly unlikely event that a NMI were to trigger at
400 * this moment. Make sure IRQ work is set up before this
401 * variable is set.
402 */
403 barrier();
403 printk_safe_irq_ready = 1; 404 printk_safe_irq_ready = 1;
404 405
405 /* Flush pending messages that did not have scheduled IRQ works. */ 406 /* Flush pending messages that did not have scheduled IRQ works. */
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index f54b7b6b4a4b..af7dad126c13 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -160,6 +160,17 @@ config FUNCTION_GRAPH_TRACER
160 address on the current task structure into a stack of calls. 160 address on the current task structure into a stack of calls.
161 161
162 162
163config PREEMPTIRQ_EVENTS
164 bool "Enable trace events for preempt and irq disable/enable"
165 select TRACE_IRQFLAGS
166 depends on DEBUG_PREEMPT || !PROVE_LOCKING
167 default n
168 help
169 Enable tracing of disable and enable events for preemption and irqs.
170 For tracing preempt disable/enable events, DEBUG_PREEMPT must be
171 enabled. For tracing irq disable/enable events, PROVE_LOCKING must
172 be disabled.
173
163config IRQSOFF_TRACER 174config IRQSOFF_TRACER
164 bool "Interrupts-off Latency Tracer" 175 bool "Interrupts-off Latency Tracer"
165 default n 176 default n
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 19a15b2f1190..e2538c7638d4 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
35obj-$(CONFIG_TRACING_MAP) += tracing_map.o 35obj-$(CONFIG_TRACING_MAP) += tracing_map.o
36obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o 36obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
37obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o 37obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
38obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
38obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o 39obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
39obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o 40obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
40obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o 41obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8319e09e15b9..ccdf3664e4a9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -203,30 +203,6 @@ void clear_ftrace_function(void)
203 ftrace_trace_function = ftrace_stub; 203 ftrace_trace_function = ftrace_stub;
204} 204}
205 205
206static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
207{
208 int cpu;
209
210 for_each_possible_cpu(cpu)
211 *per_cpu_ptr(ops->disabled, cpu) = 1;
212}
213
214static int per_cpu_ops_alloc(struct ftrace_ops *ops)
215{
216 int __percpu *disabled;
217
218 if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
219 return -EINVAL;
220
221 disabled = alloc_percpu(int);
222 if (!disabled)
223 return -ENOMEM;
224
225 ops->disabled = disabled;
226 per_cpu_ops_disable_all(ops);
227 return 0;
228}
229
230static void ftrace_sync(struct work_struct *work) 206static void ftrace_sync(struct work_struct *work)
231{ 207{
232 /* 208 /*
@@ -262,8 +238,8 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
262 * If this is a dynamic, RCU, or per CPU ops, or we force list func, 238 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
263 * then it needs to call the list anyway. 239 * then it needs to call the list anyway.
264 */ 240 */
265 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU | 241 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
266 FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC) 242 FTRACE_FORCE_LIST_FUNC)
267 return ftrace_ops_list_func; 243 return ftrace_ops_list_func;
268 244
269 return ftrace_ops_get_func(ops); 245 return ftrace_ops_get_func(ops);
@@ -422,11 +398,6 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
422 if (!core_kernel_data((unsigned long)ops)) 398 if (!core_kernel_data((unsigned long)ops))
423 ops->flags |= FTRACE_OPS_FL_DYNAMIC; 399 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
424 400
425 if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
426 if (per_cpu_ops_alloc(ops))
427 return -ENOMEM;
428 }
429
430 add_ftrace_ops(&ftrace_ops_list, ops); 401 add_ftrace_ops(&ftrace_ops_list, ops);
431 402
432 /* Always save the function, and reset at unregistering */ 403 /* Always save the function, and reset at unregistering */
@@ -2727,11 +2698,6 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2727{ 2698{
2728} 2699}
2729 2700
2730static void per_cpu_ops_free(struct ftrace_ops *ops)
2731{
2732 free_percpu(ops->disabled);
2733}
2734
2735static void ftrace_startup_enable(int command) 2701static void ftrace_startup_enable(int command)
2736{ 2702{
2737 if (saved_ftrace_func != ftrace_trace_function) { 2703 if (saved_ftrace_func != ftrace_trace_function) {
@@ -2833,7 +2799,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2833 * not currently active, we can just free them 2799 * not currently active, we can just free them
2834 * without synchronizing all CPUs. 2800 * without synchronizing all CPUs.
2835 */ 2801 */
2836 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) 2802 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2837 goto free_ops; 2803 goto free_ops;
2838 2804
2839 return 0; 2805 return 0;
@@ -2880,7 +2846,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2880 * The same goes for freeing the per_cpu data of the per_cpu 2846 * The same goes for freeing the per_cpu data of the per_cpu
2881 * ops. 2847 * ops.
2882 */ 2848 */
2883 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) { 2849 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
2884 /* 2850 /*
2885 * We need to do a hard force of sched synchronization. 2851 * We need to do a hard force of sched synchronization.
2886 * This is because we use preempt_disable() to do RCU, but 2852 * This is because we use preempt_disable() to do RCU, but
@@ -2903,9 +2869,6 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2903 2869
2904 free_ops: 2870 free_ops:
2905 arch_ftrace_trampoline_free(ops); 2871 arch_ftrace_trampoline_free(ops);
2906
2907 if (ops->flags & FTRACE_OPS_FL_PER_CPU)
2908 per_cpu_ops_free(ops);
2909 } 2872 }
2910 2873
2911 return 0; 2874 return 0;
@@ -5672,10 +5635,29 @@ static int ftrace_process_locs(struct module *mod,
5672 return ret; 5635 return ret;
5673} 5636}
5674 5637
5638struct ftrace_mod_func {
5639 struct list_head list;
5640 char *name;
5641 unsigned long ip;
5642 unsigned int size;
5643};
5644
5645struct ftrace_mod_map {
5646 struct rcu_head rcu;
5647 struct list_head list;
5648 struct module *mod;
5649 unsigned long start_addr;
5650 unsigned long end_addr;
5651 struct list_head funcs;
5652 unsigned int num_funcs;
5653};
5654
5675#ifdef CONFIG_MODULES 5655#ifdef CONFIG_MODULES
5676 5656
5677#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 5657#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
5678 5658
5659static LIST_HEAD(ftrace_mod_maps);
5660
5679static int referenced_filters(struct dyn_ftrace *rec) 5661static int referenced_filters(struct dyn_ftrace *rec)
5680{ 5662{
5681 struct ftrace_ops *ops; 5663 struct ftrace_ops *ops;
@@ -5729,8 +5711,26 @@ static void clear_mod_from_hashes(struct ftrace_page *pg)
5729 mutex_unlock(&trace_types_lock); 5711 mutex_unlock(&trace_types_lock);
5730} 5712}
5731 5713
5714static void ftrace_free_mod_map(struct rcu_head *rcu)
5715{
5716 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
5717 struct ftrace_mod_func *mod_func;
5718 struct ftrace_mod_func *n;
5719
5720 /* All the contents of mod_map are now not visible to readers */
5721 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
5722 kfree(mod_func->name);
5723 list_del(&mod_func->list);
5724 kfree(mod_func);
5725 }
5726
5727 kfree(mod_map);
5728}
5729
5732void ftrace_release_mod(struct module *mod) 5730void ftrace_release_mod(struct module *mod)
5733{ 5731{
5732 struct ftrace_mod_map *mod_map;
5733 struct ftrace_mod_map *n;
5734 struct dyn_ftrace *rec; 5734 struct dyn_ftrace *rec;
5735 struct ftrace_page **last_pg; 5735 struct ftrace_page **last_pg;
5736 struct ftrace_page *tmp_page = NULL; 5736 struct ftrace_page *tmp_page = NULL;
@@ -5742,6 +5742,14 @@ void ftrace_release_mod(struct module *mod)
5742 if (ftrace_disabled) 5742 if (ftrace_disabled)
5743 goto out_unlock; 5743 goto out_unlock;
5744 5744
5745 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
5746 if (mod_map->mod == mod) {
5747 list_del_rcu(&mod_map->list);
5748 call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
5749 break;
5750 }
5751 }
5752
5745 /* 5753 /*
5746 * Each module has its own ftrace_pages, remove 5754 * Each module has its own ftrace_pages, remove
5747 * them from the list. 5755 * them from the list.
@@ -5749,7 +5757,8 @@ void ftrace_release_mod(struct module *mod)
5749 last_pg = &ftrace_pages_start; 5757 last_pg = &ftrace_pages_start;
5750 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 5758 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
5751 rec = &pg->records[0]; 5759 rec = &pg->records[0];
5752 if (within_module_core(rec->ip, mod)) { 5760 if (within_module_core(rec->ip, mod) ||
5761 within_module_init(rec->ip, mod)) {
5753 /* 5762 /*
5754 * As core pages are first, the first 5763 * As core pages are first, the first
5755 * page should never be a module page. 5764 * page should never be a module page.
@@ -5818,7 +5827,8 @@ void ftrace_module_enable(struct module *mod)
5818 * not part of this module, then skip this pg, 5827 * not part of this module, then skip this pg,
5819 * which the "break" will do. 5828 * which the "break" will do.
5820 */ 5829 */
5821 if (!within_module_core(rec->ip, mod)) 5830 if (!within_module_core(rec->ip, mod) &&
5831 !within_module_init(rec->ip, mod))
5822 break; 5832 break;
5823 5833
5824 cnt = 0; 5834 cnt = 0;
@@ -5863,23 +5873,245 @@ void ftrace_module_init(struct module *mod)
5863 ftrace_process_locs(mod, mod->ftrace_callsites, 5873 ftrace_process_locs(mod, mod->ftrace_callsites,
5864 mod->ftrace_callsites + mod->num_ftrace_callsites); 5874 mod->ftrace_callsites + mod->num_ftrace_callsites);
5865} 5875}
5876
5877static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
5878 struct dyn_ftrace *rec)
5879{
5880 struct ftrace_mod_func *mod_func;
5881 unsigned long symsize;
5882 unsigned long offset;
5883 char str[KSYM_SYMBOL_LEN];
5884 char *modname;
5885 const char *ret;
5886
5887 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
5888 if (!ret)
5889 return;
5890
5891 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
5892 if (!mod_func)
5893 return;
5894
5895 mod_func->name = kstrdup(str, GFP_KERNEL);
5896 if (!mod_func->name) {
5897 kfree(mod_func);
5898 return;
5899 }
5900
5901 mod_func->ip = rec->ip - offset;
5902 mod_func->size = symsize;
5903
5904 mod_map->num_funcs++;
5905
5906 list_add_rcu(&mod_func->list, &mod_map->funcs);
5907}
5908
5909static struct ftrace_mod_map *
5910allocate_ftrace_mod_map(struct module *mod,
5911 unsigned long start, unsigned long end)
5912{
5913 struct ftrace_mod_map *mod_map;
5914
5915 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
5916 if (!mod_map)
5917 return NULL;
5918
5919 mod_map->mod = mod;
5920 mod_map->start_addr = start;
5921 mod_map->end_addr = end;
5922 mod_map->num_funcs = 0;
5923
5924 INIT_LIST_HEAD_RCU(&mod_map->funcs);
5925
5926 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
5927
5928 return mod_map;
5929}
5930
5931static const char *
5932ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
5933 unsigned long addr, unsigned long *size,
5934 unsigned long *off, char *sym)
5935{
5936 struct ftrace_mod_func *found_func = NULL;
5937 struct ftrace_mod_func *mod_func;
5938
5939 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
5940 if (addr >= mod_func->ip &&
5941 addr < mod_func->ip + mod_func->size) {
5942 found_func = mod_func;
5943 break;
5944 }
5945 }
5946
5947 if (found_func) {
5948 if (size)
5949 *size = found_func->size;
5950 if (off)
5951 *off = addr - found_func->ip;
5952 if (sym)
5953 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
5954
5955 return found_func->name;
5956 }
5957
5958 return NULL;
5959}
5960
5961const char *
5962ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
5963 unsigned long *off, char **modname, char *sym)
5964{
5965 struct ftrace_mod_map *mod_map;
5966 const char *ret = NULL;
5967
5968 /* mod_map is freed via call_rcu_sched() */
5969 preempt_disable();
5970 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
5971 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
5972 if (ret) {
5973 if (modname)
5974 *modname = mod_map->mod->name;
5975 break;
5976 }
5977 }
5978 preempt_enable();
5979
5980 return ret;
5981}
5982
5983int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
5984 char *type, char *name,
5985 char *module_name, int *exported)
5986{
5987 struct ftrace_mod_map *mod_map;
5988 struct ftrace_mod_func *mod_func;
5989
5990 preempt_disable();
5991 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
5992
5993 if (symnum >= mod_map->num_funcs) {
5994 symnum -= mod_map->num_funcs;
5995 continue;
5996 }
5997
5998 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
5999 if (symnum > 1) {
6000 symnum--;
6001 continue;
6002 }
6003
6004 *value = mod_func->ip;
6005 *type = 'T';
6006 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
6007 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
6008 *exported = 1;
6009 preempt_enable();
6010 return 0;
6011 }
6012 WARN_ON(1);
6013 break;
6014 }
6015 preempt_enable();
6016 return -ERANGE;
6017}
6018
6019#else
6020static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6021 struct dyn_ftrace *rec) { }
6022static inline struct ftrace_mod_map *
6023allocate_ftrace_mod_map(struct module *mod,
6024 unsigned long start, unsigned long end)
6025{
6026 return NULL;
6027}
5866#endif /* CONFIG_MODULES */ 6028#endif /* CONFIG_MODULES */
5867 6029
5868void __init ftrace_free_init_mem(void) 6030struct ftrace_init_func {
6031 struct list_head list;
6032 unsigned long ip;
6033};
6034
6035/* Clear any init ips from hashes */
6036static void
6037clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
6038{
6039 struct ftrace_func_entry *entry;
6040
6041 if (ftrace_hash_empty(hash))
6042 return;
6043
6044 entry = __ftrace_lookup_ip(hash, func->ip);
6045
6046 /*
6047 * Do not allow this rec to match again.
6048 * Yeah, it may waste some memory, but will be removed
6049 * if/when the hash is modified again.
6050 */
6051 if (entry)
6052 entry->ip = 0;
6053}
6054
6055static void
6056clear_func_from_hashes(struct ftrace_init_func *func)
6057{
6058 struct trace_array *tr;
6059
6060 mutex_lock(&trace_types_lock);
6061 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6062 if (!tr->ops || !tr->ops->func_hash)
6063 continue;
6064 mutex_lock(&tr->ops->func_hash->regex_lock);
6065 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6066 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6067 mutex_unlock(&tr->ops->func_hash->regex_lock);
6068 }
6069 mutex_unlock(&trace_types_lock);
6070}
6071
6072static void add_to_clear_hash_list(struct list_head *clear_list,
6073 struct dyn_ftrace *rec)
6074{
6075 struct ftrace_init_func *func;
6076
6077 func = kmalloc(sizeof(*func), GFP_KERNEL);
6078 if (!func) {
6079 WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
6080 return;
6081 }
6082
6083 func->ip = rec->ip;
6084 list_add(&func->list, clear_list);
6085}
6086
6087void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
5869{ 6088{
5870 unsigned long start = (unsigned long)(&__init_begin); 6089 unsigned long start = (unsigned long)(start_ptr);
5871 unsigned long end = (unsigned long)(&__init_end); 6090 unsigned long end = (unsigned long)(end_ptr);
5872 struct ftrace_page **last_pg = &ftrace_pages_start; 6091 struct ftrace_page **last_pg = &ftrace_pages_start;
5873 struct ftrace_page *pg; 6092 struct ftrace_page *pg;
5874 struct dyn_ftrace *rec; 6093 struct dyn_ftrace *rec;
5875 struct dyn_ftrace key; 6094 struct dyn_ftrace key;
6095 struct ftrace_mod_map *mod_map = NULL;
6096 struct ftrace_init_func *func, *func_next;
6097 struct list_head clear_hash;
5876 int order; 6098 int order;
5877 6099
6100 INIT_LIST_HEAD(&clear_hash);
6101
5878 key.ip = start; 6102 key.ip = start;
5879 key.flags = end; /* overload flags, as it is unsigned long */ 6103 key.flags = end; /* overload flags, as it is unsigned long */
5880 6104
5881 mutex_lock(&ftrace_lock); 6105 mutex_lock(&ftrace_lock);
5882 6106
6107 /*
6108 * If we are freeing module init memory, then check if
6109 * any tracer is active. If so, we need to save a mapping of
6110 * the module functions being freed with the address.
6111 */
6112 if (mod && ftrace_ops_list != &ftrace_list_end)
6113 mod_map = allocate_ftrace_mod_map(mod, start, end);
6114
5883 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { 6115 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
5884 if (end < pg->records[0].ip || 6116 if (end < pg->records[0].ip ||
5885 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 6117 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
@@ -5890,6 +6122,13 @@ void __init ftrace_free_init_mem(void)
5890 ftrace_cmp_recs); 6122 ftrace_cmp_recs);
5891 if (!rec) 6123 if (!rec)
5892 continue; 6124 continue;
6125
6126 /* rec will be cleared from hashes after ftrace_lock unlock */
6127 add_to_clear_hash_list(&clear_hash, rec);
6128
6129 if (mod_map)
6130 save_ftrace_mod_rec(mod_map, rec);
6131
5893 pg->index--; 6132 pg->index--;
5894 ftrace_update_tot_cnt--; 6133 ftrace_update_tot_cnt--;
5895 if (!pg->index) { 6134 if (!pg->index) {
@@ -5908,6 +6147,19 @@ void __init ftrace_free_init_mem(void)
5908 goto again; 6147 goto again;
5909 } 6148 }
5910 mutex_unlock(&ftrace_lock); 6149 mutex_unlock(&ftrace_lock);
6150
6151 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6152 clear_func_from_hashes(func);
6153 kfree(func);
6154 }
6155}
6156
6157void __init ftrace_free_init_mem(void)
6158{
6159 void *start = (void *)(&__init_begin);
6160 void *end = (void *)(&__init_end);
6161
6162 ftrace_free_mem(NULL, start, end);
5911} 6163}
5912 6164
5913void __init ftrace_init(void) 6165void __init ftrace_init(void)
@@ -6063,10 +6315,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6063 * If any of the above fails then the op->func() is not executed. 6315 * If any of the above fails then the op->func() is not executed.
6064 */ 6316 */
6065 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 6317 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
6066 (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
6067 !ftrace_function_local_disabled(op)) &&
6068 ftrace_ops_test(op, ip, regs)) { 6318 ftrace_ops_test(op, ip, regs)) {
6069
6070 if (FTRACE_WARN_ON(!op->func)) { 6319 if (FTRACE_WARN_ON(!op->func)) {
6071 pr_warn("op=%p %pS\n", op, op); 6320 pr_warn("op=%p %pS\n", op, op);
6072 goto out; 6321 goto out;
@@ -6124,10 +6373,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
6124 6373
6125 preempt_disable_notrace(); 6374 preempt_disable_notrace();
6126 6375
6127 if (!(op->flags & FTRACE_OPS_FL_PER_CPU) || 6376 op->func(ip, parent_ip, op, regs);
6128 !ftrace_function_local_disabled(op)) {
6129 op->func(ip, parent_ip, op, regs);
6130 }
6131 6377
6132 preempt_enable_notrace(); 6378 preempt_enable_notrace();
6133 trace_clear_recursion(bit); 6379 trace_clear_recursion(bit);
@@ -6151,7 +6397,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6151 * or does per cpu logic, then we need to call the assist handler. 6397 * or does per cpu logic, then we need to call the assist handler.
6152 */ 6398 */
6153 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || 6399 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
6154 ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU)) 6400 ops->flags & FTRACE_OPS_FL_RCU)
6155 return ftrace_ops_assist_func; 6401 return ftrace_ops_assist_func;
6156 6402
6157 return ops->func; 6403 return ops->func;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d57fede84b38..91874a95060d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2536,61 +2536,29 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2536 * The lock and unlock are done within a preempt disable section. 2536 * The lock and unlock are done within a preempt disable section.
2537 * The current_context per_cpu variable can only be modified 2537 * The current_context per_cpu variable can only be modified
2538 * by the current task between lock and unlock. But it can 2538 * by the current task between lock and unlock. But it can
2539 * be modified more than once via an interrupt. To pass this 2539 * be modified more than once via an interrupt. There are four
2540 * information from the lock to the unlock without having to 2540 * different contexts that we need to consider.
2541 * access the 'in_interrupt()' functions again (which do show
2542 * a bit of overhead in something as critical as function tracing,
2543 * we use a bitmask trick.
2544 * 2541 *
2545 * bit 0 = NMI context 2542 * Normal context.
2546 * bit 1 = IRQ context 2543 * SoftIRQ context
2547 * bit 2 = SoftIRQ context 2544 * IRQ context
2548 * bit 3 = normal context. 2545 * NMI context
2549 * 2546 *
2550 * This works because this is the order of contexts that can 2547 * If for some reason the ring buffer starts to recurse, we
2551 * preempt other contexts. A SoftIRQ never preempts an IRQ 2548 * only allow that to happen at most 4 times (one for each
2552 * context. 2549 * context). If it happens 5 times, then we consider this a
2553 * 2550 * recursive loop and do not let it go further.
2554 * When the context is determined, the corresponding bit is
2555 * checked and set (if it was set, then a recursion of that context
2556 * happened).
2557 *
2558 * On unlock, we need to clear this bit. To do so, just subtract
2559 * 1 from the current_context and AND it to itself.
2560 *
2561 * (binary)
2562 * 101 - 1 = 100
2563 * 101 & 100 = 100 (clearing bit zero)
2564 *
2565 * 1010 - 1 = 1001
2566 * 1010 & 1001 = 1000 (clearing bit 1)
2567 *
2568 * The least significant bit can be cleared this way, and it
2569 * just so happens that it is the same bit corresponding to
2570 * the current context.
2571 */ 2551 */
2572 2552
2573static __always_inline int 2553static __always_inline int
2574trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 2554trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2575{ 2555{
2576 unsigned int val = cpu_buffer->current_context; 2556 if (cpu_buffer->current_context >= 4)
2577 int bit;
2578
2579 if (in_interrupt()) {
2580 if (in_nmi())
2581 bit = RB_CTX_NMI;
2582 else if (in_irq())
2583 bit = RB_CTX_IRQ;
2584 else
2585 bit = RB_CTX_SOFTIRQ;
2586 } else
2587 bit = RB_CTX_NORMAL;
2588
2589 if (unlikely(val & (1 << bit)))
2590 return 1; 2557 return 1;
2591 2558
2592 val |= (1 << bit); 2559 cpu_buffer->current_context++;
2593 cpu_buffer->current_context = val; 2560 /* Interrupts must see this update */
2561 barrier();
2594 2562
2595 return 0; 2563 return 0;
2596} 2564}
@@ -2598,7 +2566,9 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2598static __always_inline void 2566static __always_inline void
2599trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 2567trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2600{ 2568{
2601 cpu_buffer->current_context &= cpu_buffer->current_context - 1; 2569 /* Don't let the dec leak out */
2570 barrier();
2571 cpu_buffer->current_context--;
2602} 2572}
2603 2573
2604/** 2574/**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 752e5daf0896..73e67b68c53b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7687,6 +7687,7 @@ static int instance_mkdir(const char *name)
7687 struct trace_array *tr; 7687 struct trace_array *tr;
7688 int ret; 7688 int ret;
7689 7689
7690 mutex_lock(&event_mutex);
7690 mutex_lock(&trace_types_lock); 7691 mutex_lock(&trace_types_lock);
7691 7692
7692 ret = -EEXIST; 7693 ret = -EEXIST;
@@ -7742,6 +7743,7 @@ static int instance_mkdir(const char *name)
7742 list_add(&tr->list, &ftrace_trace_arrays); 7743 list_add(&tr->list, &ftrace_trace_arrays);
7743 7744
7744 mutex_unlock(&trace_types_lock); 7745 mutex_unlock(&trace_types_lock);
7746 mutex_unlock(&event_mutex);
7745 7747
7746 return 0; 7748 return 0;
7747 7749
@@ -7753,6 +7755,7 @@ static int instance_mkdir(const char *name)
7753 7755
7754 out_unlock: 7756 out_unlock:
7755 mutex_unlock(&trace_types_lock); 7757 mutex_unlock(&trace_types_lock);
7758 mutex_unlock(&event_mutex);
7756 7759
7757 return ret; 7760 return ret;
7758 7761
@@ -7765,6 +7768,7 @@ static int instance_rmdir(const char *name)
7765 int ret; 7768 int ret;
7766 int i; 7769 int i;
7767 7770
7771 mutex_lock(&event_mutex);
7768 mutex_lock(&trace_types_lock); 7772 mutex_lock(&trace_types_lock);
7769 7773
7770 ret = -ENODEV; 7774 ret = -ENODEV;
@@ -7810,6 +7814,7 @@ static int instance_rmdir(const char *name)
7810 7814
7811 out_unlock: 7815 out_unlock:
7812 mutex_unlock(&trace_types_lock); 7816 mutex_unlock(&trace_types_lock);
7817 mutex_unlock(&event_mutex);
7813 7818
7814 return ret; 7819 return ret;
7815} 7820}
@@ -8276,6 +8281,92 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8276} 8281}
8277EXPORT_SYMBOL_GPL(ftrace_dump); 8282EXPORT_SYMBOL_GPL(ftrace_dump);
8278 8283
8284int trace_run_command(const char *buf, int (*createfn)(int, char **))
8285{
8286 char **argv;
8287 int argc, ret;
8288
8289 argc = 0;
8290 ret = 0;
8291 argv = argv_split(GFP_KERNEL, buf, &argc);
8292 if (!argv)
8293 return -ENOMEM;
8294
8295 if (argc)
8296 ret = createfn(argc, argv);
8297
8298 argv_free(argv);
8299
8300 return ret;
8301}
8302
8303#define WRITE_BUFSIZE 4096
8304
8305ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8306 size_t count, loff_t *ppos,
8307 int (*createfn)(int, char **))
8308{
8309 char *kbuf, *buf, *tmp;
8310 int ret = 0;
8311 size_t done = 0;
8312 size_t size;
8313
8314 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8315 if (!kbuf)
8316 return -ENOMEM;
8317
8318 while (done < count) {
8319 size = count - done;
8320
8321 if (size >= WRITE_BUFSIZE)
8322 size = WRITE_BUFSIZE - 1;
8323
8324 if (copy_from_user(kbuf, buffer + done, size)) {
8325 ret = -EFAULT;
8326 goto out;
8327 }
8328 kbuf[size] = '\0';
8329 buf = kbuf;
8330 do {
8331 tmp = strchr(buf, '\n');
8332 if (tmp) {
8333 *tmp = '\0';
8334 size = tmp - buf + 1;
8335 } else {
8336 size = strlen(buf);
8337 if (done + size < count) {
8338 if (buf != kbuf)
8339 break;
8340 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8341 pr_warn("Line length is too long: Should be less than %d\n",
8342 WRITE_BUFSIZE - 2);
8343 ret = -EINVAL;
8344 goto out;
8345 }
8346 }
8347 done += size;
8348
8349 /* Remove comments */
8350 tmp = strchr(buf, '#');
8351
8352 if (tmp)
8353 *tmp = '\0';
8354
8355 ret = trace_run_command(buf, createfn);
8356 if (ret)
8357 goto out;
8358 buf += size;
8359
8360 } while (done < count);
8361 }
8362 ret = done;
8363
8364out:
8365 kfree(kbuf);
8366
8367 return ret;
8368}
8369
8279__init static int tracer_alloc_buffers(void) 8370__init static int tracer_alloc_buffers(void)
8280{ 8371{
8281 int ring_buf_size; 8372 int ring_buf_size;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6b0b343a36a2..2a6d0325a761 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -739,8 +739,6 @@ extern int trace_selftest_startup_wakeup(struct tracer *trace,
739 struct trace_array *tr); 739 struct trace_array *tr);
740extern int trace_selftest_startup_nop(struct tracer *trace, 740extern int trace_selftest_startup_nop(struct tracer *trace,
741 struct trace_array *tr); 741 struct trace_array *tr);
742extern int trace_selftest_startup_sched_switch(struct tracer *trace,
743 struct trace_array *tr);
744extern int trace_selftest_startup_branch(struct tracer *trace, 742extern int trace_selftest_startup_branch(struct tracer *trace,
745 struct trace_array *tr); 743 struct trace_array *tr);
746/* 744/*
@@ -1755,6 +1753,13 @@ void trace_printk_start_comm(void);
1755int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); 1753int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1756int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); 1754int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1757 1755
1756#define MAX_EVENT_NAME_LEN 64
1757
1758extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1759extern ssize_t trace_parse_run_command(struct file *file,
1760 const char __user *buffer, size_t count, loff_t *ppos,
1761 int (*createfn)(int, char**));
1762
1758/* 1763/*
1759 * Normal trace_printk() and friends allocates special buffers 1764 * Normal trace_printk() and friends allocates special buffers
1760 * to do the manipulation, as well as saves the print formats 1765 * to do the manipulation, as well as saves the print formats
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 13ba2d3f6a91..55d6dff37daf 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -240,27 +240,41 @@ void perf_trace_destroy(struct perf_event *p_event)
240int perf_trace_add(struct perf_event *p_event, int flags) 240int perf_trace_add(struct perf_event *p_event, int flags)
241{ 241{
242 struct trace_event_call *tp_event = p_event->tp_event; 242 struct trace_event_call *tp_event = p_event->tp_event;
243 struct hlist_head __percpu *pcpu_list;
244 struct hlist_head *list;
245
246 pcpu_list = tp_event->perf_events;
247 if (WARN_ON_ONCE(!pcpu_list))
248 return -EINVAL;
249 243
250 if (!(flags & PERF_EF_START)) 244 if (!(flags & PERF_EF_START))
251 p_event->hw.state = PERF_HES_STOPPED; 245 p_event->hw.state = PERF_HES_STOPPED;
252 246
253 list = this_cpu_ptr(pcpu_list); 247 /*
254 hlist_add_head_rcu(&p_event->hlist_entry, list); 248 * If TRACE_REG_PERF_ADD returns false; no custom action was performed
249 * and we need to take the default action of enqueueing our event on
250 * the right per-cpu hlist.
251 */
252 if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
253 struct hlist_head __percpu *pcpu_list;
254 struct hlist_head *list;
255
256 pcpu_list = tp_event->perf_events;
257 if (WARN_ON_ONCE(!pcpu_list))
258 return -EINVAL;
259
260 list = this_cpu_ptr(pcpu_list);
261 hlist_add_head_rcu(&p_event->hlist_entry, list);
262 }
255 263
256 return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event); 264 return 0;
257} 265}
258 266
259void perf_trace_del(struct perf_event *p_event, int flags) 267void perf_trace_del(struct perf_event *p_event, int flags)
260{ 268{
261 struct trace_event_call *tp_event = p_event->tp_event; 269 struct trace_event_call *tp_event = p_event->tp_event;
262 hlist_del_rcu(&p_event->hlist_entry); 270
263 tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); 271 /*
272 * If TRACE_REG_PERF_DEL returns false; no custom action was performed
273 * and we need to take the default action of dequeueing our event from
274 * the right per-cpu hlist.
275 */
276 if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
277 hlist_del_rcu(&p_event->hlist_entry);
264} 278}
265 279
266void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp) 280void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
@@ -306,16 +320,25 @@ static void
306perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, 320perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
307 struct ftrace_ops *ops, struct pt_regs *pt_regs) 321 struct ftrace_ops *ops, struct pt_regs *pt_regs)
308{ 322{
309 struct perf_event *event;
310 struct ftrace_entry *entry; 323 struct ftrace_entry *entry;
311 struct hlist_head *head; 324 struct perf_event *event;
325 struct hlist_head head;
312 struct pt_regs regs; 326 struct pt_regs regs;
313 int rctx; 327 int rctx;
314 328
315 head = this_cpu_ptr(event_function.perf_events); 329 if ((unsigned long)ops->private != smp_processor_id())
316 if (hlist_empty(head))
317 return; 330 return;
318 331
332 event = container_of(ops, struct perf_event, ftrace_ops);
333
334 /*
335 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
336 * the perf code does is hlist_for_each_entry_rcu(), so we can
337 * get away with simply setting the @head.first pointer in order
338 * to create a singular list.
339 */
340 head.first = &event->hlist_entry;
341
319#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \ 342#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
320 sizeof(u64)) - sizeof(u32)) 343 sizeof(u64)) - sizeof(u32))
321 344
@@ -330,9 +353,8 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
330 353
331 entry->ip = ip; 354 entry->ip = ip;
332 entry->parent_ip = parent_ip; 355 entry->parent_ip = parent_ip;
333 event = container_of(ops, struct perf_event, ftrace_ops);
334 perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN, 356 perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
335 1, &regs, head, NULL, event); 357 1, &regs, &head, NULL);
336 358
337#undef ENTRY_SIZE 359#undef ENTRY_SIZE
338} 360}
@@ -341,8 +363,10 @@ static int perf_ftrace_function_register(struct perf_event *event)
341{ 363{
342 struct ftrace_ops *ops = &event->ftrace_ops; 364 struct ftrace_ops *ops = &event->ftrace_ops;
343 365
344 ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU; 366 ops->flags = FTRACE_OPS_FL_RCU;
345 ops->func = perf_ftrace_function_call; 367 ops->func = perf_ftrace_function_call;
368 ops->private = (void *)(unsigned long)nr_cpu_ids;
369
346 return register_ftrace_function(ops); 370 return register_ftrace_function(ops);
347} 371}
348 372
@@ -354,19 +378,11 @@ static int perf_ftrace_function_unregister(struct perf_event *event)
354 return ret; 378 return ret;
355} 379}
356 380
357static void perf_ftrace_function_enable(struct perf_event *event)
358{
359 ftrace_function_local_enable(&event->ftrace_ops);
360}
361
362static void perf_ftrace_function_disable(struct perf_event *event)
363{
364 ftrace_function_local_disable(&event->ftrace_ops);
365}
366
367int perf_ftrace_event_register(struct trace_event_call *call, 381int perf_ftrace_event_register(struct trace_event_call *call,
368 enum trace_reg type, void *data) 382 enum trace_reg type, void *data)
369{ 383{
384 struct perf_event *event = data;
385
370 switch (type) { 386 switch (type) {
371 case TRACE_REG_REGISTER: 387 case TRACE_REG_REGISTER:
372 case TRACE_REG_UNREGISTER: 388 case TRACE_REG_UNREGISTER:
@@ -379,11 +395,11 @@ int perf_ftrace_event_register(struct trace_event_call *call,
379 case TRACE_REG_PERF_CLOSE: 395 case TRACE_REG_PERF_CLOSE:
380 return perf_ftrace_function_unregister(data); 396 return perf_ftrace_function_unregister(data);
381 case TRACE_REG_PERF_ADD: 397 case TRACE_REG_PERF_ADD:
382 perf_ftrace_function_enable(data); 398 event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
383 return 0; 399 return 1;
384 case TRACE_REG_PERF_DEL: 400 case TRACE_REG_PERF_DEL:
385 perf_ftrace_function_disable(data); 401 event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
386 return 0; 402 return 1;
387 } 403 }
388 404
389 return -EINVAL; 405 return -EINVAL;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 87468398b9ed..ec0f9aa4e151 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1406,8 +1406,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
1406 return -ENODEV; 1406 return -ENODEV;
1407 1407
1408 /* Make sure the system still exists */ 1408 /* Make sure the system still exists */
1409 mutex_lock(&trace_types_lock);
1410 mutex_lock(&event_mutex); 1409 mutex_lock(&event_mutex);
1410 mutex_lock(&trace_types_lock);
1411 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 1411 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1412 list_for_each_entry(dir, &tr->systems, list) { 1412 list_for_each_entry(dir, &tr->systems, list) {
1413 if (dir == inode->i_private) { 1413 if (dir == inode->i_private) {
@@ -1421,8 +1421,8 @@ static int subsystem_open(struct inode *inode, struct file *filp)
1421 } 1421 }
1422 } 1422 }
1423 exit_loop: 1423 exit_loop:
1424 mutex_unlock(&event_mutex);
1425 mutex_unlock(&trace_types_lock); 1424 mutex_unlock(&trace_types_lock);
1425 mutex_unlock(&event_mutex);
1426 1426
1427 if (!system) 1427 if (!system)
1428 return -ENODEV; 1428 return -ENODEV;
@@ -2294,15 +2294,15 @@ static void __add_event_to_tracers(struct trace_event_call *call);
2294int trace_add_event_call(struct trace_event_call *call) 2294int trace_add_event_call(struct trace_event_call *call)
2295{ 2295{
2296 int ret; 2296 int ret;
2297 mutex_lock(&trace_types_lock);
2298 mutex_lock(&event_mutex); 2297 mutex_lock(&event_mutex);
2298 mutex_lock(&trace_types_lock);
2299 2299
2300 ret = __register_event(call, NULL); 2300 ret = __register_event(call, NULL);
2301 if (ret >= 0) 2301 if (ret >= 0)
2302 __add_event_to_tracers(call); 2302 __add_event_to_tracers(call);
2303 2303
2304 mutex_unlock(&event_mutex);
2305 mutex_unlock(&trace_types_lock); 2304 mutex_unlock(&trace_types_lock);
2305 mutex_unlock(&event_mutex);
2306 return ret; 2306 return ret;
2307} 2307}
2308 2308
@@ -2356,13 +2356,13 @@ int trace_remove_event_call(struct trace_event_call *call)
2356{ 2356{
2357 int ret; 2357 int ret;
2358 2358
2359 mutex_lock(&trace_types_lock);
2360 mutex_lock(&event_mutex); 2359 mutex_lock(&event_mutex);
2360 mutex_lock(&trace_types_lock);
2361 down_write(&trace_event_sem); 2361 down_write(&trace_event_sem);
2362 ret = probe_remove_event_call(call); 2362 ret = probe_remove_event_call(call);
2363 up_write(&trace_event_sem); 2363 up_write(&trace_event_sem);
2364 mutex_unlock(&event_mutex);
2365 mutex_unlock(&trace_types_lock); 2364 mutex_unlock(&trace_types_lock);
2365 mutex_unlock(&event_mutex);
2366 2366
2367 return ret; 2367 return ret;
2368} 2368}
@@ -2424,8 +2424,8 @@ static int trace_module_notify(struct notifier_block *self,
2424{ 2424{
2425 struct module *mod = data; 2425 struct module *mod = data;
2426 2426
2427 mutex_lock(&trace_types_lock);
2428 mutex_lock(&event_mutex); 2427 mutex_lock(&event_mutex);
2428 mutex_lock(&trace_types_lock);
2429 switch (val) { 2429 switch (val) {
2430 case MODULE_STATE_COMING: 2430 case MODULE_STATE_COMING:
2431 trace_module_add_events(mod); 2431 trace_module_add_events(mod);
@@ -2434,8 +2434,8 @@ static int trace_module_notify(struct notifier_block *self,
2434 trace_module_remove_events(mod); 2434 trace_module_remove_events(mod);
2435 break; 2435 break;
2436 } 2436 }
2437 mutex_unlock(&event_mutex);
2438 mutex_unlock(&trace_types_lock); 2437 mutex_unlock(&trace_types_lock);
2438 mutex_unlock(&event_mutex);
2439 2439
2440 return 0; 2440 return 0;
2441} 2441}
@@ -2950,24 +2950,24 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2950 * creates the event hierachry in the @parent/events directory. 2950 * creates the event hierachry in the @parent/events directory.
2951 * 2951 *
2952 * Returns 0 on success. 2952 * Returns 0 on success.
2953 *
2954 * Must be called with event_mutex held.
2953 */ 2955 */
2954int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) 2956int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2955{ 2957{
2956 int ret; 2958 int ret;
2957 2959
2958 mutex_lock(&event_mutex); 2960 lockdep_assert_held(&event_mutex);
2959 2961
2960 ret = create_event_toplevel_files(parent, tr); 2962 ret = create_event_toplevel_files(parent, tr);
2961 if (ret) 2963 if (ret)
2962 goto out_unlock; 2964 goto out;
2963 2965
2964 down_write(&trace_event_sem); 2966 down_write(&trace_event_sem);
2965 __trace_add_event_dirs(tr); 2967 __trace_add_event_dirs(tr);
2966 up_write(&trace_event_sem); 2968 up_write(&trace_event_sem);
2967 2969
2968 out_unlock: 2970 out:
2969 mutex_unlock(&event_mutex);
2970
2971 return ret; 2971 return ret;
2972} 2972}
2973 2973
@@ -2996,9 +2996,10 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2996 return ret; 2996 return ret;
2997} 2997}
2998 2998
2999/* Must be called with event_mutex held */
2999int event_trace_del_tracer(struct trace_array *tr) 3000int event_trace_del_tracer(struct trace_array *tr)
3000{ 3001{
3001 mutex_lock(&event_mutex); 3002 lockdep_assert_held(&event_mutex);
3002 3003
3003 /* Disable any event triggers and associated soft-disabled events */ 3004 /* Disable any event triggers and associated soft-disabled events */
3004 clear_event_triggers(tr); 3005 clear_event_triggers(tr);
@@ -3019,8 +3020,6 @@ int event_trace_del_tracer(struct trace_array *tr)
3019 3020
3020 tr->event_dir = NULL; 3021 tr->event_dir = NULL;
3021 3022
3022 mutex_unlock(&event_mutex);
3023
3024 return 0; 3023 return 0;
3025} 3024}
3026 3025
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 1c21d0e2a145..1e1558c99d56 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -28,12 +28,16 @@ struct hist_field;
28 28
29typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event); 29typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
30 30
31#define HIST_FIELD_OPERANDS_MAX 2
32
31struct hist_field { 33struct hist_field {
32 struct ftrace_event_field *field; 34 struct ftrace_event_field *field;
33 unsigned long flags; 35 unsigned long flags;
34 hist_field_fn_t fn; 36 hist_field_fn_t fn;
35 unsigned int size; 37 unsigned int size;
36 unsigned int offset; 38 unsigned int offset;
39 unsigned int is_signed;
40 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
37}; 41};
38 42
39static u64 hist_field_none(struct hist_field *field, void *event) 43static u64 hist_field_none(struct hist_field *field, void *event)
@@ -71,7 +75,9 @@ static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
71 75
72static u64 hist_field_log2(struct hist_field *hist_field, void *event) 76static u64 hist_field_log2(struct hist_field *hist_field, void *event)
73{ 77{
74 u64 val = *(u64 *)(event + hist_field->field->offset); 78 struct hist_field *operand = hist_field->operands[0];
79
80 u64 val = operand->fn(operand, event);
75 81
76 return (u64) ilog2(roundup_pow_of_two(val)); 82 return (u64) ilog2(roundup_pow_of_two(val));
77} 83}
@@ -110,16 +116,16 @@ DEFINE_HIST_FIELD_FN(u8);
110#define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE) 116#define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
111 117
112enum hist_field_flags { 118enum hist_field_flags {
113 HIST_FIELD_FL_HITCOUNT = 1, 119 HIST_FIELD_FL_HITCOUNT = 1 << 0,
114 HIST_FIELD_FL_KEY = 2, 120 HIST_FIELD_FL_KEY = 1 << 1,
115 HIST_FIELD_FL_STRING = 4, 121 HIST_FIELD_FL_STRING = 1 << 2,
116 HIST_FIELD_FL_HEX = 8, 122 HIST_FIELD_FL_HEX = 1 << 3,
117 HIST_FIELD_FL_SYM = 16, 123 HIST_FIELD_FL_SYM = 1 << 4,
118 HIST_FIELD_FL_SYM_OFFSET = 32, 124 HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
119 HIST_FIELD_FL_EXECNAME = 64, 125 HIST_FIELD_FL_EXECNAME = 1 << 6,
120 HIST_FIELD_FL_SYSCALL = 128, 126 HIST_FIELD_FL_SYSCALL = 1 << 7,
121 HIST_FIELD_FL_STACKTRACE = 256, 127 HIST_FIELD_FL_STACKTRACE = 1 << 8,
122 HIST_FIELD_FL_LOG2 = 512, 128 HIST_FIELD_FL_LOG2 = 1 << 9,
123}; 129};
124 130
125struct hist_trigger_attrs { 131struct hist_trigger_attrs {
@@ -146,6 +152,25 @@ struct hist_trigger_data {
146 struct tracing_map *map; 152 struct tracing_map *map;
147}; 153};
148 154
155static const char *hist_field_name(struct hist_field *field,
156 unsigned int level)
157{
158 const char *field_name = "";
159
160 if (level > 1)
161 return field_name;
162
163 if (field->field)
164 field_name = field->field->name;
165 else if (field->flags & HIST_FIELD_FL_LOG2)
166 field_name = hist_field_name(field->operands[0], ++level);
167
168 if (field_name == NULL)
169 field_name = "";
170
171 return field_name;
172}
173
149static hist_field_fn_t select_value_fn(int field_size, int field_is_signed) 174static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
150{ 175{
151 hist_field_fn_t fn = NULL; 176 hist_field_fn_t fn = NULL;
@@ -340,8 +365,20 @@ static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
340 .elt_init = hist_trigger_elt_comm_init, 365 .elt_init = hist_trigger_elt_comm_init,
341}; 366};
342 367
343static void destroy_hist_field(struct hist_field *hist_field) 368static void destroy_hist_field(struct hist_field *hist_field,
369 unsigned int level)
344{ 370{
371 unsigned int i;
372
373 if (level > 2)
374 return;
375
376 if (!hist_field)
377 return;
378
379 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
380 destroy_hist_field(hist_field->operands[i], level + 1);
381
345 kfree(hist_field); 382 kfree(hist_field);
346} 383}
347 384
@@ -368,7 +405,10 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field,
368 } 405 }
369 406
370 if (flags & HIST_FIELD_FL_LOG2) { 407 if (flags & HIST_FIELD_FL_LOG2) {
408 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
371 hist_field->fn = hist_field_log2; 409 hist_field->fn = hist_field_log2;
410 hist_field->operands[0] = create_hist_field(field, fl);
411 hist_field->size = hist_field->operands[0]->size;
372 goto out; 412 goto out;
373 } 413 }
374 414
@@ -388,7 +428,7 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field,
388 hist_field->fn = select_value_fn(field->size, 428 hist_field->fn = select_value_fn(field->size,
389 field->is_signed); 429 field->is_signed);
390 if (!hist_field->fn) { 430 if (!hist_field->fn) {
391 destroy_hist_field(hist_field); 431 destroy_hist_field(hist_field, 0);
392 return NULL; 432 return NULL;
393 } 433 }
394 } 434 }
@@ -405,7 +445,7 @@ static void destroy_hist_fields(struct hist_trigger_data *hist_data)
405 445
406 for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) { 446 for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
407 if (hist_data->fields[i]) { 447 if (hist_data->fields[i]) {
408 destroy_hist_field(hist_data->fields[i]); 448 destroy_hist_field(hist_data->fields[i], 0);
409 hist_data->fields[i] = NULL; 449 hist_data->fields[i] = NULL;
410 } 450 }
411 } 451 }
@@ -450,7 +490,7 @@ static int create_val_field(struct hist_trigger_data *hist_data,
450 } 490 }
451 491
452 field = trace_find_event_field(file->event_call, field_name); 492 field = trace_find_event_field(file->event_call, field_name);
453 if (!field) { 493 if (!field || !field->size) {
454 ret = -EINVAL; 494 ret = -EINVAL;
455 goto out; 495 goto out;
456 } 496 }
@@ -548,7 +588,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
548 } 588 }
549 589
550 field = trace_find_event_field(file->event_call, field_name); 590 field = trace_find_event_field(file->event_call, field_name);
551 if (!field) { 591 if (!field || !field->size) {
552 ret = -EINVAL; 592 ret = -EINVAL;
553 goto out; 593 goto out;
554 } 594 }
@@ -653,7 +693,6 @@ static int is_descending(const char *str)
653static int create_sort_keys(struct hist_trigger_data *hist_data) 693static int create_sort_keys(struct hist_trigger_data *hist_data)
654{ 694{
655 char *fields_str = hist_data->attrs->sort_key_str; 695 char *fields_str = hist_data->attrs->sort_key_str;
656 struct ftrace_event_field *field = NULL;
657 struct tracing_map_sort_key *sort_key; 696 struct tracing_map_sort_key *sort_key;
658 int descending, ret = 0; 697 int descending, ret = 0;
659 unsigned int i, j; 698 unsigned int i, j;
@@ -670,7 +709,9 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
670 } 709 }
671 710
672 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { 711 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
712 struct hist_field *hist_field;
673 char *field_str, *field_name; 713 char *field_str, *field_name;
714 const char *test_name;
674 715
675 sort_key = &hist_data->sort_keys[i]; 716 sort_key = &hist_data->sort_keys[i];
676 717
@@ -703,8 +744,10 @@ static int create_sort_keys(struct hist_trigger_data *hist_data)
703 } 744 }
704 745
705 for (j = 1; j < hist_data->n_fields; j++) { 746 for (j = 1; j < hist_data->n_fields; j++) {
706 field = hist_data->fields[j]->field; 747 hist_field = hist_data->fields[j];
707 if (field && (strcmp(field_name, field->name) == 0)) { 748 test_name = hist_field_name(hist_field, 0);
749
750 if (strcmp(field_name, test_name) == 0) {
708 sort_key->field_idx = j; 751 sort_key->field_idx = j;
709 descending = is_descending(field_str); 752 descending = is_descending(field_str);
710 if (descending < 0) { 753 if (descending < 0) {
@@ -952,6 +995,7 @@ hist_trigger_entry_print(struct seq_file *m,
952 struct hist_field *key_field; 995 struct hist_field *key_field;
953 char str[KSYM_SYMBOL_LEN]; 996 char str[KSYM_SYMBOL_LEN];
954 bool multiline = false; 997 bool multiline = false;
998 const char *field_name;
955 unsigned int i; 999 unsigned int i;
956 u64 uval; 1000 u64 uval;
957 1001
@@ -963,26 +1007,27 @@ hist_trigger_entry_print(struct seq_file *m,
963 if (i > hist_data->n_vals) 1007 if (i > hist_data->n_vals)
964 seq_puts(m, ", "); 1008 seq_puts(m, ", ");
965 1009
1010 field_name = hist_field_name(key_field, 0);
1011
966 if (key_field->flags & HIST_FIELD_FL_HEX) { 1012 if (key_field->flags & HIST_FIELD_FL_HEX) {
967 uval = *(u64 *)(key + key_field->offset); 1013 uval = *(u64 *)(key + key_field->offset);
968 seq_printf(m, "%s: %llx", 1014 seq_printf(m, "%s: %llx", field_name, uval);
969 key_field->field->name, uval);
970 } else if (key_field->flags & HIST_FIELD_FL_SYM) { 1015 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
971 uval = *(u64 *)(key + key_field->offset); 1016 uval = *(u64 *)(key + key_field->offset);
972 sprint_symbol_no_offset(str, uval); 1017 sprint_symbol_no_offset(str, uval);
973 seq_printf(m, "%s: [%llx] %-45s", 1018 seq_printf(m, "%s: [%llx] %-45s", field_name,
974 key_field->field->name, uval, str); 1019 uval, str);
975 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) { 1020 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
976 uval = *(u64 *)(key + key_field->offset); 1021 uval = *(u64 *)(key + key_field->offset);
977 sprint_symbol(str, uval); 1022 sprint_symbol(str, uval);
978 seq_printf(m, "%s: [%llx] %-55s", 1023 seq_printf(m, "%s: [%llx] %-55s", field_name,
979 key_field->field->name, uval, str); 1024 uval, str);
980 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 1025 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
981 char *comm = elt->private_data; 1026 char *comm = elt->private_data;
982 1027
983 uval = *(u64 *)(key + key_field->offset); 1028 uval = *(u64 *)(key + key_field->offset);
984 seq_printf(m, "%s: %-16s[%10llu]", 1029 seq_printf(m, "%s: %-16s[%10llu]", field_name,
985 key_field->field->name, comm, uval); 1030 comm, uval);
986 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) { 1031 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
987 const char *syscall_name; 1032 const char *syscall_name;
988 1033
@@ -991,8 +1036,8 @@ hist_trigger_entry_print(struct seq_file *m,
991 if (!syscall_name) 1036 if (!syscall_name)
992 syscall_name = "unknown_syscall"; 1037 syscall_name = "unknown_syscall";
993 1038
994 seq_printf(m, "%s: %-30s[%3llu]", 1039 seq_printf(m, "%s: %-30s[%3llu]", field_name,
995 key_field->field->name, syscall_name, uval); 1040 syscall_name, uval);
996 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 1041 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
997 seq_puts(m, "stacktrace:\n"); 1042 seq_puts(m, "stacktrace:\n");
998 hist_trigger_stacktrace_print(m, 1043 hist_trigger_stacktrace_print(m,
@@ -1000,15 +1045,14 @@ hist_trigger_entry_print(struct seq_file *m,
1000 HIST_STACKTRACE_DEPTH); 1045 HIST_STACKTRACE_DEPTH);
1001 multiline = true; 1046 multiline = true;
1002 } else if (key_field->flags & HIST_FIELD_FL_LOG2) { 1047 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
1003 seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name, 1048 seq_printf(m, "%s: ~ 2^%-2llu", field_name,
1004 *(u64 *)(key + key_field->offset)); 1049 *(u64 *)(key + key_field->offset));
1005 } else if (key_field->flags & HIST_FIELD_FL_STRING) { 1050 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
1006 seq_printf(m, "%s: %-50s", key_field->field->name, 1051 seq_printf(m, "%s: %-50s", field_name,
1007 (char *)(key + key_field->offset)); 1052 (char *)(key + key_field->offset));
1008 } else { 1053 } else {
1009 uval = *(u64 *)(key + key_field->offset); 1054 uval = *(u64 *)(key + key_field->offset);
1010 seq_printf(m, "%s: %10llu", key_field->field->name, 1055 seq_printf(m, "%s: %10llu", field_name, uval);
1011 uval);
1012 } 1056 }
1013 } 1057 }
1014 1058
@@ -1021,13 +1065,13 @@ hist_trigger_entry_print(struct seq_file *m,
1021 tracing_map_read_sum(elt, HITCOUNT_IDX)); 1065 tracing_map_read_sum(elt, HITCOUNT_IDX));
1022 1066
1023 for (i = 1; i < hist_data->n_vals; i++) { 1067 for (i = 1; i < hist_data->n_vals; i++) {
1068 field_name = hist_field_name(hist_data->fields[i], 0);
1069
1024 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) { 1070 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
1025 seq_printf(m, " %s: %10llx", 1071 seq_printf(m, " %s: %10llx", field_name,
1026 hist_data->fields[i]->field->name,
1027 tracing_map_read_sum(elt, i)); 1072 tracing_map_read_sum(elt, i));
1028 } else { 1073 } else {
1029 seq_printf(m, " %s: %10llu", 1074 seq_printf(m, " %s: %10llu", field_name,
1030 hist_data->fields[i]->field->name,
1031 tracing_map_read_sum(elt, i)); 1075 tracing_map_read_sum(elt, i));
1032 } 1076 }
1033 } 1077 }
@@ -1062,7 +1106,7 @@ static void hist_trigger_show(struct seq_file *m,
1062 struct event_trigger_data *data, int n) 1106 struct event_trigger_data *data, int n)
1063{ 1107{
1064 struct hist_trigger_data *hist_data; 1108 struct hist_trigger_data *hist_data;
1065 int n_entries, ret = 0; 1109 int n_entries;
1066 1110
1067 if (n > 0) 1111 if (n > 0)
1068 seq_puts(m, "\n\n"); 1112 seq_puts(m, "\n\n");
@@ -1073,10 +1117,8 @@ static void hist_trigger_show(struct seq_file *m,
1073 1117
1074 hist_data = data->private_data; 1118 hist_data = data->private_data;
1075 n_entries = print_entries(m, hist_data); 1119 n_entries = print_entries(m, hist_data);
1076 if (n_entries < 0) { 1120 if (n_entries < 0)
1077 ret = n_entries;
1078 n_entries = 0; 1121 n_entries = 0;
1079 }
1080 1122
1081 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n", 1123 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
1082 (u64)atomic64_read(&hist_data->map->hits), 1124 (u64)atomic64_read(&hist_data->map->hits),
@@ -1142,7 +1184,9 @@ static const char *get_hist_field_flags(struct hist_field *hist_field)
1142 1184
1143static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) 1185static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
1144{ 1186{
1145 seq_printf(m, "%s", hist_field->field->name); 1187 const char *field_name = hist_field_name(hist_field, 0);
1188
1189 seq_printf(m, "%s", field_name);
1146 if (hist_field->flags) { 1190 if (hist_field->flags) {
1147 const char *flags_str = get_hist_field_flags(hist_field); 1191 const char *flags_str = get_hist_field_flags(hist_field);
1148 1192
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 7758bc0617cb..03ecb4465ee4 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -16,6 +16,10 @@
16 16
17#include "trace.h" 17#include "trace.h"
18 18
19#define CREATE_TRACE_POINTS
20#include <trace/events/preemptirq.h>
21
22#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
19static struct trace_array *irqsoff_trace __read_mostly; 23static struct trace_array *irqsoff_trace __read_mostly;
20static int tracer_enabled __read_mostly; 24static int tracer_enabled __read_mostly;
21 25
@@ -463,63 +467,43 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
463#else /* !CONFIG_PROVE_LOCKING */ 467#else /* !CONFIG_PROVE_LOCKING */
464 468
465/* 469/*
466 * Stubs:
467 */
468
469void trace_softirqs_on(unsigned long ip)
470{
471}
472
473void trace_softirqs_off(unsigned long ip)
474{
475}
476
477inline void print_irqtrace_events(struct task_struct *curr)
478{
479}
480
481/*
482 * We are only interested in hardirq on/off events: 470 * We are only interested in hardirq on/off events:
483 */ 471 */
484void trace_hardirqs_on(void) 472static inline void tracer_hardirqs_on(void)
485{ 473{
486 if (!preempt_trace() && irq_trace()) 474 if (!preempt_trace() && irq_trace())
487 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 475 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
488} 476}
489EXPORT_SYMBOL(trace_hardirqs_on);
490 477
491void trace_hardirqs_off(void) 478static inline void tracer_hardirqs_off(void)
492{ 479{
493 if (!preempt_trace() && irq_trace()) 480 if (!preempt_trace() && irq_trace())
494 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 481 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
495} 482}
496EXPORT_SYMBOL(trace_hardirqs_off);
497 483
498__visible void trace_hardirqs_on_caller(unsigned long caller_addr) 484static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
499{ 485{
500 if (!preempt_trace() && irq_trace()) 486 if (!preempt_trace() && irq_trace())
501 stop_critical_timing(CALLER_ADDR0, caller_addr); 487 stop_critical_timing(CALLER_ADDR0, caller_addr);
502} 488}
503EXPORT_SYMBOL(trace_hardirqs_on_caller);
504 489
505__visible void trace_hardirqs_off_caller(unsigned long caller_addr) 490static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
506{ 491{
507 if (!preempt_trace() && irq_trace()) 492 if (!preempt_trace() && irq_trace())
508 start_critical_timing(CALLER_ADDR0, caller_addr); 493 start_critical_timing(CALLER_ADDR0, caller_addr);
509} 494}
510EXPORT_SYMBOL(trace_hardirqs_off_caller);
511 495
512#endif /* CONFIG_PROVE_LOCKING */ 496#endif /* CONFIG_PROVE_LOCKING */
513#endif /* CONFIG_IRQSOFF_TRACER */ 497#endif /* CONFIG_IRQSOFF_TRACER */
514 498
515#ifdef CONFIG_PREEMPT_TRACER 499#ifdef CONFIG_PREEMPT_TRACER
516void trace_preempt_on(unsigned long a0, unsigned long a1) 500static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
517{ 501{
518 if (preempt_trace() && !irq_trace()) 502 if (preempt_trace() && !irq_trace())
519 stop_critical_timing(a0, a1); 503 stop_critical_timing(a0, a1);
520} 504}
521 505
522void trace_preempt_off(unsigned long a0, unsigned long a1) 506static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
523{ 507{
524 if (preempt_trace() && !irq_trace()) 508 if (preempt_trace() && !irq_trace())
525 start_critical_timing(a0, a1); 509 start_critical_timing(a0, a1);
@@ -781,3 +765,100 @@ __init static int init_irqsoff_tracer(void)
781 return 0; 765 return 0;
782} 766}
783core_initcall(init_irqsoff_tracer); 767core_initcall(init_irqsoff_tracer);
768#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
769
770#ifndef CONFIG_IRQSOFF_TRACER
771static inline void tracer_hardirqs_on(void) { }
772static inline void tracer_hardirqs_off(void) { }
773static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
774static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
775#endif
776
777#ifndef CONFIG_PREEMPT_TRACER
778static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
779static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
780#endif
781
782#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
783/* Per-cpu variable to prevent redundant calls when IRQs already off */
784static DEFINE_PER_CPU(int, tracing_irq_cpu);
785
786void trace_hardirqs_on(void)
787{
788 if (!this_cpu_read(tracing_irq_cpu))
789 return;
790
791 trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
792 tracer_hardirqs_on();
793
794 this_cpu_write(tracing_irq_cpu, 0);
795}
796EXPORT_SYMBOL(trace_hardirqs_on);
797
798void trace_hardirqs_off(void)
799{
800 if (this_cpu_read(tracing_irq_cpu))
801 return;
802
803 this_cpu_write(tracing_irq_cpu, 1);
804
805 trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
806 tracer_hardirqs_off();
807}
808EXPORT_SYMBOL(trace_hardirqs_off);
809
810__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
811{
812 if (!this_cpu_read(tracing_irq_cpu))
813 return;
814
815 trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
816 tracer_hardirqs_on_caller(caller_addr);
817
818 this_cpu_write(tracing_irq_cpu, 0);
819}
820EXPORT_SYMBOL(trace_hardirqs_on_caller);
821
822__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
823{
824 if (this_cpu_read(tracing_irq_cpu))
825 return;
826
827 this_cpu_write(tracing_irq_cpu, 1);
828
829 trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
830 tracer_hardirqs_off_caller(caller_addr);
831}
832EXPORT_SYMBOL(trace_hardirqs_off_caller);
833
834/*
835 * Stubs:
836 */
837
838void trace_softirqs_on(unsigned long ip)
839{
840}
841
842void trace_softirqs_off(unsigned long ip)
843{
844}
845
846inline void print_irqtrace_events(struct task_struct *curr)
847{
848}
849#endif
850
851#if defined(CONFIG_PREEMPT_TRACER) || \
852 (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
853void trace_preempt_on(unsigned long a0, unsigned long a1)
854{
855 trace_preempt_enable_rcuidle(a0, a1);
856 tracer_preempt_on(a0, a1);
857}
858
859void trace_preempt_off(unsigned long a0, unsigned long a1)
860{
861 trace_preempt_disable_rcuidle(a0, a1);
862 tracer_preempt_off(a0, a1);
863}
864#endif
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index abf92e478cfb..492700c5fb4d 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -907,8 +907,8 @@ static int probes_open(struct inode *inode, struct file *file)
907static ssize_t probes_write(struct file *file, const char __user *buffer, 907static ssize_t probes_write(struct file *file, const char __user *buffer,
908 size_t count, loff_t *ppos) 908 size_t count, loff_t *ppos)
909{ 909{
910 return traceprobe_probes_write(file, buffer, count, ppos, 910 return trace_parse_run_command(file, buffer, count, ppos,
911 create_trace_kprobe); 911 create_trace_kprobe);
912} 912}
913 913
914static const struct file_operations kprobe_events_ops = { 914static const struct file_operations kprobe_events_ops = {
@@ -1199,7 +1199,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1199 memset(&entry[1], 0, dsize); 1199 memset(&entry[1], 0, dsize);
1200 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); 1200 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1201 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, 1201 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1202 head, NULL, NULL); 1202 head, NULL);
1203} 1203}
1204NOKPROBE_SYMBOL(kprobe_perf_func); 1204NOKPROBE_SYMBOL(kprobe_perf_func);
1205 1205
@@ -1234,7 +1234,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1234 entry->ret_ip = (unsigned long)ri->ret_addr; 1234 entry->ret_ip = (unsigned long)ri->ret_addr;
1235 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); 1235 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1236 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, 1236 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1237 head, NULL, NULL); 1237 head, NULL);
1238} 1238}
1239NOKPROBE_SYMBOL(kretprobe_perf_func); 1239NOKPROBE_SYMBOL(kretprobe_perf_func);
1240#endif /* CONFIG_PERF_EVENTS */ 1240#endif /* CONFIG_PERF_EVENTS */
@@ -1431,9 +1431,9 @@ static __init int kprobe_trace_self_tests_init(void)
1431 1431
1432 pr_info("Testing kprobe tracing: "); 1432 pr_info("Testing kprobe tracing: ");
1433 1433
1434 ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target " 1434 ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
1435 "$stack $stack0 +0($stack)", 1435 "$stack $stack0 +0($stack)",
1436 create_trace_kprobe); 1436 create_trace_kprobe);
1437 if (WARN_ON_ONCE(ret)) { 1437 if (WARN_ON_ONCE(ret)) {
1438 pr_warn("error on probing function entry.\n"); 1438 pr_warn("error on probing function entry.\n");
1439 warn++; 1439 warn++;
@@ -1453,8 +1453,8 @@ static __init int kprobe_trace_self_tests_init(void)
1453 } 1453 }
1454 } 1454 }
1455 1455
1456 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " 1456 ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
1457 "$retval", create_trace_kprobe); 1457 "$retval", create_trace_kprobe);
1458 if (WARN_ON_ONCE(ret)) { 1458 if (WARN_ON_ONCE(ret)) {
1459 pr_warn("error on probing function return.\n"); 1459 pr_warn("error on probing function return.\n");
1460 warn++; 1460 warn++;
@@ -1524,13 +1524,13 @@ static __init int kprobe_trace_self_tests_init(void)
1524 disable_trace_kprobe(tk, file); 1524 disable_trace_kprobe(tk, file);
1525 } 1525 }
1526 1526
1527 ret = traceprobe_command("-:testprobe", create_trace_kprobe); 1527 ret = trace_run_command("-:testprobe", create_trace_kprobe);
1528 if (WARN_ON_ONCE(ret)) { 1528 if (WARN_ON_ONCE(ret)) {
1529 pr_warn("error on deleting a probe.\n"); 1529 pr_warn("error on deleting a probe.\n");
1530 warn++; 1530 warn++;
1531 } 1531 }
1532 1532
1533 ret = traceprobe_command("-:testprobe2", create_trace_kprobe); 1533 ret = trace_run_command("-:testprobe2", create_trace_kprobe);
1534 if (WARN_ON_ONCE(ret)) { 1534 if (WARN_ON_ONCE(ret)) {
1535 pr_warn("error on deleting a probe.\n"); 1535 pr_warn("error on deleting a probe.\n");
1536 warn++; 1536 warn++;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 52478f033f88..d59357308677 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -623,92 +623,6 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
623 kfree(arg->comm); 623 kfree(arg->comm);
624} 624}
625 625
626int traceprobe_command(const char *buf, int (*createfn)(int, char **))
627{
628 char **argv;
629 int argc, ret;
630
631 argc = 0;
632 ret = 0;
633 argv = argv_split(GFP_KERNEL, buf, &argc);
634 if (!argv)
635 return -ENOMEM;
636
637 if (argc)
638 ret = createfn(argc, argv);
639
640 argv_free(argv);
641
642 return ret;
643}
644
645#define WRITE_BUFSIZE 4096
646
647ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
648 size_t count, loff_t *ppos,
649 int (*createfn)(int, char **))
650{
651 char *kbuf, *buf, *tmp;
652 int ret = 0;
653 size_t done = 0;
654 size_t size;
655
656 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
657 if (!kbuf)
658 return -ENOMEM;
659
660 while (done < count) {
661 size = count - done;
662
663 if (size >= WRITE_BUFSIZE)
664 size = WRITE_BUFSIZE - 1;
665
666 if (copy_from_user(kbuf, buffer + done, size)) {
667 ret = -EFAULT;
668 goto out;
669 }
670 kbuf[size] = '\0';
671 buf = kbuf;
672 do {
673 tmp = strchr(buf, '\n');
674 if (tmp) {
675 *tmp = '\0';
676 size = tmp - buf + 1;
677 } else {
678 size = strlen(buf);
679 if (done + size < count) {
680 if (buf != kbuf)
681 break;
682 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
683 pr_warn("Line length is too long: Should be less than %d\n",
684 WRITE_BUFSIZE - 2);
685 ret = -EINVAL;
686 goto out;
687 }
688 }
689 done += size;
690
691 /* Remove comments */
692 tmp = strchr(buf, '#');
693
694 if (tmp)
695 *tmp = '\0';
696
697 ret = traceprobe_command(buf, createfn);
698 if (ret)
699 goto out;
700 buf += size;
701
702 } while (done < count);
703 }
704 ret = done;
705
706out:
707 kfree(kbuf);
708
709 return ret;
710}
711
712static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, 626static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
713 bool is_return) 627 bool is_return)
714{ 628{
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 903273c93e61..fb66e3eaa192 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -42,7 +42,6 @@
42 42
43#define MAX_TRACE_ARGS 128 43#define MAX_TRACE_ARGS 128
44#define MAX_ARGSTR_LEN 63 44#define MAX_ARGSTR_LEN 63
45#define MAX_EVENT_NAME_LEN 64
46#define MAX_STRING_SIZE PATH_MAX 45#define MAX_STRING_SIZE PATH_MAX
47 46
48/* Reserved field names */ 47/* Reserved field names */
@@ -356,12 +355,6 @@ extern void traceprobe_free_probe_arg(struct probe_arg *arg);
356 355
357extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset); 356extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
358 357
359extern ssize_t traceprobe_probes_write(struct file *file,
360 const char __user *buffer, size_t count, loff_t *ppos,
361 int (*createfn)(int, char**));
362
363extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
364
365/* Sum up total data length for dynamic arraies (strings) */ 358/* Sum up total data length for dynamic arraies (strings) */
366static nokprobe_inline int 359static nokprobe_inline int
367__get_data_size(struct trace_probe *tp, struct pt_regs *regs) 360__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index cd70eb5df38e..11e9daa4a568 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -60,7 +60,7 @@ static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
60 * Test the trace buffer to see if all the elements 60 * Test the trace buffer to see if all the elements
61 * are still sane. 61 * are still sane.
62 */ 62 */
63static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count) 63static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
64{ 64{
65 unsigned long flags, cnt = 0; 65 unsigned long flags, cnt = 0;
66 int cpu, ret = 0; 66 int cpu, ret = 0;
@@ -1151,38 +1151,6 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1151} 1151}
1152#endif /* CONFIG_SCHED_TRACER */ 1152#endif /* CONFIG_SCHED_TRACER */
1153 1153
1154#ifdef CONFIG_CONTEXT_SWITCH_TRACER
1155int
1156trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
1157{
1158 unsigned long count;
1159 int ret;
1160
1161 /* start the tracing */
1162 ret = tracer_init(trace, tr);
1163 if (ret) {
1164 warn_failed_init_tracer(trace, ret);
1165 return ret;
1166 }
1167
1168 /* Sleep for a 1/10 of a second */
1169 msleep(100);
1170 /* stop the tracing. */
1171 tracing_stop();
1172 /* check the trace buffer */
1173 ret = trace_test_buffer(&tr->trace_buffer, &count);
1174 trace->reset(tr);
1175 tracing_start();
1176
1177 if (!ret && !count) {
1178 printk(KERN_CONT ".. no entries found ..");
1179 ret = -1;
1180 }
1181
1182 return ret;
1183}
1184#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
1185
1186#ifdef CONFIG_BRANCH_TRACER 1154#ifdef CONFIG_BRANCH_TRACER
1187int 1155int
1188trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) 1156trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 19bcaaac884b..f93a56d2db27 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -625,7 +625,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
625 625
626 perf_trace_buf_submit(rec, size, rctx, 626 perf_trace_buf_submit(rec, size, rctx,
627 sys_data->enter_event->event.type, 1, regs, 627 sys_data->enter_event->event.type, 1, regs,
628 head, NULL, NULL); 628 head, NULL);
629} 629}
630 630
631static int perf_sysenter_enable(struct trace_event_call *call) 631static int perf_sysenter_enable(struct trace_event_call *call)
@@ -721,7 +721,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
721 } 721 }
722 722
723 perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type, 723 perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
724 1, regs, head, NULL, NULL); 724 1, regs, head, NULL);
725} 725}
726 726
727static int perf_sysexit_enable(struct trace_event_call *call) 727static int perf_sysexit_enable(struct trace_event_call *call)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 153c0e411461..40592e7b3568 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -651,7 +651,7 @@ static int probes_open(struct inode *inode, struct file *file)
651static ssize_t probes_write(struct file *file, const char __user *buffer, 651static ssize_t probes_write(struct file *file, const char __user *buffer,
652 size_t count, loff_t *ppos) 652 size_t count, loff_t *ppos)
653{ 653{
654 return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe); 654 return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
655} 655}
656 656
657static const struct file_operations uprobe_events_ops = { 657static const struct file_operations uprobe_events_ops = {
@@ -1155,7 +1155,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
1155 } 1155 }
1156 1156
1157 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, 1157 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1158 head, NULL, NULL); 1158 head, NULL);
1159 out: 1159 out:
1160 preempt_enable(); 1160 preempt_enable();
1161} 1161}
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 305039b122fa..07e75344725b 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -428,7 +428,8 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
428 428
429 if (test_key && test_key == key_hash && entry->val && 429 if (test_key && test_key == key_hash && entry->val &&
430 keys_match(key, entry->val->key, map->key_size)) { 430 keys_match(key, entry->val->key, map->key_size)) {
431 atomic64_inc(&map->hits); 431 if (!lookup_only)
432 atomic64_inc(&map->hits);
432 return entry->val; 433 return entry->val;
433 } 434 }
434 435
diff --git a/kernel/trace/tracing_map.h b/kernel/trace/tracing_map.h
index ab0ca77331d0..5b5bbf8ae550 100644
--- a/kernel/trace/tracing_map.h
+++ b/kernel/trace/tracing_map.h
@@ -6,7 +6,7 @@
6#define TRACING_MAP_BITS_MAX 17 6#define TRACING_MAP_BITS_MAX 17
7#define TRACING_MAP_BITS_MIN 7 7#define TRACING_MAP_BITS_MIN 7
8 8
9#define TRACING_MAP_KEYS_MAX 2 9#define TRACING_MAP_KEYS_MAX 3
10#define TRACING_MAP_VALS_MAX 3 10#define TRACING_MAP_VALS_MAX 3
11#define TRACING_MAP_FIELDS_MAX (TRACING_MAP_KEYS_MAX + \ 11#define TRACING_MAP_FIELDS_MAX (TRACING_MAP_KEYS_MAX + \
12 TRACING_MAP_VALS_MAX) 12 TRACING_MAP_VALS_MAX)