author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-03 13:26:31 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-03 13:26:31 -0400
commit    68114e5eb862ad0a7a261b91497281b026102715 (patch)
tree      0a7296ab83fd6c33f26010c99151445f49042d6a
parent    59ecc26004e77e100c700b1d0da7502b0fdadb46 (diff)
parent    3862807880acc0adaef6749738d210c9f45c3049 (diff)
Merge tag 'trace-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Most of the changes were largely clean ups, and some documentation.
  But there were a few features that were added:

  Uprobes now work with event triggers and multi buffers and have
  support under ftrace and perf.

  The big feature is that the function tracer can now be used within
  the multi buffer instances.  That is, you can now trace some
  functions in one buffer, others in another buffer, all functions in a
  third buffer and so on.  They are basically agnostic from each other.
  This only works for the function tracer and not for the function
  graph trace, although you can have the function graph tracer running
  in the top level buffer (or any tracer for that matter) and have
  different function tracing going on in the sub buffers"

* tag 'trace-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (45 commits)
  tracing: Add BUG_ON when stack end location is over written
  tracepoint: Remove unused API functions
  Revert "tracing: Move event storage for array from macro to standalone function"
  ftrace: Constify ftrace_text_reserved
  tracepoints: API doc update to tracepoint_probe_register() return value
  tracepoints: API doc update to data argument
  ftrace: Fix compilation warning about control_ops_free
  ftrace/x86: BUG when ftrace recovery fails
  ftrace: Warn on error when modifying ftrace function
  ftrace: Remove freelist from struct dyn_ftrace
  ftrace: Do not pass data to ftrace_dyn_arch_init
  ftrace: Pass retval through return in ftrace_dyn_arch_init()
  ftrace: Inline the code from ftrace_dyn_table_alloc()
  ftrace: Cleanup of global variables ftrace_new_pgs and ftrace_update_cnt
  tracing: Evaluate len expression only once in __dynamic_array macro
  tracing: Correctly expand len expressions from __dynamic_array macro
  tracing/module: Replace include of tracepoint.h with jump_label.h in module.h
  tracing: Fix event header migrate.h to include tracepoint.h
  tracing: Fix event header writeback.h to include tracepoint.h
  tracing: Warn if a tracepoint is not set via debugfs
  ...
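The ftrace_ops changes pulled here (the new DELETED flag, the private field, and the per-ops filter files created by ftrace_create_filter_files() in the diff below) all serve the kernel-side callback API. For orientation only, a minimal sketch of a hypothetical module using that API as it stands in this era — this is not code from the merge, and all my_* names are invented:

#include <linux/module.h>
#include <linux/string.h>
#include <linux/ftrace.h>

/* Invoked on entry to every function matched by the filter. */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *regs)
{
	/* ops->private (added by this merge) can carry per-ops state */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static char my_filter[] = "vfs_*";

static int __init my_trace_init(void)
{
	/* Limit the callback to functions matching "vfs_*" */
	ftrace_set_filter(&my_ops, my_filter, strlen(my_filter), 0);
	return register_ftrace_function(&my_ops);
}

static void __exit my_trace_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_trace_init);
module_exit(my_trace_exit);
MODULE_LICENSE("GPL");

The user-visible side of the instances feature is plain debugfs: each directory created under tracing/instances gets its own current_tracer and, with this merge, its own set_ftrace_filter/set_ftrace_notrace, which is what the ftrace_create_filter_files() hunk in kernel/trace/ftrace.c wires up.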
 Documentation/trace/ftrace-design.txt |   5
 arch/arm/kernel/ftrace.c              |   4
 arch/blackfin/kernel/ftrace.c         |   5
 arch/ia64/kernel/ftrace.c             |   4
 arch/metag/kernel/ftrace.c            |   5
 arch/microblaze/kernel/ftrace.c       |   5
 arch/mips/kernel/ftrace.c             |   5
 arch/powerpc/kernel/ftrace.c          |   7
 arch/s390/kernel/ftrace.c             |   3
 arch/sh/kernel/ftrace.c               |   5
 arch/sparc/kernel/ftrace.c            |   6
 arch/tile/kernel/ftrace.c             |   4
 arch/x86/kernel/ftrace.c              |  55
 include/linux/ftrace.h                |  27
 include/linux/ftrace_event.h          |  28
 include/linux/module.h                |   2
 include/linux/tracepoint.h            |  18
 include/trace/events/migrate.h        |   2
 include/trace/events/writeback.h      |   1
 include/trace/ftrace.h                |  38
 kernel/trace/blktrace.c               |   3
 kernel/trace/ftrace.c                 | 162
 kernel/trace/trace.c                  | 187
 kernel/trace/trace.h                  |  38
 kernel/trace/trace_events.c           |  30
 kernel/trace/trace_functions.c        | 143
 kernel/trace/trace_functions_graph.c  |   3
 kernel/trace/trace_irqsoff.c          |  10
 kernel/trace/trace_kprobe.c           |  17
 kernel/trace/trace_nop.c              |   5
 kernel/trace/trace_output.c           |  31
 kernel/trace/trace_probe.h            |  17
 kernel/trace/trace_sched_wakeup.c     |  10
 kernel/trace/trace_stack.c            |   3
 kernel/trace/trace_uprobe.c           | 191
 kernel/tracepoint.c                   | 251
 36 files changed, 739 insertions(+), 591 deletions(-)
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
index 79fcafc7fd64..3f669b9e8852 100644
--- a/Documentation/trace/ftrace-design.txt
+++ b/Documentation/trace/ftrace-design.txt
@@ -358,11 +358,8 @@ Every arch has an init callback function. If you need to do something early on
 to initialize some state, this is the time to do that. Otherwise, this simple
 function below should be sufficient for most people:
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* return value is done indirectly via data */
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
 
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 34e56647dcee..c108ddcb9ba4 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -156,10 +156,8 @@ int ftrace_make_nop(struct module *mod,
 	return ret;
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
index 9277905b82cf..095de0fa044d 100644
--- a/arch/blackfin/kernel/ftrace.c
+++ b/arch/blackfin/kernel/ftrace.c
@@ -65,11 +65,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ftrace_modify_code(ip, call, sizeof(call));
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* return value is done indirectly via data */
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
 
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
index 7fc8c961b1f7..3b0c2aa07857 100644
--- a/arch/ia64/kernel/ftrace.c
+++ b/arch/ia64/kernel/ftrace.c
@@ -198,9 +198,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 }
 
 /* run from kstop_machine */
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c
index a774f321643f..ed1d685157c2 100644
--- a/arch/metag/kernel/ftrace.c
+++ b/arch/metag/kernel/ftrace.c
@@ -117,10 +117,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 }
 
 /* run from kstop_machine */
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* The return code is returned via data */
-	writel(0, data);
-
 	return 0;
 }
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index e8a5e9cf4ed1..bbcd2533766c 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -171,11 +171,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	return ret;
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* The return code is retured via data */
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
 
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 74fe73506d8f..60e7e5e45af1 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -201,7 +201,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ftrace_modify_code(FTRACE_CALL_IP, new);
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
 	/* Encode the instructions when booting */
 	ftrace_dyn_arch_init_insns();
@@ -209,9 +209,6 @@ int __init ftrace_dyn_arch_init(void *data)
 	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
 	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
 
-	/* The return code is retured via data */
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index b0ded97ee4e1..6a014c763cc7 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -532,13 +532,8 @@ void arch_ftrace_update_code(int command)
 		ftrace_disable_ftrace_graph_caller();
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* caller expects data to be zero */
-	unsigned long *p = data;
-
-	*p = 0;
-
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 224db03e9518..54d6493c4a56 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -130,9 +130,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return 0;
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	*(unsigned long *) data = 0;
 	return 0;
 }
 
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 30e13196d35b..3c74f53db6db 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -272,11 +272,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	return ftrace_modify_code(rec->ip, old, new);
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* The return code is retured via data */
-	__raw_writel(0, (unsigned long)data);
-
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 03ab022e51c5..0a2d2ddff543 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -82,12 +82,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ftrace_modify_code(ip, old, new);
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	unsigned long *p = data;
-
-	*p = 0;
-
 	return 0;
 }
 #endif
diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c
index f1c452092eeb..8d52d83cc516 100644
--- a/arch/tile/kernel/ftrace.c
+++ b/arch/tile/kernel/ftrace.c
@@ -167,10 +167,8 @@ int ftrace_make_nop(struct module *mod,
 	return ret;
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index e6253195a301..52819e816f87 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -308,7 +308,10 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
 	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
 		ip = (unsigned long)__va(__pa_symbol(ip));
 
-	return probe_kernel_write((void *)ip, val, size);
+	if (probe_kernel_write((void *)ip, val, size))
+		return -EPERM;
+
+	return 0;
 }
 
 static int add_break(unsigned long ip, const char *old)
@@ -323,10 +326,7 @@ static int add_break(unsigned long ip, const char *old)
 	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
 		return -EINVAL;
 
-	if (ftrace_write(ip, &brk, 1))
-		return -EPERM;
-
-	return 0;
+	return ftrace_write(ip, &brk, 1);
 }
 
 static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -425,7 +425,7 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
 
 	/* If this does not have a breakpoint, we are done */
 	if (ins[0] != brk)
-		return -1;
+		return 0;
 
 	nop = ftrace_nop_replace();
 
@@ -455,7 +455,7 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
 	}
 
  update:
-	return probe_kernel_write((void *)ip, &nop[0], 1);
+	return ftrace_write(ip, nop, 1);
 }
 
 static int add_update_code(unsigned long ip, unsigned const char *new)
@@ -463,9 +463,7 @@ static int add_update_code(unsigned long ip, unsigned const char *new)
 	/* skip breakpoint */
 	ip++;
 	new++;
-	if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
-		return -EPERM;
-	return 0;
+	return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
 }
 
 static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -520,10 +518,7 @@ static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	new = ftrace_call_replace(ip, addr);
 
-	if (ftrace_write(ip, new, 1))
-		return -EPERM;
-
-	return 0;
+	return ftrace_write(ip, new, 1);
 }
 
 static int finish_update_nop(struct dyn_ftrace *rec)
@@ -533,9 +528,7 @@ static int finish_update_nop(struct dyn_ftrace *rec)
 
 	new = ftrace_nop_replace();
 
-	if (ftrace_write(ip, new, 1))
-		return -EPERM;
-	return 0;
+	return ftrace_write(ip, new, 1);
 }
 
 static int finish_update(struct dyn_ftrace *rec, int enable)
@@ -632,8 +625,14 @@ void ftrace_replace_code(int enable)
 	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
 	for_ftrace_rec_iter(iter) {
 		rec = ftrace_rec_iter_record(iter);
-		remove_breakpoint(rec);
+		/*
+		 * Breakpoints are handled only when this function is in
+		 * progress. The system could not work with them.
+		 */
+		if (remove_breakpoint(rec))
+			BUG();
 	}
+	run_sync();
 }
 
 static int
@@ -655,16 +654,19 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
 	run_sync();
 
 	ret = ftrace_write(ip, new_code, 1);
-	if (ret) {
-		ret = -EPERM;
-		goto out;
-	}
-	run_sync();
+	/*
+	 * The breakpoint is handled only when this function is in progress.
+	 * The system could not work if we could not remove it.
+	 */
+	BUG_ON(ret);
  out:
+	run_sync();
 	return ret;
 
  fail_update:
-	probe_kernel_write((void *)ip, &old_code[0], 1);
+	/* Also here the system could not work with the breakpoint */
+	if (ftrace_write(ip, old_code, 1))
+		BUG();
 	goto out;
 }
 
@@ -678,11 +680,8 @@ void arch_ftrace_update_code(int command)
 	atomic_dec(&modifying_ftrace_code);
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* The return code is retured via data */
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
 #endif
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f4233b195dab..9212b017bc72 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -92,6 +92,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * STUB - The ftrace_ops is just a place holder.
  * INITIALIZED - The ftrace_ops has already been initialized (first use time
  *               register_ftrace_function() is called, it will initialized the ops)
+ * DELETED - The ops are being deleted, do not let them be registered again.
  */
 enum {
 	FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -103,13 +104,26 @@ enum {
 	FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
 	FTRACE_OPS_FL_STUB = 1 << 7,
 	FTRACE_OPS_FL_INITIALIZED = 1 << 8,
+	FTRACE_OPS_FL_DELETED = 1 << 9,
 };
 
+/*
+ * Note, ftrace_ops can be referenced outside of RCU protection.
+ * (Although, for perf, the control ops prevent that). If ftrace_ops is
+ * allocated and not part of kernel core data, the unregistering of it will
+ * perform a scheduling on all CPUs to make sure that there are no more users.
+ * Depending on the load of the system that may take a bit of time.
+ *
+ * Any private data added must also take care not to be freed and if private
+ * data is added to a ftrace_ops that is in core code, the user of the
+ * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
+ */
 struct ftrace_ops {
 	ftrace_func_t func;
 	struct ftrace_ops *next;
 	unsigned long flags;
 	int __percpu *disabled;
+	void *private;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash *notrace_hash;
 	struct ftrace_hash *filter_hash;
@@ -285,7 +299,7 @@ extern void
 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
 extern void unregister_ftrace_function_probe_all(char *glob);
 
-extern int ftrace_text_reserved(void *start, void *end);
+extern int ftrace_text_reserved(const void *start, const void *end);
 
 extern int ftrace_nr_registered_ops(void);
 
@@ -316,12 +330,9 @@ enum {
 #define FTRACE_REF_MAX ((1UL << 29) - 1)
 
 struct dyn_ftrace {
-	union {
-		unsigned long ip; /* address of mcount call-site */
-		struct dyn_ftrace *freelist;
-	};
+	unsigned long ip; /* address of mcount call-site */
 	unsigned long flags;
 	struct dyn_arch_ftrace arch;
 };
 
 int ftrace_force_update(void);
@@ -409,7 +420,7 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
 
 /* defined in arch */
 extern int ftrace_ip_converted(unsigned long ip);
-extern int ftrace_dyn_arch_init(void *data);
+extern int ftrace_dyn_arch_init(void);
 extern void ftrace_replace_code(int enable);
 extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
@@ -541,7 +552,7 @@ static inline __init int unregister_ftrace_command(char *cmd_name)
 {
 	return -EINVAL;
 }
-static inline int ftrace_text_reserved(void *start, void *end)
+static inline int ftrace_text_reserved(const void *start, const void *end)
 {
 	return 0;
 }
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4cdb3a17bcb5..cdc30111d2f8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -163,6 +163,8 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 
 void tracing_record_cmdline(struct task_struct *tsk);
 
+int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+
 struct event_filter;
 
 enum trace_reg {
@@ -197,6 +199,32 @@ struct ftrace_event_class {
 extern int ftrace_event_reg(struct ftrace_event_call *event,
 			    enum trace_reg type, void *data);
 
+int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
+			char *fmt, ...);
+
+int ftrace_event_define_field(struct ftrace_event_call *call,
+			      char *type, int len, char *item, int offset,
+			      int field_size, int sign, int filter);
+
+struct ftrace_event_buffer {
+	struct ring_buffer *buffer;
+	struct ring_buffer_event *event;
+	struct ftrace_event_file *ftrace_file;
+	void *entry;
+	unsigned long flags;
+	int pc;
+};
+
+void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
+				  struct ftrace_event_file *ftrace_file,
+				  unsigned long len);
+
+void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
+
+int ftrace_event_define_field(struct ftrace_event_call *call,
+			      char *type, int len, char *item, int offset,
+			      int field_size, int sign, int filter);
+
 enum {
 	TRACE_EVENT_FL_FILTERED_BIT,
 	TRACE_EVENT_FL_CAP_ANY_BIT,
diff --git a/include/linux/module.h b/include/linux/module.h
index eaf60ff9ba94..5a5053975114 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -15,7 +15,7 @@
 #include <linux/stringify.h>
 #include <linux/kobject.h>
 #include <linux/moduleparam.h>
-#include <linux/tracepoint.h>
+#include <linux/jump_label.h>
 #include <linux/export.h>
 
 #include <linux/percpu.h>
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 7159a0a933df..812b2553dfd8 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -48,12 +48,6 @@ extern int tracepoint_probe_register(const char *name, void *probe, void *data);
 extern int
 tracepoint_probe_unregister(const char *name, void *probe, void *data);
 
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
-					      void *data);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
-						void *data);
-extern void tracepoint_probe_update_all(void);
-
 #ifdef CONFIG_MODULES
 struct tp_module {
 	struct list_head list;
@@ -68,18 +62,6 @@ static inline bool trace_module_has_bad_taint(struct module *mod)
 }
 #endif /* CONFIG_MODULES */
 
-struct tracepoint_iter {
-#ifdef CONFIG_MODULES
-	struct tp_module *module;
-#endif /* CONFIG_MODULES */
-	struct tracepoint * const *tracepoint;
-};
-
-extern void tracepoint_iter_start(struct tracepoint_iter *iter);
-extern void tracepoint_iter_next(struct tracepoint_iter *iter);
-extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
-extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-
 /*
  * tracepoint_synchronize_unregister must be called between the last tracepoint
  * probe unregistration and the end of module exit to make sure there is no
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 3075ffbb9a83..4e4f2f8b1ac2 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -4,6 +4,8 @@
 #if !defined(_TRACE_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_MIGRATE_H
 
+#include <linux/tracepoint.h>
+
 #define MIGRATE_MODE \
 	{MIGRATE_ASYNC, "MIGRATE_ASYNC"}, \
 	{MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT"}, \
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 464ea82e10db..cee02d65ab3f 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -4,6 +4,7 @@
 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_WRITEBACK_H
 
+#include <linux/tracepoint.h>
 #include <linux/backing-dev.h>
 #include <linux/writeback.h>
 
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1ee19a24cc5f..8765126b328c 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -265,11 +265,9 @@ static notrace enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 			 struct trace_event *event) \
 { \
-	struct trace_seq *s = &iter->seq; \
 	struct ftrace_raw_##template *field; \
 	struct trace_entry *entry; \
 	struct trace_seq *p = &iter->tmp_seq; \
-	int ret; \
 	\
 	entry = iter->ent; \
 	\
@@ -281,13 +279,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	field = (typeof(field))entry; \
 	\
 	trace_seq_init(p); \
-	ret = trace_seq_printf(s, "%s: ", #call); \
-	if (ret) \
-		ret = trace_seq_printf(s, print); \
-	if (!ret) \
-		return TRACE_TYPE_PARTIAL_LINE; \
-	\
-	return TRACE_TYPE_HANDLED; \
+	return ftrace_output_call(iter, #call, print); \
 } \
 static struct trace_event_functions ftrace_event_type_funcs_##call = { \
 	.trace = ftrace_raw_output_##call, \
@@ -370,10 +362,11 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 
 #undef __dynamic_array
 #define __dynamic_array(type, item, len) \
+	__item_length = (len) * sizeof(type); \
 	__data_offsets->item = __data_size + \
 		offsetof(typeof(*entry), __data); \
-	__data_offsets->item |= (len * sizeof(type)) << 16; \
-	__data_size += (len) * sizeof(type);
+	__data_offsets->item |= __item_length << 16; \
+	__data_size += __item_length;
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, \
@@ -385,6 +378,7 @@ static inline notrace int ftrace_get_offsets_##call( \
 	struct ftrace_data_offsets_##call *__data_offsets, proto) \
 { \
 	int __data_size = 0; \
+	int __maybe_unused __item_length; \
 	struct ftrace_raw_##call __maybe_unused *entry; \
 	\
 	tstruct; \
@@ -541,37 +535,27 @@ static notrace void \
 ftrace_raw_event_##call(void *__data, proto) \
 { \
 	struct ftrace_event_file *ftrace_file = __data; \
-	struct ftrace_event_call *event_call = ftrace_file->event_call; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	struct ring_buffer_event *event; \
+	struct ftrace_event_buffer fbuffer; \
 	struct ftrace_raw_##call *entry; \
-	struct ring_buffer *buffer; \
-	unsigned long irq_flags; \
 	int __data_size; \
-	int pc; \
 	\
 	if (ftrace_trigger_soft_disabled(ftrace_file)) \
 		return; \
 	\
-	local_save_flags(irq_flags); \
-	pc = preempt_count(); \
-	\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	\
-	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, \
-					event_call->event.type, \
-					sizeof(*entry) + __data_size, \
-					irq_flags, pc); \
-	if (!event) \
+	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
+				 sizeof(*entry) + __data_size); \
+	\
+	if (!entry) \
 		return; \
-	entry = ring_buffer_event_data(event); \
 	\
 	tstruct \
 	\
 	{ assign; } \
 	\
-	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, \
-					irq_flags, pc); \
+	ftrace_event_buffer_commit(&fbuffer); \
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 4f3a3c03eadb..c1bd4ada2a04 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1429,7 +1429,8 @@ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
 	return print_one_line(iter, true);
 }
 
-static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
+static int
+blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	/* don't output context-info for blk_classic output */
 	if (bit == TRACE_BLK_OPT_CLASSIC) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cd7f76d1eb86..1fd4b9479210 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -237,14 +237,13 @@ static int control_ops_alloc(struct ftrace_ops *ops)
 	return 0;
 }
 
-static void control_ops_free(struct ftrace_ops *ops)
-{
-	free_percpu(ops->disabled);
-}
-
 static void update_global_ops(void)
 {
-	ftrace_func_t func;
+	ftrace_func_t func = ftrace_global_list_func;
+	void *private = NULL;
+
+	/* The list has its own recursion protection. */
+	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
 
 	/*
 	 * If there's only one function registered, then call that
@@ -254,23 +253,17 @@ static void update_global_ops(void)
 	if (ftrace_global_list == &ftrace_list_end ||
 	    ftrace_global_list->next == &ftrace_list_end) {
 		func = ftrace_global_list->func;
+		private = ftrace_global_list->private;
 		/*
 		 * As we are calling the function directly.
 		 * If it does not have recursion protection,
 		 * the function_trace_op needs to be updated
 		 * accordingly.
 		 */
-		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
-			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
-		else
+		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
 			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
-	} else {
-		func = ftrace_global_list_func;
-		/* The list has its own recursion protection. */
-		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
 	}
 
-
 	/* If we filter on pids, update to use the pid function */
 	if (!list_empty(&ftrace_pids)) {
 		set_ftrace_pid_function(func);
@@ -278,6 +271,7 @@ static void update_global_ops(void)
 	}
 
 	global_ops.func = func;
+	global_ops.private = private;
 }
 
 static void ftrace_sync(struct work_struct *work)
@@ -437,6 +431,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
+	if (ops->flags & FTRACE_OPS_FL_DELETED)
+		return -EINVAL;
+
 	if (FTRACE_WARN_ON(ops == &global_ops))
 		return -EINVAL;
 
@@ -1172,8 +1169,6 @@ struct ftrace_page {
 	int size;
 };
 
-static struct ftrace_page *ftrace_new_pgs;
-
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
@@ -1560,7 +1555,7 @@ unsigned long ftrace_location(unsigned long ip)
  * the function tracer. It checks the ftrace internal tables to
  * determine if the address belongs or not.
  */
-int ftrace_text_reserved(void *start, void *end)
+int ftrace_text_reserved(const void *start, const void *end)
 {
 	unsigned long ret;
 
@@ -1994,6 +1989,7 @@ int __weak ftrace_arch_code_modify_post_process(void)
 void ftrace_modify_all_code(int command)
 {
 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
+	int err = 0;
 
 	/*
 	 * If the ftrace_caller calls a ftrace_ops func directly,
@@ -2005,8 +2001,11 @@ void ftrace_modify_all_code(int command)
 	 * to make sure the ops are having the right functions
 	 * traced.
 	 */
-	if (update)
-		ftrace_update_ftrace_func(ftrace_ops_list_func);
+	if (update) {
+		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+		if (FTRACE_WARN_ON(err))
+			return;
+	}
 
 	if (command & FTRACE_UPDATE_CALLS)
 		ftrace_replace_code(1);
@@ -2019,13 +2018,16 @@ void ftrace_modify_all_code(int command)
 		/* If irqs are disabled, we are in stop machine */
 		if (!irqs_disabled())
 			smp_call_function(ftrace_sync_ipi, NULL, 1);
-		ftrace_update_ftrace_func(ftrace_trace_function);
+		err = ftrace_update_ftrace_func(ftrace_trace_function);
+		if (FTRACE_WARN_ON(err))
+			return;
 	}
 
 	if (command & FTRACE_START_FUNC_RET)
-		ftrace_enable_ftrace_graph_caller();
+		err = ftrace_enable_ftrace_graph_caller();
 	else if (command & FTRACE_STOP_FUNC_RET)
-		ftrace_disable_ftrace_graph_caller();
+		err = ftrace_disable_ftrace_graph_caller();
+	FTRACE_WARN_ON(err);
 }
 
 static int __ftrace_modify_code(void *data)
@@ -2093,6 +2095,11 @@ static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 static int global_start_up;
 
+static void control_ops_free(struct ftrace_ops *ops)
+{
+	free_percpu(ops->disabled);
+}
+
 static void ftrace_startup_enable(int command)
 {
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -2244,7 +2251,6 @@ static void ftrace_shutdown_sysctl(void)
 }
 
 static cycle_t ftrace_update_time;
-static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
 static inline int ops_traces_mod(struct ftrace_ops *ops)
@@ -2300,11 +2306,12 @@ static int referenced_filters(struct dyn_ftrace *rec)
 	return cnt;
 }
 
-static int ftrace_update_code(struct module *mod)
+static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
+	unsigned long update_cnt = 0;
 	unsigned long ref = 0;
 	bool test = false;
 	int i;
@@ -2330,9 +2337,8 @@ static int ftrace_update_code(struct module *mod)
 	}
 
 	start = ftrace_now(raw_smp_processor_id());
-	ftrace_update_cnt = 0;
 
-	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
+	for (pg = new_pgs; pg; pg = pg->next) {
 
 		for (i = 0; i < pg->index; i++) {
 			int cnt = ref;
@@ -2353,7 +2359,7 @@ static int ftrace_update_code(struct module *mod)
 			if (!ftrace_code_disable(mod, p))
 				break;
 
-			ftrace_update_cnt++;
+			update_cnt++;
 
 			/*
 			 * If the tracing is enabled, go ahead and enable the record.
@@ -2372,11 +2378,9 @@ static int ftrace_update_code(struct module *mod)
 		}
 	}
 
-	ftrace_new_pgs = NULL;
-
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
-	ftrace_update_tot_cnt += ftrace_update_cnt;
+	ftrace_update_tot_cnt += update_cnt;
 
 	return 0;
 }
@@ -2468,22 +2472,6 @@ ftrace_allocate_pages(unsigned long num_to_init)
 	return NULL;
 }
 
-static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
-{
-	int cnt;
-
-	if (!num_to_init) {
-		pr_info("ftrace: No functions to be traced?\n");
-		return -1;
-	}
-
-	cnt = num_to_init / ENTRIES_PER_PAGE;
-	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt + 1);
-
-	return 0;
-}
-
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
@@ -2871,7 +2859,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 static int
 ftrace_filter_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&global_ops,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops,
 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
 			inode, file);
 }
@@ -2879,7 +2869,9 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 static int
 ftrace_notrace_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
 			inode, file);
 }
 
@@ -4109,6 +4101,36 @@ static const struct file_operations ftrace_graph_notrace_fops = {
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+				struct dentry *parent)
+{
+
+	trace_create_file("set_ftrace_filter", 0644, parent,
+			  ops, &ftrace_filter_fops);
+
+	trace_create_file("set_ftrace_notrace", 0644, parent,
+			  ops, &ftrace_notrace_fops);
+}
+
+/*
+ * The name "destroy_filter_files" is really a misnomer. Although
+ * in the future, it may actualy delete the files, but this is
+ * really intended to make sure the ops passed in are disabled
+ * and that when this function returns, the caller is free to
+ * free the ops.
+ *
+ * The "destroy" name is only to match the "create" name that this
+ * should be paired with.
+ */
+void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+{
+	mutex_lock(&ftrace_lock);
+	if (ops->flags & FTRACE_OPS_FL_ENABLED)
+		ftrace_shutdown(ops, 0);
+	ops->flags |= FTRACE_OPS_FL_DELETED;
+	mutex_unlock(&ftrace_lock);
+}
+
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
 
@@ -4118,11 +4140,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 	trace_create_file("enabled_functions", 0444,
 			d_tracer, NULL, &ftrace_enabled_fops);
 
-	trace_create_file("set_ftrace_filter", 0644, d_tracer,
-			NULL, &ftrace_filter_fops);
-
-	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
-			NULL, &ftrace_notrace_fops);
+	ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	trace_create_file("set_graph_function", 0444, d_tracer,
@@ -4238,9 +4256,6 @@ static int ftrace_process_locs(struct module *mod,
 	/* Assign the last page to ftrace_pages */
 	ftrace_pages = pg;
 
-	/* These new locations need to be initialized */
-	ftrace_new_pgs = start_pg;
-
 	/*
 	 * We only need to disable interrupts on start up
 	 * because we are modifying code that an interrupt
@@ -4251,7 +4266,7 @@ static int ftrace_process_locs(struct module *mod,
 	 */
 	if (!mod)
 		local_irq_save(flags);
-	ftrace_update_code(mod);
+	ftrace_update_code(mod, start_pg);
 	if (!mod)
 		local_irq_restore(flags);
 	ret = 0;
@@ -4360,30 +4375,27 @@ struct notifier_block ftrace_module_exit_nb = {
 	.priority = INT_MIN, /* Run after anything that can remove kprobes */
 };
 
-extern unsigned long __start_mcount_loc[];
-extern unsigned long __stop_mcount_loc[];
-
 void __init ftrace_init(void)
 {
-	unsigned long count, addr, flags;
+	extern unsigned long __start_mcount_loc[];
+	extern unsigned long __stop_mcount_loc[];
+	unsigned long count, flags;
 	int ret;
 
-	/* Keep the ftrace pointer to the stub */
-	addr = (unsigned long)ftrace_stub;
-
 	local_irq_save(flags);
-	ftrace_dyn_arch_init(&addr);
+	ret = ftrace_dyn_arch_init();
 	local_irq_restore(flags);
-
-	/* ftrace_dyn_arch_init places the return code in addr */
-	if (addr)
+	if (ret)
 		goto failed;
 
 	count = __stop_mcount_loc - __start_mcount_loc;
-
-	ret = ftrace_dyn_table_alloc(count);
-	if (ret)
+	if (!count) {
+		pr_info("ftrace: No functions to be traced?\n");
 		goto failed;
+	}
+
+	pr_info("ftrace: allocating %ld entries in %ld pages\n",
+		count, count / ENTRIES_PER_PAGE + 1);
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
@@ -4431,7 +4443,13 @@ static inline void ftrace_startup_enable(int command) { }
 		(ops)->flags |= FTRACE_OPS_FL_ENABLED; \
 		___ret; \
 	})
-# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
+# define ftrace_shutdown(ops, command) \
+	({ \
+		int ___ret = __unregister_ftrace_function(ops); \
+		if (!___ret) \
+			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
+		___ret; \
+	})
 
 # define ftrace_startup_sysctl() do { } while (0)
 # define ftrace_shutdown_sysctl() do { } while (0)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 24c1f2382557..9be67c5e5b0f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -73,7 +73,8 @@ static struct tracer_flags dummy_tracer_flags = {
 	.opts = dummy_tracer_opt
 };
 
-static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+static int
+dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	return 0;
 }
@@ -118,7 +119,7 @@ enum ftrace_dump_mode ftrace_dump_on_oops;
 /* When set, tracing will stop when a WARN*() is hit */
 int __disable_trace_on_warning;
 
-static int tracing_set_tracer(const char *buf);
+static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 
 #define MAX_TRACER_SIZE 100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -180,6 +181,17 @@ static int __init set_trace_boot_options(char *str)
 }
 __setup("trace_options=", set_trace_boot_options);
 
+static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_clock __initdata;
+
+static int __init set_trace_boot_clock(char *str)
+{
+	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
+	trace_boot_clock = trace_boot_clock_buf;
+	return 0;
+}
+__setup("trace_clock=", set_trace_boot_clock);
+
 
 unsigned long long ns2usecs(cycle_t nsec)
 {
@@ -1230,7 +1242,7 @@ int register_tracer(struct tracer *type)
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
 	/* Do we want this tracer to start on bootup? */
-	tracing_set_tracer(type->name);
+	tracing_set_tracer(&global_trace, type->name);
 	default_bootup_tracer = NULL;
 	/* disable other selftests, since this will break it. */
 	tracing_selftest_disabled = true;
@@ -3137,27 +3149,52 @@ static int tracing_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
+/*
+ * Some tracers are not suitable for instance buffers.
+ * A tracer is always available for the global array (toplevel)
+ * or if it explicitly states that it is.
+ */
+static bool
+trace_ok_for_array(struct tracer *t, struct trace_array *tr)
+{
+	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
+}
+
+/* Find the next tracer that this trace array may use */
+static struct tracer *
+get_tracer_for_array(struct trace_array *tr, struct tracer *t)
+{
+	while (t && !trace_ok_for_array(t, tr))
+		t = t->next;
+
+	return t;
+}
+
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
+	struct trace_array *tr = m->private;
 	struct tracer *t = v;
 
 	(*pos)++;
 
 	if (t)
-		t = t->next;
+		t = get_tracer_for_array(tr, t->next);
 
 	return t;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	struct trace_array *tr = m->private;
 	struct tracer *t;
 	loff_t l = 0;
 
 	mutex_lock(&trace_types_lock);
-	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
-		;
+
+	t = get_tracer_for_array(tr, trace_types);
+	for (; t && l < *pos; t = t_next(m, t, &l))
+		;
 
 	return t;
 }
@@ -3192,10 +3229,21 @@ static const struct seq_operations show_traces_seq_ops = {
 
 static int show_traces_open(struct inode *inode, struct file *file)
 {
+	struct trace_array *tr = inode->i_private;
+	struct seq_file *m;
+	int ret;
+
 	if (tracing_disabled)
 		return -ENODEV;
 
-	return seq_open(file, &show_traces_seq_ops);
+	ret = seq_open(file, &show_traces_seq_ops);
+	if (ret)
+		return ret;
+
+	m = file->private_data;
+	m->private = tr;
+
+	return 0;
 }
 
 static ssize_t
@@ -3355,13 +3403,14 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static int __set_tracer_option(struct tracer *trace,
+static int __set_tracer_option(struct trace_array *tr,
 			       struct tracer_flags *tracer_flags,
 			       struct tracer_opt *opts, int neg)
 {
+	struct tracer *trace = tr->current_trace;
 	int ret;
 
-	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
 	if (ret)
 		return ret;
 
@@ -3373,8 +3422,9 @@ static int __set_tracer_option(struct tracer *trace,
 }
 
 /* Try to assign a tracer specific option */
-static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
 {
+	struct tracer *trace = tr->current_trace;
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
 	int i;
@@ -3383,8 +3433,7 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 		opts = &tracer_flags->opts[i];
 
 		if (strcmp(cmp, opts->name) == 0)
-			return __set_tracer_option(trace, trace->flags,
-						   opts, neg);
+			return __set_tracer_option(tr, trace->flags, opts, neg);
 	}
 
 	return -EINVAL;
@@ -3407,7 +3456,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 
 	/* Give the tracer a chance to approve the change */
 	if (tr->current_trace->flag_changed)
-		if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
+		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
 			return -EINVAL;
 
 	if (enabled)
@@ -3456,7 +3505,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
 
 	/* If no option could be set, test the specific tracer options */
 	if (!trace_options[i])
-		ret = set_tracer_option(tr->current_trace, cmp, neg);
+		ret = set_tracer_option(tr, cmp, neg);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -3885,10 +3934,26 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
 static void
 destroy_trace_option_files(struct trace_option_dentry *topts);
 
-static int tracing_set_tracer(const char *buf)
+/*
+ * Used to clear out the tracer before deletion of an instance.
+ * Must have trace_types_lock held.
+ */
+static void tracing_set_nop(struct trace_array *tr)
+{
+	if (tr->current_trace == &nop_trace)
+		return;
+
+	tr->current_trace->enabled--;
+
+	if (tr->current_trace->reset)
+		tr->current_trace->reset(tr);
+
+	tr->current_trace = &nop_trace;
+}
+
+static int tracing_set_tracer(struct trace_array *tr, const char *buf)
 {
 	static struct trace_option_dentry *topts;
-	struct trace_array *tr = &global_trace;
 	struct tracer *t;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	bool had_max_tr;
@@ -3916,9 +3981,15 @@ static int tracing_set_tracer(const char *buf)
 	if (t == tr->current_trace)
 		goto out;
 
+	/* Some tracers are only allowed for the top level buffer */
+	if (!trace_ok_for_array(t, tr)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_branch_disable();
 
-	tr->current_trace->enabled = false;
+	tr->current_trace->enabled--;
 
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
@@ -3941,9 +4012,11 @@ static int tracing_set_tracer(const char *buf)
3941 free_snapshot(tr); 4012 free_snapshot(tr);
3942 } 4013 }
3943#endif 4014#endif
3944 destroy_trace_option_files(topts); 4015 /* Currently, only the top instance has options */
3945 4016 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
3946 topts = create_trace_option_files(tr, t); 4017 destroy_trace_option_files(topts);
4018 topts = create_trace_option_files(tr, t);
4019 }
3947 4020
3948#ifdef CONFIG_TRACER_MAX_TRACE 4021#ifdef CONFIG_TRACER_MAX_TRACE
3949 if (t->use_max_tr && !had_max_tr) { 4022 if (t->use_max_tr && !had_max_tr) {
@@ -3960,7 +4033,7 @@ static int tracing_set_tracer(const char *buf)
3960 } 4033 }
3961 4034
3962 tr->current_trace = t; 4035 tr->current_trace = t;
3963 tr->current_trace->enabled = true; 4036 tr->current_trace->enabled++;
3964 trace_branch_enable(tr); 4037 trace_branch_enable(tr);
3965 out: 4038 out:
3966 mutex_unlock(&trace_types_lock); 4039 mutex_unlock(&trace_types_lock);
@@ -3972,6 +4045,7 @@ static ssize_t
3972tracing_set_trace_write(struct file *filp, const char __user *ubuf, 4045tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3973 size_t cnt, loff_t *ppos) 4046 size_t cnt, loff_t *ppos)
3974{ 4047{
4048 struct trace_array *tr = filp->private_data;
3975 char buf[MAX_TRACER_SIZE+1]; 4049 char buf[MAX_TRACER_SIZE+1];
3976 int i; 4050 int i;
3977 size_t ret; 4051 size_t ret;
@@ -3991,7 +4065,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3991 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) 4065 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3992 buf[i] = 0; 4066 buf[i] = 0;
3993 4067
3994 err = tracing_set_tracer(buf); 4068 err = tracing_set_tracer(tr, buf);
3995 if (err) 4069 if (err)
3996 return err; 4070 return err;
3997 4071
@@ -4699,25 +4773,10 @@ static int tracing_clock_show(struct seq_file *m, void *v)
4699 return 0; 4773 return 0;
4700} 4774}
4701 4775
4702static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 4776static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4703 size_t cnt, loff_t *fpos)
4704{ 4777{
4705 struct seq_file *m = filp->private_data;
4706 struct trace_array *tr = m->private;
4707 char buf[64];
4708 const char *clockstr;
4709 int i; 4778 int i;
4710 4779
4711 if (cnt >= sizeof(buf))
4712 return -EINVAL;
4713
4714 if (copy_from_user(&buf, ubuf, cnt))
4715 return -EFAULT;
4716
4717 buf[cnt] = 0;
4718
4719 clockstr = strstrip(buf);
4720
4721 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 4780 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4722 if (strcmp(trace_clocks[i].name, clockstr) == 0) 4781 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4723 break; 4782 break;
@@ -4745,6 +4804,32 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4745 4804
4746 mutex_unlock(&trace_types_lock); 4805 mutex_unlock(&trace_types_lock);
4747 4806
4807 return 0;
4808}
4809
4810static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4811 size_t cnt, loff_t *fpos)
4812{
4813 struct seq_file *m = filp->private_data;
4814 struct trace_array *tr = m->private;
4815 char buf[64];
4816 const char *clockstr;
4817 int ret;
4818
4819 if (cnt >= sizeof(buf))
4820 return -EINVAL;
4821
4822 if (copy_from_user(&buf, ubuf, cnt))
4823 return -EFAULT;
4824
4825 buf[cnt] = 0;
4826
4827 clockstr = strstrip(buf);
4828
4829 ret = tracing_set_clock(tr, clockstr);
4830 if (ret)
4831 return ret;
4832
4748 *fpos += cnt; 4833 *fpos += cnt;
4749 4834
4750 return cnt; 4835 return cnt;
@@ -5705,7 +5790,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5705 5790
5706 if (!!(topt->flags->val & topt->opt->bit) != val) { 5791 if (!!(topt->flags->val & topt->opt->bit) != val) {
5707 mutex_lock(&trace_types_lock); 5792 mutex_lock(&trace_types_lock);
5708 ret = __set_tracer_option(topt->tr->current_trace, topt->flags, 5793 ret = __set_tracer_option(topt->tr, topt->flags,
5709 topt->opt, !val); 5794 topt->opt, !val);
5710 mutex_unlock(&trace_types_lock); 5795 mutex_unlock(&trace_types_lock);
5711 if (ret) 5796 if (ret)
@@ -6112,7 +6197,9 @@ static int instance_delete(const char *name)
6112 6197
6113 list_del(&tr->list); 6198 list_del(&tr->list);
6114 6199
6200 tracing_set_nop(tr);
6115 event_trace_del_tracer(tr); 6201 event_trace_del_tracer(tr);
6202 ftrace_destroy_function_files(tr);
6116 debugfs_remove_recursive(tr->dir); 6203 debugfs_remove_recursive(tr->dir);
6117 free_percpu(tr->trace_buffer.data); 6204 free_percpu(tr->trace_buffer.data);
6118 ring_buffer_free(tr->trace_buffer.buffer); 6205 ring_buffer_free(tr->trace_buffer.buffer);
@@ -6207,6 +6294,12 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6207{ 6294{
6208 int cpu; 6295 int cpu;
6209 6296
6297 trace_create_file("available_tracers", 0444, d_tracer,
6298 tr, &show_traces_fops);
6299
6300 trace_create_file("current_tracer", 0644, d_tracer,
6301 tr, &set_tracer_fops);
6302
6210 trace_create_file("tracing_cpumask", 0644, d_tracer, 6303 trace_create_file("tracing_cpumask", 0644, d_tracer,
6211 tr, &tracing_cpumask_fops); 6304 tr, &tracing_cpumask_fops);
6212 6305
@@ -6237,6 +6330,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6237 trace_create_file("tracing_on", 0644, d_tracer, 6330 trace_create_file("tracing_on", 0644, d_tracer,
6238 tr, &rb_simple_fops); 6331 tr, &rb_simple_fops);
6239 6332
6333 if (ftrace_create_function_files(tr, d_tracer))
6334 WARN(1, "Could not allocate function filter files");
6335
6240#ifdef CONFIG_TRACER_SNAPSHOT 6336#ifdef CONFIG_TRACER_SNAPSHOT
6241 trace_create_file("snapshot", 0644, d_tracer, 6337 trace_create_file("snapshot", 0644, d_tracer,
6242 tr, &snapshot_fops); 6338 tr, &snapshot_fops);
@@ -6259,12 +6355,6 @@ static __init int tracer_init_debugfs(void)
6259 6355
6260 init_tracer_debugfs(&global_trace, d_tracer); 6356 init_tracer_debugfs(&global_trace, d_tracer);
6261 6357
6262 trace_create_file("available_tracers", 0444, d_tracer,
6263 &global_trace, &show_traces_fops);
6264
6265 trace_create_file("current_tracer", 0644, d_tracer,
6266 &global_trace, &set_tracer_fops);
6267
6268#ifdef CONFIG_TRACER_MAX_TRACE 6358#ifdef CONFIG_TRACER_MAX_TRACE
6269 trace_create_file("tracing_max_latency", 0644, d_tracer, 6359 trace_create_file("tracing_max_latency", 0644, d_tracer,
6270 &tracing_max_latency, &tracing_max_lat_fops); 6360 &tracing_max_latency, &tracing_max_lat_fops);
@@ -6527,6 +6617,13 @@ __init static int tracer_alloc_buffers(void)
6527 6617
6528 trace_init_cmdlines(); 6618 trace_init_cmdlines();
6529 6619
6620 if (trace_boot_clock) {
6621 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6622 if (ret < 0)
6623 pr_warning("Trace clock %s not defined, going back to default\n",
6624 trace_boot_clock);
6625 }
6626
6530 /* 6627 /*
6531 * register_tracer() might reference current_trace, so it 6628 * register_tracer() might reference current_trace, so it
6532 * needs to be set before we register anything. This is 6629 * needs to be set before we register anything. This is
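
The tracing_clock_write() split in the hunks above exists so the boot parameter path in tracer_alloc_buffers() can reuse the same clock lookup without a struct file. A minimal userspace sketch of that separation — parse-and-strip in the write handler, table lookup in the setter — with every name below invented for illustration, not kernel API:

    #include <stdio.h>
    #include <string.h>

    /* illustrative stand-in for the kernel's trace_clocks[] table */
    static const char *clock_names[] = { "local", "global", "counter", "uptime", "perf" };

    /* plays the role of tracing_set_clock(): pure lookup, no user I/O */
    static int set_clock(const char *name)
    {
    	size_t i;

    	for (i = 0; i < sizeof(clock_names) / sizeof(clock_names[0]); i++)
    		if (strcmp(clock_names[i], name) == 0)
    			return 0;	/* found: the kernel switches ring-buffer clocks here */
    	return -1;			/* corresponds to -EINVAL in the kernel */
    }

    /* plays the role of tracing_clock_write(): copy, strip, then delegate */
    static int clock_write(const char *user_buf)
    {
    	char buf[64];

    	snprintf(buf, sizeof(buf), "%s", user_buf);
    	buf[strcspn(buf, "\n")] = '\0';	/* strstrip() equivalent for this sketch */
    	return set_clock(buf);
    }

    int main(void)
    {
    	printf("%d %d\n", clock_write("global\n"), clock_write("bogus\n"));
    	return 0;
    }

Keeping the setter free of copy_from_user() is what lets both the debugfs write path and the early-boot path call it.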
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 02b592f2d4b7..ffc314b7e92b 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -210,6 +210,11 @@ struct trace_array {
 	struct list_head	events;
 	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 	int			ref;
+#ifdef CONFIG_FUNCTION_TRACER
+	struct ftrace_ops	*ops;
+	/* function tracing enabled */
+	int			function_enabled;
+#endif
 };
 
 enum {
@@ -355,14 +360,16 @@ struct tracer {
 	void			(*print_header)(struct seq_file *m);
 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
 	/* If you handled the flag setting, return 0 */
-	int			(*set_flag)(u32 old_flags, u32 bit, int set);
+	int			(*set_flag)(struct trace_array *tr,
+					    u32 old_flags, u32 bit, int set);
 	/* Return 0 if OK with change, else return non-zero */
-	int			(*flag_changed)(struct tracer *tracer,
+	int			(*flag_changed)(struct trace_array *tr,
 						u32 mask, int set);
 	struct tracer		*next;
 	struct tracer_flags	*flags;
+	int			enabled;
 	bool			print_max;
-	bool			enabled;
+	bool			allow_instances;
 #ifdef CONFIG_TRACER_MAX_TRACE
 	bool			use_max_tr;
 #endif
@@ -812,13 +819,36 @@ static inline int ftrace_trace_task(struct task_struct *task)
 	return test_tsk_trace_trace(task);
 }
 extern int ftrace_is_dead(void);
+int ftrace_create_function_files(struct trace_array *tr,
+				 struct dentry *parent);
+void ftrace_destroy_function_files(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	return 1;
 }
 static inline int ftrace_is_dead(void) { return 0; }
-#endif
+static inline int
+ftrace_create_function_files(struct trace_array *tr,
+			     struct dentry *parent)
+{
+	return 0;
+}
+static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+void ftrace_create_filter_files(struct ftrace_ops *ops,
+				struct dentry *parent);
+void ftrace_destroy_filter_files(struct ftrace_ops *ops);
+#else
+/*
+ * The ops parameter passed in is usually undefined.
+ * This must be a macro.
+ */
+#define ftrace_create_filter_files(ops, parent) do { } while (0)
+#define ftrace_destroy_filter_files(ops) do { } while (0)
+#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
 
 int ftrace_event_is_function(struct ftrace_event_call *call);
 
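
Changing tracer->enabled from a bool to an int is what lets one struct tracer be current in several trace_array instances at once: each instance increments the count when it selects the tracer and decrements when it switches away (see the enabled++/enabled-- hunks in trace.c above). A hedged standalone sketch of that counting discipline, with invented names:

    #include <assert.h>
    #include <stdio.h>

    struct tracer_sketch {
    	int enabled;		/* a reference count, no longer a flag */
    };

    static void attach(struct tracer_sketch *t)
    {
    	t->enabled++;		/* an instance selected this tracer */
    }

    static void detach(struct tracer_sketch *t)
    {
    	assert(t->enabled > 0);
    	t->enabled--;		/* an instance switched away */
    }

    int main(void)
    {
    	struct tracer_sketch func = { 0 };

    	attach(&func);		/* instance A selects the function tracer */
    	attach(&func);		/* instance B selects it too */
    	detach(&func);		/* A moves on: the tracer must stay live for B */
    	printf("still enabled for %d instance(s)\n", func.enabled);
    	detach(&func);
    	return 0;
    }

With a plain bool, instance A switching away would have flipped the tracer off underneath instance B.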
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7b16d40bd64d..83a4378dc5e0 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -188,6 +188,36 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
+void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
+				  struct ftrace_event_file *ftrace_file,
+				  unsigned long len)
+{
+	struct ftrace_event_call *event_call = ftrace_file->event_call;
+
+	local_save_flags(fbuffer->flags);
+	fbuffer->pc = preempt_count();
+	fbuffer->ftrace_file = ftrace_file;
+
+	fbuffer->event =
+		trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
+						event_call->event.type, len,
+						fbuffer->flags, fbuffer->pc);
+	if (!fbuffer->event)
+		return NULL;
+
+	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
+	return fbuffer->entry;
+}
+EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
+
+void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
+{
+	event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
+				    fbuffer->event, fbuffer->entry,
+				    fbuffer->flags, fbuffer->pc);
+}
+EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
+
 int ftrace_event_reg(struct ftrace_event_call *call,
 		     enum trace_reg type, void *data)
 {
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 38fe1483c508..5b781d2be383 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,32 +13,106 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/slab.h>
 #include <linux/fs.h>
 
 #include "trace.h"
 
-/* function tracing enabled */
-static int ftrace_function_enabled;
+static void tracing_start_function_trace(struct trace_array *tr);
+static void tracing_stop_function_trace(struct trace_array *tr);
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip,
+		    struct ftrace_ops *op, struct pt_regs *pt_regs);
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
+			  struct ftrace_ops *op, struct pt_regs *pt_regs);
+static struct ftrace_ops trace_ops;
+static struct ftrace_ops trace_stack_ops;
+static struct tracer_flags func_flags;
+
+/* Our option */
+enum {
+	TRACE_FUNC_OPT_STACK	= 0x1,
+};
+
+static int allocate_ftrace_ops(struct trace_array *tr)
+{
+	struct ftrace_ops *ops;
+
+	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return -ENOMEM;
 
-static struct trace_array *func_trace;
+	/* Currently only the non stack verision is supported */
+	ops->func = function_trace_call;
+	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
+
+	tr->ops = ops;
+	ops->private = tr;
+	return 0;
+}
+
+
+int ftrace_create_function_files(struct trace_array *tr,
+				 struct dentry *parent)
+{
+	int ret;
+
+	/* The top level array uses the "global_ops". */
+	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
+		ret = allocate_ftrace_ops(tr);
+		if (ret)
+			return ret;
+	}
+
+	ftrace_create_filter_files(tr->ops, parent);
+
+	return 0;
+}
 
-static void tracing_start_function_trace(void);
-static void tracing_stop_function_trace(void);
+void ftrace_destroy_function_files(struct trace_array *tr)
+{
+	ftrace_destroy_filter_files(tr->ops);
+	kfree(tr->ops);
+	tr->ops = NULL;
+}
 
 static int function_trace_init(struct trace_array *tr)
 {
-	func_trace = tr;
+	struct ftrace_ops *ops;
+
+	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+		/* There's only one global tr */
+		if (!trace_ops.private) {
+			trace_ops.private = tr;
+			trace_stack_ops.private = tr;
+		}
+
+		if (func_flags.val & TRACE_FUNC_OPT_STACK)
+			ops = &trace_stack_ops;
+		else
+			ops = &trace_ops;
+		tr->ops = ops;
+	} else if (!tr->ops) {
+		/*
+		 * Instance trace_arrays get their ops allocated
+		 * at instance creation. Unless it failed
+		 * the allocation.
+		 */
+		return -ENOMEM;
+	}
+
 	tr->trace_buffer.cpu = get_cpu();
 	put_cpu();
 
 	tracing_start_cmdline_record();
-	tracing_start_function_trace();
+	tracing_start_function_trace(tr);
 	return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
 {
-	tracing_stop_function_trace();
+	tracing_stop_function_trace(tr);
 	tracing_stop_cmdline_record();
 }
 
@@ -47,25 +121,18 @@ static void function_trace_start(struct trace_array *tr)
 	tracing_reset_online_cpus(&tr->trace_buffer);
 }
 
-/* Our option */
-enum {
-	TRACE_FUNC_OPT_STACK	= 0x1,
-};
-
-static struct tracer_flags func_flags;
-
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct trace_array *tr = func_trace;
+	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	int bit;
 	int cpu;
 	int pc;
 
-	if (unlikely(!ftrace_function_enabled))
+	if (unlikely(!tr->function_enabled))
 		return;
 
 	pc = preempt_count();
@@ -91,14 +158,14 @@ static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
-	struct trace_array *tr = func_trace;
+	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 	int pc;
 
-	if (unlikely(!ftrace_function_enabled))
+	if (unlikely(!tr->function_enabled))
 		return;
 
 	/*
@@ -128,7 +195,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	local_irq_restore(flags);
 }
 
-
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = function_trace_call,
@@ -153,29 +219,21 @@ static struct tracer_flags func_flags = {
 	.opts = func_opts
 };
 
-static void tracing_start_function_trace(void)
+static void tracing_start_function_trace(struct trace_array *tr)
 {
-	ftrace_function_enabled = 0;
-
-	if (func_flags.val & TRACE_FUNC_OPT_STACK)
-		register_ftrace_function(&trace_stack_ops);
-	else
-		register_ftrace_function(&trace_ops);
-
-	ftrace_function_enabled = 1;
+	tr->function_enabled = 0;
+	register_ftrace_function(tr->ops);
+	tr->function_enabled = 1;
 }
 
-static void tracing_stop_function_trace(void)
+static void tracing_stop_function_trace(struct trace_array *tr)
 {
-	ftrace_function_enabled = 0;
-
-	if (func_flags.val & TRACE_FUNC_OPT_STACK)
-		unregister_ftrace_function(&trace_stack_ops);
-	else
-		unregister_ftrace_function(&trace_ops);
+	tr->function_enabled = 0;
+	unregister_ftrace_function(tr->ops);
 }
 
-static int func_set_flag(u32 old_flags, u32 bit, int set)
+static int
+func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	switch (bit) {
 	case TRACE_FUNC_OPT_STACK:
@@ -183,12 +241,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
 			break;
 
+		unregister_ftrace_function(tr->ops);
+
 		if (set) {
-			unregister_ftrace_function(&trace_ops);
-			register_ftrace_function(&trace_stack_ops);
+			tr->ops = &trace_stack_ops;
+			register_ftrace_function(tr->ops);
 		} else {
-			unregister_ftrace_function(&trace_stack_ops);
-			register_ftrace_function(&trace_ops);
+			tr->ops = &trace_ops;
+			register_ftrace_function(tr->ops);
 		}
 
 		break;
@@ -208,6 +268,7 @@ static struct tracer function_trace __tracer_data =
 	.wait_pipe	= poll_wait_pipe,
 	.flags		= &func_flags,
 	.set_flag	= func_set_flag,
+	.allow_instances = true,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_function,
 #endif
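
The trace_functions.c rework above gives each instance its own struct ftrace_ops and stores the owning trace_array in ops->private, so the shared callback can recover which buffer to log into from its op argument alone instead of a single global. A standalone sketch of that back-pointer pattern; every type and name below is a stand-in, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct instance;			/* stand-in for struct trace_array */

    struct ops_sketch {
    	void (*func)(struct ops_sketch *op);
    	void *private;			/* back-pointer, like ftrace_ops->private */
    };

    struct instance {
    	const char *name;
    	struct ops_sketch *ops;
    };

    static void callback(struct ops_sketch *op)
    {
    	struct instance *inst = op->private;	/* like tr = op->private */

    	printf("event logged to instance '%s'\n", inst->name);
    }

    /* like allocate_ftrace_ops(): one ops per instance, wired both ways */
    static int allocate_ops(struct instance *inst)
    {
    	struct ops_sketch *ops = calloc(1, sizeof(*ops));

    	if (!ops)
    		return -1;
    	ops->func = callback;
    	ops->private = inst;
    	inst->ops = ops;
    	return 0;
    }

    int main(void)
    {
    	struct instance a = { .name = "foo" }, b = { .name = "bar" };

    	if (allocate_ops(&a) || allocate_ops(&b))
    		return 1;
    	a.ops->func(a.ops);	/* each registered op knows its own buffer */
    	b.ops->func(b.ops);
    	free(a.ops);
    	free(b.ops);
    	return 0;
    }

This is what replaces the old func_trace global: one callback body, many independently registered ops.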
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 0b99120d395c..deff11200261 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1476,7 +1476,8 @@ void graph_trace_close(struct trace_iterator *iter)
 	}
 }
 
-static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
+static int
+func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	if (bit == TRACE_GRAPH_PRINT_IRQS)
 		ftrace_graph_skip_irqs = !set;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 887ef88b0bc7..8ff02cbb892f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -160,7 +160,8 @@ static struct ftrace_ops trace_ops __read_mostly =
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+static int
+irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	int cpu;
 
@@ -266,7 +267,8 @@ __trace_function(struct trace_array *tr,
 #else
 #define __trace_function trace_function
 
-static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+static int
+irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	return -EINVAL;
 }
@@ -570,8 +572,10 @@ static void irqsoff_function_set(int set)
 		unregister_irqsoff_function(is_graph());
 }
 
-static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set)
+static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
+	struct tracer *tracer = tr->current_trace;
+
 	if (mask & TRACE_ITER_FUNCTION)
 		irqsoff_function_set(set);
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index bdbae450c13e..d021d21dd150 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,11 +35,6 @@ struct trace_kprobe {
 	struct trace_probe	tp;
 };
 
-struct event_file_link {
-	struct ftrace_event_file	*file;
-	struct list_head		list;
-};
-
 #define SIZEOF_TRACE_KPROBE(n)			\
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))
@@ -387,18 +382,6 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
 	return ret;
 }
 
-static struct event_file_link *
-find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
-{
-	struct event_file_link *link;
-
-	list_for_each_entry(link, &tp->files, list)
-		if (link->file == file)
-			return link;
-
-	return NULL;
-}
-
 /*
  * Disable trace_probe
  * if the file is NULL, disable "perf" handler, or disable "trace" handler.
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 394f94417e2f..69a5cc94c01a 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -62,7 +62,7 @@ static void nop_trace_reset(struct trace_array *tr)
  * If you don't implement it, then the flag setting will be
  * automatically accepted.
  */
-static int nop_set_flag(u32 old_flags, u32 bit, int set)
+static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	/*
 	 * Note that you don't need to update nop_flags.val yourself.
@@ -96,6 +96,7 @@ struct tracer nop_trace __read_mostly =
 	.selftest	= trace_selftest_startup_nop,
 #endif
 	.flags		= &nop_flags,
-	.set_flag	= nop_set_flag
+	.set_flag	= nop_set_flag,
+	.allow_instances = true,
 };
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ed32284fbe32..ca0e79e2abaa 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -439,6 +439,37 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
 }
 EXPORT_SYMBOL(ftrace_raw_output_prep);
 
+static int ftrace_output_raw(struct trace_iterator *iter, char *name,
+			     char *fmt, va_list ap)
+{
+	struct trace_seq *s = &iter->seq;
+	int ret;
+
+	ret = trace_seq_printf(s, "%s: ", name);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	ret = trace_seq_vprintf(s, fmt, ap);
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = ftrace_output_raw(iter, name, fmt, ap);
+	va_end(ap);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ftrace_output_call);
+
 #ifdef CONFIG_KRETPROBES
 static inline const char *kretprobed(const char *name)
 {
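
The new ftrace_output_call() above follows the classic printf/vprintf split: the public variadic entry point only owns va_start()/va_end() and forwards the va_list to a raw worker that does the formatting. A self-contained sketch of that forwarding shape, with invented names standing in for the trace_seq machinery:

    #include <stdarg.h>
    #include <stdio.h>

    /* the worker takes a va_list, like ftrace_output_raw() */
    static int output_raw(const char *name, const char *fmt, va_list ap)
    {
    	int ret = printf("%s: ", name);

    	if (ret < 0)
    		return ret;
    	return vprintf(fmt, ap);
    }

    /* the public entry point owns va_start()/va_end(), like ftrace_output_call() */
    static int output_call(const char *name, const char *fmt, ...)
    {
    	va_list ap;
    	int ret;

    	va_start(ap, fmt);
    	ret = output_raw(name, fmt, ap);
    	va_end(ap);

    	return ret;
    }

    int main(void)
    {
    	output_call("demo_event", "count=%d\n", 42);
    	return 0;
    }

Splitting it this way lets other callers that already hold a va_list reuse the worker directly.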
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index b73574a5f429..fb1ab5dfbd42 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -288,6 +288,11 @@ struct trace_probe {
 	struct probe_arg		args[];
 };
 
+struct event_file_link {
+	struct ftrace_event_file	*file;
+	struct list_head		list;
+};
+
 static inline bool trace_probe_is_enabled(struct trace_probe *tp)
 {
 	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
@@ -316,6 +321,18 @@ static inline int is_good_name(const char *name)
 	return 1;
 }
 
+static inline struct event_file_link *
+find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+	struct event_file_link *link;
+
+	list_for_each_entry(link, &tp->files, list)
+		if (link->file == file)
+			return link;
+
+	return NULL;
+}
+
 extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
 		   struct probe_arg *parg, bool is_return, bool is_kprobe);
 
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 6e32635e5e57..e14da5e97a69 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -179,8 +179,10 @@ static void wakeup_function_set(int set)
 		unregister_wakeup_function(is_graph());
 }
 
-static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set)
+static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
+	struct tracer *tracer = tr->current_trace;
+
 	if (mask & TRACE_ITER_FUNCTION)
 		wakeup_function_set(set);
 
@@ -209,7 +211,8 @@ static void stop_func_tracer(int graph)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+static int
+wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 
 	if (!(bit & TRACE_DISPLAY_GRAPH))
@@ -311,7 +314,8 @@ __trace_function(struct trace_array *tr,
 #else
 #define __trace_function trace_function
 
-static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
+static int
+wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	return -EINVAL;
 }
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index e6be585cf06a..21b320e5d163 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -13,6 +13,7 @@
 #include <linux/sysctl.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/magic.h>
 
 #include <asm/setup.h>
 
@@ -144,6 +145,8 @@ check_stack(unsigned long ip, unsigned long *stack)
 		i++;
 	}
 
+	BUG_ON(current != &init_task &&
+		*(end_of_stack(current)) != STACK_END_MAGIC);
  out:
 	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 79e52d93860b..e4473367e7a4 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -260,6 +260,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 		goto error;
 
 	INIT_LIST_HEAD(&tu->list);
+	INIT_LIST_HEAD(&tu->tp.files);
 	tu->consumer.handler = uprobe_dispatcher;
 	if (is_ret)
 		tu->consumer.ret_handler = uretprobe_dispatcher;
@@ -758,31 +759,32 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 	mutex_unlock(&ucb->mutex);
 }
 
-static void uprobe_trace_print(struct trace_uprobe *tu,
-				unsigned long func, struct pt_regs *regs)
+static void __uprobe_trace_func(struct trace_uprobe *tu,
+				unsigned long func, struct pt_regs *regs,
+				struct uprobe_cpu_buffer *ucb, int dsize,
+				struct ftrace_event_file *ftrace_file)
 {
 	struct uprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	struct uprobe_cpu_buffer *ucb;
 	void *data;
-	int size, dsize, esize;
+	int size, esize;
 	struct ftrace_event_call *call = &tu->tp.call;
 
-	dsize = __get_data_size(&tu->tp, regs);
-	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+	WARN_ON(call != ftrace_file->event_call);
 
-	if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE))
+	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 		return;
 
-	ucb = uprobe_buffer_get();
-	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+	if (ftrace_trigger_soft_disabled(ftrace_file))
+		return;
 
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 	size = esize + tu->tp.size + dsize;
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, 0, 0);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type, size, 0, 0);
 	if (!event)
-		goto out;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	if (is_ret_probe(tu)) {
@@ -796,25 +798,36 @@ static void uprobe_trace_print(struct trace_uprobe *tu,
 
 	memcpy(data, ucb->buf, tu->tp.size + dsize);
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, 0);
-
-out:
-	uprobe_buffer_put(ucb);
+	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
 }
 
 /* uprobe handler */
-static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
+static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
+			     struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	if (!is_ret_probe(tu))
-		uprobe_trace_print(tu, 0, regs);
+	struct event_file_link *link;
+
+	if (is_ret_probe(tu))
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(link, &tu->tp.files, list)
+		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
+	rcu_read_unlock();
+
 	return 0;
 }
 
 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
-				struct pt_regs *regs)
+				 struct pt_regs *regs,
+				 struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	uprobe_trace_print(tu, func, regs);
+	struct event_file_link *link;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(link, &tu->tp.files, list)
+		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
+	rcu_read_unlock();
 }
 
 /* Event entry printers */
@@ -861,12 +874,24 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
 				struct mm_struct *mm);
 
 static int
-probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
+probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
+		   filter_func_t filter)
 {
-	int ret = 0;
+	bool enabled = trace_probe_is_enabled(&tu->tp);
+	struct event_file_link *link = NULL;
+	int ret;
+
+	if (file) {
+		link = kmalloc(sizeof(*link), GFP_KERNEL);
+		if (!link)
+			return -ENOMEM;
 
-	if (trace_probe_is_enabled(&tu->tp))
-		return -EINTR;
+		link->file = file;
+		list_add_tail_rcu(&link->list, &tu->tp.files);
+
+		tu->tp.flags |= TP_FLAG_TRACE;
+	} else
+		tu->tp.flags |= TP_FLAG_PROFILE;
 
 	ret = uprobe_buffer_enable();
 	if (ret < 0)
@@ -874,24 +899,49 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
 
 	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
-	tu->tp.flags |= flag;
+	if (enabled)
+		return 0;
+
 	tu->consumer.filter = filter;
 	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
-	if (ret)
-		tu->tp.flags &= ~flag;
+	if (ret) {
+		if (file) {
+			list_del(&link->list);
+			kfree(link);
+			tu->tp.flags &= ~TP_FLAG_TRACE;
+		} else
+			tu->tp.flags &= ~TP_FLAG_PROFILE;
+	}
 
 	return ret;
 }
 
-static void probe_event_disable(struct trace_uprobe *tu, int flag)
+static void
+probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
 {
 	if (!trace_probe_is_enabled(&tu->tp))
 		return;
 
+	if (file) {
+		struct event_file_link *link;
+
+		link = find_event_file_link(&tu->tp, file);
+		if (!link)
+			return;
+
+		list_del_rcu(&link->list);
+		/* synchronize with u{,ret}probe_trace_func */
+		synchronize_sched();
+		kfree(link);
+
+		if (!list_empty(&tu->tp.files))
+			return;
+	}
+
 	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
 	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
-	tu->tp.flags &= ~flag;
+	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
 
 	uprobe_buffer_disable();
 }
@@ -1014,31 +1064,24 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc,
 	return ret;
 }
 
-static void uprobe_perf_print(struct trace_uprobe *tu,
-				unsigned long func, struct pt_regs *regs)
+static void __uprobe_perf_func(struct trace_uprobe *tu,
+			       unsigned long func, struct pt_regs *regs,
+			       struct uprobe_cpu_buffer *ucb, int dsize)
 {
 	struct ftrace_event_call *call = &tu->tp.call;
 	struct uprobe_trace_entry_head *entry;
 	struct hlist_head *head;
-	struct uprobe_cpu_buffer *ucb;
 	void *data;
-	int size, dsize, esize;
+	int size, esize;
 	int rctx;
 
-	dsize = __get_data_size(&tu->tp, regs);
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 
-	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
-		return;
-
 	size = esize + tu->tp.size + dsize;
 	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
 		return;
 
-	ucb = uprobe_buffer_get();
-	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
-
 	preempt_disable();
 	head = this_cpu_ptr(call->perf_events);
 	if (hlist_empty(head))
@@ -1068,46 +1111,49 @@ static void uprobe_perf_print(struct trace_uprobe *tu,
 	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
  out:
 	preempt_enable();
-	uprobe_buffer_put(ucb);
 }
 
 /* uprobe profile handler */
-static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
+static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
+			    struct uprobe_cpu_buffer *ucb, int dsize)
 {
 	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
 		return UPROBE_HANDLER_REMOVE;
 
 	if (!is_ret_probe(tu))
-		uprobe_perf_print(tu, 0, regs);
+		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
 	return 0;
 }
 
 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
-				struct pt_regs *regs)
+				struct pt_regs *regs,
+				struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	uprobe_perf_print(tu, func, regs);
+	__uprobe_perf_func(tu, func, regs, ucb, dsize);
 }
 #endif /* CONFIG_PERF_EVENTS */
 
-static
-int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
+static int
+trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
+		      void *data)
 {
 	struct trace_uprobe *tu = event->data;
+	struct ftrace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return probe_event_enable(tu, TP_FLAG_TRACE, NULL);
+		return probe_event_enable(tu, file, NULL);
 
 	case TRACE_REG_UNREGISTER:
-		probe_event_disable(tu, TP_FLAG_TRACE);
+		probe_event_disable(tu, file);
 		return 0;
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter);
+		return probe_event_enable(tu, NULL, uprobe_perf_filter);
 
 	case TRACE_REG_PERF_UNREGISTER:
-		probe_event_disable(tu, TP_FLAG_PROFILE);
+		probe_event_disable(tu, NULL);
 		return 0;
 
 	case TRACE_REG_PERF_OPEN:
@@ -1127,8 +1173,11 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;
+	struct uprobe_cpu_buffer *ucb;
+	int dsize, esize;
 	int ret = 0;
 
+
 	tu = container_of(con, struct trace_uprobe, consumer);
 	tu->nhit++;
 
@@ -1137,13 +1186,29 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 
 	current->utask->vaddr = (unsigned long) &udd;
 
+#ifdef CONFIG_PERF_EVENTS
+	if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
+	    !uprobe_perf_filter(&tu->consumer, 0, current->mm))
+		return UPROBE_HANDLER_REMOVE;
+#endif
+
+	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+		return 0;
+
+	dsize = __get_data_size(&tu->tp, regs);
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+
+	ucb = uprobe_buffer_get();
+	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+
 	if (tu->tp.flags & TP_FLAG_TRACE)
-		ret |= uprobe_trace_func(tu, regs);
+		ret |= uprobe_trace_func(tu, regs, ucb, dsize);
 
 #ifdef CONFIG_PERF_EVENTS
 	if (tu->tp.flags & TP_FLAG_PROFILE)
-		ret |= uprobe_perf_func(tu, regs);
+		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
 #endif
+	uprobe_buffer_put(ucb);
 	return ret;
 }
 
@@ -1152,6 +1217,8 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;
+	struct uprobe_cpu_buffer *ucb;
+	int dsize, esize;
 
 	tu = container_of(con, struct trace_uprobe, consumer);
 
@@ -1160,13 +1227,23 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
 
 	current->utask->vaddr = (unsigned long) &udd;
 
+	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+		return 0;
+
+	dsize = __get_data_size(&tu->tp, regs);
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+
+	ucb = uprobe_buffer_get();
+	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+
 	if (tu->tp.flags & TP_FLAG_TRACE)
-		uretprobe_trace_func(tu, func, regs);
+		uretprobe_trace_func(tu, func, regs, ucb, dsize);
 
 #ifdef CONFIG_PERF_EVENTS
 	if (tu->tp.flags & TP_FLAG_PROFILE)
-		uretprobe_perf_func(tu, func, regs);
+		uretprobe_perf_func(tu, func, regs, ucb, dsize);
 #endif
+	uprobe_buffer_put(ucb);
 	return 0;
 }
 
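
The uprobe changes hoist uprobe_buffer_get()/uprobe_buffer_put() and the argument snapshot out of the printers and into the dispatchers, so arguments are fetched from the probed task once per hit and then shared by the trace and perf consumers. A sketch of that acquire-once, pass-down, single-release shape (illustrative names only, userspace stand-ins for the per-CPU buffer):

    #include <stdio.h>

    struct cpu_buf { char data[64]; };

    static struct cpu_buf global_buf;

    static struct cpu_buf *buffer_get(void)   { return &global_buf; }
    static void buffer_put(struct cpu_buf *b) { (void)b; }

    /* both consumers receive the already-filled buffer */
    static void trace_func(struct cpu_buf *b, int dsize) { printf("trace: %.*s\n", dsize, b->data); }
    static void perf_func(struct cpu_buf *b, int dsize)  { printf("perf:  %.*s\n", dsize, b->data); }

    static void dispatcher(int want_trace, int want_perf)
    {
    	struct cpu_buf *ucb = buffer_get();	/* fetch the arguments once per hit */
    	int dsize = snprintf(ucb->data, sizeof(ucb->data), "args");

    	if (want_trace)
    		trace_func(ucb, dsize);
    	if (want_perf)
    		perf_func(ucb, dsize);
    	buffer_put(ucb);			/* single release point */
    }

    int main(void)
    {
    	dispatcher(1, 1);
    	return 0;
    }

Besides avoiding a double fetch when both handlers run, this also removes the unbalanced get/put paths the old out: label had to manage.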
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 031cc5655a51..50f8329c2042 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -62,14 +62,12 @@ struct tracepoint_entry {
 	struct hlist_node hlist;
 	struct tracepoint_func *funcs;
 	int refcount;	/* Number of times armed. 0 if disarmed. */
+	int enabled;	/* Tracepoint enabled */
 	char name[0];
 };
 
 struct tp_probes {
-	union {
-		struct rcu_head rcu;
-		struct list_head list;
-	} u;
+	struct rcu_head rcu;
 	struct tracepoint_func probes[0];
 };
 
@@ -82,7 +80,7 @@ static inline void *allocate_probes(int count)
 
 static void rcu_free_old_probes(struct rcu_head *head)
 {
-	kfree(container_of(head, struct tp_probes, u.rcu));
+	kfree(container_of(head, struct tp_probes, rcu));
 }
 
 static inline void release_probes(struct tracepoint_func *old)
@@ -90,7 +88,7 @@ static inline void release_probes(struct tracepoint_func *old)
 	if (old) {
 		struct tp_probes *tp_probes = container_of(old,
 			struct tp_probes, probes[0]);
-		call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
+		call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
 	}
 }
 
@@ -237,6 +235,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name)
 	memcpy(&e->name[0], name, name_len);
 	e->funcs = NULL;
 	e->refcount = 0;
+	e->enabled = 0;
 	hlist_add_head(&e->hlist, head);
 	return e;
 }
@@ -316,6 +315,7 @@ static void tracepoint_update_probe_range(struct tracepoint * const *begin,
 		if (mark_entry) {
 			set_tracepoint(&mark_entry, *iter,
 					!!mark_entry->refcount);
+			mark_entry->enabled = !!mark_entry->refcount;
 		} else {
 			disable_tracepoint(*iter);
 		}
@@ -373,13 +373,26 @@ tracepoint_add_probe(const char *name, void *probe, void *data)
  * tracepoint_probe_register -	Connect a probe to a tracepoint
  * @name: tracepoint name
  * @probe: probe handler
+ * @data: probe private data
+ *
+ * Returns:
+ * - 0 if the probe was successfully registered, and tracepoint
+ *   callsites are currently loaded for that probe,
+ * - -ENODEV if the probe was successfully registered, but no tracepoint
+ *   callsite is currently loaded for that probe,
+ * - other negative error value on error.
+ *
+ * When tracepoint_probe_register() returns either 0 or -ENODEV,
+ * parameters @name, @probe, and @data may be used by the tracepoint
+ * infrastructure until the probe is unregistered.
  *
- * Returns 0 if ok, error value on error.
  * The probe address must at least be aligned on the architecture pointer size.
 */
 int tracepoint_probe_register(const char *name, void *probe, void *data)
 {
 	struct tracepoint_func *old;
+	struct tracepoint_entry *entry;
+	int ret = 0;
 
 	mutex_lock(&tracepoints_mutex);
 	old = tracepoint_add_probe(name, probe, data);
@@ -388,9 +401,13 @@ int tracepoint_probe_register(const char *name, void *probe, void *data)
 		return PTR_ERR(old);
 	}
 	tracepoint_update_probes();		/* may update entry */
+	entry = get_tracepoint(name);
+	/* Make sure the entry was enabled */
+	if (!entry || !entry->enabled)
+		ret = -ENODEV;
 	mutex_unlock(&tracepoints_mutex);
 	release_probes(old);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_register);
 
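
Given the return values now documented for tracepoint_probe_register(), a caller can treat -ENODEV as "registered but not yet armed" rather than a failure: the probe stays registered and will fire if a module later provides the tracepoint. A hedged userspace sketch of the three-way handling; the register function here is a mock with the same contract, not the kernel symbol:

    #include <stdio.h>

    #define ENODEV_SKETCH 19	/* mirrors -ENODEV for this sketch */

    /* mock with the same contract as tracepoint_probe_register() */
    static int register_probe_mock(int tracepoint_loaded, int alloc_fails)
    {
    	if (alloc_fails)
    		return -12;	/* some other error, e.g. -ENOMEM */
    	return tracepoint_loaded ? 0 : -ENODEV_SKETCH;
    }

    int main(void)
    {
    	int ret = register_probe_mock(0, 0);

    	if (ret == 0)
    		puts("probe armed immediately");
    	else if (ret == -ENODEV_SKETCH)
    		puts("probe registered; will arm when a module provides the tracepoint");
    	else
    		printf("registration failed: %d\n", ret);
    	return 0;
    }
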
@@ -415,6 +432,7 @@ tracepoint_remove_probe(const char *name, void *probe, void *data)
 * tracepoint_probe_unregister -	Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
+ * @data: probe private data
 *
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
@@ -438,197 +456,6 @@ int tracepoint_probe_unregister(const char *name, void *probe, void *data)
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
 
-static LIST_HEAD(old_probes);
-static int need_update;
-
-static void tracepoint_add_old_probes(void *old)
-{
-	need_update = 1;
-	if (old) {
-		struct tp_probes *tp_probes = container_of(old,
-			struct tp_probes, probes[0]);
-		list_add(&tp_probes->u.list, &old_probes);
-	}
-}
-
-/**
- * tracepoint_probe_register_noupdate -  register a probe but not connect
- * @name: tracepoint name
- * @probe: probe handler
- *
- * caller must call tracepoint_probe_update_all()
- */
-int tracepoint_probe_register_noupdate(const char *name, void *probe,
-				       void *data)
-{
-	struct tracepoint_func *old;
-
-	mutex_lock(&tracepoints_mutex);
-	old = tracepoint_add_probe(name, probe, data);
-	if (IS_ERR(old)) {
-		mutex_unlock(&tracepoints_mutex);
-		return PTR_ERR(old);
-	}
-	tracepoint_add_old_probes(old);
-	mutex_unlock(&tracepoints_mutex);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
-
-/**
- * tracepoint_probe_unregister_noupdate -  remove a probe but not disconnect
- * @name: tracepoint name
- * @probe: probe function pointer
- *
- * caller must call tracepoint_probe_update_all()
- */
-int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
-					 void *data)
-{
-	struct tracepoint_func *old;
-
-	mutex_lock(&tracepoints_mutex);
-	old = tracepoint_remove_probe(name, probe, data);
-	if (IS_ERR(old)) {
-		mutex_unlock(&tracepoints_mutex);
-		return PTR_ERR(old);
-	}
-	tracepoint_add_old_probes(old);
-	mutex_unlock(&tracepoints_mutex);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
-
-/**
- * tracepoint_probe_update_all -  update tracepoints
- */
-void tracepoint_probe_update_all(void)
-{
-	LIST_HEAD(release_probes);
-	struct tp_probes *pos, *next;
-
-	mutex_lock(&tracepoints_mutex);
-	if (!need_update) {
-		mutex_unlock(&tracepoints_mutex);
-		return;
-	}
-	if (!list_empty(&old_probes))
-		list_replace_init(&old_probes, &release_probes);
-	need_update = 0;
-	tracepoint_update_probes();
-	mutex_unlock(&tracepoints_mutex);
-	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
-		list_del(&pos->u.list);
-		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
-	}
-}
-EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
-
-/**
- * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
- * @tracepoint: current tracepoints (in), next tracepoint (out)
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Returns whether a next tracepoint has been found (1) or not (0).
- * Will return the first tracepoint in the range if the input tracepoint is
- * NULL.
- */
-static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
-	struct tracepoint * const *begin, struct tracepoint * const *end)
-{
-	if (!*tracepoint && begin != end) {
-		*tracepoint = begin;
-		return 1;
-	}
-	if (*tracepoint >= begin && *tracepoint < end)
-		return 1;
-	return 0;
-}
-
-#ifdef CONFIG_MODULES
-static void tracepoint_get_iter(struct tracepoint_iter *iter)
-{
-	int found = 0;
-	struct tp_module *iter_mod;
-
-	/* Core kernel tracepoints */
-	if (!iter->module) {
-		found = tracepoint_get_iter_range(&iter->tracepoint,
-				__start___tracepoints_ptrs,
-				__stop___tracepoints_ptrs);
-		if (found)
-			goto end;
-	}
-	/* Tracepoints in modules */
-	mutex_lock(&tracepoints_mutex);
-	list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
-		/*
-		 * Sorted module list
-		 */
-		if (iter_mod < iter->module)
-			continue;
-		else if (iter_mod > iter->module)
-			iter->tracepoint = NULL;
-		found = tracepoint_get_iter_range(&iter->tracepoint,
-			iter_mod->tracepoints_ptrs,
-			iter_mod->tracepoints_ptrs
-				+ iter_mod->num_tracepoints);
-		if (found) {
-			iter->module = iter_mod;
-			break;
-		}
-	}
-	mutex_unlock(&tracepoints_mutex);
-end:
-	if (!found)
-		tracepoint_iter_reset(iter);
-}
-#else /* CONFIG_MODULES */
-static void tracepoint_get_iter(struct tracepoint_iter *iter)
-{
-	int found = 0;
-
-	/* Core kernel tracepoints */
-	found = tracepoint_get_iter_range(&iter->tracepoint,
-			__start___tracepoints_ptrs,
-			__stop___tracepoints_ptrs);
-	if (!found)
-		tracepoint_iter_reset(iter);
-}
-#endif /* CONFIG_MODULES */
-
-void tracepoint_iter_start(struct tracepoint_iter *iter)
-{
-	tracepoint_get_iter(iter);
-}
-EXPORT_SYMBOL_GPL(tracepoint_iter_start);
-
-void tracepoint_iter_next(struct tracepoint_iter *iter)
-{
-	iter->tracepoint++;
-	/*
-	 * iter->tracepoint may be invalid because we blindly incremented it.
-	 * Make sure it is valid by marshalling on the tracepoints, getting the
-	 * tracepoints from following modules if necessary.
-	 */
-	tracepoint_get_iter(iter);
-}
-EXPORT_SYMBOL_GPL(tracepoint_iter_next);
-
-void tracepoint_iter_stop(struct tracepoint_iter *iter)
-{
-}
-EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
-
-void tracepoint_iter_reset(struct tracepoint_iter *iter)
-{
-#ifdef CONFIG_MODULES
-	iter->module = NULL;
-#endif /* CONFIG_MODULES */
-	iter->tracepoint = NULL;
-}
-EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
 bool trace_module_has_bad_taint(struct module *mod)
@@ -638,9 +465,12 @@ bool trace_module_has_bad_taint(struct module *mod)
 
 static int tracepoint_module_coming(struct module *mod)
 {
-	struct tp_module *tp_mod, *iter;
+	struct tp_module *tp_mod;
 	int ret = 0;
 
+	if (!mod->num_tracepoints)
+		return 0;
+
 	/*
 	 * We skip modules that taint the kernel, especially those with different
 	 * module headers (for forced load), to make sure we don't cause a crash.
@@ -656,23 +486,7 @@ static int tracepoint_module_coming(struct module *mod)
 	}
 	tp_mod->num_tracepoints = mod->num_tracepoints;
 	tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
-
-	/*
-	 * tracepoint_module_list is kept sorted by struct module pointer
-	 * address for iteration on tracepoints from a seq_file that can release
-	 * the mutex between calls.
-	 */
-	list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
-		BUG_ON(iter == tp_mod); /* Should never be in the list twice */
-		if (iter < tp_mod) {
-			/* We belong to the location right after iter. */
-			list_add(&tp_mod->list, &iter->list);
-			goto module_added;
-		}
-	}
-	/* We belong to the beginning of the list */
-	list_add(&tp_mod->list, &tracepoint_module_list);
-module_added:
+	list_add_tail(&tp_mod->list, &tracepoint_module_list);
 	tracepoint_update_probe_range(mod->tracepoints_ptrs,
 			mod->tracepoints_ptrs + mod->num_tracepoints);
 end:
@@ -684,6 +498,9 @@ static int tracepoint_module_going(struct module *mod)
 {
 	struct tp_module *pos;
 
+	if (!mod->num_tracepoints)
+		return 0;
+
 	mutex_lock(&tracepoints_mutex);
 	tracepoint_update_probe_range(mod->tracepoints_ptrs,
 			mod->tracepoints_ptrs + mod->num_tracepoints);