Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig			24
-rw-r--r--	kernel/trace/ftrace.c			 4
-rw-r--r--	kernel/trace/trace.c			86
-rw-r--r--	kernel/trace/trace.h			 6
-rw-r--r--	kernel/trace/trace_irqsoff.c		19
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	18
6 files changed, 120 insertions(+), 37 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 192473b22799..fc382d6e2765 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -414,24 +414,28 @@ config PROBE_EVENTS
 	def_bool n
 
 config DYNAMIC_FTRACE
-	bool "enable/disable ftrace tracepoints dynamically"
+	bool "enable/disable function tracing dynamically"
 	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
 	default y
 	help
-	  This option will modify all the calls to ftrace dynamically
-	  (will patch them out of the binary image and replace them
-	  with a No-Op instruction) as they are called. A table is
-	  created to dynamically enable them again.
+	  This option will modify all the calls to function tracing
+	  dynamically (will patch them out of the binary image and
+	  replace them with a No-Op instruction) on boot up. During
+	  compile time, a table is made of all the locations that ftrace
+	  can function trace, and this table is linked into the kernel
+	  image. When this is enabled, functions can be individually
+	  enabled, and the functions not enabled will not affect
+	  performance of the system.
+
+	  See the files in /sys/kernel/debug/tracing:
+	    available_filter_functions
+	    set_ftrace_filter
+	    set_ftrace_notrace
 
 	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
 	  otherwise has native performance as long as no tracing is active.
 
-	  The changes to the code are done by a kernel thread that
-	  wakes up once a second and checks to see if any ftrace calls
-	  were made. If so, it runs stop_machine (stops all CPUS)
-	  and modifies the code to jump over the call to ftrace.
-
 config DYNAMIC_FTRACE_WITH_REGS
 	def_bool y
 	depends on DYNAMIC_FTRACE
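The new help text points readers at the runtime filter files. As a purely illustrative sketch (not part of this patch; paths assume debugfs is mounted at /sys/kernel/debug and root privileges), those files can be driven from user space like this:

/* Hypothetical user-space sketch: trace only schedule() with the
 * function tracer, via the files named in the help text above. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Only this function's call site is patched back from a NOP;
	 * every other traced location keeps native performance. */
	if (write_str("/sys/kernel/debug/tracing/set_ftrace_filter", "schedule"))
		return 1;
	if (write_str("/sys/kernel/debug/tracing/current_tracer", "function"))
		return 1;
	return 0;
}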
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ab25b88aae56..6893d5a2bf08 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				continue;
 			}
 
-			hlist_del(&entry->node);
-			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+			hlist_del_rcu(&entry->node);
+			call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
 		}
 	}
 	__disable_ftrace_function_probe();
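Background for the two-line change above: the probe hash is walked from function-tracing context with preemption disabled rather than under rcu_read_lock(), so the unlink must leave the node walkable for concurrent readers and the free must wait for an RCU-sched grace period. A hedged, kernel-style sketch of the general pattern (the struct and function names here are illustrative, not from this patch; call_rcu_sched() is the API of this era's kernels):

/* Illustrative pattern: hlist_del_rcu() preserves ->next so readers
 * already inside the list can finish their traversal, and
 * call_rcu_sched() defers the kfree() until every CPU has passed
 * through a preemptible state, which covers readers that rely on
 * disabled preemption instead of rcu_read_lock(). */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct probe_like {
	struct hlist_node node;
	struct rcu_head rcu;
};

static void probe_like_free(struct rcu_head *head)
{
	kfree(container_of(head, struct probe_like, rcu));
}

static void probe_like_remove(struct probe_like *p)
{
	hlist_del_rcu(&p->node);                  /* readers may still walk past it */
	call_rcu_sched(&p->rcu, probe_like_free); /* freed after the grace period */
}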
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c2e2c2310374..4f1dade56981 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct ring_buffer *buf = tr->buffer;
+	struct ring_buffer *buf;
 
 	if (trace_stop_count)
 		return;
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
+	buf = tr->buffer;
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
@@ -2400,6 +2401,27 @@ static void test_ftrace_alive(struct seq_file *m)
 	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
 }
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+{
+	if (iter->trace->allocated_snapshot)
+		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+	else
+		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+
+	seq_printf(m, "# Snapshot commands:\n");
+	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
+	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
+	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
+	seq_printf(m, "#                       is not a '0' or '1')\n");
+}
+#else
+/* Should never be called */
+static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
+#endif
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2411,7 +2433,9 @@ static int s_show(struct seq_file *m, void *v)
 		seq_puts(m, "#\n");
 		test_ftrace_alive(m);
 	}
-	if (iter->trace && iter->trace->print_header)
+	if (iter->snapshot && trace_empty(iter))
+		print_snapshot_help(m, iter);
+	else if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
 	else
 		trace_default_header(m);
@@ -2857,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 	return -EINVAL;
 }
 
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+		return -1;
+
+	return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
 {
 	/* do nothing if flag is already set */
 	if (!!(trace_flags & mask) == !!enabled)
-		return;
+		return 0;
+
+	/* Give the tracer a chance to approve the change */
+	if (current_trace->flag_changed)
+		if (current_trace->flag_changed(current_trace, mask, !!enabled))
+			return -EINVAL;
 
 	if (enabled)
 		trace_flags |= mask;
@@ -2871,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
 
-	if (mask == TRACE_ITER_OVERWRITE)
+	if (mask == TRACE_ITER_OVERWRITE) {
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+		ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+	}
 
 	if (mask == TRACE_ITER_PRINTK)
 		trace_printk_start_stop_comm(enabled);
+
+	return 0;
 }
 
 static int trace_set_options(char *option)
 {
 	char *cmp;
 	int neg = 0;
-	int ret = 0;
+	int ret = -ENODEV;
 	int i;
 
 	cmp = strstrip(option);
@@ -2892,19 +2936,20 @@ static int trace_set_options(char *option)
 		cmp += 2;
 	}
 
+	mutex_lock(&trace_types_lock);
+
 	for (i = 0; trace_options[i]; i++) {
 		if (strcmp(cmp, trace_options[i]) == 0) {
-			set_tracer_flags(1 << i, !neg);
+			ret = set_tracer_flag(1 << i, !neg);
 			break;
 		}
 	}
 
 	/* If no option could be set, test the specific tracer options */
-	if (!trace_options[i]) {
-		mutex_lock(&trace_types_lock);
+	if (!trace_options[i])
 		ret = set_tracer_option(current_trace, cmp, neg);
-		mutex_unlock(&trace_types_lock);
-	}
+
+	mutex_unlock(&trace_types_lock);
 
 	return ret;
 }
@@ -2914,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
 	char buf[64];
+	int ret;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2923,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 	buf[cnt] = 0;
 
-	trace_set_options(buf);
+	ret = trace_set_options(buf);
+	if (ret < 0)
+		return ret;
 
 	*ppos += cnt;
 
@@ -3227,6 +3275,9 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 
 	trace_branch_disable();
+
+	current_trace->enabled = false;
+
 	if (current_trace->reset)
 		current_trace->reset(tr);
 
@@ -3271,6 +3322,7 @@ static int tracing_set_tracer(const char *buf)
 	}
 
 	current_trace = t;
+	current_trace->enabled = true;
 	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
@@ -4144,8 +4196,6 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	default:
 		if (current_trace->allocated_snapshot)
 			tracing_reset_online_cpus(&max_tr);
-		else
-			ret = -EINVAL;
 		break;
 	}
 
@@ -4759,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	if (val != 0 && val != 1)
 		return -EINVAL;
-	set_tracer_flags(1 << index, val);
+
+	mutex_lock(&trace_types_lock);
+	ret = set_tracer_flag(1 << index, val);
+	mutex_unlock(&trace_types_lock);
+
+	if (ret < 0)
+		return ret;
 
 	*ppos += cnt;
 
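Taken together, the trace.c changes make flag updates fallible: set_tracer_flag() now consults the current tracer's ->flag_changed callback, and both the trace_options file and the per-flag options files propagate the error back to the writer. A hypothetical user-space check of the new behavior (paths and outcome inferred from the patch, not verified output):

/* With a latency tracer current, clearing the overwrite flag should
 * now fail with EINVAL (trace_keep_overwrite() vetoes it through
 * ->flag_changed) instead of silently corrupting the max_tr buffer. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACING "/sys/kernel/debug/tracing/"

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	if (write(fd, val, strlen(val)) < 0)
		printf("%s <- %s failed: %s\n", path, val, strerror(errno));
	close(fd);
}

int main(void)
{
	write_str(TRACING "current_tracer", "wakeup");     /* forces overwrite on */
	write_str(TRACING "trace_options", "nooverwrite"); /* expected: EINVAL */
	return 0;
}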
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 57d7e5397d56..2081971367ea 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -283,11 +283,15 @@ struct tracer {
 	enum print_line_t (*print_line)(struct trace_iterator *iter);
 	/* If you handled the flag setting, return 0 */
 	int (*set_flag)(u32 old_flags, u32 bit, int set);
+	/* Return 0 if OK with change, else return non-zero */
+	int (*flag_changed)(struct tracer *tracer,
+			    u32 mask, int set);
 	struct tracer *next;
 	struct tracer_flags *flags;
 	bool print_max;
 	bool use_max_tr;
 	bool allocated_snapshot;
+	bool enabled;
 };
 
 
@@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+int set_tracer_flag(unsigned int mask, int enabled);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
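The two new struct tracer members form a small contract: core code keeps ->enabled up to date around tracer init/reset, and ->flag_changed lets a tracer veto a flag flip before it takes effect. A minimal illustrative wiring (the tracer name and init/reset callbacks are hypothetical; the latency tracers in the hunks below use exactly this stock trace_keep_overwrite policy):

/* Illustrative only: a tracer that refuses to let the overwrite
 * flag be cleared while it is the current tracer. */
static struct tracer example_tracer __read_mostly =
{
	.name		= "example",		/* hypothetical */
	.init		= example_tracer_init,	/* hypothetical */
	.reset		= example_tracer_reset,	/* hypothetical */
	.flag_changed	= trace_keep_overwrite,
};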
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 713a2cac4881..443b25b43b4f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -32,7 +32,7 @@ enum {
 
 static int trace_type __read_mostly;
 
-static int save_lat_flag;
+static int save_flags;
 
 static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-	trace_flags |= TRACE_ITER_LATENCY_FMT;
+	save_flags = trace_flags;
+
+	/* non overwrite screws up the latency tracers */
+	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
 	tracing_max_latency = 0;
 	irqsoff_trace = tr;
@@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
+	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
 	stop_irqsoff_tracer(tr, is_graph());
 
-	if (!save_lat_flag)
-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
@@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.print_line = irqsoff_print_line,
 	.flags = &tracer_flags,
 	.set_flag = irqsoff_set_flag,
+	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_irqsoff,
 #endif
@@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.print_line = irqsoff_print_line,
 	.flags = &tracer_flags,
 	.set_flag = irqsoff_set_flag,
+	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_preemptoff,
 #endif
@@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.print_line = irqsoff_print_line,
 	.flags = &tracer_flags,
 	.set_flag = irqsoff_set_flag,
+	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_preemptirqsoff,
 #endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 75aa97fbe1a1..fde652c9a511 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
 static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
-static int save_lat_flag;
+static int save_flags;
 
 #define TRACE_DISPLAY_GRAPH	1
 
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-	trace_flags |= TRACE_ITER_LATENCY_FMT;
+	save_flags = trace_flags;
+
+	/* non overwrite screws up the latency tracers */
+	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
 	tracing_max_latency = 0;
 	wakeup_trace = tr;
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
+	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
 	stop_wakeup_tracer(tr);
 	/* make sure we put back any tasks we are tracing */
 	wakeup_reset(tr);
 
-	if (!save_lat_flag)
-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly =
 	.print_line = wakeup_print_line,
 	.flags = &tracer_flags,
 	.set_flag = wakeup_set_flag,
+	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_wakeup,
 #endif
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.print_line = wakeup_print_line,
 	.flags = &tracer_flags,
 	.set_flag = wakeup_set_flag,
+	.flag_changed = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest = trace_selftest_startup_wakeup,
 #endif