author		Linus Torvalds <torvalds@linux-foundation.org>	2017-09-08 18:08:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-08 18:08:14 -0400
commit		42c8e86c9c3e15ac653c31d57b2f985579c2991d (patch)
tree		09d9149676af1d37ecd595a89b72c3e367d241f2
parent		3aea311c1b4002bd730a09530f80f2f2ad3bf495 (diff)
parent		170b3b1050e28d1ba0700e262f0899ffa4fccc52 (diff)
Merge tag 'trace-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Nothing new in development for this release. These are mostly fixes
  that were found during development of changes for the next merge
  window and fixes that were sent to me late in the last cycle"

* tag 'trace-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Apply trace_clock changes to instance max buffer
  tracing: Fix clear of RECORDED_TGID flag when disabling trace event
  tracing: Add barrier to trace_printk() buffer nesting modification
  ftrace: Fix memleak when unregistering dynamic ops when tracing disabled
  ftrace: Fix selftest goto location on error
  ftrace: Zero out ftrace hashes when a module is removed
  tracing: Only have rmmod clear buffers that its events were active in
  ftrace: Fix debug preempt config name in stack_tracer_{en,dis}able
-rw-r--r--	include/linux/ftrace.h		 4
-rw-r--r--	include/linux/trace_events.h	 8
-rw-r--r--	kernel/trace/ftrace.c		68
-rw-r--r--	kernel/trace/trace.c		13
-rw-r--r--	kernel/trace/trace.h		 1
-rw-r--r--	kernel/trace/trace_events.c	17
-rw-r--r--	kernel/trace/trace_selftest.c	 2
7 files changed, 87 insertions(+), 26 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6383115e9d2c..2e028854bac7 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -307,7 +307,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
 static inline void stack_tracer_disable(void)
 {
 	/* Preemption or interupts must be disabled */
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
 	this_cpu_inc(disable_stack_tracer);
 }
@@ -320,7 +320,7 @@ static inline void stack_tracer_disable(void)
  */
 static inline void stack_tracer_enable(void)
 {
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
 	this_cpu_dec(disable_stack_tracer);
 }
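
Why the one-word fix matters: IS_ENABLED() evaluates to 0 for any config symbol that is not defined, so the misspelled CONFIG_PREEMPT_DEBUG silently compiled the sanity check out instead of breaking the build. A minimal standalone sketch of the trap (CONFIG_MISSPELLED_OPTION is a made-up symbol for illustration):

#include <linux/kconfig.h>
#include <linux/bug.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>

/*
 * IS_ENABLED(CONFIG_FOO) is 1 only when CONFIG_FOO=y or =m; an
 * undefined (e.g. misspelled) symbol yields 0 with no diagnostic.
 */
static inline void check_calling_context(void)
{
	if (IS_ENABLED(CONFIG_MISSPELLED_OPTION))	/* always 0: dead code */
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
}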
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5012b524283d..7f11050746ae 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -217,7 +217,6 @@ enum {
 	TRACE_EVENT_FL_CAP_ANY_BIT,
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
-	TRACE_EVENT_FL_WAS_ENABLED_BIT,
 	TRACE_EVENT_FL_TRACEPOINT_BIT,
 	TRACE_EVENT_FL_KPROBE_BIT,
 	TRACE_EVENT_FL_UPROBE_BIT,
@@ -229,9 +228,6 @@ enum {
  *  CAP_ANY       - Any user can enable for perf
  *  NO_SET_FILTER - Set when filter has error and is to be ignored
  *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
- *  WAS_ENABLED   - Set and stays set when an event was ever enabled
- *                    (used for module unloading, if a module event is enabled,
- *                     it is best to clear the buffers that used it).
  *  TRACEPOINT    - Event is a tracepoint
  *  KPROBE        - Event is a kprobe
  *  UPROBE        - Event is a uprobe
@@ -241,7 +237,6 @@ enum {
 	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
-	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
 	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
 	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
@@ -306,6 +301,7 @@ enum {
 	EVENT_FILE_FL_TRIGGER_MODE_BIT,
 	EVENT_FILE_FL_TRIGGER_COND_BIT,
 	EVENT_FILE_FL_PID_FILTER_BIT,
+	EVENT_FILE_FL_WAS_ENABLED_BIT,
 };
 
 /*
@@ -321,6 +317,7 @@ enum {
  *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
  *  TRIGGER_COND  - When set, one or more triggers has an associated filter
  *  PID_FILTER    - When set, the event is filtered based on pid
+ *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
  */
 enum {
 	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -333,6 +330,7 @@ enum {
 	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
 	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
 	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
+	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
 };
 
 struct trace_event_file {
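
Moving WAS_ENABLED from the per-call flags to the per-file flags makes the mark per trace instance, which is what lets rmmod clear only the buffers whose events were actually active. The change keeps the two-enum pattern used throughout this header: one enum numbers the bits, a second derives the masks, so the mask always matches its bit. A toy version of the pattern (EXAMPLE_* names are illustrative, not kernel symbols):

enum {
	EXAMPLE_FL_ENABLED_BIT,			/* bit numbers: 0, 1, ... */
	EXAMPLE_FL_WAS_ENABLED_BIT,
};

enum {
	EXAMPLE_FL_ENABLED	= (1 << EXAMPLE_FL_ENABLED_BIT),
	EXAMPLE_FL_WAS_ENABLED	= (1 << EXAMPLE_FL_WAS_ENABLED_BIT),
};

The _BIT values are what set_bit()/clear_bit() take; the derived masks are for testing a flags word directly.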
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 96cea88fa00f..6abfafd7f173 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2828,13 +2828,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 	if (!command || !ftrace_enabled) {
 		/*
-		 * If these are per_cpu ops, they still need their
-		 * per_cpu field freed. Since, function tracing is
+		 * If these are dynamic or per_cpu ops, they still
+		 * need their data freed. Since, function tracing is
 		 * not currently active, we can just free them
 		 * without synchronizing all CPUs.
 		 */
-		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
-			per_cpu_ops_free(ops);
+		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+			goto free_ops;
+
 		return 0;
 	}
 
@@ -2900,6 +2901,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	if (IS_ENABLED(CONFIG_PREEMPT))
 		synchronize_rcu_tasks();
 
+ free_ops:
 	arch_ftrace_trampoline_free(ops);
 
 	if (ops->flags & FTRACE_OPS_FL_PER_CPU)
@@ -5690,10 +5692,51 @@ static int referenced_filters(struct dyn_ftrace *rec)
 	return cnt;
 }
 
+static void
+clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
+{
+	struct ftrace_func_entry *entry;
+	struct dyn_ftrace *rec;
+	int i;
+
+	if (ftrace_hash_empty(hash))
+		return;
+
+	for (i = 0; i < pg->index; i++) {
+		rec = &pg->records[i];
+		entry = __ftrace_lookup_ip(hash, rec->ip);
+		/*
+		 * Do not allow this rec to match again.
+		 * Yeah, it may waste some memory, but will be removed
+		 * if/when the hash is modified again.
+		 */
+		if (entry)
+			entry->ip = 0;
+	}
+}
+
+/* Clear any records from hashs */
+static void clear_mod_from_hashes(struct ftrace_page *pg)
+{
+	struct trace_array *tr;
+
+	mutex_lock(&trace_types_lock);
+	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (!tr->ops || !tr->ops->func_hash)
+			continue;
+		mutex_lock(&tr->ops->func_hash->regex_lock);
+		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
+		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
+		mutex_unlock(&tr->ops->func_hash->regex_lock);
+	}
+	mutex_unlock(&trace_types_lock);
+}
+
 void ftrace_release_mod(struct module *mod)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page **last_pg;
+	struct ftrace_page *tmp_page = NULL;
 	struct ftrace_page *pg;
 	int order;
 
@@ -5723,14 +5766,25 @@ void ftrace_release_mod(struct module *mod)
 
 			ftrace_update_tot_cnt -= pg->index;
 			*last_pg = pg->next;
-			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-			free_pages((unsigned long)pg->records, order);
-			kfree(pg);
+
+			pg->next = tmp_page;
+			tmp_page = pg;
 		} else
 			last_pg = &pg->next;
 	}
 out_unlock:
 	mutex_unlock(&ftrace_lock);
+
+	for (pg = tmp_page; pg; pg = tmp_page) {
+
+		/* Needs to be called outside of ftrace_lock */
+		clear_mod_from_hashes(pg);
+
+		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+		free_pages((unsigned long)pg->records, order);
+		tmp_page = pg->next;
+		kfree(pg);
+	}
 }
 
 void ftrace_module_enable(struct module *mod)
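
The reshuffle in ftrace_release_mod() is the classic defer-work-out-of-a-lock shape: pages are unlinked onto a private list while ftrace_lock is held, then cleaned up and freed after it is dropped, because clear_mod_from_hashes() takes trace_types_lock and must not nest inside ftrace_lock. A hedged toy reduction of the shape (struct node, purge_all, and expensive_cleanup are hypothetical names, not from this patch):

#include <linux/mutex.h>
#include <linux/slab.h>

struct node {
	struct node *next;
	/* payload */
};

static struct node *head;
static DEFINE_MUTEX(list_lock);

void expensive_cleanup(struct node *n);	/* hypothetical; may take other locks */

static void purge_all(void)
{
	struct node *tmp_list = NULL, *n;

	mutex_lock(&list_lock);
	while ((n = head)) {			/* unlink under the lock */
		head = n->next;
		n->next = tmp_list;
		tmp_list = n;
	}
	mutex_unlock(&list_lock);

	while ((n = tmp_list)) {		/* clean up outside of it */
		tmp_list = n->next;
		expensive_cleanup(n);
		kfree(n);
	}
}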
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 44004d8aa3b3..5360b7aec57a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1702,6 +1702,9 @@ void tracing_reset_all_online_cpus(void)
 	struct trace_array *tr;
 
 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (!tr->clear_trace)
+			continue;
+		tr->clear_trace = false;
 		tracing_reset_online_cpus(&tr->trace_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		tracing_reset_online_cpus(&tr->max_buffer);
@@ -2799,11 +2802,17 @@ static char *get_trace_buf(void)
 	if (!buffer || buffer->nesting >= 4)
 		return NULL;
 
-	return &buffer->buffer[buffer->nesting++][0];
+	buffer->nesting++;
+
+	/* Interrupts must see nesting incremented before we use the buffer */
+	barrier();
+	return &buffer->buffer[buffer->nesting][0];
 }
 
 static void put_trace_buf(void)
 {
+	/* Don't let the decrement of nesting leak before this */
+	barrier();
 	this_cpu_dec(trace_percpu_buffer->nesting);
 }
 
@@ -6220,7 +6229,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 	tracing_reset_online_cpus(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+	if (tr->max_buffer.buffer)
 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
 	tracing_reset_online_cpus(&tr->max_buffer);
 #endif
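
get_trace_buf()/put_trace_buf() hand out one of four per-CPU trace_printk() buffers keyed by a nesting counter. The barrier() pairs the increment with the first use of the slot, so a trace_printk() from an interrupt that lands in between sees a consistent count and takes the next slot rather than reusing the live one; since the buffers are per-CPU, only same-CPU interrupts can race and a compiler barrier suffices. A compressed sketch of the same protocol, not the kernel's code (grab_buf/release_buf and the sizes are illustrative, and the slot indexing here is chosen so level 0 uses buf[0]):

#include <linux/percpu.h>
#include <linux/compiler.h>

struct nest_bufs {
	int  nesting;			/* 0..4: how many levels are live */
	char buf[4][256];
};

static DEFINE_PER_CPU(struct nest_bufs, nest_bufs);

static char *grab_buf(void)		/* call with preemption disabled */
{
	struct nest_bufs *b = this_cpu_ptr(&nest_bufs);

	if (b->nesting >= 4)
		return NULL;
	b->nesting++;
	barrier();			/* an IRQ arriving now sees the slot as taken */
	return b->buf[b->nesting - 1];
}

static void release_buf(void)
{
	barrier();			/* keep buffer stores before the release */
	this_cpu_dec(nest_bufs.nesting);
}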
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 490ba229931d..fb5d54d0d1b3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -245,6 +245,7 @@ struct trace_array {
 	int			stop_count;
 	int			clock_id;
 	int			nr_topts;
+	bool			clear_trace;
 	struct tracer		*current_trace;
 	unsigned int		trace_flags;
 	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 36132f9280e6..87468398b9ed 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -406,7 +406,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
 
 		if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
 			tracing_stop_tgid_record();
-			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
+			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
 		}
 
 		call->class->reg(call, TRACE_REG_UNREGISTER, file);
@@ -466,7 +466,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
 			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
 
 			/* WAS_ENABLED gets set but never cleared. */
-			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
+			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
 		}
 		break;
 	}
@@ -2058,6 +2058,10 @@ static void event_remove(struct trace_event_call *call)
 	do_for_each_event_file(tr, file) {
 		if (file->event_call != call)
 			continue;
+
+		if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
+			tr->clear_trace = true;
+
 		ftrace_event_enable_disable(file, 0);
 		/*
 		 * The do_for_each_event_file() is
@@ -2396,15 +2400,11 @@ static void trace_module_add_events(struct module *mod)
 static void trace_module_remove_events(struct module *mod)
 {
 	struct trace_event_call *call, *p;
-	bool clear_trace = false;
 
 	down_write(&trace_event_sem);
 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
-		if (call->mod == mod) {
-			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
-				clear_trace = true;
+		if (call->mod == mod)
 			__trace_remove_event_call(call);
-		}
 	}
 	up_write(&trace_event_sem);
 
@@ -2416,8 +2416,7 @@ static void trace_module_remove_events(struct module *mod)
 	 * over from this module may be passed to the new module events and
 	 * unexpected results may occur.
 	 */
-	if (clear_trace)
-		tracing_reset_all_online_cpus();
+	tracing_reset_all_online_cpus();
 }
 
 static int trace_module_notify(struct notifier_block *self,
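
Taken together with the trace.c and trace.h changes above, the removal path now works per instance: enabling an event sets EVENT_FILE_FL_WAS_ENABLED on that instance's file, event_remove() promotes it to tr->clear_trace, and tracing_reset_all_online_cpus() skips any trace_array that never saw the module's events. A toy model of that selective reset (struct instance and reset_buffers are hypothetical names, not kernel symbols):

#include <linux/types.h>

struct instance {
	struct instance *next;
	bool clear_trace;	/* set when a dying module's event was enabled here */
};

void reset_buffers(struct instance *i);		/* hypothetical */

static void reset_marked(struct instance *list)
{
	struct instance *i;

	for (i = list; i; i = i->next) {
		if (!i->clear_trace)
			continue;	/* unaffected instances keep their data */
		i->clear_trace = false;
		reset_buffers(i);
	}
}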
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index cb917cebae29..b17ec642793b 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -273,7 +273,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
 		goto out_free;
 	if (cnt > 1) {
 		if (trace_selftest_test_global_cnt == 0)
-			goto out;
+			goto out_free;
 	}
 	if (trace_selftest_test_dyn_cnt == 0)
 		goto out_free;
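
The selftest fix is the usual unified-cleanup rule: once resources are held, every failure path must exit through the label that releases them, and the bare "goto out" here bypassed the free path. A hedged toy of the idiom (run_test, probe_a, and probe_b are made-up names):

#include <linux/slab.h>
#include <linux/errno.h>

int probe_a(void);	/* hypothetical checks */
int probe_b(void);

static int run_test(void)
{
	int ret = -EINVAL;
	char *scratch = kmalloc(64, GFP_KERNEL);

	if (!scratch)
		return -ENOMEM;

	if (probe_a())
		goto out_free;	/* jumping past the kfree() would leak */
	if (probe_b())
		goto out_free;

	ret = 0;
 out_free:
	kfree(scratch);
	return ret;
}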