Diffstat (limited to 'kernel')

 kernel/exit.c                         |   4
 kernel/fork.c                         |   2
 kernel/kthread.c                      |   3
 kernel/marker.c                       | 114
 kernel/module.c                       |  11
 kernel/sched.c                        |   6
 kernel/signal.c                       |   2
 kernel/trace/Kconfig                  |   1
 kernel/trace/ftrace.c                 | 304
 kernel/trace/trace.c                  |  23
 kernel/trace/trace.h                  |   3
 kernel/trace/trace_boot.c             |   3
 kernel/trace/trace_branch.c           |   3
 kernel/trace/trace_functions.c        |   3
 kernel/trace/trace_functions_return.c |  16
 kernel/trace/trace_irqsoff.c          |   9
 kernel/trace/trace_mmiotrace.c        |   3
 kernel/trace/trace_nop.c              |   3
 kernel/trace/trace_sched_switch.c     |   3
 kernel/trace/trace_sched_wakeup.c     |   3
 kernel/trace/trace_selftest.c         |  70
 kernel/trace/trace_sysprof.c          |   3
 kernel/tracepoint.c                   |  34
 23 files changed, 427 insertions(+), 199 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index ae2b92be5fae..f995d2418668 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,6 +54,10 @@
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 
+DEFINE_TRACE(sched_process_free);
+DEFINE_TRACE(sched_process_exit);
+DEFINE_TRACE(sched_process_wait);
+
 static void exit_mm(struct task_struct * tsk);
 
 static inline int task_detached(struct task_struct *p)
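Note: the DEFINE_TRACE() lines added above — and the matching ones in fork.c, kthread.c, sched.c and signal.c below — instantiate tracepoints that were previously declared only via DECLARE_TRACE() in a header. A hedged sketch of how the pieces fit together under the 2.6.28-era tracepoint API (the probe body and init function are illustrative, not from this patch):

	/* In a header (cf. include/trace/sched.h): declare the tracepoint type. */
	DECLARE_TRACE(sched_process_fork,
		TPPROTO(struct task_struct *parent, struct task_struct *child),
		TPARGS(parent, child));

	/* In exactly one .c file: instantiate it. This is what the hunks here add. */
	DEFINE_TRACE(sched_process_fork);

	/* A tracer attaches a probe through the generated register helper. */
	static void probe_sched_fork(struct task_struct *parent,
				     struct task_struct *child)
	{
		/* record the event; runs inside an sched-RCU read section */
	}

	static int __init attach_probe(void)
	{
		/* nonzero return means the probe could not be registered */
		return register_trace_sched_process_fork(probe_sched_fork);
	}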
diff --git a/kernel/fork.c b/kernel/fork.c
index f6083561dfe0..0837d0deee5f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -79,6 +79,8 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
+DEFINE_TRACE(sched_process_fork);
+
 int nr_processes(void)
 {
 	int cpu;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 8e7a7ce3ed0a..4fbc456f393d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -21,6 +21,9 @@ static DEFINE_SPINLOCK(kthread_create_lock);
 static LIST_HEAD(kthread_create_list);
 struct task_struct *kthreadd_task;
 
+DEFINE_TRACE(sched_kthread_stop);
+DEFINE_TRACE(sched_kthread_stop_ret);
+
 struct kthread_create_info
 {
 	/* Information passed to kthread() from kthreadd. */
diff --git a/kernel/marker.c b/kernel/marker.c
index 2898b647d415..ea54f2647868 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -81,7 +81,7 @@ struct marker_entry {
  * though the function pointer change and the marker enabling are two distinct
  * operations that modifies the execution flow of preemptible code.
  */
-void __mark_empty_function(void *probe_private, void *call_private,
+notrace void __mark_empty_function(void *probe_private, void *call_private,
 	const char *fmt, va_list *args)
 {
 }
@@ -97,7 +97,8 @@ EXPORT_SYMBOL_GPL(__mark_empty_function);
  * need to put a full smp_rmb() in this branch. This is why we do not use
  * rcu_dereference() for the pointer read.
  */
-void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
+notrace void marker_probe_cb(const struct marker *mdata,
+	void *call_private, ...)
 {
 	va_list args;
 	char ptype;
@@ -107,7 +108,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
 	 * sure the teardown of the callbacks can be done correctly when they
 	 * are in modules and they insure RCU read coherency.
 	 */
-	rcu_read_lock_sched();
+	rcu_read_lock_sched_notrace();
 	ptype = mdata->ptype;
 	if (likely(!ptype)) {
 		marker_probe_func *func;
@@ -145,7 +146,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
 			va_end(args);
 		}
 	}
-	rcu_read_unlock_sched();
+	rcu_read_unlock_sched_notrace();
 }
 EXPORT_SYMBOL_GPL(marker_probe_cb);
 
@@ -157,12 +158,13 @@ EXPORT_SYMBOL_GPL(marker_probe_cb);
  *
  * Should be connected to markers "MARK_NOARGS".
  */
-static void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
+static notrace void marker_probe_cb_noarg(const struct marker *mdata,
+	void *call_private, ...)
 {
 	va_list args;	/* not initialized */
 	char ptype;
 
-	rcu_read_lock_sched();
+	rcu_read_lock_sched_notrace();
 	ptype = mdata->ptype;
 	if (likely(!ptype)) {
 		marker_probe_func *func;
@@ -195,7 +197,7 @@ static void marker_probe_cb_noarg(const struct marker *mdata, void *call_private
 			multi[i].func(multi[i].probe_private, call_private,
 				mdata->format, &args);
 	}
-	rcu_read_unlock_sched();
+	rcu_read_unlock_sched_notrace();
 }
 
 static void free_old_closure(struct rcu_head *head)
@@ -477,7 +479,7 @@ static int marker_set_format(struct marker_entry *entry, const char *format)
 static int set_marker(struct marker_entry *entry, struct marker *elem,
 		int active)
 {
-	int ret;
+	int ret = 0;
 	WARN_ON(strcmp(entry->name, elem->name) != 0);
 
 	if (entry->format) {
@@ -529,9 +531,40 @@ static int set_marker(struct marker_entry *entry, struct marker *elem,
 	 */
 	smp_wmb();
 	elem->ptype = entry->ptype;
+
+	if (elem->tp_name && (active ^ elem->state)) {
+		WARN_ON(!elem->tp_cb);
+		/*
+		 * It is ok to directly call the probe registration because type
+		 * checking has been done in the __trace_mark_tp() macro.
+		 */
+
+		if (active) {
+			/*
+			 * try_module_get should always succeed because we hold
+			 * lock_module() to get the tp_cb address.
+			 */
+			ret = try_module_get(__module_text_address(
+				(unsigned long)elem->tp_cb));
+			BUG_ON(!ret);
+			ret = tracepoint_probe_register_noupdate(
+				elem->tp_name,
+				elem->tp_cb);
+		} else {
+			ret = tracepoint_probe_unregister_noupdate(
+				elem->tp_name,
+				elem->tp_cb);
+			/*
+			 * tracepoint_probe_update_all() must be called
+			 * before the module containing tp_cb is unloaded.
+			 */
+			module_put(__module_text_address(
+				(unsigned long)elem->tp_cb));
+		}
+	}
 	elem->state = active;
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -542,7 +575,24 @@ static int set_marker(struct marker_entry *entry, struct marker *elem,
  */
 static void disable_marker(struct marker *elem)
 {
+	int ret;
+
 	/* leave "call" as is. It is known statically. */
+	if (elem->tp_name && elem->state) {
+		WARN_ON(!elem->tp_cb);
+		/*
+		 * It is ok to directly call the probe registration because type
+		 * checking has been done in the __trace_mark_tp() macro.
+		 */
+		ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+			elem->tp_cb);
+		WARN_ON(ret);
+		/*
+		 * tracepoint_probe_update_all() must be called
+		 * before the module containing tp_cb is unloaded.
+		 */
+		module_put(__module_text_address((unsigned long)elem->tp_cb));
+	}
 	elem->state = 0;
 	elem->single.func = __mark_empty_function;
 	/* Update the function before setting the ptype */
@@ -606,6 +656,7 @@ static void marker_update_probes(void)
 	marker_update_probe_range(__start___markers, __stop___markers);
 	/* Markers in modules. */
 	module_update_markers();
+	tracepoint_probe_update_all();
 }
 
 /**
@@ -653,10 +704,11 @@ int marker_probe_register(const char *name, const char *format,
 		goto end;
 	}
 	mutex_unlock(&markers_mutex);
-	marker_update_probes();		/* may update entry */
+	marker_update_probes();
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
-	WARN_ON(!entry);
+	if (!entry)
+		goto end;
 	if (entry->rcu_pending)
 		rcu_barrier_sched();
 	entry->oldptr = old;
@@ -697,7 +749,7 @@ int marker_probe_unregister(const char *name,
 		rcu_barrier_sched();
 	old = marker_entry_remove_probe(entry, probe, probe_private);
 	mutex_unlock(&markers_mutex);
-	marker_update_probes();		/* may update entry */
+	marker_update_probes();
 	mutex_lock(&markers_mutex);
 	entry = get_marker(name);
 	if (!entry)
@@ -778,10 +830,11 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
 		rcu_barrier_sched();
 	old = marker_entry_remove_probe(entry, NULL, probe_private);
 	mutex_unlock(&markers_mutex);
-	marker_update_probes();		/* may update entry */
+	marker_update_probes();
 	mutex_lock(&markers_mutex);
 	entry = get_marker_from_private_data(probe, probe_private);
-	WARN_ON(!entry);
+	if (!entry)
+		goto end;
 	if (entry->rcu_pending)
 		rcu_barrier_sched();
 	entry->oldptr = old;
@@ -842,3 +895,36 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe,
 	return ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL_GPL(marker_get_private_data);
+
+#ifdef CONFIG_MODULES
+
+int marker_module_notify(struct notifier_block *self,
+			 unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	switch (val) {
+	case MODULE_STATE_COMING:
+		marker_update_probe_range(mod->markers,
+			mod->markers + mod->num_markers);
+		break;
+	case MODULE_STATE_GOING:
+		marker_update_probe_range(mod->markers,
+			mod->markers + mod->num_markers);
+		break;
+	}
+	return 0;
+}
+
+struct notifier_block marker_module_nb = {
+	.notifier_call = marker_module_notify,
+	.priority = 0,
+};
+
+static int init_markers(void)
+{
+	return register_module_notifier(&marker_module_nb);
+}
+__initcall(init_markers);
+
+#endif /* CONFIG_MODULES */
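For reference, the registration path hardened above is driven through the public marker API. A minimal, hedged usage sketch (the marker name, format string and probe body are made up for illustration; the probe signature follows marker_probe_func from this era):

	#include <linux/marker.h>

	static void my_probe(void *probe_private, void *call_private,
			     const char *fmt, va_list *args)
	{
		/* decode *args according to fmt ("value %d" here) */
	}

	static int __init attach(void)
	{
		/* name and format must match the trace_mark() site exactly */
		return marker_probe_register("my_subsys_event", "value %d",
					     my_probe, NULL);
	}

	static void __exit detach(void)
	{
		marker_probe_unregister("my_subsys_event", my_probe, NULL);
	}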
diff --git a/kernel/module.c b/kernel/module.c
index 1f4cc00e0c20..89bcf7c1327d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2184,24 +2184,15 @@ static noinline struct module *load_module(void __user *umod,
 		struct mod_debug *debug;
 		unsigned int num_debug;
 
-#ifdef CONFIG_MARKERS
-		marker_update_probe_range(mod->markers,
-			mod->markers + mod->num_markers);
-#endif
 		debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
 				     sizeof(*debug), &num_debug);
 		dynamic_printk_setup(debug, num_debug);
-
-#ifdef CONFIG_TRACEPOINTS
-		tracepoint_update_probe_range(mod->tracepoints,
-			mod->tracepoints + mod->num_tracepoints);
-#endif
 	}
 
 	/* sechdrs[0].sh_size is always zero */
 	mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc",
 			    sizeof(*mseg), &num_mcount);
-	ftrace_init_module(mseg, mseg + num_mcount);
+	ftrace_init_module(mod, mseg, mseg + num_mcount);
 
 	err = module_finalize(hdr, sechdrs, mod);
 	if (err < 0)
diff --git a/kernel/sched.c b/kernel/sched.c
index 50a21f964679..327f91c63c99 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -118,6 +118,12 @@
  */
 #define RUNTIME_INF	((u64)~0ULL)
 
+DEFINE_TRACE(sched_wait_task);
+DEFINE_TRACE(sched_wakeup);
+DEFINE_TRACE(sched_wakeup_new);
+DEFINE_TRACE(sched_switch);
+DEFINE_TRACE(sched_migrate_task);
+
 #ifdef CONFIG_SMP
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
diff --git a/kernel/signal.c b/kernel/signal.c
index 4530fc654455..e9afe63da24b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -41,6 +41,8 @@
 
 static struct kmem_cache *sigqueue_cachep;
 
+DEFINE_TRACE(sched_signal_send);
+
 static void __user *sig_handler(struct task_struct *t, int sig)
 {
 	return t->sighand->action[sig - 1].sa.sa_handler;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9c89526b6b7c..b8378fad29a3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -59,7 +59,6 @@ config FUNCTION_TRACER
 
 config FUNCTION_RET_TRACER
 	bool "Kernel Function return Tracer"
-	depends on !DYNAMIC_FTRACE
 	depends on HAVE_FUNCTION_RET_TRACER
 	depends on FUNCTION_TRACER
 	help
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 54cb9a7d15e5..f212da486689 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -50,6 +50,9 @@ static int last_ftrace_enabled;
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
+/* By default, current tracing type is normal tracing. */
+enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -334,7 +337,7 @@ ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *rec;
 
-	if (!ftrace_enabled || ftrace_disabled)
+	if (ftrace_disabled)
 		return NULL;
 
 	rec = ftrace_alloc_dyn_node(ip);
@@ -348,107 +351,138 @@ ftrace_record_ip(unsigned long ip)
 	return rec;
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
+static void print_ip_ins(const char *fmt, unsigned char *p)
+{
+	int i;
+
+	printk(KERN_CONT "%s", fmt);
+
+	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
+		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+}
+
+static void ftrace_bug(int failed, unsigned long ip)
+{
+	switch (failed) {
+	case -EFAULT:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on modifying ");
+		print_ip_sym(ip);
+		break;
+	case -EINVAL:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace failed to modify ");
+		print_ip_sym(ip);
+		print_ip_ins(" actual: ", (unsigned char *)ip);
+		printk(KERN_CONT "\n");
+		break;
+	case -EPERM:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on writing ");
+		print_ip_sym(ip);
+		break;
+	default:
+		FTRACE_WARN_ON_ONCE(1);
+		pr_info("ftrace faulted on unknown error ");
+		print_ip_sym(ip);
+	}
+}
+
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec,
-		      unsigned char *old, unsigned char *new, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
 	unsigned long ip, fl;
+	unsigned long ftrace_addr;
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
+		ftrace_addr = (unsigned long)ftrace_caller;
+	else
+		ftrace_addr = (unsigned long)ftrace_return_caller;
+#else
+	ftrace_addr = (unsigned long)ftrace_caller;
+#endif
 
 	ip = rec->ip;
 
-	if (ftrace_filtered && enable) {
+	/*
+	 * If this record is not to be traced and
+	 * it is not enabled then do nothing.
+	 *
+	 * If this record is not to be traced and
+	 * it is enabled then disabled it.
+	 *
+	 */
+	if (rec->flags & FTRACE_FL_NOTRACE) {
+		if (rec->flags & FTRACE_FL_ENABLED)
+			rec->flags &= ~FTRACE_FL_ENABLED;
+		else
+			return 0;
+
+	} else if (ftrace_filtered && enable) {
 		/*
-		 * If filtering is on:
-		 *
-		 * If this record is set to be filtered and
-		 * is enabled then do nothing.
-		 *
-		 * If this record is set to be filtered and
-		 * it is not enabled, enable it.
-		 *
-		 * If this record is not set to be filtered
-		 * and it is not enabled do nothing.
-		 *
-		 * If this record is set not to trace then
-		 * do nothing.
-		 *
-		 * If this record is set not to trace and
-		 * it is enabled then disable it.
-		 *
-		 * If this record is not set to be filtered and
-		 * it is enabled, disable it.
+		 * Filtering is on:
 		 */
 
-		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
-				   FTRACE_FL_ENABLED);
+		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
 
-		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
-		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
-		    !fl || (fl == FTRACE_FL_NOTRACE))
+		/* Record is filtered and enabled, do nothing */
+		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
 			return 0;
 
-		/*
-		 * If it is enabled disable it,
-		 * otherwise enable it!
-		 */
-		if (fl & FTRACE_FL_ENABLED) {
-			/* swap new and old */
-			new = old;
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
+		/* Record is not filtered and is not enabled do nothing */
+		if (!fl)
+			return 0;
+
+		/* Record is not filtered but enabled, disable it */
+		if (fl == FTRACE_FL_ENABLED)
 			rec->flags &= ~FTRACE_FL_ENABLED;
-		} else {
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
+		else
+		/* Otherwise record is filtered but not enabled, enable it */
 			rec->flags |= FTRACE_FL_ENABLED;
-		}
 	} else {
+		/* Disable or not filtered */
 
 		if (enable) {
-			/*
-			 * If this record is set not to trace and is
-			 * not enabled, do nothing.
-			 */
-			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
-			if (fl == FTRACE_FL_NOTRACE)
-				return 0;
-
-			new = ftrace_call_replace(ip, FTRACE_ADDR);
-		} else
-			old = ftrace_call_replace(ip, FTRACE_ADDR);
-
-		if (enable) {
+			/* if record is enabled, do nothing */
 			if (rec->flags & FTRACE_FL_ENABLED)
 				return 0;
+
 			rec->flags |= FTRACE_FL_ENABLED;
+
 		} else {
+
+			/* if record is not enabled do nothing */
 			if (!(rec->flags & FTRACE_FL_ENABLED))
 				return 0;
+
 			rec->flags &= ~FTRACE_FL_ENABLED;
 		}
 	}
 
-	return ftrace_modify_code(ip, old, new);
+	if (rec->flags & FTRACE_FL_ENABLED)
+		return ftrace_make_call(rec, ftrace_addr);
+	else
+		return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
 {
 	int i, failed;
-	unsigned char *new = NULL, *old = NULL;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 
-	if (enable)
-		old = ftrace_nop_replace();
-	else
-		new = ftrace_nop_replace();
-
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
 		for (i = 0; i < pg->index; i++) {
 			rec = &pg->records[i];
 
-			/* don't modify code that has already faulted */
-			if (rec->flags & FTRACE_FL_FAILED)
+			/*
+			 * Skip over free records and records that have
+			 * failed.
+			 */
+			if (rec->flags & FTRACE_FL_FREE ||
+			    rec->flags & FTRACE_FL_FAILED)
 				continue;
 
 			/* ignore updates to this record's mcount site */
@@ -459,68 +493,30 @@ static void ftrace_replace_code(int enable)
 				unfreeze_record(rec);
 			}
 
-			failed = __ftrace_replace_code(rec, old, new, enable);
+			failed = __ftrace_replace_code(rec, enable);
 			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
 				rec->flags |= FTRACE_FL_FAILED;
 				if ((system_state == SYSTEM_BOOTING) ||
 				    !core_kernel_text(rec->ip)) {
 					ftrace_free_rec(rec);
-				}
+				} else
+					ftrace_bug(failed, rec->ip);
 			}
 		}
 	}
 }
 
-static void print_ip_ins(const char *fmt, unsigned char *p)
-{
-	int i;
-
-	printk(KERN_CONT "%s", fmt);
-
-	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
-}
-
 static int
-ftrace_code_disable(struct dyn_ftrace *rec)
+ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
 	unsigned long ip;
-	unsigned char *nop, *call;
 	int ret;
 
 	ip = rec->ip;
 
-	nop = ftrace_nop_replace();
-	call = ftrace_call_replace(ip, mcount_addr);
-
-	ret = ftrace_modify_code(ip, call, nop);
+	ret = ftrace_make_nop(mod, rec, mcount_addr);
 	if (ret) {
-		switch (ret) {
-		case -EFAULT:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on modifying ");
-			print_ip_sym(ip);
-			break;
-		case -EINVAL:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace failed to modify ");
-			print_ip_sym(ip);
-			print_ip_ins(" expected: ", call);
-			print_ip_ins(" actual: ", (unsigned char *)ip);
-			print_ip_ins(" replace: ", nop);
-			printk(KERN_CONT "\n");
-			break;
-		case -EPERM:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on writing ");
-			print_ip_sym(ip);
-			break;
-		default:
-			FTRACE_WARN_ON_ONCE(1);
-			pr_info("ftrace faulted on unknown error ");
-			print_ip_sym(ip);
-		}
-
+		ftrace_bug(ret, ip);
 		rec->flags |= FTRACE_FL_FAILED;
 		return 0;
 	}
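The two calls that replace ftrace_modify_code() in the hunks above — ftrace_make_call() and ftrace_make_nop() — are the new per-architecture interface, and the module pointer is what lets an architecture patch records living in module text. As a hedged sketch of what an implementation looks like (modeled on the x86 version from this patch series; other architectures differ):

	/* Sketch: arch-side pair called by __ftrace_replace_code() and
	 * ftrace_code_disable(). ftrace_call_replace()/ftrace_nop_replace()
	 * build the expected and replacement instruction bytes, and
	 * ftrace_modify_code() verifies the site before patching it.
	 */
	int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
			    unsigned long addr)
	{
		unsigned char *new, *old;
		unsigned long ip = rec->ip;

		old = ftrace_call_replace(ip, addr);	/* expect: call <addr> */
		new = ftrace_nop_replace();		/* write:  nop */

		return ftrace_modify_code(rec->ip, old, new);
	}

	int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
	{
		unsigned char *new, *old;
		unsigned long ip = rec->ip;

		old = ftrace_nop_replace();		/* expect: nop */
		new = ftrace_call_replace(ip, addr);	/* write:  call <addr> */

		return ftrace_modify_code(rec->ip, old, new);
	}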
@@ -560,8 +556,7 @@ static void ftrace_startup(void)
 
 	mutex_lock(&ftrace_start_lock);
 	ftrace_start_up++;
-	if (ftrace_start_up == 1)
-		command |= FTRACE_ENABLE_CALLS;
+	command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
 		saved_ftrace_func = ftrace_trace_function;
@@ -639,7 +634,7 @@ static cycle_t ftrace_update_time;
 static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
-static int ftrace_update_code(void)
+static int ftrace_update_code(struct module *mod)
 {
 	struct dyn_ftrace *p, *t;
 	cycle_t start, stop;
@@ -656,7 +651,7 @@ static int ftrace_update_code(void)
 		list_del_init(&p->list);
 
 		/* convert record (i.e, patch mcount-call with NOP) */
-		if (ftrace_code_disable(p)) {
+		if (ftrace_code_disable(mod, p)) {
 			p->flags |= FTRACE_FL_CONVERTED;
 			ftrace_update_cnt++;
 		} else
@@ -699,7 +694,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
 	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt);
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -782,13 +777,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	void *p = NULL;
 	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
-	}
+	if (*pos > iter->pos)
+		*pos = iter->pos;
+
+	l = *pos;
+	p = t_next(m, p, &l);
 
 	return p;
 }
@@ -799,15 +792,21 @@ static void t_stop(struct seq_file *m, void *p)
 
 static int t_show(struct seq_file *m, void *v)
 {
+	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	seq_printf(m, "%s\n", str);
+	ret = seq_printf(m, "%s\n", str);
+	if (ret < 0) {
+		iter->pos--;
+		iter->idx--;
+	}
 
 	return 0;
 }
@@ -833,7 +832,7 @@ ftrace_avail_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
+	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -920,7 +919,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
+		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 
@@ -1211,7 +1210,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	mutex_lock(&ftrace_start_lock);
-	if (iter->filtered && ftrace_start_up && ftrace_enabled)
+	if (ftrace_start_up && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
 	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
@@ -1298,7 +1297,8 @@ static __init int ftrace_init_debugfs(void)
 
 fs_initcall(ftrace_init_debugfs);
 
-static int ftrace_convert_nops(unsigned long *start,
+static int ftrace_convert_nops(struct module *mod,
+			       unsigned long *start,
 			       unsigned long *end)
 {
 	unsigned long *p;
@@ -1309,23 +1309,32 @@ static int ftrace_convert_nops(unsigned long *start,
 	p = start;
 	while (p < end) {
 		addr = ftrace_call_adjust(*p++);
+		/*
+		 * Some architecture linkers will pad between
+		 * the different mcount_loc sections of different
+		 * object files to satisfy alignments.
+		 * Skip any NULL pointers.
+		 */
+		if (!addr)
+			continue;
 		ftrace_record_ip(addr);
 	}
 
 	/* disable interrupts to prevent kstop machine */
 	local_irq_save(flags);
-	ftrace_update_code();
+	ftrace_update_code(mod);
 	local_irq_restore(flags);
 	mutex_unlock(&ftrace_start_lock);
 
 	return 0;
 }
 
-void ftrace_init_module(unsigned long *start, unsigned long *end)
+void ftrace_init_module(struct module *mod,
+			unsigned long *start, unsigned long *end)
 {
 	if (ftrace_disabled || start == end)
 		return;
-	ftrace_convert_nops(start, end);
+	ftrace_convert_nops(mod, start, end);
 }
 
 extern unsigned long __start_mcount_loc[];
@@ -1355,7 +1364,8 @@ void __init ftrace_init(void)
 
 	last_ftrace_enabled = ftrace_enabled = 1;
 
-	ret = ftrace_convert_nops(__start_mcount_loc,
+	ret = ftrace_convert_nops(NULL,
+				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
 	return;
@@ -1411,10 +1421,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
 		return -1;
 
 	mutex_lock(&ftrace_sysctl_lock);
+
+	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	ret = __register_ftrace_function(ops);
 	ftrace_startup();
-	mutex_unlock(&ftrace_sysctl_lock);
 
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
@@ -1480,16 +1497,45 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+
+/* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
 			(trace_function_return_t)ftrace_stub;
-void register_ftrace_return(trace_function_return_t func)
+
+int register_ftrace_return(trace_function_return_t func)
 {
+	int ret = 0;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	/*
+	 * Don't launch return tracing if normal function
+	 * tracing is already running.
+	 */
+	if (ftrace_trace_function != ftrace_stub) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ftrace_tracing_type = FTRACE_TYPE_RETURN;
 	ftrace_function_return = func;
+	ftrace_startup();
+
+out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
 }
 
 void unregister_ftrace_return(void)
 {
+	mutex_lock(&ftrace_sysctl_lock);
+
 	ftrace_function_return = (trace_function_return_t)ftrace_stub;
+	ftrace_shutdown();
+	/* Restore normal tracing type */
+	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
+	mutex_unlock(&ftrace_sysctl_lock);
 }
 #endif
 
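Since register_ftrace_return() now takes ftrace_sysctl_lock and fails with -EBUSY when a normal function tracer is active, callers must check its result. A hedged usage sketch (assuming the 2.6.28-era trace_function_return_t callback, which receives a struct ftrace_retfunc describing the returning function; field names are hedged):

	/* called by ftrace_return_caller each time a traced function returns */
	static void my_return_probe(struct ftrace_retfunc *trace)
	{
		/* inspect the entry: function address, return site, and the
		 * call/return timestamps carried in the struct */
	}

	static int __init start_return_tracing(void)
	{
		int ret;

		ret = register_ftrace_return(my_return_probe);
		if (ret)	/* -EBUSY: an entry tracer already owns the hooks */
			return ret;
		return 0;
	}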
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4a904623e05d..396fda034e3f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1051,7 +1051,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	 * Need to use raw, since this must be called before the
 	 * recursive protection is performed.
 	 */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -1062,7 +1062,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	}
 
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
@@ -2638,8 +2638,11 @@ static int tracing_set_tracer(char *buf)
 		current_trace->reset(tr);
 
 	current_trace = t;
-	if (t->init)
-		t->init(tr);
+	if (t->init) {
+		ret = t->init(tr);
+		if (ret)
+			goto out;
+	}
 
 	trace_branch_enable(tr);
  out:
@@ -2655,6 +2658,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	char buf[max_tracer_type_len+1];
 	int i;
 	size_t ret;
+	int err;
+
+	ret = cnt;
 
 	if (cnt > max_tracer_type_len)
 		cnt = max_tracer_type_len;
@@ -2668,12 +2674,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
 		buf[i] = 0;
 
-	ret = tracing_set_tracer(buf);
-	if (!ret)
-		ret = cnt;
+	err = tracing_set_tracer(buf);
+	if (err)
+		return err;
 
-	if (ret > 0)
-		filp->f_pos += ret;
+	filp->f_pos += ret;
 
 	return ret;
 }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 790ea8c0e1f3..cdbd5cc22be8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -264,7 +264,8 @@ enum print_line_t {
  */
 struct tracer {
 	const char		*name;
-	void			(*init)(struct trace_array *tr);
+	/* Your tracer should raise a warning if init fails */
+	int			(*init)(struct trace_array *tr);
 	void			(*reset)(struct trace_array *tr);
 	void			(*start)(struct trace_array *tr);
 	void			(*stop)(struct trace_array *tr);
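Every tracer below is converted to this new signature: init now reports failure instead of returning void, and tracing_set_tracer() aborts the switch when it does. A minimal, hedged skeleton of a tracer under the updated struct (names are illustrative, not from the patch):

	static struct trace_array *my_trace;

	static int my_tracer_init(struct trace_array *tr)
	{
		int cpu;

		my_trace = tr;
		for_each_online_cpu(cpu)
			tracing_reset(tr, cpu);

		/* a nonzero return makes tracing_set_tracer() bail out */
		return 0;
	}

	static void my_tracer_reset(struct trace_array *tr)
	{
		/* undo whatever init armed */
	}

	static struct tracer my_tracer __read_mostly = {
		.name	= "my_tracer",
		.init	= my_tracer_init,
		.reset	= my_tracer_reset,
	};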
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index cb333b7fd113..a4fa2c57e34e 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -47,7 +47,7 @@ static void reset_boot_trace(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 }
 
-static void boot_trace_init(struct trace_array *tr)
+static int boot_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	boot_trace = tr;
@@ -56,6 +56,7 @@ static void boot_trace_init(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 
 	tracing_sched_switch_assign_trace(tr);
+	return 0;
 }
 
 static enum print_line_t
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 2511e32572ca..23f9b02ce967 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -125,7 +125,7 @@ static void stop_branch_trace(struct trace_array *tr)
 	disable_branch_tracing();
 }
 
-static void branch_trace_init(struct trace_array *tr)
+static int branch_trace_init(struct trace_array *tr)
 {
 	int cpu;
 
@@ -133,6 +133,7 @@ static void branch_trace_init(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 
 	start_branch_trace(tr);
+	return 0;
 }
 
 static void branch_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8693b7a0a5b2..e74f6d0a3216 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -42,9 +42,10 @@ static void stop_function_trace(struct trace_array *tr)
 	tracing_stop_cmdline_record();
 }
 
-static void function_trace_init(struct trace_array *tr)
+static int function_trace_init(struct trace_array *tr)
 {
 	start_function_trace(tr);
+	return 0;
 }
 
 static void function_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_functions_return.c b/kernel/trace/trace_functions_return.c
index 7680b21537dd..a68564af022b 100644
--- a/kernel/trace/trace_functions_return.c
+++ b/kernel/trace/trace_functions_return.c
@@ -14,28 +14,18 @@
 #include "trace.h"
 
 
-static void start_return_trace(struct trace_array *tr)
-{
-	register_ftrace_return(&trace_function_return);
-}
-
-static void stop_return_trace(struct trace_array *tr)
-{
-	unregister_ftrace_return();
-}
-
-static void return_trace_init(struct trace_array *tr)
+static int return_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	for_each_online_cpu(cpu)
 		tracing_reset(tr, cpu);
 
-	start_return_trace(tr);
+	return register_ftrace_return(&trace_function_return);
 }
 
 static void return_trace_reset(struct trace_array *tr)
 {
-	stop_return_trace(tr);
+	unregister_ftrace_return();
 }
 
 
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d919d4eaa7cc..7c2e326bbc8b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -416,11 +416,12 @@ static void irqsoff_tracer_close(struct trace_iterator *iter)
 }
 
 #ifdef CONFIG_IRQSOFF_TRACER
-static void irqsoff_tracer_init(struct trace_array *tr)
+static int irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
 	__irqsoff_tracer_init(tr);
+	return 0;
 }
 static struct tracer irqsoff_tracer __read_mostly =
 {
@@ -442,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
-static void preemptoff_tracer_init(struct trace_array *tr)
+static int preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
 
 	__irqsoff_tracer_init(tr);
+	return 0;
 }
 
 static struct tracer preemptoff_tracer __read_mostly =
@@ -471,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly =
 #if defined(CONFIG_IRQSOFF_TRACER) && \
 	defined(CONFIG_PREEMPT_TRACER)
 
-static void preemptirqsoff_tracer_init(struct trace_array *tr)
+static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
 	__irqsoff_tracer_init(tr);
+	return 0;
 }
 
 static struct tracer preemptirqsoff_tracer __read_mostly =
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 51bcf370215e..433d650eda9f 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -30,13 +30,14 @@ static void mmio_reset_data(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 }
 
-static void mmio_trace_init(struct trace_array *tr)
+static int mmio_trace_init(struct trace_array *tr)
 {
 	pr_debug("in %s\n", __func__);
 	mmio_trace_array = tr;
 
 	mmio_reset_data(tr);
 	enable_mmiotrace();
+	return 0;
 }
 
 static void mmio_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index 2ef1d227e7d8..0e77415caed3 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -24,7 +24,7 @@ static void stop_nop_trace(struct trace_array *tr)
 	/* Nothing to do! */
 }
 
-static void nop_trace_init(struct trace_array *tr)
+static int nop_trace_init(struct trace_array *tr)
 {
 	int cpu;
 	ctx_trace = tr;
@@ -33,6 +33,7 @@ static void nop_trace_init(struct trace_array *tr)
 		tracing_reset(tr, cpu);
 
 	start_nop_trace(tr);
+	return 0;
 }
 
 static void nop_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index be35bdfe2e38..863390557b44 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -206,10 +206,11 @@ static void stop_sched_trace(struct trace_array *tr)
 	tracing_stop_sched_switch_record();
 }
 
-static void sched_switch_trace_init(struct trace_array *tr)
+static int sched_switch_trace_init(struct trace_array *tr)
 {
 	ctx_trace = tr;
 	start_sched_trace(tr);
+	return 0;
 }
 
 static void sched_switch_trace_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 983f2b1478c9..0067b49746c1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -331,10 +331,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 	unregister_trace_sched_wakeup(probe_wakeup);
 }
 
-static void wakeup_tracer_init(struct trace_array *tr)
+static int wakeup_tracer_init(struct trace_array *tr)
 {
 	wakeup_trace = tr;
 	start_wakeup_tracer(tr);
+	return 0;
 }
 
 static void wakeup_tracer_reset(struct trace_array *tr)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 24e6e075e6d6..88c8eb70f54a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -52,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	int cpu, ret = 0;
 
 	/* Don't allow flipping of max traces now */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&ftrace_max_lock);
 
 	cnt = ring_buffer_entries(tr->buffer);
@@ -63,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 			break;
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	if (count)
 		*count = cnt;
@@ -71,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	return ret;
 }
 
+static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
+{
+	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
+		trace->name, init_ret);
+}
 #ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -111,7 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	ftrace_set_filter(func_name, strlen(func_name), 1);
 
 	/* enable tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
 
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
@@ -181,7 +190,12 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	ftrace_enabled = 1;
 	tracer_enabled = 1;
 
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
@@ -223,7 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 	/* disable interrupts for a bit */
@@ -272,7 +291,12 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 	/* disable preemption for a bit */
@@ -321,7 +345,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	}
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		goto out;
+	}
 
 	/* reset the max latency */
 	tracing_max_latency = 0;
@@ -449,7 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	wait_for_completion(&isrt);
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* reset the max latency */
 	tracing_max_latency = 0;
 
@@ -505,7 +538,12 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
 	int ret;
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
@@ -532,7 +570,12 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return 0;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
@@ -554,7 +597,12 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	int ret;
 
 	/* start the tracing */
-	trace->init(tr);
+	ret = trace->init(tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 	/* stop the tracing. */
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 05f753422aea..54960edb96d0 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -261,11 +261,12 @@ static void stop_stack_trace(struct trace_array *tr)
 	mutex_unlock(&sample_timer_lock);
 }
 
-static void stack_trace_init(struct trace_array *tr)
+static int stack_trace_init(struct trace_array *tr)
 {
 	sysprof_trace = tr;
 
 	start_stack_trace(tr);
+	return 0;
 }
 
 static void stack_trace_reset(struct trace_array *tr)
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index e96590f17de1..79602740bbb5 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -262,6 +262,7 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 static void disable_tracepoint(struct tracepoint *elem)
 {
 	elem->state = 0;
+	rcu_assign_pointer(elem->funcs, NULL);
}
 
 /**
@@ -540,3 +541,36 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
 	iter->tracepoint = NULL;
 }
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
+
+#ifdef CONFIG_MODULES
+
+int tracepoint_module_notify(struct notifier_block *self,
+			     unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	switch (val) {
+	case MODULE_STATE_COMING:
+		tracepoint_update_probe_range(mod->tracepoints,
+			mod->tracepoints + mod->num_tracepoints);
+		break;
+	case MODULE_STATE_GOING:
+		tracepoint_update_probe_range(mod->tracepoints,
+			mod->tracepoints + mod->num_tracepoints);
+		break;
+	}
+	return 0;
+}
+
+struct notifier_block tracepoint_module_nb = {
+	.notifier_call = tracepoint_module_notify,
+	.priority = 0,
+};
+
+static int init_tracepoints(void)
+{
+	return register_module_notifier(&tracepoint_module_nb);
+}
+__initcall(init_tracepoints);
+
+#endif /* CONFIG_MODULES */
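The new rcu_assign_pointer(elem->funcs, NULL) in disable_tracepoint() matters because every tracepoint call site reads funcs under an sched-RCU read section and stops iterating once it sees NULL. A hedged sketch of that call-site pattern, modeled on __DO_TRACE from include/linux/tracepoint.h of this era and specialized here to a probe taking one struct task_struct * argument (the generated macro is generic over the declared prototype):

	static void call_probes(struct tracepoint *tp, struct task_struct *p)
	{
		void **it_func;

		rcu_read_lock_sched_notrace();
		it_func = rcu_dereference(tp->funcs);	/* NULL again once disabled */
		if (it_func) {
			do {
				/* invoke each registered probe in turn */
				((void (*)(struct task_struct *))(*it_func))(p);
			} while (*(++it_func));
		}
		rcu_read_unlock_sched_notrace();
	}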