aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/tracepoint.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-04-29 16:55:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-04-29 16:55:38 -0400
commit9e8529afc4518f4e5d610001545ebc97e1333c79 (patch)
tree26e1aa2cbb50f3f511cfa7d8e39e6b7bd9221b68 /kernel/tracepoint.c
parentec25e246b94a3233ab064994ef05a170bdba0e7c (diff)
parent4c69e6ea415a35eb7f0fc8ee9390c8f7436492a2 (diff)
Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt: "Along with the usual minor fixes and clean ups there are a few major changes with this pull request. 1) Multiple buffers for the ftrace facility This feature has been requested by many people over the last few years. I even heard that Google was about to implement it themselves. I finally had time and cleaned up the code such that you can now create multiple instances of the ftrace buffer and have different events go to different buffers. This way, a low frequency event will not be lost in the noise of a high frequency event. Note, currently only events can go to different buffers, the tracers (ie function, function_graph and the latency tracers) still can only be written to the main buffer. 2) The function tracer triggers have now been extended. The function tracer had two triggers. One to enable tracing when a function is hit, and one to disable tracing. Now you can record a stack trace on a single (or many) function(s), take a snapshot of the buffer (copy it to the snapshot buffer), and you can enable or disable an event to be traced when a function is hit. 3) A perf clock has been added. A "perf" clock can be chosen to be used when tracing. This will cause ftrace to use the same clock as perf uses, and hopefully this will make it easier to interleave the perf and ftrace data for analysis." 
* tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (82 commits) tracepoints: Prevent null probe from being added tracing: Compare to 1 instead of zero for is_signed_type() tracing: Remove obsolete macro guard _TRACE_PROFILE_INIT ftrace: Get rid of ftrace_profile_bits tracing: Check return value of tracing_init_dentry() tracing: Get rid of unneeded key calculation in ftrace_hash_move() tracing: Reset ftrace_graph_filter_enabled if count is zero tracing: Fix off-by-one on allocating stat->pages kernel: tracing: Use strlcpy instead of strncpy tracing: Update debugfs README file tracing: Fix ftrace_dump() tracing: Rename trace_event_mutex to trace_event_sem tracing: Fix comment about prefix in arch_syscall_match_sym_name() tracing: Convert trace_destroy_fields() to static tracing: Move find_event_field() into trace_events.c tracing: Use TRACE_MAX_PRINT instead of constant tracing: Use pr_warn_once instead of open coded implementation ring-buffer: Add ring buffer startup selftest tracing: Bring Documentation/trace/ftrace.txt up to date tracing: Add "perf" trace_clock ... Conflicts: kernel/trace/ftrace.c kernel/trace/trace.c
Diffstat (limited to 'kernel/tracepoint.c')
-rw-r--r--kernel/tracepoint.c21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 0c05a4592047..29f26540e9c9 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -112,7 +112,8 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry,
 	int nr_probes = 0;
 	struct tracepoint_func *old, *new;
 
-	WARN_ON(!probe);
+	if (WARN_ON(!probe))
+		return ERR_PTR(-EINVAL);
 
 	debug_print_probes(entry);
 	old = entry->funcs;
@@ -152,13 +153,18 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
 
 	debug_print_probes(entry);
 	/* (N -> M), (N > 1, M >= 0) probes */
-	for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
-		if (!probe ||
-		    (old[nr_probes].func == probe &&
-		     old[nr_probes].data == data))
-			nr_del++;
+	if (probe) {
+		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+			if (old[nr_probes].func == probe &&
+			    old[nr_probes].data == data)
+				nr_del++;
+		}
 	}
 
+	/*
+	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
+	 * entire entry will be removed.
+	 */
 	if (nr_probes - nr_del == 0) {
 		/* N -> 0, (N > 1) */
 		entry->funcs = NULL;
@@ -173,8 +179,7 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
 	if (new == NULL)
 		return ERR_PTR(-ENOMEM);
 	for (i = 0; old[i].func; i++)
-		if (probe &&
-		    (old[i].func != probe || old[i].data != data))
+		if (old[i].func != probe || old[i].data != data)
 			new[j++] = old[i];
 	new[nr_probes - nr_del].func = NULL;
 	entry->refcount = nr_probes - nr_del;