diff options
author | Steven Rostedt <srostedt@redhat.com> | 2013-01-23 15:22:59 -0500 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2013-03-15 00:34:41 -0400 |
commit | ae3b5093ad6004b52e2825f3db1ad8200a2724d8 (patch) | |
tree | fb4918a6300a3d3016cf06da2de192a58514ee71 | |
parent | ae63b31e4d0e2ec09c569306ea46f664508ef717 (diff) |
tracing: Use RING_BUFFER_ALL_CPUS for TRACE_PIPE_ALL_CPU
Both RING_BUFFER_ALL_CPUS and TRACE_PIPE_ALL_CPU are defined as
-1 and used to say that all the ring buffers are to be modified
or read (instead of just a single cpu, which would be >= 0).
There's no reason to keep TRACE_PIPE_ALL_CPU, as it has also started
to be used for more than what it was created for. Now that
the ring buffer code has added a generic RING_BUFFER_ALL_CPUS define,
we can clean up the trace code to use that instead and remove
the TRACE_PIPE_ALL_CPU macro.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | kernel/trace/trace.c | 28 | ||||
-rw-r--r-- | kernel/trace/trace.h | 2 | ||||
-rw-r--r-- | kernel/trace/trace_kdb.c | 4 |
3 files changed, 16 insertions, 18 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 932931897b8d..59953aa28845 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -287,13 +287,13 @@ static DEFINE_PER_CPU(struct mutex, cpu_access_lock); | |||
287 | 287 | ||
288 | static inline void trace_access_lock(int cpu) | 288 | static inline void trace_access_lock(int cpu) |
289 | { | 289 | { |
290 | if (cpu == TRACE_PIPE_ALL_CPU) { | 290 | if (cpu == RING_BUFFER_ALL_CPUS) { |
291 | /* gain it for accessing the whole ring buffer. */ | 291 | /* gain it for accessing the whole ring buffer. */ |
292 | down_write(&all_cpu_access_lock); | 292 | down_write(&all_cpu_access_lock); |
293 | } else { | 293 | } else { |
294 | /* gain it for accessing a cpu ring buffer. */ | 294 | /* gain it for accessing a cpu ring buffer. */ |
295 | 295 | ||
296 | /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */ | 296 | /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */ |
297 | down_read(&all_cpu_access_lock); | 297 | down_read(&all_cpu_access_lock); |
298 | 298 | ||
299 | /* Secondly block other access to this @cpu ring buffer. */ | 299 | /* Secondly block other access to this @cpu ring buffer. */ |
@@ -303,7 +303,7 @@ static inline void trace_access_lock(int cpu) | |||
303 | 303 | ||
304 | static inline void trace_access_unlock(int cpu) | 304 | static inline void trace_access_unlock(int cpu) |
305 | { | 305 | { |
306 | if (cpu == TRACE_PIPE_ALL_CPU) { | 306 | if (cpu == RING_BUFFER_ALL_CPUS) { |
307 | up_write(&all_cpu_access_lock); | 307 | up_write(&all_cpu_access_lock); |
308 | } else { | 308 | } else { |
309 | mutex_unlock(&per_cpu(cpu_access_lock, cpu)); | 309 | mutex_unlock(&per_cpu(cpu_access_lock, cpu)); |
@@ -1823,7 +1823,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, | |||
1823 | * If we are in a per_cpu trace file, don't bother by iterating over | 1823 | * If we are in a per_cpu trace file, don't bother by iterating over |
1824 | * all cpu and peek directly. | 1824 | * all cpu and peek directly. |
1825 | */ | 1825 | */ |
1826 | if (cpu_file > TRACE_PIPE_ALL_CPU) { | 1826 | if (cpu_file > RING_BUFFER_ALL_CPUS) { |
1827 | if (ring_buffer_empty_cpu(buffer, cpu_file)) | 1827 | if (ring_buffer_empty_cpu(buffer, cpu_file)) |
1828 | return NULL; | 1828 | return NULL; |
1829 | ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); | 1829 | ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); |
@@ -1983,7 +1983,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1983 | iter->cpu = 0; | 1983 | iter->cpu = 0; |
1984 | iter->idx = -1; | 1984 | iter->idx = -1; |
1985 | 1985 | ||
1986 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 1986 | if (cpu_file == RING_BUFFER_ALL_CPUS) { |
1987 | for_each_tracing_cpu(cpu) | 1987 | for_each_tracing_cpu(cpu) |
1988 | tracing_iter_reset(iter, cpu); | 1988 | tracing_iter_reset(iter, cpu); |
1989 | } else | 1989 | } else |
@@ -2291,7 +2291,7 @@ int trace_empty(struct trace_iterator *iter) | |||
2291 | int cpu; | 2291 | int cpu; |
2292 | 2292 | ||
2293 | /* If we are looking at one CPU buffer, only check that one */ | 2293 | /* If we are looking at one CPU buffer, only check that one */ |
2294 | if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { | 2294 | if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { |
2295 | cpu = iter->cpu_file; | 2295 | cpu = iter->cpu_file; |
2296 | buf_iter = trace_buffer_iter(iter, cpu); | 2296 | buf_iter = trace_buffer_iter(iter, cpu); |
2297 | if (buf_iter) { | 2297 | if (buf_iter) { |
@@ -2533,7 +2533,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) | |||
2533 | if (!iter->snapshot) | 2533 | if (!iter->snapshot) |
2534 | tracing_stop(); | 2534 | tracing_stop(); |
2535 | 2535 | ||
2536 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | 2536 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { |
2537 | for_each_tracing_cpu(cpu) { | 2537 | for_each_tracing_cpu(cpu) { |
2538 | iter->buffer_iter[cpu] = | 2538 | iter->buffer_iter[cpu] = |
2539 | ring_buffer_read_prepare(iter->tr->buffer, cpu); | 2539 | ring_buffer_read_prepare(iter->tr->buffer, cpu); |
@@ -2617,7 +2617,7 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
2617 | (file->f_flags & O_TRUNC)) { | 2617 | (file->f_flags & O_TRUNC)) { |
2618 | long cpu = (long) inode->i_private; | 2618 | long cpu = (long) inode->i_private; |
2619 | 2619 | ||
2620 | if (cpu == TRACE_PIPE_ALL_CPU) | 2620 | if (cpu == RING_BUFFER_ALL_CPUS) |
2621 | tracing_reset_online_cpus(&global_trace); | 2621 | tracing_reset_online_cpus(&global_trace); |
2622 | else | 2622 | else |
2623 | tracing_reset(&global_trace, cpu); | 2623 | tracing_reset(&global_trace, cpu); |
@@ -5035,7 +5035,7 @@ static __init int tracer_init_debugfs(void) | |||
5035 | NULL, &tracing_cpumask_fops); | 5035 | NULL, &tracing_cpumask_fops); |
5036 | 5036 | ||
5037 | trace_create_file("trace", 0644, d_tracer, | 5037 | trace_create_file("trace", 0644, d_tracer, |
5038 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); | 5038 | (void *) RING_BUFFER_ALL_CPUS, &tracing_fops); |
5039 | 5039 | ||
5040 | trace_create_file("available_tracers", 0444, d_tracer, | 5040 | trace_create_file("available_tracers", 0444, d_tracer, |
5041 | &global_trace, &show_traces_fops); | 5041 | &global_trace, &show_traces_fops); |
@@ -5055,7 +5055,7 @@ static __init int tracer_init_debugfs(void) | |||
5055 | NULL, &tracing_readme_fops); | 5055 | NULL, &tracing_readme_fops); |
5056 | 5056 | ||
5057 | trace_create_file("trace_pipe", 0444, d_tracer, | 5057 | trace_create_file("trace_pipe", 0444, d_tracer, |
5058 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); | 5058 | (void *) RING_BUFFER_ALL_CPUS, &tracing_pipe_fops); |
5059 | 5059 | ||
5060 | trace_create_file("buffer_size_kb", 0644, d_tracer, | 5060 | trace_create_file("buffer_size_kb", 0644, d_tracer, |
5061 | (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops); | 5061 | (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops); |
@@ -5085,7 +5085,7 @@ static __init int tracer_init_debugfs(void) | |||
5085 | 5085 | ||
5086 | #ifdef CONFIG_TRACER_SNAPSHOT | 5086 | #ifdef CONFIG_TRACER_SNAPSHOT |
5087 | trace_create_file("snapshot", 0644, d_tracer, | 5087 | trace_create_file("snapshot", 0644, d_tracer, |
5088 | (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops); | 5088 | (void *) RING_BUFFER_ALL_CPUS, &snapshot_fops); |
5089 | #endif | 5089 | #endif |
5090 | 5090 | ||
5091 | create_trace_options_dir(); | 5091 | create_trace_options_dir(); |
@@ -5162,7 +5162,7 @@ void trace_init_global_iter(struct trace_iterator *iter) | |||
5162 | { | 5162 | { |
5163 | iter->tr = &global_trace; | 5163 | iter->tr = &global_trace; |
5164 | iter->trace = current_trace; | 5164 | iter->trace = current_trace; |
5165 | iter->cpu_file = TRACE_PIPE_ALL_CPU; | 5165 | iter->cpu_file = RING_BUFFER_ALL_CPUS; |
5166 | } | 5166 | } |
5167 | 5167 | ||
5168 | static void | 5168 | static void |
@@ -5210,7 +5210,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | |||
5210 | 5210 | ||
5211 | switch (oops_dump_mode) { | 5211 | switch (oops_dump_mode) { |
5212 | case DUMP_ALL: | 5212 | case DUMP_ALL: |
5213 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | 5213 | iter.cpu_file = RING_BUFFER_ALL_CPUS; |
5214 | break; | 5214 | break; |
5215 | case DUMP_ORIG: | 5215 | case DUMP_ORIG: |
5216 | iter.cpu_file = raw_smp_processor_id(); | 5216 | iter.cpu_file = raw_smp_processor_id(); |
@@ -5219,7 +5219,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) | |||
5219 | goto out_enable; | 5219 | goto out_enable; |
5220 | default: | 5220 | default: |
5221 | printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); | 5221 | printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); |
5222 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | 5222 | iter.cpu_file = RING_BUFFER_ALL_CPUS; |
5223 | } | 5223 | } |
5224 | 5224 | ||
5225 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 5225 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 037f7eb03d69..da09a037abcd 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -453,8 +453,6 @@ static __always_inline void trace_clear_recursion(int bit) | |||
453 | current->trace_recursion = val; | 453 | current->trace_recursion = val; |
454 | } | 454 | } |
455 | 455 | ||
456 | #define TRACE_PIPE_ALL_CPU -1 | ||
457 | |||
458 | static inline struct ring_buffer_iter * | 456 | static inline struct ring_buffer_iter * |
459 | trace_buffer_iter(struct trace_iterator *iter, int cpu) | 457 | trace_buffer_iter(struct trace_iterator *iter, int cpu) |
460 | { | 458 | { |
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index 3c5c5dfea0b3..cc1dbdc5ee5d 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c | |||
@@ -43,7 +43,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) | |||
43 | iter.iter_flags |= TRACE_FILE_LAT_FMT; | 43 | iter.iter_flags |= TRACE_FILE_LAT_FMT; |
44 | iter.pos = -1; | 44 | iter.pos = -1; |
45 | 45 | ||
46 | if (cpu_file == TRACE_PIPE_ALL_CPU) { | 46 | if (cpu_file == RING_BUFFER_ALL_CPUS) { |
47 | for_each_tracing_cpu(cpu) { | 47 | for_each_tracing_cpu(cpu) { |
48 | iter.buffer_iter[cpu] = | 48 | iter.buffer_iter[cpu] = |
49 | ring_buffer_read_prepare(iter.tr->buffer, cpu); | 49 | ring_buffer_read_prepare(iter.tr->buffer, cpu); |
@@ -115,7 +115,7 @@ static int kdb_ftdump(int argc, const char **argv) | |||
115 | !cpu_online(cpu_file)) | 115 | !cpu_online(cpu_file)) |
116 | return KDB_BADINT; | 116 | return KDB_BADINT; |
117 | } else { | 117 | } else { |
118 | cpu_file = TRACE_PIPE_ALL_CPU; | 118 | cpu_file = RING_BUFFER_ALL_CPUS; |
119 | } | 119 | } |
120 | 120 | ||
121 | kdb_trap_printk++; | 121 | kdb_trap_printk++; |