author     Linus Torvalds <torvalds@linux-foundation.org>   2014-06-13 00:07:25 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-06-13 00:07:25 -0400
commit     8841c8b3c4c4d9a9f2a6d30b463ad8d2c6e2f0ea (patch)
tree       dae6cf40947c0eab290da7165e6eac2818a3ab92
parent     b7c8c1945cfbcfb9d60f5be957b4339c6eee4201 (diff)
parent     da9c3413a27be5ba6f996e90495c836dd30b8841 (diff)
Merge tag 'trace-3.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing cleanups and bugfixes from Steven Rostedt:
"One bug fix that goes back to 3.10: accessing a non-existent buffer
if "possible cpus" is greater than the number of actual CPUs (including
offline CPUs).
Namhyung Kim reviewed the patches I sent this merge window; he found a
memory leak and had a few cleanups"
* tag 'trace-3.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Fix check of ftrace_trace_arrays list_empty() check
tracing: Fix leak of per cpu max data in instances
tracing: Cleanup saved_cmdlines_size changes
ring-buffer: Check if buffer exists before polling
 include/linux/ring_buffer.h |  2
 kernel/trace/ring_buffer.c  |  5
 kernel/trace/trace.c        | 49
 kernel/trace/trace.h        |  2
 4 files changed, 37 insertions(+), 21 deletions(-)
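For readers not steeped in the per-cpu buffer layout: the bug described in the pull message comes from ring buffers existing only for CPUs in the buffer's cpumask, while callers may pass any "possible" CPU index. The program below is a minimal user-space sketch of that failure mode and of the guard added in kernel/trace/ring_buffer.c further down; every name in it (buffer_lookup, MAX_POSSIBLE, ONLINE) is invented for illustration and is not kernel code.

```c
/*
 * Sketch: slots are sized for all "possible" CPUs, but only "online"
 * CPUs get a buffer.  A lookup must check the mask before indexing.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_POSSIBLE 8   /* CPUs the array is sized for        */
#define ONLINE       2   /* CPUs that actually got a buffer    */

struct cpu_buffer { int dummy; };

static struct cpu_buffer *buffers[MAX_POSSIBLE];
static unsigned long cpumask;            /* bit set => buffer allocated */

static int buffer_lookup(int cpu, struct cpu_buffer **out)
{
	/* Without this check, cpu >= ONLINE hands back a NULL slot. */
	if (cpu < 0 || cpu >= MAX_POSSIBLE || !(cpumask & (1UL << cpu)))
		return -1;                /* the kernel fix returns -ENODEV */
	*out = buffers[cpu];
	return 0;
}

int main(void)
{
	struct cpu_buffer *b;

	for (int cpu = 0; cpu < ONLINE; cpu++) {
		buffers[cpu] = calloc(1, sizeof(*buffers[cpu]));
		cpumask |= 1UL << cpu;
	}

	/* cpu 5 is "possible" but has no buffer: lookup must fail cleanly. */
	printf("cpu 1 -> %d\n", buffer_lookup(1, &b));   /* 0  */
	printf("cpu 5 -> %d\n", buffer_lookup(5, &b));   /* -1 */
	return 0;
}
```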
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index d69cf637a15a..49a4d6f59108 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 	__ring_buffer_alloc((size), (flags), &__key);	\
 })
 
-void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 			  struct file *filp, poll_table *poll_table);
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c634868c2921..7c56c3d06943 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	DEFINE_WAIT(wait);
@@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 	if (cpu == RING_BUFFER_ALL_CPUS)
 		work = &buffer->irq_work;
 	else {
+		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+			return -ENODEV;
 		cpu_buffer = buffer->buffers[cpu];
 		work = &cpu_buffer->irq_work;
 	}
@@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 	schedule();
 
 	finish_wait(&work->waiters, &wait);
+	return 0;
 }
 
 /**
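Because ring_buffer_wait() can now fail, its callers have to propagate the error instead of looping forever on a buffer that will never fill. Below is a schematic user-space model of that calling convention, not kernel code; wait_for_data() and consume() are invented stand-ins for ring_buffer_wait() and tracing_wait_pipe(), whose real updates appear in the trace.c hunks that follow.

```c
/* Model of "the wait helper now reports failure; the caller returns it". */
#include <errno.h>
#include <stdio.h>

static int buffer_ready;

static int wait_for_data(int cpu)
{
	if (cpu >= 2)            /* stand-in for the cpumask_test_cpu() check */
		return -ENODEV;
	buffer_ready = 1;        /* pretend data arrived */
	return 0;
}

static int consume(int cpu)
{
	while (!buffer_ready) {
		int ret = wait_for_data(cpu);

		if (ret)         /* propagate the error instead of spinning */
			return ret;
	}
	return 0;
}

int main(void)
{
	printf("cpu 0: %d\n", consume(0));   /* 0 */

	buffer_ready = 0;                    /* start over with an invalid cpu */
	printf("cpu 5: %d\n", consume(5));   /* negative: -ENODEV */
	return 0;
}
```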
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 16f7038d1f4d..384ede311717 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1085,13 +1085,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
-static void wait_on_pipe(struct trace_iterator *iter)
+static int wait_on_pipe(struct trace_iterator *iter)
 {
 	/* Iterators are static, they should be filled or empty */
 	if (trace_buffer_iter(iter, iter->cpu_file))
-		return;
+		return 0;
 
-	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
 }
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1338,7 +1338,7 @@ static int trace_create_savedcmd(void)
 {
 	int ret;
 
-	savedcmd = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
 	if (!savedcmd)
 		return -ENOMEM;
 
@@ -3840,7 +3840,7 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
 	int r;
 
 	arch_spin_lock(&trace_cmdline_lock);
-	r = sprintf(buf, "%u\n", savedcmd->cmdline_num);
+	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
 	arch_spin_unlock(&trace_cmdline_lock);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3857,7 +3857,7 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
 {
 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
 
-	s = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
 
@@ -4378,6 +4378,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 static int tracing_wait_pipe(struct file *filp)
 {
 	struct trace_iterator *iter = filp->private_data;
+	int ret;
 
 	while (trace_empty(iter)) {
 
@@ -4399,10 +4400,13 @@ static int tracing_wait_pipe(struct file *filp)
 
 		mutex_unlock(&iter->mutex);
 
-		wait_on_pipe(iter);
+		ret = wait_on_pipe(iter);
 
 		mutex_lock(&iter->mutex);
 
+		if (ret)
+			return ret;
+
 		if (signal_pending(current))
 			return -EINTR;
 	}
@@ -5327,8 +5331,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 			goto out_unlock;
 		}
 		mutex_unlock(&trace_types_lock);
-		wait_on_pipe(iter);
+		ret = wait_on_pipe(iter);
 		mutex_lock(&trace_types_lock);
+		if (ret) {
+			size = ret;
+			goto out_unlock;
+		}
 		if (signal_pending(current)) {
 			size = -EINTR;
 			goto out_unlock;
@@ -5538,8 +5546,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			goto out;
 		}
 		mutex_unlock(&trace_types_lock);
-		wait_on_pipe(iter);
+		ret = wait_on_pipe(iter);
 		mutex_lock(&trace_types_lock);
+		if (ret)
+			goto out;
 		if (signal_pending(current)) {
 			ret = -EINTR;
 			goto out;
@@ -6232,22 +6242,25 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	return 0;
 }
 
+static void free_trace_buffer(struct trace_buffer *buf)
+{
+	if (buf->buffer) {
+		ring_buffer_free(buf->buffer);
+		buf->buffer = NULL;
+		free_percpu(buf->data);
+		buf->data = NULL;
+	}
+}
+
 static void free_trace_buffers(struct trace_array *tr)
 {
 	if (!tr)
 		return;
 
-	if (tr->trace_buffer.buffer) {
-		ring_buffer_free(tr->trace_buffer.buffer);
-		tr->trace_buffer.buffer = NULL;
-		free_percpu(tr->trace_buffer.data);
-	}
+	free_trace_buffer(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (tr->max_buffer.buffer) {
-		ring_buffer_free(tr->max_buffer.buffer);
-		tr->max_buffer.buffer = NULL;
-	}
+	free_trace_buffer(&tr->max_buffer);
 #endif
 }
 
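The last trace.c hunk folds two hand-rolled teardown paths into a single free_trace_buffer() helper; as a side effect the max_buffer path now also releases its per-cpu data, which appears to be the "per cpu max data" leak named in the shortlog. A minimal user-space sketch of the same consolidation pattern, with invented names (struct buf, buf_free), is:

```c
/* One teardown helper: frees and NULLs both allocations for any buffer. */
#include <stdlib.h>

struct buf {
	void *ring;    /* stands in for trace_buffer->buffer       */
	void *data;    /* stands in for the per-cpu data pointer   */
};

static void buf_free(struct buf *b)
{
	if (b->ring) {
		free(b->ring);
		b->ring = NULL;
		free(b->data);   /* the old max_buffer path skipped this */
		b->data = NULL;
	}
}

int main(void)
{
	struct buf main_buf = { malloc(32), malloc(32) };
	struct buf max_buf  = { malloc(32), malloc(32) };

	buf_free(&main_buf);
	buf_free(&max_buf);
	buf_free(&max_buf);   /* pointers were NULLed, so this is a no-op */
	return 0;
}
```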
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9e82551dd566..9258f5a815db 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -252,7 +252,7 @@ static inline struct trace_array *top_trace_array(void)
 {
 	struct trace_array *tr;
 
-	if (list_empty(ftrace_trace_arrays.prev))
+	if (list_empty(&ftrace_trace_arrays))
 		return NULL;
 
 	tr = list_entry(ftrace_trace_arrays.prev,
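The trace.h change passes the list head itself to list_empty() rather than its ->prev pointer, so top_trace_array() bails out before taking list_entry() of the tail of an empty list. The program below models the corrected idiom in user space; the simplified list_head helpers and the stand-in trace_array are illustrative, not the kernel's definitions.

```c
/* Check the head with list_empty(), then read the last element via .prev. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_empty(head)     ((head)->next == (head))
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct trace_array {
	struct list_head list;
	const char *name;
};

static struct list_head ftrace_trace_arrays = LIST_HEAD_INIT(ftrace_trace_arrays);

static struct trace_array *top_trace_array(void)
{
	if (list_empty(&ftrace_trace_arrays))   /* test the head itself */
		return NULL;

	/* last element on the list */
	return list_entry(ftrace_trace_arrays.prev, struct trace_array, list);
}

int main(void)
{
	struct trace_array global = { .name = "global" };

	printf("empty list -> %p\n", (void *)top_trace_array());   /* NULL */

	/* splice "global" onto the tail of the list */
	global.list.prev = ftrace_trace_arrays.prev;
	global.list.next = &ftrace_trace_arrays;
	ftrace_trace_arrays.prev->next = &global.list;
	ftrace_trace_arrays.prev = &global.list;

	printf("top array  -> %s\n", top_trace_array()->name);      /* global */
	return 0;
}
```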
