author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2014-12-15 22:31:07 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2014-12-22 23:37:46 -0500
commit		d716ff71dd12bc6328f84a9ec1c3647daf01c827
tree		28b94a30daa2270dc14f782f6bb93dcb0c2a1429 /kernel/trace
parent		cf6ab6d9143b157786bf29bca5c32e55234bb07d
tracing: Remove taking of trace_types_lock in pipe files
Taking the global mutex "trace_types_lock" in the trace_pipe files causes a bottleneck, as most of the pipe files can be read per CPU and there is no reason to serialize them.

The current_trace variable was given a ref count, and it cannot change while the ref count is non-zero. Opening a trace_pipe file increments the ref count (and closing it decrements it), so the lock no longer needs to be taken when accessing the current_trace variable.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
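For context, the ref-count scheme the message relies on works roughly as in the sketch below. This is a simplified illustration only: the helper names are hypothetical and the "ref" field name is an assumption; the real logic lives in tracing_open_pipe()/tracing_release_pipe() and the tracer-switching path in kernel/trace/trace.c. The count is taken once at open time, under trace_types_lock, which pins tr->current_trace for the lifetime of the open file, so the per-read paths can then dereference it without the global mutex.

	/* Simplified sketch -- not the exact kernel code. */

	static void pipe_open_pin_tracer(struct trace_array *tr,
					 struct trace_iterator *iter)
	{
		mutex_lock(&trace_types_lock);
		tr->current_trace->ref++;	/* tracer cannot be replaced while ref != 0 */
		mutex_unlock(&trace_types_lock);

		/* A plain pointer is now enough; no private copy of the tracer is needed. */
		iter->trace = tr->current_trace;
	}

	static void pipe_release_unpin_tracer(struct trace_array *tr)
	{
		mutex_lock(&trace_types_lock);
		tr->current_trace->ref--;	/* allow the tracer to be changed again */
		mutex_unlock(&trace_types_lock);
	}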
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace.c	110
1 file changed, 28 insertions(+), 82 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed3fba1d6570..7669b1f3234e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4332,17 +4332,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	}
 
 	trace_seq_init(&iter->seq);
-
-	/*
-	 * We make a copy of the current tracer to avoid concurrent
-	 * changes on it while we are reading.
-	 */
-	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
-	if (!iter->trace) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-	*iter->trace = *tr->current_trace;
+	iter->trace = tr->current_trace;
 
 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 		ret = -ENOMEM;
@@ -4399,7 +4389,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 
 	free_cpumask_var(iter->started);
 	mutex_destroy(&iter->mutex);
-	kfree(iter->trace);
 	kfree(iter);
 
 	trace_array_put(tr);
@@ -4432,7 +4421,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	return trace_poll(iter, filp, poll_table);
 }
 
-/* Must be called with trace_types_lock mutex held. */
+/* Must be called with iter->mutex held. */
 static int tracing_wait_pipe(struct file *filp)
 {
 	struct trace_iterator *iter = filp->private_data;
@@ -4477,7 +4466,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
 	struct trace_iterator *iter = filp->private_data;
-	struct trace_array *tr = iter->tr;
 	ssize_t sret;
 
 	/* return any leftover data */
@@ -4487,12 +4475,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	trace_seq_init(&iter->seq);
 
-	/* copy the tracer to avoid using a global lock all around */
-	mutex_lock(&trace_types_lock);
-	if (unlikely(iter->trace->name != tr->current_trace->name))
-		*iter->trace = *tr->current_trace;
-	mutex_unlock(&trace_types_lock);
-
 	/*
 	 * Avoid more than one consumer on a single file descriptor
 	 * This is just a matter of traces coherency, the ring buffer itself
@@ -4652,7 +4634,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		.ops		= &tracing_pipe_buf_ops,
 		.spd_release	= tracing_spd_release_pipe,
 	};
-	struct trace_array *tr = iter->tr;
 	ssize_t ret;
 	size_t rem;
 	unsigned int i;
@@ -4660,12 +4641,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	if (splice_grow_spd(pipe, &spd))
 		return -ENOMEM;
 
-	/* copy the tracer to avoid using a global lock all around */
-	mutex_lock(&trace_types_lock);
-	if (unlikely(iter->trace->name != tr->current_trace->name))
-		*iter->trace = *tr->current_trace;
-	mutex_unlock(&trace_types_lock);
-
 	mutex_lock(&iter->mutex);
 
 	if (iter->trace->splice_read) {
@@ -5373,21 +5348,16 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (!count)
 		return 0;
 
-	mutex_lock(&trace_types_lock);
-
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
-		size = -EBUSY;
-		goto out_unlock;
-	}
+	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+		return -EBUSY;
 #endif
 
 	if (!info->spare)
 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
 							  iter->cpu_file);
-	size = -ENOMEM;
 	if (!info->spare)
-		goto out_unlock;
+		return -ENOMEM;
 
 	/* Do we have previous read data to read? */
 	if (info->read < PAGE_SIZE)
@@ -5403,21 +5373,16 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
 	if (ret < 0) {
 		if (trace_empty(iter)) {
-			if ((filp->f_flags & O_NONBLOCK)) {
-				size = -EAGAIN;
-				goto out_unlock;
-			}
-			mutex_unlock(&trace_types_lock);
+			if ((filp->f_flags & O_NONBLOCK))
+				return -EAGAIN;
+
 			ret = wait_on_pipe(iter, false);
-			mutex_lock(&trace_types_lock);
-			if (ret) {
-				size = ret;
-				goto out_unlock;
-			}
+			if (ret)
+				return ret;
+
 			goto again;
 		}
-		size = 0;
-		goto out_unlock;
+		return 0;
 	}
 
 	info->read = 0;
@@ -5427,18 +5392,14 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 		size = count;
 
 	ret = copy_to_user(ubuf, info->spare + info->read, size);
-	if (ret == size) {
-		size = -EFAULT;
-		goto out_unlock;
-	}
+	if (ret == size)
+		return -EFAULT;
+
 	size -= ret;
 
 	*ppos += size;
 	info->read += size;
 
- out_unlock:
-	mutex_unlock(&trace_types_lock);
-
 	return size;
 }
 
@@ -5536,30 +5497,20 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	int entries, size, i;
 	ssize_t ret = 0;
 
-	mutex_lock(&trace_types_lock);
-
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
-		ret = -EBUSY;
-		goto out;
-	}
+	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+		return -EBUSY;
 #endif
 
-	if (splice_grow_spd(pipe, &spd)) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
 
-	if (*ppos & (PAGE_SIZE - 1)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (*ppos & (PAGE_SIZE - 1))
+		return -EINVAL;
 
 	if (len & (PAGE_SIZE - 1)) {
-		if (len < PAGE_SIZE) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (len < PAGE_SIZE)
+			return -EINVAL;
 		len &= PAGE_MASK;
 	}
 
@@ -5620,25 +5571,20 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	/* did we read anything? */
 	if (!spd.nr_pages) {
 		if (ret)
-			goto out;
+			return ret;
+
+		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
+			return -EAGAIN;
 
-		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
-			ret = -EAGAIN;
-			goto out;
-		}
-		mutex_unlock(&trace_types_lock);
 		ret = wait_on_pipe(iter, true);
-		mutex_lock(&trace_types_lock);
 		if (ret)
-			goto out;
+			return ret;
 
 		goto again;
 	}
 
 	ret = splice_to_pipe(pipe, &spd);
 	splice_shrink_spd(&spd);
-out:
-	mutex_unlock(&trace_types_lock);
 
 	return ret;
 }