author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-18 15:28:39 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-18 15:28:39 -0500
commit     758338e960ebe8ac3bef7ae11ff830bc2f9c655c
tree       dbaea6140ada681baa462a2818095bcea0160108 /kernel/trace
parent     224394ad75711042c6d362c9dbc9874b476edbc0
parent     bf3071f5a054db9e5bab873355d27a7330ce5187
Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull minor tracing updates and fixes from Steven Rostedt:
 "It seems that one of my old pull requests has slipped through. The
  changes are contained to just the files that I maintain, and are
  changes from others that I told I would get into this merge window.
  They have already been in linux-next for several weeks, and should be
  well tested."

* 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Remove unnecessary WARN_ONCE's from tracing_buffers_splice_read
  tracing: Remove unneeded checks from the stack tracer
  tracing: Add a resize function to make one buffer equivalent to another buffer
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c        60
-rw-r--r--  kernel/trace/trace_stack.c   4
2 files changed, 31 insertions(+), 33 deletions(-)
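For context, the core of the trace.c change is the new resize_buffer_duplicate_size()
helper, which replaces two open-coded "resize this buffer to match that buffer, per CPU
or for all CPUs" loops. Below is a minimal userspace sketch of that pattern, not kernel
code: struct trace_array and ring_buffer_resize() are stand-in stubs, and only the
helper's name, the RING_BUFFER_ALL_CPUS convention, and the per-CPU entries bookkeeping
are taken from the diff that follows.

/*
 * Minimal userspace model of the consolidation introduced by this merge.
 * The kernel types and ring_buffer_resize() are stubs; only the control
 * flow mirrors the new resize_buffer_duplicate_size() helper.
 */
#include <stdio.h>

#define NR_CPUS			4
#define RING_BUFFER_ALL_CPUS	(-1)

struct per_cpu_data { unsigned long entries; };
struct trace_array  { struct per_cpu_data data[NR_CPUS]; };

/* Stub for ring_buffer_resize(): pretend every resize succeeds. */
static int ring_buffer_resize_stub(struct trace_array *tr,
				   unsigned long size, int cpu)
{
	(void)tr; (void)size; (void)cpu;
	return 0;
}

/*
 * Make @tr's per-CPU buffer sizes match @size_tr's, either for one CPU
 * or for all of them -- the same shape as the kernel helper.
 */
static int resize_buffer_duplicate_size(struct trace_array *tr,
					struct trace_array *size_tr, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			ret = ring_buffer_resize_stub(tr,
					size_tr->data[cpu].entries, cpu);
			if (ret < 0)
				break;
			tr->data[cpu].entries = size_tr->data[cpu].entries;
		}
	} else {
		ret = ring_buffer_resize_stub(tr,
				size_tr->data[cpu_id].entries, cpu_id);
		if (ret == 0)
			tr->data[cpu_id].entries = size_tr->data[cpu_id].entries;
	}

	return ret;
}

int main(void)
{
	struct trace_array global_trace = {
		.data = { { 1024 }, { 1024 }, { 2048 }, { 512 } }
	};
	struct trace_array max_tr = { .data = { { 0 } } };

	/* The two call patterns used by the patch: all CPUs at once... */
	resize_buffer_duplicate_size(&max_tr, &global_trace, RING_BUFFER_ALL_CPUS);
	/* ...or a single CPU index. */
	resize_buffer_duplicate_size(&max_tr, &global_trace, 2);

	printf("max_tr cpu2 entries: %lu\n", max_tr.data[2].entries);
	return 0;
}

The two calls correspond to the two sites the patch converts: __tracing_resize_ring_buffer()
(which may get a single CPU or RING_BUFFER_ALL_CPUS) and tracing_set_tracer() (which always
resizes all CPUs).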
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 61e081b4ba11..e5125677efa0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3034,6 +3034,31 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
 	tr->data[cpu]->entries = val;
 }
 
+/* resize @tr's buffer to the size of @size_tr's entries */
+static int resize_buffer_duplicate_size(struct trace_array *tr,
+					struct trace_array *size_tr, int cpu_id)
+{
+	int cpu, ret = 0;
+
+	if (cpu_id == RING_BUFFER_ALL_CPUS) {
+		for_each_tracing_cpu(cpu) {
+			ret = ring_buffer_resize(tr->buffer,
+					size_tr->data[cpu]->entries, cpu);
+			if (ret < 0)
+				break;
+			tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+		}
+	} else {
+		ret = ring_buffer_resize(tr->buffer,
+					size_tr->data[cpu_id]->entries, cpu_id);
+		if (ret == 0)
+			tr->data[cpu_id]->entries =
+				size_tr->data[cpu_id]->entries;
+	}
+
+	return ret;
+}
+
 static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
 	int ret;
@@ -3058,23 +3083,8 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 
 	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r = 0;
-
-		if (cpu == RING_BUFFER_ALL_CPUS) {
-			int i;
-			for_each_tracing_cpu(i) {
-				r = ring_buffer_resize(global_trace.buffer,
-						global_trace.data[i]->entries,
-						i);
-				if (r < 0)
-					break;
-			}
-		} else {
-			r = ring_buffer_resize(global_trace.buffer,
-						global_trace.data[cpu]->entries,
-						cpu);
-		}
-
+		int r = resize_buffer_duplicate_size(&global_trace,
+						     &global_trace, cpu);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -3212,17 +3222,11 @@ static int tracing_set_tracer(const char *buf)
 
 	topts = create_trace_option_files(t);
 	if (t->use_max_tr) {
-		int cpu;
 		/* we need to make per cpu buffer sizes equivalent */
-		for_each_tracing_cpu(cpu) {
-			ret = ring_buffer_resize(max_tr.buffer,
-						global_trace.data[cpu]->entries,
-						cpu);
-			if (ret < 0)
-				goto out;
-			max_tr.data[cpu]->entries =
-					global_trace.data[cpu]->entries;
-		}
+		ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
+						   RING_BUFFER_ALL_CPUS);
+		if (ret < 0)
+			goto out;
 	}
 
 	if (t->init) {
@@ -4271,13 +4275,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		return -ENOMEM;
 
 	if (*ppos & (PAGE_SIZE - 1)) {
-		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (len & (PAGE_SIZE - 1)) {
-		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
 		if (len < PAGE_SIZE) {
 			ret = -EINVAL;
 			goto out;
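The tracing_buffers_splice_read() hunk above keeps the page-alignment checks and their
-EINVAL returns; only the WARN_ONCE noise is dropped. The alignment test itself is the
usual power-of-two mask; a small standalone illustration (PAGE_SIZE hard-coded to 4096
for the example, the kernel value is architecture dependent):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed for the example; per-arch in the kernel */

/*
 * Non-zero iff @off is not a multiple of PAGE_SIZE -- the same test as
 * "*ppos & (PAGE_SIZE - 1)" and "len & (PAGE_SIZE - 1)" above.
 */
static unsigned long page_misalignment(unsigned long off)
{
	return off & (PAGE_SIZE - 1);
}

int main(void)
{
	printf("%lu %lu\n", page_misalignment(8192), page_misalignment(8200));
	/* prints: 0 8 */
	return 0;
}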
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 0c1b165778e5..42ca822fc701 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -33,7 +33,6 @@ static unsigned long max_stack_size;
 static arch_spinlock_t max_stack_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
@@ -116,9 +115,6 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 {
 	int cpu;
 
-	if (unlikely(!ftrace_enabled || stack_trace_disabled))
-		return;
-
 	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();