about summary refs log tree commit diff stats
path: root/kernel/trace/trace.c
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2014-01-14 10:04:59 -0500
committerSteven Rostedt <rostedt@goodmis.org>2014-04-21 13:59:27 -0400
commit0b9b12c1b884eb34773312f15c194220025e0416 (patch)
treeffe346fc3ec3b5d188f3a278d73ba3c55a64bd10 /kernel/trace/trace.c
parent6d9b3fa5e7f663bbfb9d2d80d46136f75319cb28 (diff)
tracing: Move ftrace_max_lock into trace_array
In preparation for having tracers enabled in instances, the max_lock should be unique, as updating the max for one tracer is a separate operation from updating it for another tracer using a different max. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--kernel/trace/trace.c40
1 file changed, 14 insertions(+), 26 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f5fc56bf0227..bb5147a55be5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -963,22 +963,6 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
963 return cnt; 963 return cnt;
964} 964}
965 965
966/*
967 * ftrace_max_lock is used to protect the swapping of buffers
968 * when taking a max snapshot. The buffers themselves are
969 * protected by per_cpu spinlocks. But the action of the swap
970 * needs its own lock.
971 *
972 * This is defined as a arch_spinlock_t in order to help
973 * with performance when lockdep debugging is enabled.
974 *
975 * It is also used in other places outside the update_max_tr
976 * so it needs to be defined outside of the
977 * CONFIG_TRACER_MAX_TRACE.
978 */
979static arch_spinlock_t ftrace_max_lock =
980 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
981
982unsigned long __read_mostly tracing_thresh; 966unsigned long __read_mostly tracing_thresh;
983 967
984#ifdef CONFIG_TRACER_MAX_TRACE 968#ifdef CONFIG_TRACER_MAX_TRACE
@@ -1046,14 +1030,14 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1046 return; 1030 return;
1047 } 1031 }
1048 1032
1049 arch_spin_lock(&ftrace_max_lock); 1033 arch_spin_lock(&tr->max_lock);
1050 1034
1051 buf = tr->trace_buffer.buffer; 1035 buf = tr->trace_buffer.buffer;
1052 tr->trace_buffer.buffer = tr->max_buffer.buffer; 1036 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1053 tr->max_buffer.buffer = buf; 1037 tr->max_buffer.buffer = buf;
1054 1038
1055 __update_max_tr(tr, tsk, cpu); 1039 __update_max_tr(tr, tsk, cpu);
1056 arch_spin_unlock(&ftrace_max_lock); 1040 arch_spin_unlock(&tr->max_lock);
1057} 1041}
1058 1042
1059/** 1043/**
@@ -1079,7 +1063,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1079 return; 1063 return;
1080 } 1064 }
1081 1065
1082 arch_spin_lock(&ftrace_max_lock); 1066 arch_spin_lock(&tr->max_lock);
1083 1067
1084 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); 1068 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1085 1069
@@ -1097,7 +1081,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1097 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 1081 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1098 1082
1099 __update_max_tr(tr, tsk, cpu); 1083 __update_max_tr(tr, tsk, cpu);
1100 arch_spin_unlock(&ftrace_max_lock); 1084 arch_spin_unlock(&tr->max_lock);
1101} 1085}
1102#endif /* CONFIG_TRACER_MAX_TRACE */ 1086#endif /* CONFIG_TRACER_MAX_TRACE */
1103 1087
@@ -1351,7 +1335,7 @@ void tracing_start(void)
1351 } 1335 }
1352 1336
1353 /* Prevent the buffers from switching */ 1337 /* Prevent the buffers from switching */
1354 arch_spin_lock(&ftrace_max_lock); 1338 arch_spin_lock(&global_trace.max_lock);
1355 1339
1356 buffer = global_trace.trace_buffer.buffer; 1340 buffer = global_trace.trace_buffer.buffer;
1357 if (buffer) 1341 if (buffer)
@@ -1363,7 +1347,7 @@ void tracing_start(void)
1363 ring_buffer_record_enable(buffer); 1347 ring_buffer_record_enable(buffer);
1364#endif 1348#endif
1365 1349
1366 arch_spin_unlock(&ftrace_max_lock); 1350 arch_spin_unlock(&global_trace.max_lock);
1367 1351
1368 ftrace_start(); 1352 ftrace_start();
1369 out: 1353 out:
@@ -1418,7 +1402,7 @@ void tracing_stop(void)
1418 goto out; 1402 goto out;
1419 1403
1420 /* Prevent the buffers from switching */ 1404 /* Prevent the buffers from switching */
1421 arch_spin_lock(&ftrace_max_lock); 1405 arch_spin_lock(&global_trace.max_lock);
1422 1406
1423 buffer = global_trace.trace_buffer.buffer; 1407 buffer = global_trace.trace_buffer.buffer;
1424 if (buffer) 1408 if (buffer)
@@ -1430,7 +1414,7 @@ void tracing_stop(void)
1430 ring_buffer_record_disable(buffer); 1414 ring_buffer_record_disable(buffer);
1431#endif 1415#endif
1432 1416
1433 arch_spin_unlock(&ftrace_max_lock); 1417 arch_spin_unlock(&global_trace.max_lock);
1434 1418
1435 out: 1419 out:
1436 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 1420 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
@@ -3331,7 +3315,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3331 mutex_lock(&tracing_cpumask_update_lock); 3315 mutex_lock(&tracing_cpumask_update_lock);
3332 3316
3333 local_irq_disable(); 3317 local_irq_disable();
3334 arch_spin_lock(&ftrace_max_lock); 3318 arch_spin_lock(&tr->max_lock);
3335 for_each_tracing_cpu(cpu) { 3319 for_each_tracing_cpu(cpu) {
3336 /* 3320 /*
3337 * Increase/decrease the disabled counter if we are 3321 * Increase/decrease the disabled counter if we are
@@ -3348,7 +3332,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3348 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); 3332 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3349 } 3333 }
3350 } 3334 }
3351 arch_spin_unlock(&ftrace_max_lock); 3335 arch_spin_unlock(&tr->max_lock);
3352 local_irq_enable(); 3336 local_irq_enable();
3353 3337
3354 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 3338 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
@@ -6129,6 +6113,8 @@ static int new_instance_create(const char *name)
6129 6113
6130 raw_spin_lock_init(&tr->start_lock); 6114 raw_spin_lock_init(&tr->start_lock);
6131 6115
6116 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6117
6132 tr->current_trace = &nop_trace; 6118 tr->current_trace = &nop_trace;
6133 6119
6134 INIT_LIST_HEAD(&tr->systems); 6120 INIT_LIST_HEAD(&tr->systems);
@@ -6627,6 +6613,8 @@ __init static int tracer_alloc_buffers(void)
6627 */ 6613 */
6628 global_trace.current_trace = &nop_trace; 6614 global_trace.current_trace = &nop_trace;
6629 6615
6616 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6617
6630 ftrace_init_global_array_ops(&global_trace); 6618 ftrace_init_global_array_ops(&global_trace);
6631 6619
6632 register_tracer(&nop_trace); 6620 register_tracer(&nop_trace);