author	Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2014-05-30 09:42:39 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2014-05-30 09:42:39 -0400
commit	379cfdac37923653c9d4242d10052378b7563005 (patch)
tree	441d3a81677f0b0031645dc176996bd93f230539 /kernel/trace
parent	42584c81c5adc1737a6fe0687facc5e62a5dc8c1 (diff)
tracing: Try again for saved cmdline if failed due to locking
In order to prevent the saved cmdline cache from being filled when
tracing is not active, the comms are only recorded after a trace event
is recorded.

The problem is, a comm can fail to be recorded if the
trace_cmdline_lock is held. That lock is taken via a trylock to allow
it to happen from any context (including NMI). If the lock fails to be
taken, the comm is skipped. No big deal, as we will try again later.

But! Because of the code that was added to only record after an event,
we may not try again later, as the recording is made as a oneshot per
event per CPU.

Only disable the recording of the comm if the comm is actually recorded.

Fixes: 7ffbd48d5cab "tracing: Cache comms only after an event occurred"
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
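[Editor's illustration, not part of the commit] The fix boils down to one pattern: a oneshot "save pending" flag must only be cleared once the save has actually happened, because the trylock may legitimately fail. Below is a minimal userspace sketch of that pattern, assuming POSIX threads; save_cmdline_try(), record_cmdline(), cmdline_save_pending, and saved_comm are hypothetical stand-ins for the kernel's trace_save_cmdline(), tracing_record_cmdline(), per-CPU trace_cmdline_save flag, and saved_cmdlines cache, not kernel code.

/*
 * Minimal userspace sketch of the fixed pattern (assumes pthreads).
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t cmdline_lock = PTHREAD_MUTEX_INITIALIZER;
static char saved_comm[16];

/* One pending-save flag; the kernel keeps one of these per CPU. */
static int cmdline_save_pending = 1;

/* Try to record the comm; report whether we actually did. */
static int save_cmdline_try(const char *comm)
{
	/* Trylock so this is safe from contexts that cannot block. */
	if (pthread_mutex_trylock(&cmdline_lock) != 0)
		return 0;	/* lock busy: skip, better luck next time */

	strncpy(saved_comm, comm, sizeof(saved_comm) - 1);
	pthread_mutex_unlock(&cmdline_lock);
	return 1;
}

static void record_cmdline(const char *comm)
{
	if (!cmdline_save_pending)
		return;

	/*
	 * The bug fixed by this commit: clearing the flag *before*
	 * knowing the save succeeded loses the comm whenever the
	 * trylock fails. Clear it only on success, so the next event
	 * retries the save.
	 */
	if (save_cmdline_try(comm))
		cmdline_save_pending = 0;
}

int main(void)
{
	record_cmdline("my_task");
	printf("saved: %s\n", saved_comm);
	return 0;
}

The design point mirrors the commit: returning a success value from the save routine moves the decision to clear the oneshot flag from before the attempt to after it, so a contended lock leaves the flag set and the recording is retried on the next event.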
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace.c	13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1855dae73f34..22a902e2ded9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1441,12 +1441,12 @@ static void tracing_stop_tr(struct trace_array *tr)
 
 void trace_stop_cmdline_recording(void);
 
-static void trace_save_cmdline(struct task_struct *tsk)
+static int trace_save_cmdline(struct task_struct *tsk)
 {
 	unsigned pid, idx;
 
 	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
-		return;
+		return 0;
 
 	/*
 	 * It's not the end of the world if we don't get
@@ -1455,7 +1455,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	 * so if we miss here, then better luck next time.
 	 */
 	if (!arch_spin_trylock(&trace_cmdline_lock))
-		return;
+		return 0;
 
 	idx = map_pid_to_cmdline[tsk->pid];
 	if (idx == NO_CMDLINE_MAP) {
@@ -1480,6 +1480,8 @@ static void trace_save_cmdline(struct task_struct *tsk)
 	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
 
 	arch_spin_unlock(&trace_cmdline_lock);
+
+	return 1;
 }
 
 void trace_find_cmdline(int pid, char comm[])
@@ -1521,9 +1523,8 @@ void tracing_record_cmdline(struct task_struct *tsk)
 	if (!__this_cpu_read(trace_cmdline_save))
 		return;
 
-	__this_cpu_write(trace_cmdline_save, false);
-
-	trace_save_cmdline(tsk);
+	if (trace_save_cmdline(tsk))
+		__this_cpu_write(trace_cmdline_save, false);
 }
 
 void