author	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-01-30 19:27:10 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-02-09 02:08:28 -0500
commit	a93ae8dccc3c723ed7a629dab37a3392387acd79 (patch)
tree	386e8d2fc815b3ebcaca99aeea4e77496dc36481
parent	b67c7d39bc284776c27eeaefd424046c742b0d93 (diff)
tracing: Fix hwlat kthread migration
commit 79c6f448c8b79c321e4a1f31f98194e4f6b6cae7 upstream.

The hwlat tracer creates a kernel thread at start of the tracer. It is
pinned to a single CPU and will move to the next CPU after each period of
running. If the user modifies the migration thread's affinity, it will not
change after that happens.

The original code created the thread at the first instance it was called,
but later was changed to destroy the thread after the tracer was finished,
and would not be created until the next instance of the tracer was
established. The code that initialized the affinity was only called on the
initial instantiation of the tracer. After that, it was not initialized,
and the previous affinity did not match the current newly created one,
making it appear that the user modified the thread's affinity when it did
not, and the thread failed to migrate again.

Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
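The failure mode described above is a stale-static-state bug: a
function-static pointer doubles as the "first iteration" test, so it
survives a tracer stop/start and the mask is never re-initialized on the
next run. Below is a minimal, self-contained user-space sketch of that
pattern, under the assumption that the printf calls and the names
move_buggy/move_fixed stand in for the real mask setup and migration
logic; this is illustrative, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

static int saved_mask;
static int *current_mask;	/* static: survives across "tracer instances" */

/* Buggy variant: the static pointer itself gates initialization. */
static void move_buggy(void)
{
	if (!current_mask) {		/* true only on the very first run */
		current_mask = &saved_mask;
		printf("buggy: initializing mask\n");
	}
	printf("buggy: migrating using mask\n");
}

/* Fixed variant: the caller decides when initialization must happen. */
static void move_fixed(bool initmask)
{
	if (initmask) {			/* true on each new tracer instance */
		current_mask = &saved_mask;
		printf("fixed: initializing mask\n");
	}
	printf("fixed: migrating using mask\n");
}

int main(void)
{
	/* First tracer instance: both variants initialize. */
	move_buggy();
	move_fixed(true);

	/* Second instance: only the fixed variant re-initializes. */
	move_buggy();		/* skips init: current_mask is still set */
	move_fixed(true);	/* re-initializes as intended */
	return 0;
}

This mirrors the patch below: move_to_next_cpu() gains an initmask
parameter, and kthread_fn() passes true exactly once per kthread lifetime.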
-rw-r--r--	kernel/trace/trace_hwlat.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index b97286c48735..f00b0131c8f9 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -266,7 +266,7 @@ out:
 static struct cpumask save_cpumask;
 static bool disable_migrate;
 
-static void move_to_next_cpu(void)
+static void move_to_next_cpu(bool initmask)
 {
 	static struct cpumask *current_mask;
 	int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
 		return;
 
 	/* Just pick the first CPU on first iteration */
-	if (!current_mask) {
+	if (initmask) {
 		current_mask = &save_cpumask;
 		get_online_cpus();
 		cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
 static int kthread_fn(void *data)
 {
 	u64 interval;
+	bool initmask = true;
 
 	while (!kthread_should_stop()) {
 
-		move_to_next_cpu();
+		move_to_next_cpu(initmask);
+		initmask = false;
 
 		local_irq_disable();
 		get_sample();
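For completeness, a sketch of how one might exercise the fixed path from
user space: restart the hwlat tracer twice through tracefs, so that a
fresh kthread is created each time and must re-initialize its migration
mask. This assumes tracefs is mounted at /sys/kernel/debug/tracing (the
traditional debugfs location on kernels of this era) and requires root;
adjust the path for your system.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

/* Write a string to a tracefs control file; returns 0 on success. */
static int write_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ssize_t n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *tracer = "/sys/kernel/debug/tracing/current_tracer";

	for (int i = 0; i < 2; i++) {
		if (write_file(tracer, "hwlat") < 0) {
			perror("start hwlat");
			return 1;
		}
		sleep(2);			/* let the kthread run and migrate */
		write_file(tracer, "nop");	/* stop: destroys the kthread */
	}
	return 0;
}

Before the patch, the second start would leave the kthread stuck on its
previous CPU because disable_migrate logic treated the stale mask as a
user-modified affinity; after the patch, each start migrates across the
tracing_cpumask CPUs as intended.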