author		Linus Torvalds <torvalds@linux-foundation.org>	2017-02-27 16:36:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-27 16:36:19 -0500
commit		45554b2357d5782497e59f09146cc3636d6ad551 (patch)
tree		5b8092f39bba3e7e4238c78f749af45f6346db97
parent		79b17ea740d9fab178d6a1aa15d848b5e6c01b82 (diff)
parent		f447c196fe7a3a92c6396f7628020cb8d564be15 (diff)
Merge tag 'trace-v4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull another tracing update from Steven Rostedt:

 "Commit 79c6f448c8b79c ('tracing: Fix hwlat kthread migration') fixed a
  bug that was caused by a race condition in initializing the hwlat
  thread. When fixing this code, I realized that it should have been
  done differently. Instead of doing the rewrite and sending that to
  stable, I just sent the above commit to fix the bug that should be
  backported. This commit is on top of the quick fix commit to rewrite
  the code the way it should have been written in the first place."

* tag 'trace-v4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Clean up the hwlat binding code
 kernel/trace/trace_hwlat.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 1199fe1d8eba..edfacd954e1b 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -266,24 +266,13 @@ out:
 static struct cpumask save_cpumask;
 static bool disable_migrate;
 
-static void move_to_next_cpu(bool initmask)
+static void move_to_next_cpu(void)
 {
-	static struct cpumask *current_mask;
+	struct cpumask *current_mask = &save_cpumask;
 	int next_cpu;
 
 	if (disable_migrate)
 		return;
-
-	/* Just pick the first CPU on first iteration */
-	if (initmask) {
-		current_mask = &save_cpumask;
-		get_online_cpus();
-		cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
-		put_online_cpus();
-		next_cpu = cpumask_first(current_mask);
-		goto set_affinity;
-	}
-
 	/*
 	 * If for some reason the user modifies the CPU affinity
 	 * of this thread, than stop migrating for the duration
@@ -300,7 +289,6 @@ static void move_to_next_cpu(bool initmask)
 	if (next_cpu >= nr_cpu_ids)
 		next_cpu = cpumask_first(current_mask);
 
-set_affinity:
 	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
 		goto disable;
 
@@ -327,12 +315,10 @@ static void move_to_next_cpu(bool initmask)
 static int kthread_fn(void *data)
 {
 	u64 interval;
-	bool initmask = true;
 
 	while (!kthread_should_stop()) {
 
-		move_to_next_cpu(initmask);
-		initmask = false;
+		move_to_next_cpu();
 
 		local_irq_disable();
 		get_sample();
@@ -363,13 +349,27 @@ static int kthread_fn(void *data)
  */
 static int start_kthread(struct trace_array *tr)
 {
+	struct cpumask *current_mask = &save_cpumask;
 	struct task_struct *kthread;
+	int next_cpu;
+
+	/* Just pick the first CPU on first iteration */
+	current_mask = &save_cpumask;
+	get_online_cpus();
+	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+	put_online_cpus();
+	next_cpu = cpumask_first(current_mask);
 
 	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
 	if (IS_ERR(kthread)) {
 		pr_err(BANNER "could not start sampling thread\n");
 		return -ENOMEM;
 	}
+
+	cpumask_clear(current_mask);
+	cpumask_set_cpu(next_cpu, current_mask);
+	sched_setaffinity(kthread->pid, current_mask);
+
 	hwlat_kthread = kthread;
 	wake_up_process(kthread);
 
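
The net effect of the rewrite is the common "pick a CPU and bind before the
first wakeup" idiom: the starter computes the initial mask once, creates the
kthread, pins it, and only then wakes it, so the thread can never run with an
uninitialized mask. Below is a minimal sketch of that idiom, not the hwlat
code itself: the demo_* names are made up for illustration, it uses the
v4.11-era get_online_cpus()/put_online_cpus() hotplug API shown in the diff,
and it assumes built-in kernel code, since sched_setaffinity() is not
exported to modules.

/*
 * Sketch only: demo_* identifiers are hypothetical, and built-in
 * kernel code is assumed (sched_setaffinity() is kernel-internal).
 */
#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct cpumask demo_cpumask;
static struct task_struct *demo_kthread;

static int demo_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* per-CPU sampling work would go here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static int demo_start(void)
{
	struct cpumask *current_mask = &demo_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	/* Compute the initial CPU once, in the starter. */
	get_online_cpus();
	cpumask_copy(current_mask, cpu_online_mask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(demo_fn, NULL, "demod");
	if (IS_ERR(kthread))
		return PTR_ERR(kthread);

	/*
	 * Bind before the first wakeup: the thread cannot observe an
	 * uninitialized mask, which was the race in the original code.
	 */
	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	demo_kthread = kthread;
	wake_up_process(kthread);
	return 0;
}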