aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2016-07-15 15:48:56 -0400
committerSteven Rostedt <rostedt@goodmis.org>2016-09-02 12:47:54 -0400
commit0330f7aa8ee63d0c435c0cb4e47ea06235ee4b7f (patch)
tree67fa852b52a5c4c09ec74f6ba38c7b44f14af180 /kernel/trace
parentc850ed38db5f46441565174ef57c271124cce568 (diff)
tracing: Have hwlat trace migrate across tracing_cpumask CPUs
Instead of having the hwlat detector thread stay on one CPU, have it migrate across all the CPUs specified by tracing_cpumask. If the user modifies the thread's CPU affinity, migration will stop until the next time the tracer is instantiated. The migration happens at the end of each window (period). Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace_hwlat.c55
1 file changed, 55 insertions, 0 deletions
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 08dfabe4e862..65aab3914a56 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -42,6 +42,7 @@
42#include <linux/kthread.h> 42#include <linux/kthread.h>
43#include <linux/tracefs.h> 43#include <linux/tracefs.h>
44#include <linux/uaccess.h> 44#include <linux/uaccess.h>
45#include <linux/cpumask.h>
45#include <linux/delay.h> 46#include <linux/delay.h>
46#include "trace.h" 47#include "trace.h"
47 48
@@ -211,6 +212,57 @@ out:
211 return ret; 212 return ret;
212} 213}
213 214
215static struct cpumask save_cpumask;
216static bool disable_migrate;
217
218static void move_to_next_cpu(void)
219{
220 static struct cpumask *current_mask;
221 int next_cpu;
222
223 if (disable_migrate)
224 return;
225
226 /* Just pick the first CPU on first iteration */
227 if (!current_mask) {
228 current_mask = &save_cpumask;
229 get_online_cpus();
230 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
231 put_online_cpus();
232 next_cpu = cpumask_first(current_mask);
233 goto set_affinity;
234 }
235
236 /*
237 * If for some reason the user modifies the CPU affinity
238 * of this thread, than stop migrating for the duration
239 * of the current test.
240 */
241 if (!cpumask_equal(current_mask, &current->cpus_allowed))
242 goto disable;
243
244 get_online_cpus();
245 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
246 next_cpu = cpumask_next(smp_processor_id(), current_mask);
247 put_online_cpus();
248
249 if (next_cpu >= nr_cpu_ids)
250 next_cpu = cpumask_first(current_mask);
251
252 set_affinity:
253 if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
254 goto disable;
255
256 cpumask_clear(current_mask);
257 cpumask_set_cpu(next_cpu, current_mask);
258
259 sched_setaffinity(0, current_mask);
260 return;
261
262 disable:
263 disable_migrate = true;
264}
265
214/* 266/*
215 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread 267 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
216 * 268 *
@@ -230,6 +282,8 @@ static int kthread_fn(void *data)
230 282
231 while (!kthread_should_stop()) { 283 while (!kthread_should_stop()) {
232 284
285 move_to_next_cpu();
286
233 local_irq_disable(); 287 local_irq_disable();
234 get_sample(); 288 get_sample();
235 local_irq_enable(); 289 local_irq_enable();
@@ -473,6 +527,7 @@ static int hwlat_tracer_init(struct trace_array *tr)
473 527
474 hwlat_trace = tr; 528 hwlat_trace = tr;
475 529
530 disable_migrate = false;
476 hwlat_data.count = 0; 531 hwlat_data.count = 0;
477 tr->max_latency = 0; 532 tr->max_latency = 0;
478 save_tracing_thresh = tracing_thresh; 533 save_tracing_thresh = tracing_thresh;