diff options
author | Qais Yousef <qais.yousef@arm.com> | 2019-06-04 07:14:56 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2019-06-24 13:23:41 -0400 |
commit | ba19f51fcb549c7ee6261da243eea55a47e98d78 (patch) | |
tree | 9b2f24a17a880894a71ab6bf51722dacd2ab7d38 /kernel/sched/pelt.c | |
parent | 3c93a0c04dfdcba199982b53b97488b1b1d90eff (diff) |
sched/debug: Add new tracepoints to track PELT at rq level
The new tracepoints allow tracking PELT signals at rq level for all
scheduling classes + irq.
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pavankumar Kondeti <pkondeti@codeaurora.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Link: https://lkml.kernel.org/r/20190604111459.2862-4-qais.yousef@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/pelt.c')
-rw-r--r-- | kernel/sched/pelt.c | 9 |
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index 42ea66b07b1d..4e961b55b5ea 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include "sched.h" | 28 | #include "sched.h" |
29 | #include "pelt.h" | 29 | #include "pelt.h" |
30 | 30 | ||
31 | #include <trace/events/sched.h> | ||
32 | |||
31 | /* | 33 | /* |
32 | * Approximate: | 34 | * Approximate: |
33 | * val * y^n, where y^32 ~= 0.5 (~1 scheduling period) | 35 | * val * y^n, where y^32 ~= 0.5 (~1 scheduling period) |
@@ -292,6 +294,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) | |||
292 | cfs_rq->curr != NULL)) { | 294 | cfs_rq->curr != NULL)) { |
293 | 295 | ||
294 | ___update_load_avg(&cfs_rq->avg, 1, 1); | 296 | ___update_load_avg(&cfs_rq->avg, 1, 1); |
297 | trace_pelt_cfs_tp(cfs_rq); | ||
295 | return 1; | 298 | return 1; |
296 | } | 299 | } |
297 | 300 | ||
@@ -317,6 +320,7 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) | |||
317 | running)) { | 320 | running)) { |
318 | 321 | ||
319 | ___update_load_avg(&rq->avg_rt, 1, 1); | 322 | ___update_load_avg(&rq->avg_rt, 1, 1); |
323 | trace_pelt_rt_tp(rq); | ||
320 | return 1; | 324 | return 1; |
321 | } | 325 | } |
322 | 326 | ||
@@ -340,6 +344,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) | |||
340 | running)) { | 344 | running)) { |
341 | 345 | ||
342 | ___update_load_avg(&rq->avg_dl, 1, 1); | 346 | ___update_load_avg(&rq->avg_dl, 1, 1); |
347 | trace_pelt_dl_tp(rq); | ||
343 | return 1; | 348 | return 1; |
344 | } | 349 | } |
345 | 350 | ||
@@ -388,8 +393,10 @@ int update_irq_load_avg(struct rq *rq, u64 running) | |||
388 | 1, | 393 | 1, |
389 | 1); | 394 | 1); |
390 | 395 | ||
391 | if (ret) | 396 | if (ret) { |
392 | ___update_load_avg(&rq->avg_irq, 1, 1); | 397 | ___update_load_avg(&rq->avg_irq, 1, 1); |
398 | trace_pelt_irq_tp(rq); | ||
399 | } | ||
393 | 400 | ||
394 | return ret; | 401 | return ret; |
395 | } | 402 | } |