aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/stats.h
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2013-04-11 19:51:02 -0400
committerIngo Molnar <mingo@kernel.org>2013-05-28 03:40:27 -0400
commit78becc27097585c6aec7043834cadde950ae79f2 (patch)
tree3e8f0982faa2a456c1c68fa5e5a4c7c815194ae4 /kernel/sched/stats.h
parent1a55af2e45cce0ff13bc33c8ee99da84e188b615 (diff)
sched: Use an accessor to read the rq clock
Read the runqueue clock through an accessor. This prepares for adding a
debugging infrastructure to detect missing or redundant calls to
update_rq_clock() between a scheduler's entry and exit point.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Turner <pjt@google.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1365724262-20142-6-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/stats.h')
-rw-r--r--  kernel/sched/stats.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 2ef90a51ec5e..17d7065c3872 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -61,7 +61,7 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
61 */ 61 */
62static inline void sched_info_dequeued(struct task_struct *t) 62static inline void sched_info_dequeued(struct task_struct *t)
63{ 63{
64 unsigned long long now = task_rq(t)->clock, delta = 0; 64 unsigned long long now = rq_clock(task_rq(t)), delta = 0;
65 65
66 if (unlikely(sched_info_on())) 66 if (unlikely(sched_info_on()))
67 if (t->sched_info.last_queued) 67 if (t->sched_info.last_queued)
@@ -79,7 +79,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
79 */ 79 */
80static void sched_info_arrive(struct task_struct *t) 80static void sched_info_arrive(struct task_struct *t)
81{ 81{
82 unsigned long long now = task_rq(t)->clock, delta = 0; 82 unsigned long long now = rq_clock(task_rq(t)), delta = 0;
83 83
84 if (t->sched_info.last_queued) 84 if (t->sched_info.last_queued)
85 delta = now - t->sched_info.last_queued; 85 delta = now - t->sched_info.last_queued;
@@ -100,7 +100,7 @@ static inline void sched_info_queued(struct task_struct *t)
100{ 100{
101 if (unlikely(sched_info_on())) 101 if (unlikely(sched_info_on()))
102 if (!t->sched_info.last_queued) 102 if (!t->sched_info.last_queued)
103 t->sched_info.last_queued = task_rq(t)->clock; 103 t->sched_info.last_queued = rq_clock(task_rq(t));
104} 104}
105 105
106/* 106/*
@@ -112,7 +112,7 @@ static inline void sched_info_queued(struct task_struct *t)
112 */ 112 */
113static inline void sched_info_depart(struct task_struct *t) 113static inline void sched_info_depart(struct task_struct *t)
114{ 114{
115 unsigned long long delta = task_rq(t)->clock - 115 unsigned long long delta = rq_clock(task_rq(t)) -
116 t->sched_info.last_arrival; 116 t->sched_info.last_arrival;
117 117
118 rq_sched_info_depart(task_rq(t), delta); 118 rq_sched_info_depart(task_rq(t), delta);