aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/rt.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2013-04-11 19:51:02 -0400
committerIngo Molnar <mingo@kernel.org>2013-05-28 03:40:27 -0400
commit78becc27097585c6aec7043834cadde950ae79f2 (patch)
tree3e8f0982faa2a456c1c68fa5e5a4c7c815194ae4 /kernel/sched/rt.c
parent1a55af2e45cce0ff13bc33c8ee99da84e188b615 (diff)
sched: Use an accessor to read the rq clock
Read the runqueue clock through an accessor. This prepares for adding a debugging infrastructure to detect missing or redundant calls to update_rq_clock() between a scheduler's entry and exit point.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Turner <pjt@google.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1365724262-20142-6-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--kernel/sched/rt.c8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 8853ab17b750..8d85f9ac4262 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -886,7 +886,7 @@ static void update_curr_rt(struct rq *rq)
886 if (curr->sched_class != &rt_sched_class) 886 if (curr->sched_class != &rt_sched_class)
887 return; 887 return;
888 888
889 delta_exec = rq->clock_task - curr->se.exec_start; 889 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
890 if (unlikely((s64)delta_exec <= 0)) 890 if (unlikely((s64)delta_exec <= 0))
891 return; 891 return;
892 892
@@ -896,7 +896,7 @@ static void update_curr_rt(struct rq *rq)
896 curr->se.sum_exec_runtime += delta_exec; 896 curr->se.sum_exec_runtime += delta_exec;
897 account_group_exec_runtime(curr, delta_exec); 897 account_group_exec_runtime(curr, delta_exec);
898 898
899 curr->se.exec_start = rq->clock_task; 899 curr->se.exec_start = rq_clock_task(rq);
900 cpuacct_charge(curr, delta_exec); 900 cpuacct_charge(curr, delta_exec);
901 901
902 sched_rt_avg_update(rq, delta_exec); 902 sched_rt_avg_update(rq, delta_exec);
@@ -1345,7 +1345,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
1345 } while (rt_rq); 1345 } while (rt_rq);
1346 1346
1347 p = rt_task_of(rt_se); 1347 p = rt_task_of(rt_se);
1348 p->se.exec_start = rq->clock_task; 1348 p->se.exec_start = rq_clock_task(rq);
1349 1349
1350 return p; 1350 return p;
1351} 1351}
@@ -1997,7 +1997,7 @@ static void set_curr_task_rt(struct rq *rq)
1997{ 1997{
1998 struct task_struct *p = rq->curr; 1998 struct task_struct *p = rq->curr;
1999 1999
2000 p->se.exec_start = rq->clock_task; 2000 p->se.exec_start = rq_clock_task(rq);
2001 2001
2002 /* The running task is never eligible for pushing */ 2002 /* The running task is never eligible for pushing */
2003 dequeue_pushable_task(rq, p); 2003 dequeue_pushable_task(rq, p);