author    Frederic Weisbecker <fweisbec@gmail.com>  2013-04-11 19:51:02 -0400
committer Ingo Molnar <mingo@kernel.org>            2013-05-28 03:40:27 -0400
commit    78becc27097585c6aec7043834cadde950ae79f2
tree      3e8f0982faa2a456c1c68fa5e5a4c7c815194ae4 /kernel/sched
parent    1a55af2e45cce0ff13bc33c8ee99da84e188b615
sched: Use an accessor to read the rq clock
Read the runqueue clock through an accessor. This prepares for adding a
debugging infrastructure to detect missing or redundant calls to
update_rq_clock() between a scheduler's entry and exit point.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Turner <pjt@google.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1365724262-20142-6-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
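[Illustration, not part of this patch: once every reader goes through rq_clock()/rq_clock_task(), a later debugging change only needs to touch the accessor, not the call sites. A hypothetical sketch of what such a centralized check could look like, assuming only existing helpers such as lockdep_assert_held():]

/* Hypothetical sketch only -- not taken from this patch or a follow-up. */
static inline u64 rq_clock(struct rq *rq)
{
        /*
         * A future debug patch could assert preconditions here once,
         * e.g. that the rq lock is held when its clock is read,
         * instead of auditing every former rq->clock access.
         */
        lockdep_assert_held(&rq->lock);
        return rq->clock;
}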
Diffstat (limited to 'kernel/sched')
 kernel/sched/core.c      |  6
 kernel/sched/fair.c      | 44
 kernel/sched/rt.c        |  8
 kernel/sched/sched.h     | 10
 kernel/sched/stats.h     |  8
 kernel/sched/stop_task.c |  8
 6 files changed, 47 insertions(+), 37 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 46d00172ae4a..36f85be2932b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -667,7 +667,7 @@ void sched_avg_update(struct rq *rq)
 {
         s64 period = sched_avg_period();
 
-        while ((s64)(rq->clock - rq->age_stamp) > period) {
+        while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
                 /*
                  * Inline assembly required to prevent the compiler
                  * optimising this loop into a divmod call.
@@ -1328,7 +1328,7 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
                 p->sched_class->task_woken(rq, p);
 
         if (rq->idle_stamp) {
-                u64 delta = rq->clock - rq->idle_stamp;
+                u64 delta = rq_clock(rq) - rq->idle_stamp;
                 u64 max = 2*sysctl_sched_migration_cost;
 
                 if (delta > max)
@@ -2106,7 +2106,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 
         if (task_current(rq, p)) {
                 update_rq_clock(rq);
-                ns = rq->clock_task - p->se.exec_start;
+                ns = rq_clock_task(rq) - p->se.exec_start;
                 if ((s64)ns < 0)
                         ns = 0;
         }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1c8762a5370c..3ee1c2e4ae60 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -704,7 +704,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 static void update_curr(struct cfs_rq *cfs_rq)
 {
         struct sched_entity *curr = cfs_rq->curr;
-        u64 now = rq_of(cfs_rq)->clock_task;
+        u64 now = rq_clock_task(rq_of(cfs_rq));
         unsigned long delta_exec;
 
         if (unlikely(!curr))
@@ -736,7 +736,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
+        schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
 }
 
 /*
@@ -756,14 +756,14 @@ static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
         schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
-                        rq_of(cfs_rq)->clock - se->statistics.wait_start));
+                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
         schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
         schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
-                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
+                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
 #ifdef CONFIG_SCHEDSTATS
         if (entity_is_task(se)) {
                 trace_sched_stat_wait(task_of(se),
-                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
+                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
         }
 #endif
         schedstat_set(se->statistics.wait_start, 0);
@@ -789,7 +789,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
         /*
          * We are starting a new run period:
          */
-        se->exec_start = rq_of(cfs_rq)->clock_task;
+        se->exec_start = rq_clock_task(rq_of(cfs_rq));
 }
 
 /**************************************************
@@ -1515,7 +1515,7 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
-        __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
+        __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
         __update_tg_runnable_avg(&rq->avg, &rq->cfs);
 }
 
@@ -1530,7 +1530,7 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
          * accumulated while sleeping.
          */
         if (unlikely(se->avg.decay_count <= 0)) {
-                se->avg.last_runnable_update = rq_of(cfs_rq)->clock_task;
+                se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
                 if (se->avg.decay_count) {
                         /*
                          * In a wake-up migration we have to approximate the
@@ -1625,7 +1625,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
         tsk = task_of(se);
 
         if (se->statistics.sleep_start) {
-                u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
+                u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
 
                 if ((s64)delta < 0)
                         delta = 0;
@@ -1642,7 +1642,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 }
         }
         if (se->statistics.block_start) {
-                u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
+                u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
 
                 if ((s64)delta < 0)
                         delta = 0;
@@ -1823,9 +1823,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                         struct task_struct *tsk = task_of(se);
 
                         if (tsk->state & TASK_INTERRUPTIBLE)
-                                se->statistics.sleep_start = rq_of(cfs_rq)->clock;
+                                se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
                         if (tsk->state & TASK_UNINTERRUPTIBLE)
-                                se->statistics.block_start = rq_of(cfs_rq)->clock;
+                                se->statistics.block_start = rq_clock(rq_of(cfs_rq));
                 }
 #endif
         }
@@ -2100,7 +2100,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
         if (unlikely(cfs_rq->throttle_count))
                 return cfs_rq->throttled_clock_task;
 
-        return rq_of(cfs_rq)->clock_task - cfs_rq->throttled_clock_task_time;
+        return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
 }
 
 /* returns 0 on failure to allocate runtime */
@@ -2159,7 +2159,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         struct rq *rq = rq_of(cfs_rq);
 
         /* if the deadline is ahead of our clock, nothing to do */
-        if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
+        if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
                 return;
 
         if (cfs_rq->runtime_remaining < 0)
@@ -2248,7 +2248,7 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 #ifdef CONFIG_SMP
         if (!cfs_rq->throttle_count) {
                 /* adjust cfs_rq_clock_task() */
-                cfs_rq->throttled_clock_task_time += rq->clock_task -
+                cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
                                              cfs_rq->throttled_clock_task;
         }
 #endif
@@ -2263,7 +2263,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 
         /* group is entering throttled state, stop time */
         if (!cfs_rq->throttle_count)
-                cfs_rq->throttled_clock_task = rq->clock_task;
+                cfs_rq->throttled_clock_task = rq_clock_task(rq);
         cfs_rq->throttle_count++;
 
         return 0;
@@ -2302,7 +2302,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
                 rq->nr_running -= task_delta;
 
         cfs_rq->throttled = 1;
-        cfs_rq->throttled_clock = rq->clock;
+        cfs_rq->throttled_clock = rq_clock(rq);
         raw_spin_lock(&cfs_b->lock);
         list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
         raw_spin_unlock(&cfs_b->lock);
@@ -2323,7 +2323,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
         update_rq_clock(rq);
 
         raw_spin_lock(&cfs_b->lock);
-        cfs_b->throttled_time += rq->clock - cfs_rq->throttled_clock;
+        cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
         list_del_rcu(&cfs_rq->throttled_list);
         raw_spin_unlock(&cfs_b->lock);
 
@@ -2726,7 +2726,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 #else /* CONFIG_CFS_BANDWIDTH */
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 {
-        return rq_of(cfs_rq)->clock_task;
+        return rq_clock_task(rq_of(cfs_rq));
 }
 
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
@@ -3966,7 +3966,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
          * 2) too many balance attempts have failed.
          */
 
-        tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
+        tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
         if (!tsk_cache_hot ||
                 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 
@@ -4322,7 +4322,7 @@ static unsigned long scale_rt_power(int cpu)
         age_stamp = ACCESS_ONCE(rq->age_stamp);
         avg = ACCESS_ONCE(rq->rt_avg);
 
-        total = sched_avg_period() + (rq->clock - age_stamp);
+        total = sched_avg_period() + (rq_clock(rq) - age_stamp);
 
         if (unlikely(total < avg)) {
                 /* Ensures that power won't end up being negative */
@@ -5261,7 +5261,7 @@ void idle_balance(int this_cpu, struct rq *this_rq)
         int pulled_task = 0;
         unsigned long next_balance = jiffies + HZ;
 
-        this_rq->idle_stamp = this_rq->clock;
+        this_rq->idle_stamp = rq_clock(this_rq);
 
         if (this_rq->avg_idle < sysctl_sched_migration_cost)
                 return;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 8853ab17b750..8d85f9ac4262 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -886,7 +886,7 @@ static void update_curr_rt(struct rq *rq)
         if (curr->sched_class != &rt_sched_class)
                 return;
 
-        delta_exec = rq->clock_task - curr->se.exec_start;
+        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
         if (unlikely((s64)delta_exec <= 0))
                 return;
 
@@ -896,7 +896,7 @@ static void update_curr_rt(struct rq *rq)
         curr->se.sum_exec_runtime += delta_exec;
         account_group_exec_runtime(curr, delta_exec);
 
-        curr->se.exec_start = rq->clock_task;
+        curr->se.exec_start = rq_clock_task(rq);
         cpuacct_charge(curr, delta_exec);
 
         sched_rt_avg_update(rq, delta_exec);
@@ -1345,7 +1345,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
         } while (rt_rq);
 
         p = rt_task_of(rt_se);
-        p->se.exec_start = rq->clock_task;
+        p->se.exec_start = rq_clock_task(rq);
 
         return p;
 }
@@ -1997,7 +1997,7 @@ static void set_curr_task_rt(struct rq *rq)
 {
         struct task_struct *p = rq->curr;
 
-        p->se.exec_start = rq->clock_task;
+        p->se.exec_start = rq_clock_task(rq);
 
         /* The running task is never eligible for pushing */
         dequeue_pushable_task(rq, p);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c806c61a1261..74ff659e964f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -548,6 +548,16 @@ DECLARE_PER_CPU(struct rq, runqueues);
 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
 #define raw_rq()                (&__raw_get_cpu_var(runqueues))
 
+static inline u64 rq_clock(struct rq *rq)
+{
+        return rq->clock;
+}
+
+static inline u64 rq_clock_task(struct rq *rq)
+{
+        return rq->clock_task;
+}
+
 #ifdef CONFIG_SMP
 
 #define rcu_dereference_check_sched_domain(p) \
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 2ef90a51ec5e..17d7065c3872 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -61,7 +61,7 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
  */
 static inline void sched_info_dequeued(struct task_struct *t)
 {
-        unsigned long long now = task_rq(t)->clock, delta = 0;
+        unsigned long long now = rq_clock(task_rq(t)), delta = 0;
 
         if (unlikely(sched_info_on()))
                 if (t->sched_info.last_queued)
@@ -79,7 +79,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
  */
 static void sched_info_arrive(struct task_struct *t)
 {
-        unsigned long long now = task_rq(t)->clock, delta = 0;
+        unsigned long long now = rq_clock(task_rq(t)), delta = 0;
 
         if (t->sched_info.last_queued)
                 delta = now - t->sched_info.last_queued;
@@ -100,7 +100,7 @@ static inline void sched_info_queued(struct task_struct *t)
 {
         if (unlikely(sched_info_on()))
                 if (!t->sched_info.last_queued)
-                        t->sched_info.last_queued = task_rq(t)->clock;
+                        t->sched_info.last_queued = rq_clock(task_rq(t));
 }
 
 /*
@@ -112,7 +112,7 @@ static inline void sched_info_queued(struct task_struct *t)
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
-        unsigned long long delta = task_rq(t)->clock -
+        unsigned long long delta = rq_clock(task_rq(t)) -
                         t->sched_info.last_arrival;
 
         rq_sched_info_depart(task_rq(t), delta);
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index da5eb5bed84a..e08fbeeb54b9 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -28,7 +28,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
         struct task_struct *stop = rq->stop;
 
         if (stop && stop->on_rq) {
-                stop->se.exec_start = rq->clock_task;
+                stop->se.exec_start = rq_clock_task(rq);
                 return stop;
         }
 
@@ -57,7 +57,7 @@ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
         struct task_struct *curr = rq->curr;
         u64 delta_exec;
 
-        delta_exec = rq->clock_task - curr->se.exec_start;
+        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
         if (unlikely((s64)delta_exec < 0))
                 delta_exec = 0;
 
@@ -67,7 +67,7 @@ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
         curr->se.sum_exec_runtime += delta_exec;
         account_group_exec_runtime(curr, delta_exec);
 
-        curr->se.exec_start = rq->clock_task;
+        curr->se.exec_start = rq_clock_task(rq);
         cpuacct_charge(curr, delta_exec);
 }
 
@@ -79,7 +79,7 @@ static void set_curr_task_stop(struct rq *rq)
 {
         struct task_struct *stop = rq->stop;
 
-        stop->se.exec_start = rq->clock_task;
+        stop->se.exec_start = rq_clock_task(rq);
 }
 
 static void switched_to_stop(struct rq *rq, struct task_struct *p)