aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorBalbir Singh <balbir@linux.vnet.ibm.com>2007-11-09 16:39:37 -0500
committerIngo Molnar <mingo@elte.hu>2007-11-09 16:39:37 -0500
commit9a41785cc43d88397f787a651ed7286a33f8462f (patch)
treeab3efa693c573f2b40dbf4d0e3f219f2dc20da22 /kernel
parentb2be5e96dc0b5a179cf4cb98e65cfb605752ca26 (diff)
sched: fix delay accounting regression
Fix the delay accounting regression introduced by commit 75d4ef16a6aa84f708188bada182315f80aab6fa: rq no longer has sched_info data associated with it, and the task_struct sched_info structure is what delay accounting uses to report statistics back to user space. Also remove the direct use of sched_clock() (which is no longer a valid thing to do) and use rq->clock instead. Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched_stats.h11
1 files changed, 6 insertions, 5 deletions
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index ef1a7df80ea2..630178e53bb6 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -127,7 +127,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 # define schedstat_set(var, val)	do { } while (0)
 #endif
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 /*
  * Called when a process is dequeued from the active array and given
  * the cpu. We should note that with the exception of interactive
@@ -155,7 +155,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
  */
 static void sched_info_arrive(struct task_struct *t)
 {
-	unsigned long long now = sched_clock(), delta = 0;
+	unsigned long long now = task_rq(t)->clock, delta = 0;
 
 	if (t->sched_info.last_queued)
 		delta = now - t->sched_info.last_queued;
@@ -186,7 +186,7 @@ static inline void sched_info_queued(struct task_struct *t)
 {
 	if (unlikely(sched_info_on()))
 		if (!t->sched_info.last_queued)
-			t->sched_info.last_queued = sched_clock();
+			t->sched_info.last_queued = task_rq(t)->clock;
 }
 
 /*
@@ -195,7 +195,8 @@ static inline void sched_info_queued(struct task_struct *t)
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
-	unsigned long long delta = sched_clock() - t->sched_info.last_arrival;
+	unsigned long long delta = task_rq(t)->clock -
+			t->sched_info.last_arrival;
 
 	t->sched_info.cpu_time += delta;
 	rq_sched_info_depart(task_rq(t), delta);
@@ -231,5 +232,5 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 #else
 #define sched_info_queued(t)		do { } while (0)
 #define sched_info_switch(t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS */
+#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 