author	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2008-07-21 00:55:14 -0400
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2008-07-21 00:55:14 -0400
commit	908cf4b925e419bc74f3297b2f0e51d6f8a81da2 (patch)
tree	6c2da79366d4695a9c2560ab18259eca8a2a25b4 /kernel/sched_stats.h
parent	92c49890922d54cba4b1eadeb0b185773c2c9570 (diff)
parent	14b395e35d1afdd8019d11b92e28041fad591b71 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into next
Diffstat (limited to 'kernel/sched_stats.h')
-rw-r--r--	kernel/sched_stats.h	49
1 file changed, 40 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 5bae2e0c3ff2..8385d43987e2 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -67,6 +67,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		preempt_enable();
 #endif
 	}
+	kfree(mask_str);
 	return 0;
 }
 
@@ -117,6 +118,13 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 	if (rq)
 		rq->rq_sched_info.cpu_time += delta;
 }
+
+static inline void
+rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
+{
+	if (rq)
+		rq->rq_sched_info.run_delay += delta;
+}
 # define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 # define schedstat_set(var, val)	do { var = (val); } while (0)
@@ -125,6 +133,9 @@ static inline void
 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 {}
 static inline void
+rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
+{}
+static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
 # define schedstat_inc(rq, field)	do { } while (0)
@@ -133,6 +144,11 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 #endif
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+static inline void sched_info_reset_dequeued(struct task_struct *t)
+{
+	t->sched_info.last_queued = 0;
+}
+
 /*
  * Called when a process is dequeued from the active array and given
  * the cpu.  We should note that with the exception of interactive
@@ -142,15 +158,22 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
  * active queue, thus delaying tasks in the expired queue from running;
  * see scheduler_tick()).
  *
- * This function is only called from sched_info_arrive(), rather than
- * dequeue_task(). Even though a task may be queued and dequeued multiple
- * times as it is shuffled about, we're really interested in knowing how
- * long it was from the *first* time it was queued to the time that it
- * finally hit a cpu.
+ * Though we are interested in knowing how long it was from the *first* time a
+ * task was queued to the time that it finally hit a cpu, we call this routine
+ * from dequeue_task() to account for possible rq->clock skew across cpus. The
+ * delta taken on each cpu would annul the skew.
  */
 static inline void sched_info_dequeued(struct task_struct *t)
 {
-	t->sched_info.last_queued = 0;
+	unsigned long long now = task_rq(t)->clock, delta = 0;
+
+	if (unlikely(sched_info_on()))
+		if (t->sched_info.last_queued)
+			delta = now - t->sched_info.last_queued;
+	sched_info_reset_dequeued(t);
+	t->sched_info.run_delay += delta;
+
+	rq_sched_info_dequeued(task_rq(t), delta);
 }
 
 /*
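The rewritten comment above carries the core reasoning of this hunk: last_queued and now are read from the same cpu's rq->clock, so any constant skew between per-cpu clocks cancels out of the subtraction. A minimal user-space sketch of that cancellation (plain C with illustrative numbers; not kernel code):

#include <stdio.h>

int main(void)
{
	/* Two per-cpu clocks with a constant 500-unit skew between them. */
	unsigned long long cpu0_clock = 1000;
	unsigned long long cpu1_clock = 1500;

	/* Task is queued on cpu0; last_queued is stamped from cpu0's clock. */
	unsigned long long last_queued = cpu0_clock;

	/* 300 units of real time pass; both clocks advance by the same amount. */
	cpu0_clock += 300;
	cpu1_clock += 300;

	/* Delta taken on the cpu that stamped last_queued: the skew cancels. */
	printf("same-cpu delta:  %llu\n", cpu0_clock - last_queued);	/* 300 */

	/* Delta taken against another cpu's clock: the 500-unit skew leaks in. */
	printf("cross-cpu delta: %llu\n", cpu1_clock - last_queued);	/* 800 */
	return 0;
}

Taking the delta on a different cpu would fold the 500-unit skew into run_delay; charging the wait in dequeue_task() on the local cpu is what avoids this.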
@@ -164,7 +187,7 @@ static void sched_info_arrive(struct task_struct *t)
 
 	if (t->sched_info.last_queued)
 		delta = now - t->sched_info.last_queued;
-	sched_info_dequeued(t);
+	sched_info_reset_dequeued(t);
 	t->sched_info.run_delay += delta;
 	t->sched_info.last_arrival = now;
 	t->sched_info.pcount++;
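This one-line rename follows from the new sched_info_dequeued(): that function now charges run_delay itself, so sched_info_arrive() must only reset the timestamp, or the same wait would be counted twice. A toy illustration of the double count the rename avoids (illustrative names only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long run_delay = 0;
	unsigned long long last_queued = 100;	/* queued at t=100 */
	unsigned long long now = 140;		/* got the cpu at t=140 */

	/* sched_info_arrive()'s own accounting: */
	run_delay += now - last_queued;		/* +40, correct */

	/* If arrive then called the full dequeue path instead of a bare
	 * reset, the still-set timestamp would be charged a second time: */
	if (last_queued)
		run_delay += now - last_queued;	/* +40 again */
	last_queued = 0;

	printf("run_delay = %llu (40 expected, 80 if double-charged)\n",
	       run_delay);
	return 0;
}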
@@ -197,6 +220,9 @@ static inline void sched_info_queued(struct task_struct *t)
 /*
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily.  Now we can calculate how long we ran.
+ * Also, if the process is still in the TASK_RUNNING state, call
+ * sched_info_queued() to mark that it has now again started waiting on
+ * the runqueue.
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
@@ -205,6 +231,9 @@ static inline void sched_info_depart(struct task_struct *t)
 
 	t->sched_info.cpu_time += delta;
 	rq_sched_info_depart(task_rq(t), delta);
+
+	if (t->state == TASK_RUNNING)
+		sched_info_queued(t);
 }
 
 /*
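The new tail of sched_info_depart() closes an accounting gap: a task preempted while still TASK_RUNNING goes straight back onto the runqueue, so its wait clock must restart immediately, whereas a task that blocked does not. A user-space sketch of that rule, with simplified stand-ins for the kernel's task state and sched_info fields:

#include <stdio.h>

enum task_state { STATE_RUNNING, STATE_SLEEPING };

struct task_sim {
	enum task_state state;
	unsigned long long last_queued;	/* 0 means "not waiting on a runqueue" */
};

static unsigned long long clock_now = 2000;	/* stand-in for rq->clock */

/* Stamp the start of a runqueue wait, as sched_info_queued() does. */
static void queued_sim(struct task_sim *t)
{
	if (!t->last_queued)
		t->last_queued = clock_now;
}

/* Depart: a task leaving the cpu while still runnable starts waiting again. */
static void depart_sim(struct task_sim *t)
{
	/* (cpu_time accounting elided) */
	if (t->state == STATE_RUNNING)
		queued_sim(t);
}

int main(void)
{
	struct task_sim preempted = { STATE_RUNNING, 0 };
	struct task_sim blocked = { STATE_SLEEPING, 0 };

	depart_sim(&preempted);	/* preempted: immediately waiting again */
	depart_sim(&blocked);	/* blocked: off the runqueue, no stamp */

	printf("preempted last_queued = %llu\n", preempted.last_queued);	/* 2000 */
	printf("blocked   last_queued = %llu\n", blocked.last_queued);	/* 0 */
	return 0;
}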
@@ -235,7 +264,9 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 		__sched_info_switch(prev, next);
 }
 #else
-#define sched_info_queued(t)		do { } while (0)
-#define sched_info_switch(t, next)	do { } while (0)
+#define sched_info_queued(t)			do { } while (0)
+#define sched_info_reset_dequeued(t)	do { } while (0)
+#define sched_info_dequeued(t)			do { } while (0)
+#define sched_info_switch(t, next)		do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
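Taken together, the patch gives each task a queued -> dequeued/arrive -> depart accounting cycle. A compact user-space sketch of the whole flow (field names modeled on sched_info for illustration, not the kernel structures themselves):

#include <stdio.h>

struct sched_info_sim {
	unsigned long long last_queued;		/* when the wait began (0 = none) */
	unsigned long long last_arrival;	/* when the task got the cpu */
	unsigned long long run_delay;		/* total runqueue wait */
	unsigned long long cpu_time;		/* total time on cpu */
};

static unsigned long long clock_now;	/* stand-in for rq->clock */

static void queued(struct sched_info_sim *si)
{
	if (!si->last_queued)
		si->last_queued = clock_now;
}

static void arrive(struct sched_info_sim *si)
{
	if (si->last_queued)
		si->run_delay += clock_now - si->last_queued;
	si->last_queued = 0;	/* bare reset, as sched_info_reset_dequeued() does */
	si->last_arrival = clock_now;
}

static void depart(struct sched_info_sim *si)
{
	si->cpu_time += clock_now - si->last_arrival;
	queued(si);		/* still runnable: waiting again at once */
}

int main(void)
{
	struct sched_info_sim si = { 0, 0, 0, 0 };

	clock_now = 100; queued(&si);	/* woken, placed on runqueue */
	clock_now = 140; arrive(&si);	/* waited 40 */
	clock_now = 200; depart(&si);	/* ran 60, preempted, requeued */
	clock_now = 230; arrive(&si);	/* waited another 30 */

	printf("run_delay = %llu (expect 70)\n", si.run_delay);
	printf("cpu_time  = %llu (expect 60)\n", si.cpu_time);
	return 0;
}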