Diffstat (limited to 'kernel/sched_stats.h')
-rw-r--r--  kernel/sched_stats.h | 109
1 file changed, 104 insertions(+), 5 deletions(-)
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8385d43987e2..3b01098164c8 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -9,7 +9,7 @@
 static int show_schedstat(struct seq_file *seq, void *v)
 {
         int cpu;
-        int mask_len = NR_CPUS/32 * 9;
+        int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
         char *mask_str = kmalloc(mask_len, GFP_KERNEL);
 
         if (mask_str == NULL)
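
The old sizing truncates: integer division drops any partial 32-bit word of the cpumask, under-sizing the buffer when NR_CPUS is not a multiple of 32 (9 bytes per word covers eight hex digits plus a separator). A minimal userspace sketch of the arithmetic, using the same DIV_ROUND_UP definition as linux/kernel.h; the NR_CPUS value here is a hypothetical config, not from this patch:

    #include <stdio.h>

    /* Same definition as the kernel's DIV_ROUND_UP in linux/kernel.h. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int nr_cpus = 255; /* hypothetical CONFIG_NR_CPUS, not a multiple of 32 */

            /* old sizing: 255/32 = 7 words -> 63 bytes, one word short */
            printf("old: %d bytes\n", nr_cpus / 32 * 9);
            /* new sizing rounds up: 8 words -> 72 bytes */
            printf("new: %d bytes\n", DIV_ROUND_UP(nr_cpus, 32) * 9);
            return 0;
    }
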
@@ -31,7 +31,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
             rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
             rq->sched_switch, rq->sched_count, rq->sched_goidle,
             rq->ttwu_count, rq->ttwu_local,
-            rq->rq_sched_info.cpu_time,
+            rq->rq_cpu_time,
             rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
 
         seq_printf(seq, "\n");
@@ -90,13 +90,20 @@ static int schedstat_open(struct inode *inode, struct file *file)
         return res;
 }
 
-const struct file_operations proc_schedstat_operations = {
+static const struct file_operations proc_schedstat_operations = {
         .open    = schedstat_open,
         .read    = seq_read,
         .llseek  = seq_lseek,
         .release = single_release,
 };
 
+static int __init proc_schedstat_init(void)
+{
+        proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
+        return 0;
+}
+module_init(proc_schedstat_init);
+
 /*
  * Expects runqueue lock to be held for atomicity of update
  */
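
With the fops now static, /proc/schedstat is registered from this initcall via proc_create() rather than by an external caller. The open side is outside this hunk; since the release hook is single_release(), schedstat_open() presumably pairs with single_open(). A hedged sketch of that pattern (the real body may differ, e.g. by pre-sizing the seq_file buffer):

    /* Sketch only: the actual schedstat_open() is not part of this hunk.
     * single_open() binds show_schedstat() as the seq_file show callback,
     * matching the single_release() in the fops above. */
    static int schedstat_open(struct inode *inode, struct file *file)
    {
            return single_open(file, show_schedstat, NULL);
    }
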
@@ -116,7 +123,7 @@ static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {
         if (rq)
-                rq->rq_sched_info.cpu_time += delta;
+                rq->rq_cpu_time += delta;
 }
 
 static inline void
@@ -229,7 +236,6 @@ static inline void sched_info_depart(struct task_struct *t)
         unsigned long long delta = task_rq(t)->clock -
                         t->sched_info.last_arrival;
 
-        t->sched_info.cpu_time += delta;
         rq_sched_info_depart(task_rq(t), delta);
 
         if (t->state == TASK_RUNNING)
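
The per-task accumulator removed here duplicated time the scheduler already tracks, so departure now updates only the per-runqueue total. A hedged sketch of where per-task on-CPU time remains available; task_cpu_time() is a hypothetical helper, and reading se.sum_exec_runtime as the replacement is an assumption about the rest of this series:

    /* Assumption: with sched_info.cpu_time gone, consumers of per-task
     * on-CPU time read the scheduler entity's accumulator instead. */
    static inline unsigned long long task_cpu_time(struct task_struct *t)
    {
            return t->se.sum_exec_runtime; /* nanoseconds on CPU */
    }
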
@@ -270,3 +276,96 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 #define sched_info_switch(t, next)        do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
+/*
+ * The following are functions that support scheduler-internal time accounting.
+ * These functions are generally called at the timer tick. None of this depends
+ * on CONFIG_SCHEDSTATS.
+ */
+
+/**
+ * account_group_user_time - Maintain utime for a thread group.
+ *
+ * @tsk:        Pointer to task structure.
+ * @cputime:    Time value by which to increment the utime field of the
+ *              thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the utime field there.
+ */
+static inline void account_group_user_time(struct task_struct *tsk,
+                                           cputime_t cputime)
+{
+        struct signal_struct *sig;
+
+        /* tsk == current, ensure it is safe to use ->signal */
+        if (unlikely(tsk->exit_state))
+                return;
+
+        sig = tsk->signal;
+        if (sig->cputime.totals) {
+                struct task_cputime *times;
+
+                times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+                times->utime = cputime_add(times->utime, cputime);
+                put_cpu_no_resched();
+        }
+}
+
+/**
+ * account_group_system_time - Maintain stime for a thread group.
+ *
+ * @tsk:        Pointer to task structure.
+ * @cputime:    Time value by which to increment the stime field of the
+ *              thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the stime field there.
+ */
+static inline void account_group_system_time(struct task_struct *tsk,
+                                             cputime_t cputime)
+{
+        struct signal_struct *sig;
+
+        /* tsk == current, ensure it is safe to use ->signal */
+        if (unlikely(tsk->exit_state))
+                return;
+
+        sig = tsk->signal;
+        if (sig->cputime.totals) {
+                struct task_cputime *times;
+
+                times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+                times->stime = cputime_add(times->stime, cputime);
+                put_cpu_no_resched();
+        }
+}
+
+/**
+ * account_group_exec_runtime - Maintain exec runtime for a thread group.
+ *
+ * @tsk:        Pointer to task structure.
+ * @ns:         Time value by which to increment the sum_exec_runtime field
+ *              of the thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the sum_exec_runtime field there.
+ */
+static inline void account_group_exec_runtime(struct task_struct *tsk,
+                                              unsigned long long ns)
+{
+        struct signal_struct *sig;
+
+        sig = tsk->signal;
+        /* see __exit_signal()->task_rq_unlock_wait() */
+        barrier();
+        if (unlikely(!sig))
+                return;
+
+        if (sig->cputime.totals) {
+                struct task_cputime *times;
+
+                times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+                times->sum_exec_runtime += ns;
+                put_cpu_no_resched();
+        }
+}
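
As the comment block above says, these helpers are meant to be called from tick-time accounting and do not depend on CONFIG_SCHEDSTATS. A hedged sketch of one assumed call site: the account_user_time() path in kernel/sched.c, which would fold the same delta into both the per-task field and the thread-group totals (the per-CPU cpustat bookkeeping is elided, and the exact body is an assumption, not part of this diff):

    /* Sketch of the assumed caller at the timer tick. */
    void account_user_time(struct task_struct *p, cputime_t cputime)
    {
            p->utime = cputime_add(p->utime, cputime); /* per-task utime */
            account_group_user_time(p, cputime);       /* thread-group utime */
            /* ... per-CPU cpustat updates elided ... */
    }
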