about summary refs log tree commit diff stats
path: root/kernel/sched_stats.h
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched_stats.h')
-rw-r--r--  kernel/sched_stats.h  95
1 file changed, 94 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 67579253b53b..ee71bec1da66 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -90,13 +90,20 @@ static int schedstat_open(struct inode *inode, struct file *file)
90 return res; 90 return res;
91} 91}
92 92
93const struct file_operations proc_schedstat_operations = { 93static const struct file_operations proc_schedstat_operations = {
94 .open = schedstat_open, 94 .open = schedstat_open,
95 .read = seq_read, 95 .read = seq_read,
96 .llseek = seq_lseek, 96 .llseek = seq_lseek,
97 .release = single_release, 97 .release = single_release,
98}; 98};
99 99
100static int __init proc_schedstat_init(void)
101{
102 proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
103 return 0;
104}
105module_init(proc_schedstat_init);
106
100/* 107/*
101 * Expects runqueue lock to be held for atomicity of update 108 * Expects runqueue lock to be held for atomicity of update
102 */ 109 */
@@ -270,3 +277,89 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
270#define sched_info_switch(t, next) do { } while (0) 277#define sched_info_switch(t, next) do { } while (0)
271#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ 278#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
272 279
280/*
281 * The following are functions that support scheduler-internal time accounting.
282 * These functions are generally called at the timer tick. None of this depends
283 * on CONFIG_SCHEDSTATS.
284 */
285
286/**
287 * account_group_user_time - Maintain utime for a thread group.
288 *
289 * @tsk: Pointer to task structure.
290 * @cputime: Time value by which to increment the utime field of the
291 * thread_group_cputime structure.
292 *
293 * If thread group time is being maintained, get the structure for the
294 * running CPU and update the utime field there.
295 */
296static inline void account_group_user_time(struct task_struct *tsk,
297 cputime_t cputime)
298{
299 struct signal_struct *sig;
300
301 sig = tsk->signal;
302 if (unlikely(!sig))
303 return;
304 if (sig->cputime.totals) {
305 struct task_cputime *times;
306
307 times = per_cpu_ptr(sig->cputime.totals, get_cpu());
308 times->utime = cputime_add(times->utime, cputime);
309 put_cpu_no_resched();
310 }
311}
312
313/**
314 * account_group_system_time - Maintain stime for a thread group.
315 *
316 * @tsk: Pointer to task structure.
317 * @cputime: Time value by which to increment the stime field of the
318 * thread_group_cputime structure.
319 *
320 * If thread group time is being maintained, get the structure for the
321 * running CPU and update the stime field there.
322 */
323static inline void account_group_system_time(struct task_struct *tsk,
324 cputime_t cputime)
325{
326 struct signal_struct *sig;
327
328 sig = tsk->signal;
329 if (unlikely(!sig))
330 return;
331 if (sig->cputime.totals) {
332 struct task_cputime *times;
333
334 times = per_cpu_ptr(sig->cputime.totals, get_cpu());
335 times->stime = cputime_add(times->stime, cputime);
336 put_cpu_no_resched();
337 }
338}
339
340/**
341 * account_group_exec_runtime - Maintain exec runtime for a thread group.
342 *
343 * @tsk: Pointer to task structure.
344 * @ns: Time value by which to increment the sum_exec_runtime field
345 * of the thread_group_cputime structure.
346 *
347 * If thread group time is being maintained, get the structure for the
348 * running CPU and update the sum_exec_runtime field there.
349 */
350static inline void account_group_exec_runtime(struct task_struct *tsk,
351 unsigned long long ns)
352{
353 struct signal_struct *sig;
354
355 sig = tsk->signal;
356 if (unlikely(!sig))
357 return;
358 if (sig->cputime.totals) {
359 struct task_cputime *times;
360
361 times = per_cpu_ptr(sig->cputime.totals, get_cpu());
362 times->sum_exec_runtime += ns;
363 put_cpu_no_resched();
364 }
365}