Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	64
1 file changed, 34 insertions(+), 30 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 28b3f505f18..f0a50b20e8a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -443,7 +443,6 @@ struct pacct_struct {
  * @utime: time spent in user mode, in &cputime_t units
  * @stime: time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
- * @lock: lock for fields in this struct
  *
  * This structure groups together three kinds of CPU time that are
  * tracked for threads and thread groups. Most things considering
@@ -454,23 +453,33 @@ struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
-	spinlock_t lock;
 };
 /* Alternate field names when used to cache expirations. */
 #define prof_exp	stime
 #define virt_exp	utime
 #define sched_exp	sum_exec_runtime
 
+#define INIT_CPUTIME	\
+	(struct task_cputime) {				\
+		.utime = cputime_zero,			\
+		.stime = cputime_zero,			\
+		.sum_exec_runtime = 0,			\
+	}
+
 /**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals:	thread group interval timers; substructure for
- *		uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime:	thread group interval timers.
+ * @running:	non-zero when there are timers running and
+ *		@cputime receives updates.
+ * @lock:	lock for fields in this struct.
  *
  * This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
  */
-struct thread_group_cputime {
-	struct task_cputime totals;
+struct thread_group_cputimer {
+	struct task_cputime cputime;
+	int running;
+	spinlock_t lock;
 };
 
 /*
@@ -519,10 +528,10 @@ struct signal_struct {
 	cputime_t it_prof_incr, it_virt_incr;
 
 	/*
-	 * Thread group totals for process CPU clocks.
-	 * See thread_group_cputime(), et al, for details.
+	 * Thread group totals for process CPU timers.
+	 * See thread_group_cputimer(), et al, for details.
 	 */
-	struct thread_group_cputime cputime;
+	struct thread_group_cputimer cputimer;
 
 	/* Earliest-expiration cache. */
 	struct task_cputime cputime_expires;
@@ -559,7 +568,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t cutime, cstime;
+	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -568,6 +577,14 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
+	 * Cumulative ns of scheduled CPU time of dead threads in the
+	 * group, not including a zombie group leader.  (This only differs
+	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
+	 * other than jiffies.)
+	 */
+	unsigned long long sum_sched_runtime;
+
+	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
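The sum_sched_runtime field added above only grows when dead threads fold their counters into the group, which, per the existing comment a few hunks up, happens in __exit_signal(). A rough, approximate sketch of that accumulation is shown below; the actual statements live in kernel/exit.c, outside this header, and may differ.

	/* in __exit_signal(), with the signal struct locked (approximate) */
	sig->utime = cputime_add(sig->utime, tsk->utime);
	sig->stime = cputime_add(sig->stime, tsk->stime);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;

This keeps a per-group record of exited threads' CPU time in sched_clock units alongside the new utime/stime totals.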
@@ -2195,27 +2212,14 @@ static inline int spin_needbreak(spinlock_t *lock)
 /*
  * Thread group CPU time accounting.
  */
-
-static inline
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
-	struct task_cputime *totals = &tsk->signal->cputime.totals;
-	unsigned long flags;
-
-	spin_lock_irqsave(&totals->lock, flags);
-	*times = *totals;
-	spin_unlock_irqrestore(&totals->lock, flags);
-}
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputime.totals = (struct task_cputime){
-		.utime = cputime_zero,
-		.stime = cputime_zero,
-		.sum_exec_runtime = 0,
-	};
-
-	spin_lock_init(&sig->cputime.totals.lock);
+	sig->cputimer.cputime = INIT_CPUTIME;
+	spin_lock_init(&sig->cputimer.lock);
+	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
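With the old inline definition removed, thread_group_cputime() and the new thread_group_cputimer() are now plain declarations whose bodies live elsewhere in the series. Purely as an illustration built from the fields declared in this header, thread_group_cputimer() could take a shape like the following; the actual implementation is not shown on this page and may differ.

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	unsigned long flags;

	spin_lock_irqsave(&cputimer->lock, flags);
	if (!cputimer->running) {
		/* first use: mark the cache live and seed it from the threads */
		cputimer->running = 1;
		thread_group_cputime(tsk, &cputimer->cputime);
	}
	*times = cputimer->cputime;
	spin_unlock_irqrestore(&cputimer->lock, flags);
}

In this sketch the first caller pays for a full thread_group_cputime() sum and flips running, matching thread_group_cputime_init() above, which seeds the cache with INIT_CPUTIME and leaves running cleared.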