Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  64
1 files changed, 34 insertions, 30 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7200c1bb8dde..726d27044778 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -446,7 +446,6 @@ struct pacct_struct {
  * @utime:		time spent in user mode, in &cputime_t units
  * @stime:		time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
- * @lock:		lock for fields in this struct
  *
  * This structure groups together three kinds of CPU time that are
  * tracked for threads and thread groups.  Most things considering
@@ -457,23 +456,33 @@ struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
-	spinlock_t lock;
 };
 /* Alternate field names when used to cache expirations. */
 #define prof_exp	stime
 #define virt_exp	utime
 #define sched_exp	sum_exec_runtime
 
+#define INIT_CPUTIME						\
+	(struct task_cputime) {					\
+		.utime = cputime_zero,				\
+		.stime = cputime_zero,				\
+		.sum_exec_runtime = 0,				\
+	}
+
 /**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals:		thread group interval timers; substructure for
- *			uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime:		thread group interval timers.
+ * @running:		non-zero when there are timers running and
+ *			@cputime receives updates.
+ * @lock:		lock for fields in this struct.
  *
  * This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
  */
-struct thread_group_cputime {
-	struct task_cputime totals;
+struct thread_group_cputimer {
+	struct task_cputime cputime;
+	int running;
+	spinlock_t lock;
 };
 
 /*
@@ -522,10 +531,10 @@ struct signal_struct {
 	cputime_t it_prof_incr, it_virt_incr;
 
 	/*
-	 * Thread group totals for process CPU clocks.
-	 * See thread_group_cputime(), et al, for details.
+	 * Thread group totals for process CPU timers.
+	 * See thread_group_cputimer(), et al, for details.
 	 */
-	struct thread_group_cputime cputime;
+	struct thread_group_cputimer cputimer;
 
 	/* Earliest-expiration cache. */
 	struct task_cputime cputime_expires;
@@ -562,7 +571,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t cutime, cstime;
+	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -571,6 +580,14 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
+	 * Cumulative ns of schedule CPU time fo dead threads in the
+	 * group, not including a zombie group leader, (This only differs
+	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
+	 * other than jiffies.)
+	 */
+	unsigned long long sum_sched_runtime;
+
+	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -2200,27 +2217,14 @@ static inline int spin_needbreak(spinlock_t *lock)
 /*
  * Thread group CPU time accounting.
  */
-
-static inline
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
-	struct task_cputime *totals = &tsk->signal->cputime.totals;
-	unsigned long flags;
-
-	spin_lock_irqsave(&totals->lock, flags);
-	*times = *totals;
-	spin_unlock_irqrestore(&totals->lock, flags);
-}
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputime.totals = (struct task_cputime){
-		.utime = cputime_zero,
-		.stime = cputime_zero,
-		.sum_exec_runtime = 0,
-	};
-
-	spin_lock_init(&sig->cputime.totals.lock);
+	sig->cputimer.cputime = INIT_CPUTIME;
+	spin_lock_init(&sig->cputimer.lock);
+	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
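The sketch below is a standalone user-space model of the accounting pattern this patch introduces: a per-group cached struct task_cputime, a running flag, and a lock that serializes updates and samples, mirroring the removed inline thread_group_cputime() and the new thread_group_cputime_init(). It is an illustration only, not kernel code: cputime_t is stood in by unsigned long, the kernel spinlock_t by a POSIX spinlock, and the helpers cputimer_account() and cputimer_sample() are invented names for this example.

/*
 * Standalone model of the thread_group_cputimer pattern; not kernel code.
 * cputime_t is approximated by unsigned long, spinlock_t by pthread_spinlock_t,
 * and the cputimer_account()/cputimer_sample() helpers are hypothetical.
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>

typedef unsigned long cputime_t;		/* stand-in for the kernel type */
#define cputime_zero	((cputime_t)0)

struct task_cputime {				/* as after the patch: no embedded lock */
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

#define INIT_CPUTIME						\
	(struct task_cputime) {					\
		.utime = cputime_zero,				\
		.stime = cputime_zero,				\
		.sum_exec_runtime = 0,				\
	}

struct thread_group_cputimer {			/* models the new structure */
	struct task_cputime cputime;		/* cached group totals */
	int running;				/* non-zero while group timers are armed */
	pthread_spinlock_t lock;		/* protects cputime and running */
};

static void cputimer_init(struct thread_group_cputimer *ct)
{
	ct->cputime = INIT_CPUTIME;
	pthread_spin_init(&ct->lock, PTHREAD_PROCESS_PRIVATE);
	ct->running = 0;
}

/* Hypothetical per-tick update path: only charge the cache while running. */
static void cputimer_account(struct thread_group_cputimer *ct,
			     cputime_t ut, cputime_t st, unsigned long long ns)
{
	pthread_spin_lock(&ct->lock);
	if (ct->running) {
		ct->cputime.utime += ut;
		ct->cputime.stime += st;
		ct->cputime.sum_exec_runtime += ns;
	}
	pthread_spin_unlock(&ct->lock);
}

/* Hypothetical sampling path, analogous to the removed inline: copy under the lock. */
static void cputimer_sample(struct thread_group_cputimer *ct,
			    struct task_cputime *times)
{
	pthread_spin_lock(&ct->lock);
	*times = ct->cputime;
	pthread_spin_unlock(&ct->lock);
}

int main(void)
{
	struct thread_group_cputimer ct;
	struct task_cputime snap;

	cputimer_init(&ct);
	ct.running = 1;				/* pretend a group timer was armed */
	cputimer_account(&ct, 3, 1, 4000000ULL);
	cputimer_sample(&ct, &snap);
	printf("utime=%lu stime=%lu runtime=%llu ns\n",
	       snap.utime, snap.stime, snap.sum_exec_runtime);
	return 0;
}

The layout follows what the hunks show: the lock leaves struct task_cputime, which signal_struct also embeds lock-free as cputime_expires, while the group-wide cache, its running flag, and the lock that guards them live together in struct thread_group_cputimer, so a sampler needs only that one lock.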