author    Ingo Molnar <mingo@elte.hu>    2009-02-12 07:08:57 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-02-12 07:08:57 -0500
commit    871cafcc962fa1655c44b4f0e54d4c5cc14e273c (patch)
tree      fdb7bc65d2606c85b7be6c33ba0dfd5b4e472245 /include/linux/sched.h
parent    cf2592f59c0e8ed4308adbdb2e0a88655379d579 (diff)
parent    b578f3fcca1e78624dfb5f358776e63711d7fda2 (diff)
Merge branch 'linus' into core/softlockup
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 57
1 file changed, 34 insertions(+), 23 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e0d723fea9f5..d05e2b3ae41a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -462,16 +462,27 @@ struct task_cputime {
 #define virt_exp	utime
 #define sched_exp	sum_exec_runtime
 
+#define INIT_CPUTIME	\
+	(struct task_cputime) {				\
+		.utime = cputime_zero,			\
+		.stime = cputime_zero,			\
+		.sum_exec_runtime = 0,			\
+	}
+
 /**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals:	thread group interval timers; substructure for
- *		uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime:	thread group interval timers.
+ * @running:	non-zero when there are timers running and
+ *		@cputime receives updates.
+ * @lock:	lock for fields in this struct.
  *
  * This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
  */
-struct thread_group_cputime {
-	struct task_cputime *totals;
+struct thread_group_cputimer {
+	struct task_cputime cputime;
+	int running;
+	spinlock_t lock;
 };
 
 /*
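The key change in this hunk: the old pointer to per-CPU totals, which had to be allocated on first use, becomes an embedded structure guarded by a spinlock, with a running flag so the fast path can skip the lock entirely when no group timer is armed. A rough sketch of the update pattern this enables, modeled on the account_group_exec_runtime() helper from the merged timer rework (that helper lives in kernel/sched_stats.h and is not part of this hunk):

/*
 * Sketch only: how a fast-path accounting helper can use the new
 * layout. Exact body may differ from the merged code.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer;

	if (unlikely(!tsk->signal))
		return;

	cputimer = &tsk->signal->cputimer;

	/* No group timer armed: skip the lock entirely. */
	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	spin_unlock(&cputimer->lock);
}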
@@ -520,10 +531,10 @@ struct signal_struct {
 	cputime_t it_prof_incr, it_virt_incr;
 
 	/*
-	 * Thread group totals for process CPU clocks.
-	 * See thread_group_cputime(), et al, for details.
+	 * Thread group totals for process CPU timers.
+	 * See thread_group_cputimer(), et al, for details.
 	 */
-	struct thread_group_cputime cputime;
+	struct thread_group_cputimer cputimer;
 
 	/* Earliest-expiration cache. */
 	struct task_cputime cputime_expires;
@@ -560,7 +571,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t cutime, cstime;
+	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
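signal_struct now carries utime/stime totals of its own, accumulated from threads as they die (see the comment about __exit_signal above). Roughly, the fold looks like the sketch below, modeled on __exit_signal() in kernel/exit.c; the function name here is hypothetical, and locking plus the rest of the exit bookkeeping are omitted:

/* Sketch only: a dying non-leader thread folds its counters into
 * the new group-wide fields. */
static void exit_fold_sketch(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	/* Only non-leaders fold here; the group leader's counters
	 * stay reachable until the zombie is reaped. */
	sig->utime = cputime_add(sig->utime, task_utime(tsk));
	sig->stime = cputime_add(sig->stime, task_stime(tsk));
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
}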
@@ -569,6 +580,14 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
+	 * Cumulative ns of scheduled CPU time of dead threads in the
+	 * group, not including a zombie group leader.  (This only differs
+	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
+	 * other than jiffies.)
+	 */
+	unsigned long long sum_sched_runtime;
+
+	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
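sum_sched_runtime is the scheduler-clock counterpart of those totals: dead threads' runtime accumulates here, so a whole-group figure only needs a walk over the live threads. A sketch of that assembly, using the thread_group_cputime() signature declared later in this patch and modeled on the implementation in kernel/posix-cpu-timers.c (tasklist locking omitted):

/* Sketch only: live threads are walked, dead ones are already
 * folded into signal_struct. */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	struct task_struct *t = tsk;

	*times = INIT_CPUTIME;

	do {
		times->utime = cputime_add(times->utime, t->utime);
		times->stime = cputime_add(times->stime, t->stime);
		times->sum_exec_runtime += t->se.sum_exec_runtime;
		t = next_thread(t);
	} while (t != tsk);

	times->utime = cputime_add(times->utime, sig->utime);
	times->stime = cputime_add(times->stime, sig->stime);
	times->sum_exec_runtime += sig->sum_sched_runtime;
}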
@@ -632,7 +651,6 @@ struct user_struct {
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_devs;	/* The number of epoll descriptors currently open */
 	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
@@ -2184,25 +2202,18 @@ static inline int spin_needbreak(spinlock_t *lock)
 /*
  * Thread group CPU time accounting.
  */
-
-extern int thread_group_cputime_alloc(struct task_struct *);
-extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputime.totals = NULL;
-}
-
-static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
-{
-	if (curr->signal->cputime.totals)
-		return 0;
-	return thread_group_cputime_alloc(curr);
+	sig->cputimer.cputime = INIT_CPUTIME;
+	spin_lock_init(&sig->cputimer.lock);
+	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
 {
-	free_percpu(sig->cputime.totals);
 }
 
 /*
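Of the two declarations above, thread_group_cputime() still computes totals from scratch, while the new thread_group_cputimer() returns the cached totals from signal->cputimer, arming accumulation on first use. A sketch of what that accessor is expected to do, based on the implementation in the merged branch (kernel/posix-cpu-timers.c); the merged code also reconciles the cache via update_gt_cputime() and uses irq-safe locking, both elided here:

/* Sketch only: sample the cached group totals, seeding them the
 * first time a group timer is armed. */
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	spin_lock(&cputimer->lock);
	if (!cputimer->running) {
		/* First use: seed the cache and start accumulation. */
		cputimer->running = 1;
		thread_group_cputime(tsk, &sum);
		cputimer->cputime = sum;
	}
	*times = cputimer->cputime;
	spin_unlock(&cputimer->lock);
}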