diff options
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 68 |
1 file changed, 38 insertions, 30 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index b00a77f4999e..a7faa8d05d7f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -445,7 +445,6 @@ struct pacct_struct { | |||
445 | * @utime: time spent in user mode, in &cputime_t units | 445 | * @utime: time spent in user mode, in &cputime_t units |
446 | * @stime: time spent in kernel mode, in &cputime_t units | 446 | * @stime: time spent in kernel mode, in &cputime_t units |
447 | * @sum_exec_runtime: total time spent on the CPU, in nanoseconds | 447 | * @sum_exec_runtime: total time spent on the CPU, in nanoseconds |
448 | * @lock: lock for fields in this struct | ||
449 | * | 448 | * |
450 | * This structure groups together three kinds of CPU time that are | 449 | * This structure groups together three kinds of CPU time that are |
451 | * tracked for threads and thread groups. Most things considering | 450 | * tracked for threads and thread groups. Most things considering |
@@ -456,23 +455,33 @@ struct task_cputime { | |||
456 | cputime_t utime; | 455 | cputime_t utime; |
457 | cputime_t stime; | 456 | cputime_t stime; |
458 | unsigned long long sum_exec_runtime; | 457 | unsigned long long sum_exec_runtime; |
459 | spinlock_t lock; | ||
460 | }; | 458 | }; |
461 | /* Alternate field names when used to cache expirations. */ | 459 | /* Alternate field names when used to cache expirations. */ |
462 | #define prof_exp stime | 460 | #define prof_exp stime |
463 | #define virt_exp utime | 461 | #define virt_exp utime |
464 | #define sched_exp sum_exec_runtime | 462 | #define sched_exp sum_exec_runtime |
465 | 463 | ||
464 | #define INIT_CPUTIME \ | ||
465 | (struct task_cputime) { \ | ||
466 | .utime = cputime_zero, \ | ||
467 | .stime = cputime_zero, \ | ||
468 | .sum_exec_runtime = 0, \ | ||
469 | } | ||
470 | |||
466 | /** | 471 | /** |
467 | * struct thread_group_cputime - thread group interval timer counts | 472 | * struct thread_group_cputimer - thread group interval timer counts |
468 | * @totals: thread group interval timers; substructure for | 473 | * @cputime: thread group interval timers. |
469 | * uniprocessor kernel, per-cpu for SMP kernel. | 474 | * @running: non-zero when there are timers running and |
475 | * @cputime receives updates. | ||
476 | * @lock: lock for fields in this struct. | ||
470 | * | 477 | * |
471 | * This structure contains the version of task_cputime, above, that is | 478 | * This structure contains the version of task_cputime, above, that is |
472 | * used for thread group CPU clock calculations. | 479 | * used for thread group CPU timer calculations. |
473 | */ | 480 | */ |
474 | struct thread_group_cputime { | 481 | struct thread_group_cputimer { |
475 | struct task_cputime totals; | 482 | struct task_cputime cputime; |
483 | int running; | ||
484 | spinlock_t lock; | ||
476 | }; | 485 | }; |
477 | 486 | ||
478 | /* | 487 | /* |
@@ -521,10 +530,10 @@ struct signal_struct { | |||
521 | cputime_t it_prof_incr, it_virt_incr; | 530 | cputime_t it_prof_incr, it_virt_incr; |
522 | 531 | ||
523 | /* | 532 | /* |
524 | * Thread group totals for process CPU clocks. | 533 | * Thread group totals for process CPU timers. |
525 | * See thread_group_cputime(), et al, for details. | 534 | * See thread_group_cputimer(), et al, for details. |
526 | */ | 535 | */ |
527 | struct thread_group_cputime cputime; | 536 | struct thread_group_cputimer cputimer; |
528 | 537 | ||
529 | /* Earliest-expiration cache. */ | 538 | /* Earliest-expiration cache. */ |
530 | struct task_cputime cputime_expires; | 539 | struct task_cputime cputime_expires; |
@@ -561,7 +570,7 @@ struct signal_struct { | |||
561 | * Live threads maintain their own counters and add to these | 570 | * Live threads maintain their own counters and add to these |
562 | * in __exit_signal, except for the group leader. | 571 | * in __exit_signal, except for the group leader. |
563 | */ | 572 | */ |
564 | cputime_t cutime, cstime; | 573 | cputime_t utime, stime, cutime, cstime; |
565 | cputime_t gtime; | 574 | cputime_t gtime; |
566 | cputime_t cgtime; | 575 | cputime_t cgtime; |
567 | unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; | 576 | unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; |
@@ -570,6 +579,14 @@ struct signal_struct { | |||
570 | struct task_io_accounting ioac; | 579 | struct task_io_accounting ioac; |
571 | 580 | ||
572 | /* | 581 | /* |
582 | * Cumulative ns of scheduled CPU time for dead threads in the | ||
583 | * group, not including a zombie group leader. (This only differs | ||
584 | * from jiffies_to_ns(utime + stime) if sched_clock uses something | ||
585 | * other than jiffies.) | ||
586 | */ | ||
587 | unsigned long long sum_sched_runtime; | ||
588 | |||
589 | /* | ||
573 | * We don't bother to synchronize most readers of this at all, | 590 | * We don't bother to synchronize most readers of this at all, |
574 | * because there is no reader checking a limit that actually needs | 591 | * because there is no reader checking a limit that actually needs |
575 | * to get both rlim_cur and rlim_max atomically, and either one | 592 | * to get both rlim_cur and rlim_max atomically, and either one |
@@ -2186,27 +2203,14 @@ static inline int spin_needbreak(spinlock_t *lock) | |||
2186 | /* | 2203 | /* |
2187 | * Thread group CPU time accounting. | 2204 | * Thread group CPU time accounting. |
2188 | */ | 2205 | */ |
2189 | 2206 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); | |
2190 | static inline | 2207 | void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); |
2191 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) | ||
2192 | { | ||
2193 | struct task_cputime *totals = &tsk->signal->cputime.totals; | ||
2194 | unsigned long flags; | ||
2195 | |||
2196 | spin_lock_irqsave(&totals->lock, flags); | ||
2197 | *times = *totals; | ||
2198 | spin_unlock_irqrestore(&totals->lock, flags); | ||
2199 | } | ||
2200 | 2208 | ||
2201 | static inline void thread_group_cputime_init(struct signal_struct *sig) | 2209 | static inline void thread_group_cputime_init(struct signal_struct *sig) |
2202 | { | 2210 | { |
2203 | sig->cputime.totals = (struct task_cputime){ | 2211 | sig->cputimer.cputime = INIT_CPUTIME; |
2204 | .utime = cputime_zero, | 2212 | spin_lock_init(&sig->cputimer.lock); |
2205 | .stime = cputime_zero, | 2213 | sig->cputimer.running = 0; |
2206 | .sum_exec_runtime = 0, | ||
2207 | }; | ||
2208 | |||
2209 | spin_lock_init(&sig->cputime.totals.lock); | ||
2210 | } | 2214 | } |
2211 | 2215 | ||
2212 | static inline void thread_group_cputime_free(struct signal_struct *sig) | 2216 | static inline void thread_group_cputime_free(struct signal_struct *sig) |
@@ -2290,9 +2294,13 @@ extern long sched_group_rt_runtime(struct task_group *tg); | |||
2290 | extern int sched_group_set_rt_period(struct task_group *tg, | 2294 | extern int sched_group_set_rt_period(struct task_group *tg, |
2291 | long rt_period_us); | 2295 | long rt_period_us); |
2292 | extern long sched_group_rt_period(struct task_group *tg); | 2296 | extern long sched_group_rt_period(struct task_group *tg); |
2297 | extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); | ||
2293 | #endif | 2298 | #endif |
2294 | #endif | 2299 | #endif |
2295 | 2300 | ||
2301 | extern int task_can_switch_user(struct user_struct *up, | ||
2302 | struct task_struct *tsk); | ||
2303 | |||
2296 | #ifdef CONFIG_TASK_XACCT | 2304 | #ifdef CONFIG_TASK_XACCT |
2297 | static inline void add_rchar(struct task_struct *tsk, ssize_t amt) | 2305 | static inline void add_rchar(struct task_struct *tsk, ssize_t amt) |
2298 | { | 2306 | { |