| author | James Morris <jmorris@namei.org> | 2009-03-23 19:52:46 -0400 |
|---|---|---|
| committer | James Morris <jmorris@namei.org> | 2009-03-23 19:52:46 -0400 |
| commit | 703a3cd72817e99201cef84a8a7aecc60b2b3581 (patch) | |
| tree | 3e943755178ff410694722bb031f523136fbc432 /include/linux/sched.h | |
| parent | df7f54c012b92ec93d56b68547351dcdf8a163d3 (diff) | |
| parent | 8e0ee43bc2c3e19db56a4adaa9a9b04ce885cd84 (diff) | |
Merge branch 'master' into next
Diffstat (limited to 'include/linux/sched.h')

 include/linux/sched.h (-rw-r--r--) | 71
 1 file changed, 41 insertions, 30 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 397c20cfb6a..2c36f62e754 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -443,7 +443,6 @@ struct pacct_struct {
  * @utime: time spent in user mode, in &cputime_t units
  * @stime: time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
- * @lock: lock for fields in this struct
  *
  * This structure groups together three kinds of CPU time that are
  * tracked for threads and thread groups.  Most things considering
@@ -454,23 +453,33 @@ struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
-	spinlock_t lock;
 };
 /* Alternate field names when used to cache expirations. */
 #define prof_exp	stime
 #define virt_exp	utime
 #define sched_exp	sum_exec_runtime
 
+#define INIT_CPUTIME	\
+	(struct task_cputime) {				\
+		.utime = cputime_zero,			\
+		.stime = cputime_zero,			\
+		.sum_exec_runtime = 0,			\
+	}
+
 /**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals:		thread group interval timers; substructure for
- *			uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime:		thread group interval timers.
+ * @running:		non-zero when there are timers running and
+ *			@cputime receives updates.
+ * @lock:		lock for fields in this struct.
  *
  * This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
  */
-struct thread_group_cputime {
-	struct task_cputime totals;
+struct thread_group_cputimer {
+	struct task_cputime cputime;
+	int running;
+	spinlock_t lock;
 };
 
 /*
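Note on the hunk above: the spinlock is dropped from struct task_cputime, which becomes plain data, and reappears inside the new struct thread_group_cputimer alongside the cached totals and a running flag that, per the new kerneldoc, is non-zero while timers are running and @cputime receives updates. Below is a minimal user-space sketch of that layout, with cputime_t approximated as unsigned long and a pthread mutex standing in for the kernel spinlock; it is illustrative only, not code from this tree.

```c
#include <pthread.h>
#include <stdio.h>

typedef unsigned long cputime_t;		/* rough stand-in for the kernel type */
#define cputime_zero ((cputime_t)0)

struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;	/* plain data: no lock any more */
};

/* mirrors the INIT_CPUTIME initializer added in the hunk above */
#define INIT_CPUTIME				\
	(struct task_cputime) {			\
		.utime = cputime_zero,		\
		.stime = cputime_zero,		\
		.sum_exec_runtime = 0,		\
	}

struct thread_group_cputimer {
	struct task_cputime cputime;	/* accumulated group totals */
	int running;			/* non-zero while group timers receive updates */
	pthread_mutex_t lock;		/* stands in for the kernel spinlock */
};

/* Take a consistent snapshot of the group totals under the lock. */
static void sample_cputimer(struct thread_group_cputimer *ct,
			    struct task_cputime *out)
{
	pthread_mutex_lock(&ct->lock);
	*out = ct->cputime;
	pthread_mutex_unlock(&ct->lock);
}

int main(void)
{
	struct thread_group_cputimer ct;
	struct task_cputime snap;

	/* same shape as the new thread_group_cputime_init() later in this diff */
	ct.cputime = INIT_CPUTIME;
	pthread_mutex_init(&ct.lock, NULL);
	ct.running = 0;

	ct.cputime.utime += 5;		/* pretend some user time was charged */
	sample_cputimer(&ct, &snap);
	printf("utime=%lu running=%d\n", snap.utime, ct.running);

	pthread_mutex_destroy(&ct.lock);
	return 0;
}
```

Because task_cputime no longer embeds a lock, the same struct can also be used directly for the cputime_expires cache that appears as context in the next hunk.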
@@ -519,10 +528,10 @@ struct signal_struct {
 	cputime_t it_prof_incr, it_virt_incr;
 
 	/*
-	 * Thread group totals for process CPU clocks.
-	 * See thread_group_cputime(), et al, for details.
+	 * Thread group totals for process CPU timers.
+	 * See thread_group_cputimer(), et al, for details.
 	 */
-	struct thread_group_cputime cputime;
+	struct thread_group_cputimer cputimer;
 
 	/* Earliest-expiration cache. */
 	struct task_cputime cputime_expires;
@@ -559,7 +568,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t cutime, cstime;
+	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -568,6 +577,14 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
+	 * Cumulative ns of schedule CPU time fo dead threads in the
+	 * group, not including a zombie group leader, (This only differs
+	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
+	 * other than jiffies.)
+	 */
+	unsigned long long sum_sched_runtime;
+
+	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -1404,6 +1421,9 @@ struct task_struct {
 #endif
 };
 
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
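The tsk_cpumask() macro added above is described by its own comment as a future-safe accessor: call sites take the address of cpus_allowed through the macro rather than naming the field directly. A tiny stand-alone illustration of the pattern, using toy user-space types rather than the real kernel ones:

```c
#include <stdio.h>

/* toy stand-ins for the kernel types; only the accessor pattern matters here */
typedef struct { unsigned long bits; } cpumask_t;

struct task_struct {
	cpumask_t cpus_allowed;
};

/* same definition as in the hunk above */
#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)

int main(void)
{
	struct task_struct t = { .cpus_allowed = { .bits = 0x3 } };

	/* callers go through the macro, not ->cpus_allowed itself */
	cpumask_t *mask = tsk_cpumask(&t);
	printf("allowed bits: %#lx\n", mask->bits);
	return 0;
}
```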
@@ -2185,27 +2205,14 @@ static inline int spin_needbreak(spinlock_t *lock)
 /*
  * Thread group CPU time accounting.
  */
-
-static inline
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
-	struct task_cputime *totals = &tsk->signal->cputime.totals;
-	unsigned long flags;
-
-	spin_lock_irqsave(&totals->lock, flags);
-	*times = *totals;
-	spin_unlock_irqrestore(&totals->lock, flags);
-}
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	sig->cputime.totals = (struct task_cputime){
-		.utime = cputime_zero,
-		.stime = cputime_zero,
-		.sum_exec_runtime = 0,
-	};
-
-	spin_lock_init(&sig->cputime.totals.lock);
+	sig->cputimer.cputime = INIT_CPUTIME;
+	spin_lock_init(&sig->cputimer.lock);
+	sig->cputimer.running = 0;
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
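In the hunk above, the inline, lock-protected snapshot helper is replaced by two out-of-line declarations, and the initializer now uses INIT_CPUTIME, sets up the cputimer's own lock, and clears the running flag. Going by the @running kerneldoc earlier in this diff, the cached totals are only updated while group timers are running; the following is a rough, self-contained sketch of that idea, with a hypothetical account_group_tick() helper and a pthread mutex in place of the kernel spinlock.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* toy model of the new cputimer: totals, a running flag, and a lock */
struct cputimer_model {
	unsigned long long utime_ticks;
	bool running;
	pthread_mutex_t lock;
};

/* modelled per-tick accounting path (the name is hypothetical) */
static void account_group_tick(struct cputimer_model *ct, unsigned long ticks)
{
	if (!ct->running)
		return;	/* fast path: no group timers armed, lock never taken */

	pthread_mutex_lock(&ct->lock);
	ct->utime_ticks += ticks;
	pthread_mutex_unlock(&ct->lock);
}

int main(void)
{
	struct cputimer_model ct = { .utime_ticks = 0, .running = false };

	pthread_mutex_init(&ct.lock, NULL);

	account_group_tick(&ct, 1);	/* ignored: no timers running */
	ct.running = true;		/* a process-wide timer was armed */
	account_group_tick(&ct, 1);	/* now accumulated under the lock */

	printf("utime_ticks=%llu\n", ct.utime_ticks);
	pthread_mutex_destroy(&ct.lock);
	return 0;
}
```

The practical effect suggested by the flag is that the common case, with no process-wide timers armed, can skip the shared lock entirely on every tick.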
@@ -2289,9 +2296,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 			long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+				struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {