author     Ingo Molnar <mingo@elte.hu>  2008-10-24 06:48:46 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-10-24 06:48:46 -0400
commit     8c82a17e9c924c0e9f13e75e4c2f6bca19a4b516 (patch)
tree       d535f46a917e14e90deccb29ad00aac016ad18dd  /include/linux/sched.h
parent     4ce72a2c063a7fa8e42a9435440ae3364115a58d (diff)
parent     57f8f7b60db6f1ed2c6918ab9230c4623a9dbe37 (diff)
Merge commit 'v2.6.28-rc1' into sched/urgent
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  107
1 file changed, 88 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c05b45faef18..b483f39a7112 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -402,12 +402,21 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_MAPPED_PRIVATE  4
 #define MMF_DUMP_MAPPED_SHARED   5
 #define MMF_DUMP_ELF_HEADERS     6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED  8
 #define MMF_DUMP_FILTER_SHIFT    MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS     5
+#define MMF_DUMP_FILTER_BITS     7
 #define MMF_DUMP_FILTER_MASK \
         (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
-        ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
+        ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
+         (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF 0
+#endif
 
 struct sighand_struct {
         atomic_t                count;
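For readers tracking the bit arithmetic in this hunk: MMF_DUMPABLE_BITS is 2 in this header, so the dump filter sits just above the two dumpable bits in mm->flags, and widening MMF_DUMP_FILTER_BITS from 5 to 7 grows the mask from 0x7c to 0x1fc (bits 2..8). A minimal standalone sketch; the macros are copied from above, and the main() harness exists only for illustration:

#include <stdio.h>

#define MMF_DUMPABLE_BITS      2
#define MMF_DUMP_FILTER_SHIFT  MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS   7
#define MMF_DUMP_FILTER_MASK \
        (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)

int main(void)
{
        /* 7 filter bits shifted past the 2 dumpable bits: prints 0x1fc.
         * The old 5-bit layout gave 0x7c. */
        printf("filter mask: %#x\n", MMF_DUMP_FILTER_MASK);
        return 0;
}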
@@ -424,6 +433,39 @@ struct pacct_struct {
         unsigned long           ac_minflt, ac_majflt;
 };
 
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:              time spent in user mode, in &cputime_t units
+ * @stime:              time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:   total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+        cputime_t utime;
+        cputime_t stime;
+        unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp        stime
+#define virt_exp        utime
+#define sched_exp       sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:             thread group interval timers; substructure for
+ *                      uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+        struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
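The prof_exp/virt_exp/sched_exp defines are plain token aliases, which is what lets a single task_cputime double as an accumulator and as the earliest-expiration cache introduced below. A compilable toy illustration; cputime_t is reduced to an unsigned long here purely for the example:

#include <stdio.h>

typedef unsigned long cputime_t;        /* simplified for the example */

struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp        stime
#define virt_exp        utime
#define sched_exp       sum_exec_runtime

int main(void)
{
        struct task_cputime expires = { 0, 0, 0 };

        expires.prof_exp = 100;         /* expands to expires.stime */
        printf("stime slot: %lu\n", expires.stime);     /* prints 100 */
        return 0;
}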
@@ -469,6 +511,17 @@ struct signal_struct {
         cputime_t it_prof_expires, it_virt_expires;
         cputime_t it_prof_incr, it_virt_incr;
 
+        /*
+         * Thread group totals for process CPU clocks.
+         * See thread_group_cputime(), et al, for details.
+         */
+        struct thread_group_cputime cputime;
+
+        /* Earliest-expiration cache. */
+        struct task_cputime cputime_expires;
+
+        struct list_head cpu_timers[3];
+
         /* job control IDs */
 
         /*
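The point of caching the earliest expirations is that the timer tick can compare accumulated times against three cached values and bail out early, instead of walking the cpu_timers lists on every tick. A self-contained sketch of the kind of comparison this enables (types simplified; the real fast-path check lives in kernel/posix-cpu-timers.c):

typedef unsigned long cputime_t;        /* simplified */

struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
};

/* Zero in a cached field means "no timer armed for this clock". */
static int task_cputime_expired_sketch(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (expires->utime && sample->utime >= expires->utime)
                return 1;       /* virtual timer: user time */
        if (expires->stime &&
            sample->utime + sample->stime >= expires->stime)
                return 1;       /* profiling timer: user + system time */
        if (expires->sum_exec_runtime &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;       /* scheduler runtime (CPUCLOCK_SCHED) */
        return 0;
}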
@@ -499,7 +552,7 @@ struct signal_struct {
          * Live threads maintain their own counters and add to these
          * in __exit_signal, except for the group leader.
          */
-        cputime_t utime, stime, cutime, cstime;
+        cputime_t cutime, cstime;
         cputime_t gtime;
         cputime_t cgtime;
         unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -508,14 +561,6 @@ struct signal_struct {
         struct task_io_accounting ioac;
 
         /*
-         * Cumulative ns of scheduled CPU time for dead threads in the
-         * group, not including a zombie group leader.  (This only differs
-         * from jiffies_to_ns(utime + stime) if sched_clock uses something
-         * other than jiffies.)
-         */
-        unsigned long long sum_sched_runtime;
-
-        /*
          * We don't bother to synchronize most readers of this at all,
          * because there is no reader checking a limit that actually needs
          * to get both rlim_cur and rlim_max atomically, and either one
@@ -526,8 +571,6 @@ struct signal_struct {
          */
         struct rlimit rlim[RLIM_NLIMITS];
 
-        struct list_head cpu_timers[3];
-
         /* keep the process-shared keyrings here so that they do the right
          * thing in threads created with CLONE_THREAD */
 #ifdef CONFIG_KEYS
@@ -637,10 +680,6 @@ struct sched_info {
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
-#ifdef CONFIG_SCHEDSTATS
-extern const struct file_operations proc_schedstat_operations;
-#endif /* CONFIG_SCHEDSTATS */
-
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
         spinlock_t lock;
@@ -1138,8 +1177,7 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
         unsigned long min_flt, maj_flt;
 
-        cputime_t it_prof_expires, it_virt_expires;
-        unsigned long long it_sched_expires;
+        struct task_cputime cputime_expires;
         struct list_head cpu_timers[3];
 
 /* process credentials */
@@ -1305,6 +1343,12 @@ struct task_struct {
         int latency_record_count;
         struct latency_record latency_record[LT_SAVECOUNT];
 #endif
+        /*
+         * time slack values; these are used to round up poll() and
+         * select() etc timeout values. These are in nanoseconds.
+         */
+        unsigned long timer_slack_ns;
+        unsigned long default_timer_slack_ns;
 };
 
 /*
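The new per-task slack lets the kernel coalesce nearby timer expirations by rounding poll()/select() timeouts up. In the same 2.6.28 merge window this became tunable from userspace via prctl(); a hedged sketch, assuming a libc that already exposes PR_SET_TIMERSLACK and PR_GET_TIMERSLACK:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        /* Let the kernel round this task's poll()/select() timeouts
         * up by as much as 1 ms (the argument is in nanoseconds). */
        if (prctl(PR_SET_TIMERSLACK, 1000000UL, 0, 0, 0) != 0)
                perror("PR_SET_TIMERSLACK");

        /* PR_GET_TIMERSLACK reports the current slack as its return value. */
        printf("timer slack: %d ns\n", prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
        return 0;
}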
@@ -1589,6 +1633,7 @@ extern unsigned long long cpu_clock(int cpu);
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
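thread_group_sched_runtime() is the group-wide counterpart of task_sched_runtime(): scheduled nanoseconds summed over every thread in the group. A conceptual, kernel-context sketch only; the real implementation in kernel/sched.c reads the per-CPU totals accumulated via thread_group_cputime() under the runqueue lock rather than looping over threads:

/* Conceptual, not the kernel's code: sum per-thread runtime by
 * walking the thread group with next_thread(). */
static unsigned long long group_runtime_sketch(struct task_struct *p)
{
        struct task_struct *t = p;
        unsigned long long ns = 0;

        do {
                ns += task_sched_runtime(t);
                t = next_thread(t);
        } while (t != p);

        return ns;
}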
@@ -2087,6 +2132,30 @@ static inline int spin_needbreak(spinlock_t *lock)
 }
 
 /*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+        sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+        if (curr->signal->cputime.totals)
+                return 0;
+        return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+        free_percpu(sig->cputime.totals);
+}
+
+/*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
  * This is required every time the blocked sigset_t changes.
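Worth noting about the helpers in the previous hunk: the group totals are allocated lazily. A freshly forked process starts with cputime.totals == NULL, and only the first CLONE_THREAD clone pays for the per-CPU allocation via thread_group_cputime_clone_thread(), so single-threaded processes never do. A hedged, kernel-context sketch of the intended fork-path call order (the sketch_* names are illustrative, not quoted from kernel/fork.c):

/* Illustrative only; assumes the kernel context these helpers live in. */
static int sketch_copy_signal(struct signal_struct *sig)
{
        thread_group_cputime_init(sig);  /* totals = NULL, no allocation */
        return 0;
}

static int sketch_clone_thread(struct task_struct *curr)
{
        /* The first thread clone allocates the per-CPU totals; later
         * clones see totals != NULL and return immediately. */
        return thread_group_cputime_clone_thread(curr);
}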