path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  |  97
1 file changed, 82 insertions(+), 15 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c226c7b82946..5c38db536e07 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -403,12 +403,21 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_MAPPED_PRIVATE	4
 #define MMF_DUMP_MAPPED_SHARED	5
 #define MMF_DUMP_ELF_HEADERS	6
+#define MMF_DUMP_HUGETLB_PRIVATE	7
+#define MMF_DUMP_HUGETLB_SHARED	8
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	5
+#define MMF_DUMP_FILTER_BITS	7
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
-	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
+	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
+	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF	0
+#endif
 
 struct sighand_struct {
 	atomic_t		count;
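
With CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS assumed enabled, the new default mask can be worked out directly from the bit definitions in this hunk. The standalone userspace sketch below (not kernel code) mirrors those macros; MMF_DUMPABLE_BITS and the MMF_DUMP_ANON_* bit numbers are not visible in the hunk and are assumed to keep their usual values of 2, 2 and 3 from the surrounding header.

#include <stdio.h>

/* Assumed values for constants defined outside this hunk. */
#define MMF_DUMPABLE_BITS		2
#define MMF_DUMP_ANON_PRIVATE		2
#define MMF_DUMP_ANON_SHARED		3

/* Values taken from the hunk above. */
#define MMF_DUMP_ELF_HEADERS		6
#define MMF_DUMP_HUGETLB_PRIVATE	7
#define MMF_DUMP_FILTER_SHIFT		MMF_DUMPABLE_BITS
#define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

int main(void)
{
	/* Prints 0xcc: bits 2, 3, 6 and 7 set in mm->flags. */
	printf("default mask in mm->flags: %#x\n", MMF_DUMP_FILTER_DEFAULT);
	/* Prints 0x33: the shifted-down view that a filter interface such as
	 * /proc/<pid>/coredump_filter would show for this default. */
	printf("shifted by FILTER_SHIFT:   %#x\n",
	       MMF_DUMP_FILTER_DEFAULT >> MMF_DUMP_FILTER_SHIFT);
	return 0;
}
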
@@ -425,6 +434,39 @@ struct pacct_struct {
 	unsigned long		ac_minflt, ac_majflt;
 };
 
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:		time spent in user mode, in &cputime_t units
+ * @stime:		time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp	stime
+#define virt_exp	utime
+#define sched_exp	sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:		thread group interval timers; substructure for
+ *			uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+	struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
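
The prof_exp/virt_exp/sched_exp aliases above exist so that the same struct task_cputime layout can double as an earliest-expiration cache, one field per CPU clock. A minimal standalone illustration follows, with cputime_t stood in by unsigned long since the real typedef is architecture-specific and not part of this hunk.

#include <stdio.h>

typedef unsigned long cputime_t;	/* stand-in for the kernel typedef */

struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

int main(void)
{
	struct task_cputime expires = { 0, 0, 0 };

	expires.prof_exp  = 100;	/* profiling expiry, stored in .stime */
	expires.virt_exp  = 200;	/* virtual expiry, stored in .utime */
	expires.sched_exp = 300;	/* scheduler expiry (ns), in .sum_exec_runtime */

	printf("stime=%lu utime=%lu sum_exec_runtime=%llu\n",
	       expires.stime, expires.utime, expires.sum_exec_runtime);
	return 0;
}
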
@@ -470,6 +512,17 @@ struct signal_struct {
 	cputime_t it_prof_expires, it_virt_expires;
 	cputime_t it_prof_incr, it_virt_incr;
 
+	/*
+	 * Thread group totals for process CPU clocks.
+	 * See thread_group_cputime(), et al, for details.
+	 */
+	struct thread_group_cputime cputime;
+
+	/* Earliest-expiration cache. */
+	struct task_cputime cputime_expires;
+
+	struct list_head cpu_timers[3];
+
 	/* job control IDs */
 
 	/*
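
The cputime_expires member added here is the per-group "earliest-expiration cache" named in the comment: a hot path only has to compare current counts against three cached minima instead of walking the cpu_timers lists. The userspace model below sketches such a check; the helper name and the exact comparisons (profiling time treated as utime + stime) are illustrative assumptions, not code taken from this diff.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cputime_t;

struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

/* Return true if any cached expiration is armed (non-zero) and reached. */
static bool cputime_cache_expired(const struct task_cputime *sample,
				  const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return true;		/* virtual timer due */
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return true;		/* profiling timer due */
	if (expires->sum_exec_runtime &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return true;		/* scheduler-clock timer due */
	return false;
}

int main(void)
{
	struct task_cputime sample  = { .utime = 50, .stime = 60,
					.sum_exec_runtime = 0 };
	struct task_cputime expires = { .utime = 0,  .stime = 100,
					.sum_exec_runtime = 0 };

	/* 50 + 60 >= 100, so the cached profiling expiry has been reached. */
	printf("expired: %d\n", cputime_cache_expired(&sample, &expires));
	return 0;
}
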
@@ -500,7 +553,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t utime, stime, cutime, cstime;
+	cputime_t cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -509,14 +562,6 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
-	 * Cumulative ns of scheduled CPU time for dead threads in the
-	 * group, not including a zombie group leader.  (This only differs
-	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
-	 * other than jiffies.)
-	 */
-	unsigned long long sum_sched_runtime;
-
-	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -527,8 +572,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	struct list_head cpu_timers[3];
-
 	/* keep the process-shared keyrings here so that they do the right
 	 * thing in threads created with CLONE_THREAD */
 #ifdef CONFIG_KEYS
@@ -1137,8 +1180,7 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
-	cputime_t it_prof_expires, it_virt_expires;
-	unsigned long long it_sched_expires;
+	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
 /* process credentials */
@@ -1588,6 +1630,7 @@ extern unsigned long long cpu_clock(int cpu);
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -2085,6 +2128,30 @@ static inline int spin_needbreak(spinlock_t *lock)
 }
 
 /*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+	sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+	if (curr->signal->cputime.totals)
+		return 0;
+	return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+	free_percpu(sig->cputime.totals);
+}
+
+/*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
  * This is required every time the blocked sigset_t changes.
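
Since thread_group_cputime_free() hands sig->cputime.totals to free_percpu(), the group totals are evidently allocated per CPU, and a reader such as thread_group_cputime() has to fold every CPU's slot into one struct task_cputime. The standalone model below sketches that summation with a plain array standing in for alloc_percpu()/per_cpu_ptr(); it illustrates the idea and is not the kernel implementation.

#include <stdio.h>

typedef unsigned long cputime_t;

struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

#define NR_CPUS_MODEL	4	/* stand-in for the per-CPU allocation */

/* Fold every CPU's running totals into a single snapshot. */
static void thread_group_cputime_model(const struct task_cputime *percpu,
				       struct task_cputime *times)
{
	int cpu;

	times->utime = 0;
	times->stime = 0;
	times->sum_exec_runtime = 0;
	for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++) {
		times->utime += percpu[cpu].utime;
		times->stime += percpu[cpu].stime;
		times->sum_exec_runtime += percpu[cpu].sum_exec_runtime;
	}
}

int main(void)
{
	struct task_cputime totals[NR_CPUS_MODEL] = {
		{ 10, 5, 1000 }, { 20, 5, 2000 }, { 0, 0, 0 }, { 5, 5, 500 },
	};
	struct task_cputime sum;

	thread_group_cputime_model(totals, &sum);
	printf("utime=%lu stime=%lu sum_exec_runtime=%llu\n",
	       sum.utime, sum.stime, sum.sum_exec_runtime);
	return 0;
}
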