path: root/include/linux/sched.h
author    Ingo Molnar <mingo@elte.hu>    2008-12-31 02:31:57 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-12-31 02:31:57 -0500
commit    a9de18eb761f7c1c860964b2e5addc1a35c7e861
tree      886e75fdfd09690cd262ca69cb7f5d1d42b48602 /include/linux/sched.h
parent    b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40
parent    6a94cb73064c952255336cc57731904174b2c58f
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/include/asm/pda.h
	kernel/fork.c
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	230
1 files changed, 146 insertions, 84 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1a7e8461db5a..bd5ff78798c2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
+struct bts_tracer;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -247,6 +248,7 @@ extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(void);
+extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -258,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
-extern unsigned long rt_needs_cpu(int cpu);
-
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
  */
@@ -287,7 +287,6 @@ extern void trap_init(void);
 extern void account_process_tick(struct task_struct *task, int user);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
-extern void hrtick_resched(void);
 
 extern void sched_show_task(struct task_struct *p);
 
@@ -403,12 +402,21 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_MAPPED_PRIVATE	4
 #define MMF_DUMP_MAPPED_SHARED	5
 #define MMF_DUMP_ELF_HEADERS	6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED  8
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	5
+#define MMF_DUMP_FILTER_BITS	7
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
-	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))
+	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
+	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF	0
+#endif
 
 struct sighand_struct {
 	atomic_t		count;
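As a side note on the hunk above: the new hugetlb filter bits simply widen the existing coredump-filter bit field. A quick worked expansion, not part of the patch, assuming MMF_DUMPABLE_BITS is 2 and the MMF_DUMP_* bit positions defined earlier in this header:

/*
 * Illustrative expansion only (assumes MMF_DUMPABLE_BITS == 2,
 * MMF_DUMP_ANON_PRIVATE == 2, MMF_DUMP_ANON_SHARED == 3,
 * MMF_DUMP_ELF_HEADERS == 6, MMF_DUMP_HUGETLB_PRIVATE == 7):
 *
 *   MMF_DUMP_FILTER_MASK    = ((1 << 7) - 1) << 2 = 0x1fc      (bits 2..8)
 *   MMF_DUMP_FILTER_DEFAULT = (1 << 2) | (1 << 3) | (1 << 7)
 *                             | MMF_DUMP_MASK_DEFAULT_ELF
 *                           = 0xcc with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS,
 *                             0x8c without it
 */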
@@ -425,6 +433,39 @@ struct pacct_struct {
 	unsigned long		ac_minflt, ac_majflt;
 };
 
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:		time spent in user mode, in &cputime_t units
+ * @stime:		time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp	stime
+#define virt_exp	utime
+#define sched_exp	sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:		thread group interval timers; substructure for
+ *			uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+	struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
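To make the intent of the new structure concrete: callers are expected to update or sum the three counters together rather than individually. A minimal sketch of such an accumulation step (illustrative only; task_cputime_add is a hypothetical name, while t->utime, t->stime and t->se.sum_exec_runtime are the per-task fields this kernel already carries):

#include <linux/sched.h>

/* Hypothetical helper: fold one task's counters into a task_cputime total. */
static inline void task_cputime_add(struct task_cputime *sum,
				    const struct task_struct *t)
{
	sum->utime = cputime_add(sum->utime, t->utime);
	sum->stime = cputime_add(sum->stime, t->stime);
	sum->sum_exec_runtime += t->se.sum_exec_runtime;
}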
@@ -470,6 +511,17 @@ struct signal_struct {
 	cputime_t it_prof_expires, it_virt_expires;
 	cputime_t it_prof_incr, it_virt_incr;
 
+	/*
+	 * Thread group totals for process CPU clocks.
+	 * See thread_group_cputime(), et al, for details.
+	 */
+	struct thread_group_cputime cputime;
+
+	/* Earliest-expiration cache. */
+	struct task_cputime cputime_expires;
+
+	struct list_head cpu_timers[3];
+
 	/* job control IDs */
 
 	/*
@@ -500,7 +552,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t utime, stime, cutime, cstime;
+	cputime_t cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -509,14 +561,6 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
-	 * Cumulative ns of scheduled CPU time for dead threads in the
-	 * group, not including a zombie group leader.  (This only differs
-	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
-	 * other than jiffies.)
-	 */
-	unsigned long long sum_sched_runtime;
-
-	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -527,14 +571,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	struct list_head cpu_timers[3];
-
-	/* keep the process-shared keyrings here so that they do the right
-	 * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
-	struct key *session_keyring;	/* keyring inherited over fork */
-	struct key *process_keyring;	/* keyring private to this process */
-#endif
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	struct pacct_struct pacct;	/* per-process accounting information */
 #endif
@@ -587,6 +623,10 @@ struct user_struct {
 	atomic_t inotify_watches;	/* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
+#ifdef CONFIG_EPOLL
+	atomic_t epoll_devs;	/* The number of epoll descriptors currently open */
+	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
+#endif
 #ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
@@ -601,6 +641,7 @@ struct user_struct {
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	uid_t uid;
+	struct user_namespace *user_ns;
 
 #ifdef CONFIG_USER_SCHED
 	struct task_group *tg;
@@ -618,6 +659,7 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
+
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -625,8 +667,7 @@ struct reclaim_state;
 struct sched_info {
 	/* cumulative counters */
 	unsigned long pcount;	      /* # of times run on this cpu */
-	unsigned long long cpu_time,	/* time spent on the cpu */
-			   run_delay;	/* time spent waiting on a runqueue */
+	unsigned long long run_delay;	/* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -638,10 +679,6 @@ struct sched_info {
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
-#ifdef CONFIG_SCHEDSTATS
-extern const struct file_operations proc_schedstat_operations;
-#endif /* CONFIG_SCHEDSTATS */
-
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
 	spinlock_t	lock;
@@ -845,38 +882,7 @@ partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 #endif	/* !CONFIG_SMP */
 
 struct io_context;			/* See blkdev.h */
-#define NGROUPS_SMALL		32
-#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
-	int ngroups;
-	atomic_t usage;
-	gid_t small_block[NGROUPS_SMALL];
-	int nblocks;
-	gid_t *blocks[0];
-};
-
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current.  The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task.  Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
-	atomic_inc(&(group_info)->usage); \
-} while (0)
 
-#define put_group_info(group_info) do { \
-	if (atomic_dec_and_test(&(group_info)->usage)) \
-		groups_free(group_info); \
-} while (0)
-
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
-	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
@@ -898,7 +904,6 @@ struct sched_class {
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
-	int (*select_task_rq)(struct task_struct *p, int sync);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
@@ -906,6 +911,8 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
+	int (*select_task_rq)(struct task_struct *p, int sync);
+
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,
 			struct sched_domain *sd, enum cpu_idle_type idle,
@@ -917,16 +924,17 @@ struct sched_class {
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
-#endif
 
-	void (*set_curr_task) (struct rq *rq);
-	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
+#endif
+
+	void (*set_curr_task) (struct rq *rq);
+	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
+	void (*task_new) (struct rq *rq, struct task_struct *p);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
@@ -1119,6 +1127,19 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+	/*
+	 * This is the tracer handle for the ptrace BTS extension.
+	 * This field actually belongs to the ptracer task.
+	 */
+	struct bts_tracer *bts;
+	/*
+	 * The buffer to hold the BTS data.
+	 */
+	void *bts_buffer;
+	size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1136,22 +1157,16 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
-	cputime_t it_prof_expires, it_virt_expires;
-	unsigned long long it_sched_expires;
+	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
 /* process credentials */
-	uid_t uid,euid,suid,fsuid;
-	gid_t gid,egid,sgid,fsgid;
-	struct group_info *group_info;
-	kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	struct user_struct *user;
-	unsigned securebits;
-#ifdef CONFIG_KEYS
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
-	struct key *request_key_auth;	/* assumed request_key authority */
-	struct key *thread_keyring;	/* keyring private to this thread */
-#endif
+	const struct cred *real_cred;	/* objective and real subjective task
+					 * credentials (COW) */
+	const struct cred *cred;	/* effective (overridable) subjective task
+					 * credentials (COW) */
+	struct mutex cred_exec_mutex;	/* execve vs ptrace cred calculation mutex */
+
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				     it with task_lock())
@@ -1188,9 +1203,6 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
-	void *security;
-#endif
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
@@ -1303,6 +1315,31 @@ struct task_struct {
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
 #endif
+	/*
+	 * time slack values; these are used to round up poll() and
+	 * select() etc timeout values. These are in nanoseconds.
+	 */
+	unsigned long timer_slack_ns;
+	unsigned long default_timer_slack_ns;
+
+	struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current stored adress in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack	*ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+	/* Pause for the tracing */
+	atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+	/* state flags for use by tracers */
+	unsigned long trace;
+#endif
 };
 
 /*
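The timer_slack_ns field added above is what the PR_SET_TIMERSLACK/PR_GET_TIMERSLACK prctl operations that accompanied this change read and write: a task can relax or tighten how far its poll()/select() timeouts may be rounded up. A small userspace usage sketch (illustrative only; if your libc headers predate these constants, they live in linux/prctl.h as 29 and 30):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	long slack;

	/* Allow this task's timers to be rounded up by at most 200 us. */
	if (prctl(PR_SET_TIMERSLACK, 200000UL, 0, 0, 0) != 0)
		perror("PR_SET_TIMERSLACK");

	/* Read the current slack back, in nanoseconds. */
	slack = prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0);
	printf("timer slack: %ld ns\n", slack);
	return 0;
}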
@@ -1587,6 +1624,7 @@ extern unsigned long long cpu_clock(int cpu);
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1621,6 +1659,7 @@ extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1720,7 +1759,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
 extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
@@ -1739,9 +1777,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
 extern void sched_fork(struct task_struct *p, int clone_flags);
 extern void sched_dead(struct task_struct *p);
 
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
@@ -1873,6 +1908,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
+extern bool is_single_threaded(struct task_struct *);
+
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
  * 'break' will not work as expected - use goto instead.
@@ -2097,6 +2134,30 @@ static inline int spin_needbreak(spinlock_t *lock)
 }
 
 /*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+	sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+	if (curr->signal->cputime.totals)
+		return 0;
+	return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+	free_percpu(sig->cputime.totals);
+}
+
+/*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
  * This is required every time the blocked sigset_t changes.
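For orientation, the three inline helpers added above cover the lifetime of the per-group totals: the pointer starts out NULL, is allocated lazily when a second thread joins the group, and is freed along with the signal struct. A sketch of the intended allocation discipline on the fork path (illustrative only; the function name is hypothetical, the real call sites live in kernel/fork.c):

/* Hypothetical caller, sketching how the helpers are meant to be used. */
static int sketch_copy_signal_cputime(unsigned long clone_flags,
				      struct signal_struct *sig)
{
	if (clone_flags & CLONE_THREAD)
		/* Joining an existing group: make sure the shared totals exist. */
		return thread_group_cputime_clone_thread(current);

	/* New thread group: start with no shared totals allocated. */
	thread_group_cputime_init(sig);
	return 0;
}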
@@ -2158,6 +2219,7 @@ extern void normalize_rt_tasks(void);
 extern struct task_group init_task_group;
 #ifdef CONFIG_USER_SCHED
 extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
 #endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);