Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	106

1 file changed, 47 insertions, 59 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 644ffbda17ca..8395e715809d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
+struct bts_tracer;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -259,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
-extern unsigned long rt_needs_cpu(int cpu);
-
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
  */
@@ -572,12 +571,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	/* keep the process-shared keyrings here so that they do the right
-	 * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
-	struct key *session_keyring;	/* keyring inherited over fork */
-	struct key *process_keyring;	/* keyring private to this process */
-#endif
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	struct pacct_struct pacct;	/* per-process accounting information */
 #endif
@@ -630,6 +623,10 @@ struct user_struct {
 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
+#ifdef CONFIG_EPOLL
+	atomic_t epoll_devs;	/* The number of epoll descriptors currently open */
+	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
+#endif
 #ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
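These per-user counters back new epoll resource limits: each epoll instance and each watch is charged against the owning user_struct, alongside the existing inotify and mqueue accounting. A minimal sketch of the charging pattern this enables; the limit is passed in here because the actual sysctl name lives in fs/eventpoll.c and is an assumption on this page:

/* Sketch: charge one epoll watch to a user, enforcing an upper bound. */
static int ep_charge_watch(struct user_struct *user, int max_user_watches)
{
	if (atomic_inc_return(&user->epoll_watches) > max_user_watches) {
		atomic_dec(&user->epoll_watches);	/* undo the charge */
		return -ENOSPC;
	}
	return 0;
}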
@@ -644,6 +641,7 @@ struct user_struct {
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	uid_t uid;
+	struct user_namespace *user_ns;
 
 #ifdef CONFIG_USER_SCHED
 	struct task_group *tg;
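Recording the owning user_namespace in user_struct makes uid lookups namespace-scoped: the same numeric uid in two namespaces now resolves to two distinct user_structs. A trivial sketch of the identity rule this field establishes, using only the two members shown above:

/* Sketch: two user_structs describe the same user only if both the
 * numeric uid and the owning namespace match. */
static bool same_user(struct user_struct *a, struct user_struct *b)
{
	return a->uid == b->uid && a->user_ns == b->user_ns;
}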
@@ -661,6 +659,7 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
+
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -668,8 +667,7 @@ struct reclaim_state;
 struct sched_info {
 	/* cumulative counters */
 	unsigned long pcount;	      /* # of times run on this cpu */
-	unsigned long long cpu_time,  /* time spent on the cpu */
-			   run_delay; /* time spent waiting on a runqueue */
+	unsigned long long run_delay; /* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -884,38 +882,7 @@ partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 #endif	/* !CONFIG_SMP */
 
 struct io_context;			/* See blkdev.h */
-#define NGROUPS_SMALL		32
-#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
-	int ngroups;
-	atomic_t usage;
-	gid_t small_block[NGROUPS_SMALL];
-	int nblocks;
-	gid_t *blocks[0];
-};
 
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current.  The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task.  Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
-	atomic_inc(&(group_info)->usage); \
-} while (0)
-
-#define put_group_info(group_info) do { \
-	if (atomic_dec_and_test(&(group_info)->usage)) \
-		groups_free(group_info); \
-} while (0)
-
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
-	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
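The supplementary-groups machinery does not disappear here: with the credentials rework, struct group_info and its helpers (groups_alloc(), set_current_groups(), groups_search(), GROUP_AT(), the get/put refcounting) move out of sched.h into the new cred infrastructure, where group_info hangs off struct cred rather than task_struct. The old task_lock() comment becomes moot, since a task's groups are now reached through a copy-on-write cred. A sketch of a membership check under that model, assuming these helpers keep their signatures in cred.h:

/* Sketch: search the current task's supplementary groups via its
 * credentials instead of the removed task->group_info pointer. */
static int in_supplementary_group(gid_t gid)
{
	const struct cred *cred = current_cred();

	return groups_search(cred->group_info, gid);
}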
@@ -1161,6 +1128,19 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+	/*
+	 * This is the tracer handle for the ptrace BTS extension.
+	 * This field actually belongs to the ptracer task.
+	 */
+	struct bts_tracer *bts;
+	/*
+	 * The buffer to hold the BTS data.
+	 */
+	void *bts_buffer;
+	size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1182,17 +1162,12 @@ struct task_struct {
 	struct list_head cpu_timers[3];
 
 /* process credentials */
-	uid_t uid,euid,suid,fsuid;
-	gid_t gid,egid,sgid,fsgid;
-	struct group_info *group_info;
-	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	struct user_struct *user;
-	unsigned securebits;
-#ifdef CONFIG_KEYS
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
-	struct key *request_key_auth;	/* assumed request_key authority */
-	struct key *thread_keyring;	/* keyring private to this thread */
-#endif
+	const struct cred *real_cred;	/* objective and real subjective task
+					 * credentials (COW) */
+	const struct cred *cred;	/* effective (overridable) subjective task
+					 * credentials (COW) */
+	struct mutex cred_exec_mutex;	/* execve vs ptrace cred calculation mutex */
+
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				       it with task_lock())
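This hunk is the core of the credentials rework: the flat uid/gid/capability/keyring fields collapse into two copy-on-write struct cred pointers, real_cred (who the task objectively is) and cred (the effective identity used for access checks), with cred_exec_mutex serialising execve against ptrace credential calculations. Reading another task's credentials now requires RCU, because the pointers can be replaced at any time. A minimal sketch of the read pattern, assuming the __task_cred() accessor this series introduces in cred.h:

/* Sketch: snapshot another task's real uid under RCU.  Dereferencing
 * task->real_cred without rcu_read_lock() would race with a concurrent
 * commit of new credentials. */
static uid_t task_real_uid(struct task_struct *task)
{
	uid_t uid;

	rcu_read_lock();
	uid = __task_cred(task)->uid;
	rcu_read_unlock();
	return uid;
}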
@@ -1229,9 +1204,6 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
-	void *security;
-#endif
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
@@ -1352,6 +1324,23 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current stored address in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack	*ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+	/* Pause for the tracing */
+	atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+	/* state flags for use by tracers */
+	unsigned long trace;
+#endif
 };
 
 /*
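These fields give the function graph tracer a per-task shadow stack: on function entry the real return address is pushed onto ret_stack and replaced with a trampoline, curr_ret_stack tracks the top slot, and trace_overrun counts entries dropped when the stack is full. A sketch of the push side, modelled on the tracer's own helper in kernel/trace; the depth constant and the ftrace_ret_stack field names are assumptions here:

#define RET_STACK_DEPTH	50	/* assumed bound, similar to ftrace's */

/* Sketch: record a return address in the task's shadow stack. */
static int push_return_trace(struct task_struct *t, unsigned long ret,
			     unsigned long func)
{
	if (!t->ret_stack)
		return -EBUSY;	/* tracing not set up for this task */

	if (t->curr_ret_stack == RET_STACK_DEPTH - 1) {
		atomic_inc(&t->trace_overrun);	/* depth overrun, drop it */
		return -EBUSY;
	}

	t->curr_ret_stack++;
	t->ret_stack[t->curr_ret_stack].ret = ret;
	t->ret_stack[t->curr_ret_stack].func = func;
	return 0;
}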
@@ -1771,7 +1760,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
 extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
@@ -1790,9 +1778,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
 extern void sched_fork(struct task_struct *p, int clone_flags);
 extern void sched_dead(struct task_struct *p);
 
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
@@ -1924,6 +1909,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
+extern bool is_single_threaded(struct task_struct *);
+
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
  * 'break' will not work as expected - use goto instead.
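is_single_threaded() is exported for callers that need to know no other thread shares the task's state before taking a shortcut that would otherwise have to be propagated across a thread group. A trivial usage sketch; the caller name is illustrative only:

/* Sketch: take a cheaper path when current is provably alone. */
static bool can_skip_group_sync(void)
{
	return is_single_threaded(current);
}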
@@ -2220,6 +2207,7 @@ extern void normalize_rt_tasks(void);
 extern struct task_group init_task_group;
 #ifdef CONFIG_USER_SCHED
 extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
 #endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);