Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	102
1 file changed, 43 insertions(+), 59 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 55e30d114477..8395e715809d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
+struct bts_tracer;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -259,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
 }
 #endif
 
-extern unsigned long rt_needs_cpu(int cpu);
-
 /*
  * Only dump TASK_* tasks. (0 for all tasks)
  */
@@ -572,12 +571,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	/* keep the process-shared keyrings here so that they do the right
-	 * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
-	struct key *session_keyring;	/* keyring inherited over fork */
-	struct key *process_keyring;	/* keyring private to this process */
-#endif
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	struct pacct_struct pacct;	/* per-process accounting information */
 #endif
@@ -648,6 +641,7 @@ struct user_struct {
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	uid_t uid;
+	struct user_namespace *user_ns;
 
 #ifdef CONFIG_USER_SCHED
 	struct task_group *tg;
@@ -665,6 +659,7 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
+
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -672,8 +667,7 @@ struct reclaim_state;
 struct sched_info {
 	/* cumulative counters */
 	unsigned long pcount;	      /* # of times run on this cpu */
-	unsigned long long cpu_time,  /* time spent on the cpu */
-			   run_delay; /* time spent waiting on a runqueue */
+	unsigned long long run_delay; /* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -888,38 +882,7 @@ partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 #endif	/* !CONFIG_SMP */
 
 struct io_context;			/* See blkdev.h */
-#define NGROUPS_SMALL		32
-#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
-	int ngroups;
-	atomic_t usage;
-	gid_t small_block[NGROUPS_SMALL];
-	int nblocks;
-	gid_t *blocks[0];
-};
-
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current.  The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task.  Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
-	atomic_inc(&(group_info)->usage); \
-} while (0)
 
-#define put_group_info(group_info) do { \
-	if (atomic_dec_and_test(&(group_info)->usage)) \
-		groups_free(group_info); \
-} while (0)
-
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
-	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
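Note on the block removed above: get_group_info()/put_group_info() were a plain reference-count pair (atomic_inc, then atomic_dec_and_test plus groups_free() on the last put), and GROUP_AT() indexed a two-level array of page-sized blocks of gid_t. The API survives this change; it moves into the new credentials code rather than being deleted outright. Below is a minimal standalone sketch of the GROUP_AT() indexing arithmetic; the demo_* names and the 4 KiB page size are illustrative assumptions, not kernel definitions.

	#include <stdio.h>

	/* Illustrative stand-ins; in the kernel these came from the removed
	 * definitions: NGROUPS_PER_BLOCK == PAGE_SIZE / sizeof(gid_t). */
	#define DEMO_PAGE_SIZE 4096u            /* assumption: 4 KiB pages */
	typedef unsigned int demo_gid_t;        /* stand-in for gid_t */
	#define DEMO_NGROUPS_PER_BLOCK (DEMO_PAGE_SIZE / sizeof(demo_gid_t))

	/* The removed GROUP_AT(gi, i) expanded to
	 * (gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]:
	 * group i lives in block i / NGROUPS_PER_BLOCK, at slot
	 * i % NGROUPS_PER_BLOCK within that page-sized block. */
	static demo_gid_t demo_group_at(demo_gid_t **blocks, unsigned int i)
	{
		return blocks[i / DEMO_NGROUPS_PER_BLOCK][i % DEMO_NGROUPS_PER_BLOCK];
	}

	int main(void)
	{
		demo_gid_t block0[DEMO_NGROUPS_PER_BLOCK] = { 0 };
		demo_gid_t *blocks[] = { block0 };

		block0[5] = 1000;
		printf("group 5 = %u\n", demo_group_at(blocks, 5)); /* prints 1000 */
		return 0;
	}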
@@ -1165,6 +1128,19 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
+#ifdef CONFIG_X86_PTRACE_BTS
+	/*
+	 * This is the tracer handle for the ptrace BTS extension.
+	 * This field actually belongs to the ptracer task.
+	 */
+	struct bts_tracer *bts;
+	/*
+	 * The buffer to hold the BTS data.
+	 */
+	void *bts_buffer;
+	size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -1186,17 +1162,12 @@ struct task_struct {
 	struct list_head cpu_timers[3];
 
 /* process credentials */
-	uid_t uid,euid,suid,fsuid;
-	gid_t gid,egid,sgid,fsgid;
-	struct group_info *group_info;
-	kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	struct user_struct *user;
-	unsigned securebits;
-#ifdef CONFIG_KEYS
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
-	struct key *request_key_auth;	/* assumed request_key authority */
-	struct key *thread_keyring;	/* keyring private to this thread */
-#endif
+	const struct cred *real_cred;	/* objective and real subjective task
+					 * credentials (COW) */
+	const struct cred *cred;	/* effective (overridable) subjective task
+					 * credentials (COW) */
+	struct mutex cred_exec_mutex;	/* execve vs ptrace cred calculation mutex */
+
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				     it with task_lock())
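The replacement above is the copy-on-write credentials model: the per-field uid/gid/capability/keyring state collapses into two const pointers, readers get immutable access, and updates build a fresh struct cred and swap the pointer. A minimal sketch of the usage pattern this implies, assuming the prepare_creds()/commit_creds()/current_uid() helpers that <linux/cred.h> gains in the same patch series; this is not part of the diff itself.

	/* Sketch under the above assumptions; not kernel code from this diff. */
	#include <linux/cred.h>
	#include <linux/errno.h>
	#include <linux/sched.h>

	static uid_t demo_read_uid(void)
	{
		/* Reading current's own credentials needs no lock: only
		 * current itself may replace current->cred. */
		return current_uid();
	}

	static int demo_write_fsuid(uid_t fsuid)
	{
		struct cred *new;

		/* Never modify task->cred in place: copy, edit, commit. */
		new = prepare_creds();
		if (!new)
			return -ENOMEM;
		new->fsuid = fsuid;
		return commit_creds(new);	/* publishes the new cred (RCU) */
	}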
@@ -1233,9 +1204,6 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
-	void *security;
-#endif
 	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
@@ -1356,6 +1324,23 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current stored address in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack *ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+	/* Pause for the tracing */
+	atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+	/* state flags for use by tracers */
+	unsigned long trace;
+#endif
 };
 
 /*
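The new CONFIG_FUNCTION_GRAPH_TRACER fields implement a per-task shadow stack: on function entry the tracer pushes the real return address onto ret_stack and advances curr_ret_stack; on return it pops the entry back, and trace_overrun counts calls that were dropped because the stack was full. A simplified sketch of that push/pop follows; the demo_* helper names and the fixed depth are illustrative assumptions, and it assumes struct ftrace_ret_stack stores the saved address in a field named ret, as the kernel's ftrace code does.

	/* Simplified sketch of the shadow-stack bookkeeping; the real
	 * helpers live in the kernel's ftrace implementation. */
	#include <linux/errno.h>
	#include <linux/ftrace.h>
	#include <linux/sched.h>

	#define DEMO_RET_STACK_DEPTH	50	/* assumed maximum nesting */

	static int demo_push_return(struct task_struct *t, unsigned long ret)
	{
		/* Stack full: drop this entry and account it. */
		if (t->curr_ret_stack >= DEMO_RET_STACK_DEPTH - 1) {
			atomic_inc(&t->trace_overrun);
			return -EBUSY;
		}
		t->ret_stack[++t->curr_ret_stack].ret = ret;
		return 0;
	}

	static unsigned long demo_pop_return(struct task_struct *t)
	{
		/* Hand back the original return address saved at entry. */
		return t->ret_stack[t->curr_ret_stack--].ret;
	}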
@@ -1775,7 +1760,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
 extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
@@ -1794,9 +1778,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
 extern void sched_fork(struct task_struct *p, int clone_flags);
 extern void sched_dead(struct task_struct *p);
 
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
@@ -1928,6 +1909,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
+extern bool is_single_threaded(struct task_struct *);
+
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
  * 'break' will not work as expected - use goto instead.
@@ -2224,6 +2207,7 @@ extern void normalize_rt_tasks(void);
 extern struct task_group init_task_group;
 #ifdef CONFIG_USER_SCHED
 extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
 #endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);