Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 69
1 file changed, 12 insertions(+), 57 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e5f928a079e8..158d53d07765 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -571,12 +571,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	/* keep the process-shared keyrings here so that they do the right
-	 * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
-	struct key *session_keyring;	/* keyring inherited over fork */
-	struct key *process_keyring;	/* keyring private to this process */
-#endif
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	struct pacct_struct pacct;	/* per-process accounting information */
 #endif
@@ -647,6 +641,7 @@ struct user_struct {
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	uid_t uid;
+	struct user_namespace *user_ns;
 
 #ifdef CONFIG_USER_SCHED
 	struct task_group *tg;
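
The per-user accounting structure now carries a back-pointer to the user namespace that owns it, so uid bookkeeping no longer assumes a single global namespace. A minimal sketch of reaching that namespace through the uid-lookup API declared in the next hunk's context; the wrapper function and printk are illustrative, and the assumption that find_user() returns a counted reference follows the existing get_uid()/free_uid() convention:

/*
 * Sketch only: look up the accounting record for a uid and report the
 * namespace it belongs to.  find_user() and free_uid() are declared in
 * this header; everything else here is illustrative.
 */
#include <linux/sched.h>
#include <linux/kernel.h>

static void report_user_ns(uid_t uid)
{
	struct user_struct *up = find_user(uid);	/* takes a reference */

	if (!up)
		return;
	printk(KERN_INFO "uid %u owned by user_ns %p\n", uid, up->user_ns);
	free_uid(up);					/* drop the reference */
}
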
@@ -664,6 +659,7 @@ extern struct user_struct *find_user(uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
+
 struct backing_dev_info;
 struct reclaim_state;
 
@@ -671,8 +667,7 @@ struct reclaim_state;
 struct sched_info {
 	/* cumulative counters */
 	unsigned long pcount;	      /* # of times run on this cpu */
-	unsigned long long cpu_time,  /* time spent on the cpu */
-			   run_delay; /* time spent waiting on a runqueue */
+	unsigned long long run_delay; /* time spent waiting on a runqueue */
 
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
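
The cpu_time counter is dropped from sched_info, leaving it to track only queue-wait time; on-CPU time is accounted elsewhere in the scheduler. A hedged sketch of reading both numbers for a task, assuming (not shown in this hunk) that task_struct embeds this sched_info and that run time is available from the scheduling entity's sum_exec_runtime, as in kernels of this era:

/*
 * Sketch: snapshot wait vs. run time for a task under CONFIG_SCHEDSTATS.
 * run_delay comes from the trimmed sched_info; se.sum_exec_runtime is an
 * assumption about the surrounding task_struct, not part of this hunk.
 */
#include <linux/sched.h>

static void snapshot_task_times(struct task_struct *t,
				unsigned long long *wait_ns,
				unsigned long long *run_ns)
{
	*wait_ns = t->sched_info.run_delay;	/* time spent waiting on a runqueue */
	*run_ns  = t->se.sum_exec_runtime;	/* time spent on the cpu */
}
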
@@ -941,38 +936,7 @@ partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 #endif	/* !CONFIG_SMP */
 
 struct io_context;			/* See blkdev.h */
-#define NGROUPS_SMALL		32
-#define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
-	int ngroups;
-	atomic_t usage;
-	gid_t small_block[NGROUPS_SMALL];
-	int nblocks;
-	gid_t *blocks[0];
-};
 
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current.  The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task.  Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
-	atomic_inc(&(group_info)->usage); \
-} while (0)
-
-#define put_group_info(group_info) do { \
-	if (atomic_dec_and_test(&(group_info)->usage)) \
-		groups_free(group_info); \
-} while (0)
-
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
-	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
 extern void prefetch_stack(struct task_struct *t);
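
The supplementary-groups machinery (struct group_info, the get/put helpers, groups_alloc() and friends, GROUP_AT()) is removed from sched.h rather than deleted outright; in this series it is relocated to the credentials header, and the calling convention is unchanged. A sketch of the usual pattern, assuming the same declarations are now provided by linux/cred.h:

/*
 * Sketch: install a one-entry supplementary group list for the current
 * task.  The helpers keep the signatures removed above; their new home
 * in <linux/cred.h> is an assumption about this patch series.
 */
#include <linux/cred.h>
#include <linux/errno.h>

static int give_current_group(gid_t gid)
{
	struct group_info *gi = groups_alloc(1);
	int ret;

	if (!gi)
		return -ENOMEM;
	GROUP_AT(gi, 0) = gid;		/* fill the single slot */
	ret = set_current_groups(gi);	/* copies the list into current's creds */
	put_group_info(gi);		/* drop our allocation reference */
	return ret;
}
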
@@ -1228,6 +1192,7 @@ struct task_struct {
 	 * The buffer to hold the BTS data.
 	 */
 	void *bts_buffer;
+	size_t bts_size;
 #endif /* CONFIG_X86_PTRACE_BTS */
 
 	/* PID/PID hash table linkage. */
@@ -1251,17 +1216,12 @@ struct task_struct {
 	struct list_head cpu_timers[3];
 
 /* process credentials */
-	uid_t uid,euid,suid,fsuid;
-	gid_t gid,egid,sgid,fsgid;
-	struct group_info *group_info;
-	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	struct user_struct *user;
-	unsigned securebits;
-#ifdef CONFIG_KEYS
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
-	struct key *request_key_auth;	/* assumed request_key authority */
-	struct key *thread_keyring;	/* keyring private to this thread */
-#endif
+	const struct cred *real_cred;	/* objective and real subjective task
+					 * credentials (COW) */
+	const struct cred *cred;	/* effective (overridable) subjective task
+					 * credentials (COW) */
+	struct mutex cred_exec_mutex;	/* execve vs ptrace cred calculation mutex */
+
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				     it with task_lock())
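
This hunk is the heart of the change: a dozen ad-hoc identity fields collapse into two copy-on-write credential pointers, real_cred (the objective identity others act on) and cred (the subjective identity used for the task's own accesses), plus a mutex serialising credential calculation between execve and ptrace. A hedged sketch of how readers are expected to use the pointers; the uid/euid members inside struct cred and the RCU protection of another task's real_cred are assumptions based on the accompanying cred patches, not visible here:

/*
 * Sketch: reading credentials through the new COW pointers.  A task may
 * dereference its own cred directly; another task's credentials are
 * assumed to be RCU-protected, as in the cred series this belongs to.
 */
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/rcupdate.h>

static uid_t my_effective_uid(void)
{
	return current->cred->euid;		/* subjective credentials */
}

static uid_t task_real_uid(struct task_struct *t)
{
	const struct cred *c;
	uid_t uid;

	rcu_read_lock();
	c = rcu_dereference(t->real_cred);	/* objective credentials */
	uid = c->uid;
	rcu_read_unlock();
	return uid;
}
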
@@ -1298,9 +1258,6 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
-	void *security;
-#endif
 	struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
 	uid_t loginuid;
@@ -1857,7 +1814,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
 	return u;
 }
 extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
 extern void release_uids(struct user_namespace *ns);
 
 #include <asm/current.h>
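
switch_uid() disappears because re-pointing a task at a different user_struct is no longer done directly; in this series it happens when new credentials are committed. The refcounting pair visible in this hunk's context still works as before; a minimal sketch (the wrapper function is illustrative):

/*
 * Sketch: hold a user_struct across some work.  get_uid(), shown in the
 * context above, takes a counted reference; free_uid() releases it.
 */
#include <linux/sched.h>

static void hold_user_briefly(struct user_struct *up)
{
	struct user_struct *ref = get_uid(up);	/* take a reference */

	/* ... use ref->uid, ref->user_ns, ... */

	free_uid(ref);				/* and release it */
}
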
@@ -1876,9 +1832,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
 extern void sched_fork(struct task_struct *p, int clone_flags);
 extern void sched_dead(struct task_struct *p);
 
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
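
The group-membership tests leave this header along with the rest of the credential declarations; the checks themselves are unchanged. A hedged sketch of a caller, assuming the same prototypes are now declared in linux/cred.h:

/*
 * Sketch: gate an operation on the calling task's group credentials.
 * in_group_p() keeps the prototype removed above; only its declaration
 * is assumed to have moved to <linux/cred.h>.
 */
#include <linux/cred.h>
#include <linux/errno.h>

static int check_caller_in_group(gid_t gid)
{
	if (!in_group_p(gid))
		return -EPERM;
	return 0;
}
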
@@ -2010,6 +1963,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
+extern bool is_single_threaded(struct task_struct *);
+
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
  * 'break' will not work as expected - use goto instead.
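
is_single_threaded() is newly exported through this header; it reports whether the given process currently has only one thread (the implementation lives elsewhere). A minimal usage sketch; the wrapper and error code are illustrative:

/*
 * Sketch: refuse an operation unless the caller is single-threaded.
 * is_single_threaded() is the helper declared by this hunk; the rest
 * is illustrative.
 */
#include <linux/sched.h>
#include <linux/errno.h>

static int require_single_threaded(void)
{
	if (!is_single_threaded(current))
		return -EBUSY;	/* another thread could race with us */
	return 0;
}
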