diff options
Diffstat (limited to 'include/linux/sched.h')
| -rw-r--r-- | include/linux/sched.h | 34 |
1 file changed, 21 insertions, 13 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ac7d51ad0e1..c204ab0d4df1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -74,6 +74,7 @@ struct sched_param { | |||
| 74 | #include <linux/pid.h> | 74 | #include <linux/pid.h> |
| 75 | #include <linux/percpu.h> | 75 | #include <linux/percpu.h> |
| 76 | #include <linux/topology.h> | 76 | #include <linux/topology.h> |
| 77 | #include <linux/proportions.h> | ||
| 77 | #include <linux/seccomp.h> | 78 | #include <linux/seccomp.h> |
| 78 | #include <linux/rcupdate.h> | 79 | #include <linux/rcupdate.h> |
| 79 | #include <linux/futex.h> | 80 | #include <linux/futex.h> |
| @@ -260,6 +261,7 @@ extern void softlockup_tick(void); | |||
| 260 | extern void spawn_softlockup_task(void); | 261 | extern void spawn_softlockup_task(void); |
| 261 | extern void touch_softlockup_watchdog(void); | 262 | extern void touch_softlockup_watchdog(void); |
| 262 | extern void touch_all_softlockup_watchdogs(void); | 263 | extern void touch_all_softlockup_watchdogs(void); |
| 264 | extern int softlockup_thresh; | ||
| 263 | #else | 265 | #else |
| 264 | static inline void softlockup_tick(void) | 266 | static inline void softlockup_tick(void) |
| 265 | { | 267 | { |
| @@ -357,8 +359,9 @@ extern int get_dumpable(struct mm_struct *mm); | |||
| 357 | #define MMF_DUMP_ANON_SHARED 3 | 359 | #define MMF_DUMP_ANON_SHARED 3 |
| 358 | #define MMF_DUMP_MAPPED_PRIVATE 4 | 360 | #define MMF_DUMP_MAPPED_PRIVATE 4 |
| 359 | #define MMF_DUMP_MAPPED_SHARED 5 | 361 | #define MMF_DUMP_MAPPED_SHARED 5 |
| 362 | #define MMF_DUMP_ELF_HEADERS 6 | ||
| 360 | #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS | 363 | #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS |
| 361 | #define MMF_DUMP_FILTER_BITS 4 | 364 | #define MMF_DUMP_FILTER_BITS 5 |
| 362 | #define MMF_DUMP_FILTER_MASK \ | 365 | #define MMF_DUMP_FILTER_MASK \ |
| 363 | (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) | 366 | (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) |
| 364 | #define MMF_DUMP_FILTER_DEFAULT \ | 367 | #define MMF_DUMP_FILTER_DEFAULT \ |
| @@ -515,8 +518,10 @@ struct user_struct { | |||
| 515 | atomic_t inotify_watches; /* How many inotify watches does this user have? */ | 518 | atomic_t inotify_watches; /* How many inotify watches does this user have? */ |
| 516 | atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ | 519 | atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ |
| 517 | #endif | 520 | #endif |
| 521 | #ifdef CONFIG_POSIX_MQUEUE | ||
| 518 | /* protected by mq_lock */ | 522 | /* protected by mq_lock */ |
| 519 | unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ | 523 | unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ |
| 524 | #endif | ||
| 520 | unsigned long locked_shm; /* How many pages of mlocked shm ? */ | 525 | unsigned long locked_shm; /* How many pages of mlocked shm ? */ |
| 521 | 526 | ||
| 522 | #ifdef CONFIG_KEYS | 527 | #ifdef CONFIG_KEYS |
| @@ -915,6 +920,16 @@ struct task_struct { | |||
| 915 | #endif | 920 | #endif |
| 916 | 921 | ||
| 917 | unsigned short ioprio; | 922 | unsigned short ioprio; |
| 923 | /* | ||
| 924 | * fpu_counter contains the number of consecutive context switches | ||
| 925 | * that the FPU is used. If this is over a threshold, the lazy fpu | ||
| 926 | * saving becomes unlazy to save the trap. This is an unsigned char | ||
| 927 | * so that after 256 times the counter wraps and the behavior turns | ||
| 928 | * lazy again; this to deal with bursty apps that only use FPU for | ||
| 929 | * a short time | ||
| 930 | */ | ||
| 931 | unsigned char fpu_counter; | ||
| 932 | s8 oomkilladj; /* OOM kill score adjustment (bit shift). */ | ||
| 918 | #ifdef CONFIG_BLK_DEV_IO_TRACE | 933 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
| 919 | unsigned int btrace_seq; | 934 | unsigned int btrace_seq; |
| 920 | #endif | 935 | #endif |
| @@ -1000,16 +1015,6 @@ struct task_struct { | |||
| 1000 | struct key *thread_keyring; /* keyring private to this thread */ | 1015 | struct key *thread_keyring; /* keyring private to this thread */ |
| 1001 | unsigned char jit_keyring; /* default keyring to attach requested keys to */ | 1016 | unsigned char jit_keyring; /* default keyring to attach requested keys to */ |
| 1002 | #endif | 1017 | #endif |
| 1003 | /* | ||
| 1004 | * fpu_counter contains the number of consecutive context switches | ||
| 1005 | * that the FPU is used. If this is over a threshold, the lazy fpu | ||
| 1006 | * saving becomes unlazy to save the trap. This is an unsigned char | ||
| 1007 | * so that after 256 times the counter wraps and the behavior turns | ||
| 1008 | * lazy again; this to deal with bursty apps that only use FPU for | ||
| 1009 | * a short time | ||
| 1010 | */ | ||
| 1011 | unsigned char fpu_counter; | ||
| 1012 | int oomkilladj; /* OOM kill score adjustment (bit shift). */ | ||
| 1013 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1018 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
| 1014 | - access with [gs]et_task_comm (which lock | 1019 | - access with [gs]et_task_comm (which lock |
| 1015 | it with task_lock()) | 1020 | it with task_lock()) |
| @@ -1041,8 +1046,9 @@ struct task_struct { | |||
| 1041 | int (*notifier)(void *priv); | 1046 | int (*notifier)(void *priv); |
| 1042 | void *notifier_data; | 1047 | void *notifier_data; |
| 1043 | sigset_t *notifier_mask; | 1048 | sigset_t *notifier_mask; |
| 1044 | 1049 | #ifdef CONFIG_SECURITY | |
| 1045 | void *security; | 1050 | void *security; |
| 1051 | #endif | ||
| 1046 | struct audit_context *audit_context; | 1052 | struct audit_context *audit_context; |
| 1047 | seccomp_t seccomp; | 1053 | seccomp_t seccomp; |
| 1048 | 1054 | ||
| @@ -1131,13 +1137,14 @@ struct task_struct { | |||
| 1131 | int cpuset_mems_generation; | 1137 | int cpuset_mems_generation; |
| 1132 | int cpuset_mem_spread_rotor; | 1138 | int cpuset_mem_spread_rotor; |
| 1133 | #endif | 1139 | #endif |
| 1140 | #ifdef CONFIG_FUTEX | ||
| 1134 | struct robust_list_head __user *robust_list; | 1141 | struct robust_list_head __user *robust_list; |
| 1135 | #ifdef CONFIG_COMPAT | 1142 | #ifdef CONFIG_COMPAT |
| 1136 | struct compat_robust_list_head __user *compat_robust_list; | 1143 | struct compat_robust_list_head __user *compat_robust_list; |
| 1137 | #endif | 1144 | #endif |
| 1138 | struct list_head pi_state_list; | 1145 | struct list_head pi_state_list; |
| 1139 | struct futex_pi_state *pi_state_cache; | 1146 | struct futex_pi_state *pi_state_cache; |
| 1140 | 1147 | #endif | |
| 1141 | atomic_t fs_excl; /* holding fs exclusive resources */ | 1148 | atomic_t fs_excl; /* holding fs exclusive resources */ |
| 1142 | struct rcu_head rcu; | 1149 | struct rcu_head rcu; |
| 1143 | 1150 | ||
| @@ -1151,6 +1158,7 @@ struct task_struct { | |||
| 1151 | #ifdef CONFIG_FAULT_INJECTION | 1158 | #ifdef CONFIG_FAULT_INJECTION |
| 1152 | int make_it_fail; | 1159 | int make_it_fail; |
| 1153 | #endif | 1160 | #endif |
| 1161 | struct prop_local_single dirties; | ||
| 1154 | }; | 1162 | }; |
| 1155 | 1163 | ||
| 1156 | /* | 1164 | /* |
