Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  |  34
 1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 592e3a55f81..3de5aa210fe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -74,6 +74,7 @@ struct sched_param {
 #include <linux/pid.h>
 #include <linux/percpu.h>
 #include <linux/topology.h>
+#include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
@@ -260,6 +261,7 @@ extern void softlockup_tick(void);
 extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
+extern int softlockup_thresh;
 #else
 static inline void softlockup_tick(void)
 {
@@ -357,8 +359,9 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_ANON_SHARED	3
 #define MMF_DUMP_MAPPED_PRIVATE	4
 #define MMF_DUMP_MAPPED_SHARED	5
+#define MMF_DUMP_ELF_HEADERS	6
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	4
+#define MMF_DUMP_FILTER_BITS	5
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
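
The filter-width bump in this hunk makes room for the new MMF_DUMP_ELF_HEADERS bit. A worked expansion of the mask macro (assuming MMF_DUMPABLE_BITS is 2, as defined just above this hunk in the same header) shows why four filter bits no longer cover it:

	/* Worked example only, not part of the patch:
	 *   MMF_DUMP_FILTER_SHIFT = MMF_DUMPABLE_BITS = 2
	 *   old mask: ((1 << 4) - 1) << 2 = 0x3c  -> filter spans bits 2..5
	 *   new mask: ((1 << 5) - 1) << 2 = 0x7c  -> filter spans bits 2..6
	 * MMF_DUMP_ELF_HEADERS is bit 6, so it only lands inside
	 * MMF_DUMP_FILTER_MASK once MMF_DUMP_FILTER_BITS is 5.
	 */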
@@ -515,8 +518,10 @@ struct user_struct {
 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
+#ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
+#endif
 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
 
 #ifdef CONFIG_KEYS
@@ -913,6 +918,16 @@ struct task_struct {
 #endif
 
 	unsigned short ioprio;
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
+	s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
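
The comment block added here (together with fpu_counter itself, and oomkilladj narrowed from int to s8 in the move) documents the eager/lazy FPU heuristic. A rough sketch of how the per-arch switch path consumes the counter, with an assumed helper name (the real logic lives in the architecture's __switch_to()/math_state_restore() paths and differs in detail):

	/* Sketch only; restore_fpu_eagerly() is a placeholder name. */
	if (next_p->fpu_counter > 5)           /* FPU used on recent consecutive switches */
		restore_fpu_eagerly(next_p);   /* restore now, skip the device-not-available trap */
	/* ...while the lazy trap path bumps the counter on every FPU use: */
	tsk->fpu_counter++;                    /* unsigned char: wraps at 256, falls back to lazy */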
@@ -998,16 +1013,6 @@ struct task_struct {
 	struct key *thread_keyring;	/* keyring private to this thread */
 	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
-	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
-	 */
-	unsigned char fpu_counter;
-	int oomkilladj; /* OOM kill score adjustment (bit shift). */
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				       it with task_lock())
@@ -1039,8 +1044,9 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-
+#ifdef CONFIG_SECURITY
 	void *security;
+#endif
 	struct audit_context *audit_context;
 	seccomp_t seccomp;
 
@@ -1129,13 +1135,14 @@ struct task_struct {
 	int cpuset_mems_generation;
 	int cpuset_mem_spread_rotor;
 #endif
+#ifdef CONFIG_FUTEX
 	struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
-
+#endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
 
@@ -1149,6 +1156,7 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
+	struct prop_local_single dirties;
 };
 
 /*
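
The new dirties field connects back to the <linux/proportions.h> include added in the first hunk: each task carries a prop_local_single so the writeback code can estimate its share of recently dirtied pages. A minimal lifecycle sketch, assuming the init/destroy helpers declared in <linux/proportions.h> (the actual call sites are in the task setup/teardown and mm writeback code, not shown here):

	/* Sketch with assumed call sites, not the actual mm/ code: */
	prop_local_init_single(&tsk->dirties);     /* when the task is set up    */
	prop_local_destroy_single(&tsk->dirties);  /* when the task is torn down */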