Diffstat (limited to 'include/linux/sched.h')
 -rw-r--r--  include/linux/sched.h | 116
 1 file changed, 84 insertions(+), 32 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c38bc8049c..4d0754269884 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -77,6 +77,7 @@ struct sched_param {
 #include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
+#include <linux/rculist.h>
 #include <linux/rtmutex.h>
 
 #include <linux/time.h>
@@ -91,13 +92,13 @@ struct sched_param {
 
 #include <asm/processor.h>
 
-struct mem_cgroup;
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
-struct bts_tracer;
 struct fs_struct;
+struct bts_context;
+struct perf_counter_context;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -116,6 +117,7 @@ struct fs_struct;
  * 11 bit fractions.
  */
 extern unsigned long avenrun[];         /* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 
 #define FSHIFT          11              /* nr of bits of precision */
 #define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
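The new get_avenrun() accessor hands out a snapshot of avenrun[] instead of having callers read the array racily. The values stay in the FSHIFT fixed-point format defined just above, so a caller still splits them with the LOAD_INT()/LOAD_FRAC() helpers this header provides. A minimal sketch, mirroring what fs/proc/loadavg.c does:

    unsigned long loads[3];

    get_avenrun(loads, FIXED_1/200, 0);   /* the offset rounds to two decimals */
    printk(KERN_INFO "load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
           LOAD_INT(loads[0]), LOAD_FRAC(loads[0]),
           LOAD_INT(loads[1]), LOAD_FRAC(loads[1]),
           LOAD_INT(loads[2]), LOAD_FRAC(loads[2]));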
@@ -135,8 +137,9 @@ DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern void calc_global_load(void);
+extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -257,6 +260,7 @@ extern void task_rq_unlock_wait(struct task_struct *p);
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
+extern int get_nohz_load_balancer(void);
 #else
 static inline int select_nohz_load_balancer(int cpu)
 {
@@ -669,9 +673,13 @@ struct user_struct {
        struct task_group *tg;
 #ifdef CONFIG_SYSFS
        struct kobject kobj;
-       struct work_struct work;
+       struct delayed_work work;
 #endif
 #endif
+
+#ifdef CONFIG_PERF_COUNTERS
+       atomic_long_t locked_vm;
+#endif
 };
 
 extern int uids_sysfs_init(void);
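Switching user_struct::work from work_struct to delayed_work lets the sysfs teardown of a uid be deferred instead of running as soon as a worker picks it up; the perf-counter locked_vm accounting field rides along in the same structure. A hedged sketch of the API difference (free_user_work is a placeholder handler name, not necessarily what kernel/user.c uses):

    /* before: queued work runs at the next opportunity */
    INIT_WORK(&up->work, free_user_work);
    schedule_work(&up->work);

    /* after: the same handler, but not before the given delay */
    INIT_DELAYED_WORK(&up->work, free_user_work);
    schedule_delayed_work(&up->work, msecs_to_jiffies(1000));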
@@ -838,7 +846,17 @@ struct sched_group {
         */
        u32 reciprocal_cpu_power;
 
-       unsigned long cpumask[];
+       /*
+        * The CPUs this group covers.
+        *
+        * NOTE: this field is variable length. (Allocated dynamically
+        * by attaching extra space to the end of the structure,
+        * depending on how many CPUs the kernel has booted up with)
+        *
+        * It can also be embedded into static data structures at build
+        * time. (See 'struct static_sched_group' in kernel/sched.c)
+        */
+       unsigned long cpumask[0];
 };
 
 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
@@ -924,8 +942,17 @@ struct sched_domain {
        char *name;
 #endif
 
-       /* span of all CPUs in this domain */
-       unsigned long span[];
+       /*
+        * Span of all CPUs in this domain.
+        *
+        * NOTE: this field is variable length. (Allocated dynamically
+        * by attaching extra space to the end of the structure,
+        * depending on how many CPUs the kernel has booted up with)
+        *
+        * It can also be embedded into static data structures at build
+        * time. (See 'struct static_sched_domain' in kernel/sched.c)
+        */
+       unsigned long span[0];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
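Both new comments describe the same trick: the trailing zero-length array costs nothing in sizeof(), and dynamic allocations simply reserve cpumask_size() extra bytes behind the structure. A minimal sketch of the dynamic case, assuming sleepable GFP_KERNEL context (the real allocation paths in kernel/sched.c carry more bookkeeping):

    struct sched_group *sg;

    /* one allocation covers the struct plus the trailing cpumask */
    sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
    if (!sg)
            return -ENOMEM;
    cpumask_copy(sched_group_cpus(sg), cpu_online_mask);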
@@ -1052,9 +1079,10 @@ struct sched_entity {
        u64                     last_wakeup;
        u64                     avg_overlap;
 
+       u64                     nr_migrations;
+
        u64                     start_runtime;
        u64                     avg_wakeup;
-       u64                     nr_migrations;
 
 #ifdef CONFIG_SCHEDSTATS
        u64                     wait_start;
@@ -1149,7 +1177,6 @@ struct task_struct {
         * a short time
         */
        unsigned char fpu_counter;
-       s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
 #ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
 #endif
@@ -1209,18 +1236,11 @@ struct task_struct {
        struct list_head ptraced;
        struct list_head ptrace_entry;
 
-#ifdef CONFIG_X86_PTRACE_BTS
        /*
         * This is the tracer handle for the ptrace BTS extension.
         * This field actually belongs to the ptracer task.
         */
-       struct bts_tracer *bts;
-       /*
-        * The buffer to hold the BTS data.
-        */
-       void *bts_buffer;
-       size_t bts_size;
-#endif /* CONFIG_X86_PTRACE_BTS */
+       struct bts_context *bts;
 
        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
@@ -1247,7 +1267,9 @@ struct task_struct {
                                         * credentials (COW) */
        const struct cred *cred;        /* effective (overridable) subjective task
                                         * credentials (COW) */
-       struct mutex cred_exec_mutex;   /* execve vs ptrace cred calculation mutex */
+       struct mutex cred_guard_mutex;  /* guard against foreign influences on
+                                        * credential calculations
+                                        * (notably ptrace) */
 
        char comm[TASK_COMM_LEN]; /* executable name excluding path
                                     - access with [gs]et_task_comm (which lock
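The rename from cred_exec_mutex to cred_guard_mutex reflects a wider contract: any path that computes a task's credentials (execve, ptrace attach) serializes on it so the two cannot interleave. Roughly the pattern fs/exec.c uses when preparing exec credentials (a sketch):

    if (mutex_lock_interruptible(&current->cred_guard_mutex))
            return -ERESTARTNOINTR;
    /* ... compute and commit the new credentials ... */
    mutex_unlock(&current->cred_guard_mutex);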
@@ -1294,7 +1316,8 @@ struct task_struct {
 /* Thread group tracking */
        u32 parent_exec_id;
        u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
+/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
+ * mempolicy */
        spinlock_t alloc_lock;
 
 #ifdef CONFIG_GENERIC_HARDIRQS
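alloc_lock is taken through the task_lock()/task_unlock() helpers defined further down in this header; with mems_allowed and mempolicy now under its protection, a reader snapshots them like so (sketch):

    nodemask_t nodes;

    task_lock(p);                /* spin_lock(&p->alloc_lock) */
    nodes = p->mems_allowed;     /* stable while the lock is held */
    task_unlock(p);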
@@ -1362,8 +1385,7 @@ struct task_struct {
        cputime_t acct_timexpd; /* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
-       nodemask_t mems_allowed;
-       int cpuset_mems_generation;
+       nodemask_t mems_allowed;        /* Protected by alloc_lock */
        int cpuset_mem_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
@@ -1380,8 +1402,13 @@ struct task_struct {
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
 #endif
+#ifdef CONFIG_PERF_COUNTERS
+       struct perf_counter_context *perf_counter_ctxp;
+       struct mutex perf_counter_mutex;
+       struct list_head perf_counter_list;
+#endif
 #ifdef CONFIG_NUMA
-       struct mempolicy *mempolicy;
+       struct mempolicy *mempolicy;    /* Protected by alloc_lock */
        short il_next;
 #endif
        atomic_t fs_excl;       /* holding fs exclusive resources */
@@ -1428,7 +1455,9 @@ struct task_struct {
 #ifdef CONFIG_TRACING
        /* state flags for use by tracers */
        unsigned long trace;
-#endif
+       /* bitmask of trace recursion */
+       unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1766,11 +1795,23 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_timer_migration;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
                struct file *file, void __user *buffer, size_t *length,
                loff_t *ppos);
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+       return sysctl_timer_migration;
+}
+#else
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+       return 1;
+}
+#endif
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
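The accessor pair keeps call sites free of #ifdefs: under CONFIG_SCHED_DEBUG the sysctl is live, otherwise timer migration is unconditionally enabled. A hedged sketch of the kind of placement decision this feeds (the real logic lives in the timer code and differs in detail; a negative return from get_nohz_load_balancer() is taken here to mean no balancer is nominated):

    int cpu = smp_processor_id();
    int balancer;

    if (get_sysctl_timer_migration() && idle_cpu(cpu)) {
            balancer = get_nohz_load_balancer();
            if (balancer >= 0)
                    cpu = balancer;  /* park the timer on the busy CPU */
    }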
@@ -1837,9 +1878,6 @@ extern struct pid_namespace init_pid_ns;
 /*
  * find a task by one of its numerical ids
  *
- * find_task_by_pid_type_ns():
- *      it is the most generic call - it finds a task by all id,
- *      type and namespace specified
  * find_task_by_pid_ns():
  *      finds a task by its pid in the specified namespace
  * find_task_by_vpid():
@@ -1848,9 +1886,6 @@ extern struct pid_namespace init_pid_ns;
  * see also find_vpid() etc in include/linux/pid.h
  */
 
-extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
-               struct pid_namespace *ns);
-
 extern struct task_struct *find_task_by_vpid(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
                struct pid_namespace *ns);
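With the overly generic find_task_by_pid_type_ns() gone, lookups go through the two remaining helpers. Both must be called under rcu_read_lock() (or tasklist_lock), and the returned pointer is only guaranteed valid inside that section; pin the task before leaving it. A sketch:

    struct task_struct *p;

    rcu_read_lock();
    p = find_task_by_vpid(nr);   /* nr as seen from current's pid namespace */
    if (p)
            get_task_struct(p);  /* hold a reference past the RCU section */
    rcu_read_unlock();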
@@ -1885,6 +1920,7 @@ extern void sched_dead(struct task_struct *p);
 
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
+extern void __flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *, int force_default);
 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@ -2001,8 +2037,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
                                               long match_state)
 {
@@ -2010,7 +2048,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
-#define next_task(p)   list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define next_task(p) \
+       list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
 
 #define for_each_process(p) \
        for (p = &init_task ; (p = next_task(p)) != &init_task ; )
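list_entry_rcu() folds the rcu_dereference() into the container_of() step, so the ->next pointer is fetched exactly once with the required memory ordering. Traversal still belongs inside an RCU read-side section; a sketch:

    struct task_struct *p;

    rcu_read_lock();
    for_each_process(p)
            printk(KERN_DEBUG "%d %s\n", task_pid_nr(p), p->comm);
    rcu_read_unlock();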
@@ -2049,8 +2088,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
 
 static inline struct task_struct *next_thread(const struct task_struct *p)
 {
-       return list_entry(rcu_dereference(p->thread_group.next),
-                         struct task_struct, thread_group);
+       return list_entry_rcu(p->thread_group.next,
+                             struct task_struct, thread_group);
 }
 
 static inline int thread_group_empty(struct task_struct *p)
@@ -2178,6 +2217,12 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
        return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }
 
+static inline int restart_syscall(void)
+{
+       set_tsk_thread_flag(current, TIF_SIGPENDING);
+       return -ERESTARTNOINTR;
+}
+
 static inline int signal_pending(struct task_struct *p)
 {
        return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
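restart_syscall() gives syscalls a uniform way to abandon work and be re-executed transparently: setting TIF_SIGPENDING forces the return-to-user signal path to run, and -ERESTARTNOINTR makes that path restart the call instead of reporting an error. Typical use is when global state changes underneath a long operation (the condition below is hypothetical):

    /* inside some sys_foo(): configuration changed while we slept */
    if (config_generation_changed)
            return restart_syscall();  /* user space never sees an error */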
@@ -2388,6 +2433,13 @@ static inline void inc_syscw(struct task_struct *tsk)
 #define TASK_SIZE_OF(tsk)      TASK_SIZE
 #endif
 
+/*
+ * Call the function if the target task is executing on a CPU right now:
+ */
+extern void task_oncpu_function_call(struct task_struct *p,
+                                    void (*func) (void *info), void *info);
+
+
 #ifdef CONFIG_MM_OWNER
 extern void mm_update_next_owner(struct mm_struct *mm);
 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
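task_oncpu_function_call() is added for the perf counter code: it runs func(info) via a cross-CPU call on whichever CPU is currently executing p, and does nothing if p is not on a CPU. A hedged usage sketch (the callback and its body are illustrative, not the actual perf code):

    /* executes on the CPU that is running the target task */
    static void sync_counters(void *info)
    {
            struct perf_counter_context *ctx = info;
            /* ... flush or inspect per-CPU state for ctx ... */
    }

    task_oncpu_function_call(p, sync_counters, p->perf_counter_ctxp);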