diff options
Diffstat (limited to 'include/linux/sched.h')
| -rw-r--r-- | include/linux/sched.h | 79 |
1 file changed, 60 insertions, 19 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index b4c38bc8049c..4896fdfec913 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -77,6 +77,7 @@ struct sched_param { | |||
| 77 | #include <linux/proportions.h> | 77 | #include <linux/proportions.h> |
| 78 | #include <linux/seccomp.h> | 78 | #include <linux/seccomp.h> |
| 79 | #include <linux/rcupdate.h> | 79 | #include <linux/rcupdate.h> |
| 80 | #include <linux/rculist.h> | ||
| 80 | #include <linux/rtmutex.h> | 81 | #include <linux/rtmutex.h> |
| 81 | 82 | ||
| 82 | #include <linux/time.h> | 83 | #include <linux/time.h> |
| @@ -96,8 +97,9 @@ struct exec_domain; | |||
| 96 | struct futex_pi_state; | 97 | struct futex_pi_state; |
| 97 | struct robust_list_head; | 98 | struct robust_list_head; |
| 98 | struct bio; | 99 | struct bio; |
| 99 | struct bts_tracer; | ||
| 100 | struct fs_struct; | 100 | struct fs_struct; |
| 101 | struct bts_context; | ||
| 102 | struct perf_counter_context; | ||
| 101 | 103 | ||
| 102 | /* | 104 | /* |
| 103 | * List of flags we want to share for kernel threads, | 105 | * List of flags we want to share for kernel threads, |
| @@ -116,6 +118,7 @@ struct fs_struct; | |||
| 116 | * 11 bit fractions. | 118 | * 11 bit fractions. |
| 117 | */ | 119 | */ |
| 118 | extern unsigned long avenrun[]; /* Load averages */ | 120 | extern unsigned long avenrun[]; /* Load averages */ |
| 121 | extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); | ||
| 119 | 122 | ||
| 120 | #define FSHIFT 11 /* nr of bits of precision */ | 123 | #define FSHIFT 11 /* nr of bits of precision */ |
| 121 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ | 124 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ |
| @@ -135,8 +138,9 @@ DECLARE_PER_CPU(unsigned long, process_counts); | |||
| 135 | extern int nr_processes(void); | 138 | extern int nr_processes(void); |
| 136 | extern unsigned long nr_running(void); | 139 | extern unsigned long nr_running(void); |
| 137 | extern unsigned long nr_uninterruptible(void); | 140 | extern unsigned long nr_uninterruptible(void); |
| 138 | extern unsigned long nr_active(void); | ||
| 139 | extern unsigned long nr_iowait(void); | 141 | extern unsigned long nr_iowait(void); |
| 142 | extern void calc_global_load(void); | ||
| 143 | extern u64 cpu_nr_migrations(int cpu); | ||
| 140 | 144 | ||
| 141 | extern unsigned long get_parent_ip(unsigned long addr); | 145 | extern unsigned long get_parent_ip(unsigned long addr); |
| 142 | 146 | ||
| @@ -672,6 +676,10 @@ struct user_struct { | |||
| 672 | struct work_struct work; | 676 | struct work_struct work; |
| 673 | #endif | 677 | #endif |
| 674 | #endif | 678 | #endif |
| 679 | |||
| 680 | #ifdef CONFIG_PERF_COUNTERS | ||
| 681 | atomic_long_t locked_vm; | ||
| 682 | #endif | ||
| 675 | }; | 683 | }; |
| 676 | 684 | ||
| 677 | extern int uids_sysfs_init(void); | 685 | extern int uids_sysfs_init(void); |
| @@ -838,7 +846,17 @@ struct sched_group { | |||
| 838 | */ | 846 | */ |
| 839 | u32 reciprocal_cpu_power; | 847 | u32 reciprocal_cpu_power; |
| 840 | 848 | ||
| 841 | unsigned long cpumask[]; | 849 | /* |
| 850 | * The CPUs this group covers. | ||
| 851 | * | ||
| 852 | * NOTE: this field is variable length. (Allocated dynamically | ||
| 853 | * by attaching extra space to the end of the structure, | ||
| 854 | * depending on how many CPUs the kernel has booted up with) | ||
| 855 | * | ||
| 856 | * It can also be embedded into static data structures at build | ||
| 857 | * time. (See 'struct static_sched_group' in kernel/sched.c) | ||
| 858 | */ | ||
| 859 | unsigned long cpumask[0]; | ||
| 842 | }; | 860 | }; |
| 843 | 861 | ||
| 844 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | 862 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) |
| @@ -924,8 +942,17 @@ struct sched_domain { | |||
| 924 | char *name; | 942 | char *name; |
| 925 | #endif | 943 | #endif |
| 926 | 944 | ||
| 927 | /* span of all CPUs in this domain */ | 945 | /* |
| 928 | unsigned long span[]; | 946 | * Span of all CPUs in this domain. |
| 947 | * | ||
| 948 | * NOTE: this field is variable length. (Allocated dynamically | ||
| 949 | * by attaching extra space to the end of the structure, | ||
| 950 | * depending on how many CPUs the kernel has booted up with) | ||
| 951 | * | ||
| 952 | * It can also be embedded into static data structures at build | ||
| 953 | * time. (See 'struct static_sched_domain' in kernel/sched.c) | ||
| 954 | */ | ||
| 955 | unsigned long span[0]; | ||
| 929 | }; | 956 | }; |
| 930 | 957 | ||
| 931 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) | 958 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) |
| @@ -1052,9 +1079,10 @@ struct sched_entity { | |||
| 1052 | u64 last_wakeup; | 1079 | u64 last_wakeup; |
| 1053 | u64 avg_overlap; | 1080 | u64 avg_overlap; |
| 1054 | 1081 | ||
| 1082 | u64 nr_migrations; | ||
| 1083 | |||
| 1055 | u64 start_runtime; | 1084 | u64 start_runtime; |
| 1056 | u64 avg_wakeup; | 1085 | u64 avg_wakeup; |
| 1057 | u64 nr_migrations; | ||
| 1058 | 1086 | ||
| 1059 | #ifdef CONFIG_SCHEDSTATS | 1087 | #ifdef CONFIG_SCHEDSTATS |
| 1060 | u64 wait_start; | 1088 | u64 wait_start; |
| @@ -1209,18 +1237,11 @@ struct task_struct { | |||
| 1209 | struct list_head ptraced; | 1237 | struct list_head ptraced; |
| 1210 | struct list_head ptrace_entry; | 1238 | struct list_head ptrace_entry; |
| 1211 | 1239 | ||
| 1212 | #ifdef CONFIG_X86_PTRACE_BTS | ||
| 1213 | /* | 1240 | /* |
| 1214 | * This is the tracer handle for the ptrace BTS extension. | 1241 | * This is the tracer handle for the ptrace BTS extension. |
| 1215 | * This field actually belongs to the ptracer task. | 1242 | * This field actually belongs to the ptracer task. |
| 1216 | */ | 1243 | */ |
| 1217 | struct bts_tracer *bts; | 1244 | struct bts_context *bts; |
| 1218 | /* | ||
| 1219 | * The buffer to hold the BTS data. | ||
| 1220 | */ | ||
| 1221 | void *bts_buffer; | ||
| 1222 | size_t bts_size; | ||
| 1223 | #endif /* CONFIG_X86_PTRACE_BTS */ | ||
| 1224 | 1245 | ||
| 1225 | /* PID/PID hash table linkage. */ | 1246 | /* PID/PID hash table linkage. */ |
| 1226 | struct pid_link pids[PIDTYPE_MAX]; | 1247 | struct pid_link pids[PIDTYPE_MAX]; |
| @@ -1247,7 +1268,9 @@ struct task_struct { | |||
| 1247 | * credentials (COW) */ | 1268 | * credentials (COW) */ |
| 1248 | const struct cred *cred; /* effective (overridable) subjective task | 1269 | const struct cred *cred; /* effective (overridable) subjective task |
| 1249 | * credentials (COW) */ | 1270 | * credentials (COW) */ |
| 1250 | struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */ | 1271 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
| 1272 | * credential calculations | ||
| 1273 | * (notably ptrace) */ | ||
| 1251 | 1274 | ||
| 1252 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1275 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
| 1253 | - access with [gs]et_task_comm (which lock | 1276 | - access with [gs]et_task_comm (which lock |
| @@ -1380,6 +1403,11 @@ struct task_struct { | |||
| 1380 | struct list_head pi_state_list; | 1403 | struct list_head pi_state_list; |
| 1381 | struct futex_pi_state *pi_state_cache; | 1404 | struct futex_pi_state *pi_state_cache; |
| 1382 | #endif | 1405 | #endif |
| 1406 | #ifdef CONFIG_PERF_COUNTERS | ||
| 1407 | struct perf_counter_context *perf_counter_ctxp; | ||
| 1408 | struct mutex perf_counter_mutex; | ||
| 1409 | struct list_head perf_counter_list; | ||
| 1410 | #endif | ||
| 1383 | #ifdef CONFIG_NUMA | 1411 | #ifdef CONFIG_NUMA |
| 1384 | struct mempolicy *mempolicy; | 1412 | struct mempolicy *mempolicy; |
| 1385 | short il_next; | 1413 | short il_next; |
| @@ -1428,7 +1456,9 @@ struct task_struct { | |||
| 1428 | #ifdef CONFIG_TRACING | 1456 | #ifdef CONFIG_TRACING |
| 1429 | /* state flags for use by tracers */ | 1457 | /* state flags for use by tracers */ |
| 1430 | unsigned long trace; | 1458 | unsigned long trace; |
| 1431 | #endif | 1459 | /* bitmask of trace recursion */ |
| 1460 | unsigned long trace_recursion; | ||
| 1461 | #endif /* CONFIG_TRACING */ | ||
| 1432 | }; | 1462 | }; |
| 1433 | 1463 | ||
| 1434 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1464 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
| @@ -1885,6 +1915,7 @@ extern void sched_dead(struct task_struct *p); | |||
| 1885 | 1915 | ||
| 1886 | extern void proc_caches_init(void); | 1916 | extern void proc_caches_init(void); |
| 1887 | extern void flush_signals(struct task_struct *); | 1917 | extern void flush_signals(struct task_struct *); |
| 1918 | extern void __flush_signals(struct task_struct *); | ||
| 1888 | extern void ignore_signals(struct task_struct *); | 1919 | extern void ignore_signals(struct task_struct *); |
| 1889 | extern void flush_signal_handlers(struct task_struct *, int force_default); | 1920 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
| 1890 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); | 1921 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
| @@ -2001,8 +2032,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from); | |||
| 2001 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2032 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
| 2002 | 2033 | ||
| 2003 | #ifdef CONFIG_SMP | 2034 | #ifdef CONFIG_SMP |
| 2035 | extern void wait_task_context_switch(struct task_struct *p); | ||
| 2004 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 2036 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
| 2005 | #else | 2037 | #else |
| 2038 | static inline void wait_task_context_switch(struct task_struct *p) {} | ||
| 2006 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 2039 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
| 2007 | long match_state) | 2040 | long match_state) |
| 2008 | { | 2041 | { |
| @@ -2010,7 +2043,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, | |||
| 2010 | } | 2043 | } |
| 2011 | #endif | 2044 | #endif |
| 2012 | 2045 | ||
| 2013 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) | 2046 | #define next_task(p) \ |
| 2047 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) | ||
| 2014 | 2048 | ||
| 2015 | #define for_each_process(p) \ | 2049 | #define for_each_process(p) \ |
| 2016 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) | 2050 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
| @@ -2049,8 +2083,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2) | |||
| 2049 | 2083 | ||
| 2050 | static inline struct task_struct *next_thread(const struct task_struct *p) | 2084 | static inline struct task_struct *next_thread(const struct task_struct *p) |
| 2051 | { | 2085 | { |
| 2052 | return list_entry(rcu_dereference(p->thread_group.next), | 2086 | return list_entry_rcu(p->thread_group.next, |
| 2053 | struct task_struct, thread_group); | 2087 | struct task_struct, thread_group); |
| 2054 | } | 2088 | } |
| 2055 | 2089 | ||
| 2056 | static inline int thread_group_empty(struct task_struct *p) | 2090 | static inline int thread_group_empty(struct task_struct *p) |
| @@ -2388,6 +2422,13 @@ static inline void inc_syscw(struct task_struct *tsk) | |||
| 2388 | #define TASK_SIZE_OF(tsk) TASK_SIZE | 2422 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
| 2389 | #endif | 2423 | #endif |
| 2390 | 2424 | ||
| 2425 | /* | ||
| 2426 | * Call the function if the target task is executing on a CPU right now: | ||
| 2427 | */ | ||
| 2428 | extern void task_oncpu_function_call(struct task_struct *p, | ||
| 2429 | void (*func) (void *info), void *info); | ||
| 2430 | |||
| 2431 | |||
| 2391 | #ifdef CONFIG_MM_OWNER | 2432 | #ifdef CONFIG_MM_OWNER |
| 2392 | extern void mm_update_next_owner(struct mm_struct *mm); | 2433 | extern void mm_update_next_owner(struct mm_struct *mm); |
| 2393 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); | 2434 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); |
