 include/linux/sched.h | 53 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 17 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c38bc8049c..d1399660b776 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -77,6 +77,7 @@ struct sched_param {
 #include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
+#include <linux/rculist.h>
 #include <linux/rtmutex.h>
 
 #include <linux/time.h>
@@ -96,8 +97,8 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
-struct bts_tracer;
 struct fs_struct;
+struct bts_context;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -116,6 +117,7 @@ struct fs_struct;
  * 11 bit fractions.
  */
 extern unsigned long avenrun[];		/* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 
 #define FSHIFT		11		/* nr of bits of precision */
 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
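The new get_avenrun() accessor is what lets code outside the scheduler sample
avenrun[] safely. A minimal sketch of a consumer, modelled on what
fs/proc/loadavg.c does with the LOAD_INT()/LOAD_FRAC() helpers defined further
down in this header (the FIXED_1/200 offset rounds each sample to the nearest
hundredth):

	static void print_loadavg(void)
	{
		unsigned long avnrun[3];

		get_avenrun(avnrun, FIXED_1/200, 0);

		printk(KERN_INFO "load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
		       LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
		       LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
		       LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
	}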
@@ -135,8 +137,8 @@ DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern void calc_global_load(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
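nr_active() goes away because the load average is no longer computed by
summing per-CPU counts on demand; calc_global_load() instead folds a sampled
task count into avenrun[] once per LOAD_FREQ window. A rough sketch of that
fold, assuming the calc_load_tasks counter and calc_load_update deadline that
the patch series keeps in kernel/sched.c (both names are assumptions from the
commit, not from this header), built on the CALC_LOAD()/EXP_* definitions this
header already provides:

	void calc_global_load(void)
	{
		long active;

		/* Only fold once the current LOAD_FREQ window has expired. */
		if (time_before(jiffies, calc_load_update))
			return;

		active = atomic_long_read(&calc_load_tasks);
		active = active > 0 ? active * FIXED_1 : 0;

		CALC_LOAD(avenrun[0], EXP_1, active);
		CALC_LOAD(avenrun[1], EXP_5, active);
		CALC_LOAD(avenrun[2], EXP_15, active);

		calc_load_update += LOAD_FREQ;
	}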
@@ -838,7 +840,17 @@ struct sched_group {
 	 */
 	u32 reciprocal_cpu_power;
 
-	unsigned long cpumask[];
+	/*
+	 * The CPUs this group covers.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 *
+	 * It can also be embedded into static data structures at build
+	 * time. (See 'struct static_sched_group' in kernel/sched.c)
+	 */
+	unsigned long cpumask[0];
 };
 
 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
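The new comment documents both lifetimes of cpumask[]. A sketch of the two
allocation patterns it describes; alloc_sched_group() is illustrative, and the
static wrapper only approximates 'struct static_sched_group' from
kernel/sched.c:

	/* Dynamic: reserve cpumask_size() bytes directly behind the struct. */
	struct sched_group *alloc_sched_group(void)
	{
		return kzalloc(sizeof(struct sched_group) + cpumask_size(),
			       GFP_KERNEL);
	}

	/* Static: trailing bitmap storage is provided by the wrapper, so
	 * sg.cpumask[] resolves into the 'cpus' bitmap that follows it. */
	struct static_sched_group {
		struct sched_group sg;
		DECLARE_BITMAP(cpus, NR_CPUS);
	};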
@@ -924,8 +936,17 @@ struct sched_domain {
 	char *name;
 #endif
 
-	/* span of all CPUs in this domain */
-	unsigned long span[];
+	/*
+	 * Span of all CPUs in this domain.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 *
+	 * It can also be embedded into static data structures at build
+	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
+	 */
+	unsigned long span[0];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
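Either way the storage is allocated, callers are expected to go through the
accessor just below rather than touch span[] directly; the usual iteration
looks like this (sketch, printk purely illustrative):

	static void dump_domain_span(struct sched_domain *sd)
	{
		int cpu;

		for_each_cpu(cpu, sched_domain_span(sd))
			printk(KERN_DEBUG "cpu%d is in this domain\n", cpu);
	}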
@@ -1209,18 +1230,11 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
-#ifdef CONFIG_X86_PTRACE_BTS
 	/*
 	 * This is the tracer handle for the ptrace BTS extension.
 	 * This field actually belongs to the ptracer task.
 	 */
-	struct bts_tracer *bts;
-	/*
-	 * The buffer to hold the BTS data.
-	 */
-	void *bts_buffer;
-	size_t bts_size;
-#endif /* CONFIG_X86_PTRACE_BTS */
+	struct bts_context *bts;
 
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
@@ -1428,7 +1442,9 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-#endif
+	/* bitmask of trace recursion */
+	unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
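trace_recursion gives each tracer a per-task bit for detecting re-entry into
its own callbacks. The real helpers live in the tracing code, not here; the
sketch below is illustrative only, with a made-up bit index:

	#define MY_TRACER_BIT	0	/* hypothetical bit assignment */

	static int my_tracer_enter(struct task_struct *t)
	{
		/* Atomically claim the bit; non-zero means we are
		 * already inside this tracer on this task. */
		if (test_and_set_bit(MY_TRACER_BIT, &t->trace_recursion))
			return -EBUSY;
		return 0;
	}

	static void my_tracer_exit(struct task_struct *t)
	{
		clear_bit(MY_TRACER_BIT, &t->trace_recursion);
	}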
@@ -2001,8 +2017,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
@@ -2010,7 +2028,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
-#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define next_task(p) \
+	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
 
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
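The conversion to list_entry_rcu() moves the rcu_dereference() inside the list
helper, centralizing the RCU annotation instead of open-coding it at every
call site. Users of next_task() and for_each_process() remain responsible for
the read-side critical section; a minimal sketch of the pattern:

	static int count_tasks(void)
	{
		struct task_struct *p;
		int n = 0;

		rcu_read_lock();	/* makes next_task()'s RCU load safe */
		for_each_process(p)
			n++;
		rcu_read_unlock();

		return n;
	}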
@@ -2049,8 +2068,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
 
 static inline struct task_struct *next_thread(const struct task_struct *p)
 {
-	return list_entry(rcu_dereference(p->thread_group.next),
+	return list_entry_rcu(p->thread_group.next,
 			  struct task_struct, thread_group);
 }
 
 static inline int thread_group_empty(struct task_struct *p)