author		Ingo Molnar <mingo@elte.hu>	2009-06-11 11:55:42 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-11 11:55:42 -0400
commit		940010c5a314a7bd9b498593bc6ba1718ac5aec5 (patch)
tree		d141e08ced08c40c6a8e3ab2cdecde5ff14e560f /include/linux/sched.h
parent		8dc8e5e8bc0ce00b0f656bf972f67cd8a72759e5 (diff)
parent		991ec02cdca33b03a132a0cacfe6f0aa0be9aa8d (diff)
Merge branch 'linus' into perfcounters/core
Conflicts:
	arch/x86/kernel/irqinit.c
	arch/x86/kernel/irqinit_64.c
	arch/x86/kernel/traps.c
	arch/x86/mm/fault.c
	include/linux/sched.h
	kernel/exit.c
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	53
1 file changed, 36 insertions, 17 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bc9326dcdde1..28c774ff3cc7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -77,6 +77,7 @@ struct sched_param {
 #include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
+#include <linux/rculist.h>
 #include <linux/rtmutex.h>
 
 #include <linux/time.h>
@@ -96,8 +97,8 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
-struct bts_tracer;
 struct fs_struct;
+struct bts_context;
 struct perf_counter_context;
 
 /*
@@ -117,6 +118,7 @@ struct perf_counter_context;
  * 11 bit fractions.
  */
 extern unsigned long avenrun[];		/* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 
 #define FSHIFT		11		/* nr of bits of precision */
 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
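
For reference: avenrun[] stores the load averages as fixed-point values with FSHIFT (11) fractional bits, so FIXED_1 (2048) encodes a load of exactly 1.0, and the new get_avenrun() copies those values out with a caller-chosen offset and shift. A minimal standalone sketch of decoding the format (the sample value is made up; LOAD_INT/LOAD_FRAC mirror the helpers the kernel uses when printing /proc/loadavg):

#include <stdio.h>

#define FSHIFT   11                /* nr of bits of precision */
#define FIXED_1  (1 << FSHIFT)     /* 1.0 as fixed-point */

/* Split a fixed-point load value into integer part and two-digit fraction. */
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	unsigned long sample = 3 * FIXED_1 / 2;	/* 1.50 in fixed-point */

	printf("load: %lu.%02lu\n", LOAD_INT(sample), LOAD_FRAC(sample));
	return 0;
}

The same decoding applies to each of the three entries get_avenrun() can return (the 1-, 5- and 15-minute averages).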
@@ -136,8 +138,8 @@ DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern void calc_global_load(void);
 extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
@@ -844,7 +846,17 @@ struct sched_group {
  */
 	u32 reciprocal_cpu_power;
 
-	unsigned long cpumask[];
+	/*
+	 * The CPUs this group covers.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 *
+	 * It can also be embedded into static data structures at build
+	 * time. (See 'struct static_sched_group' in kernel/sched.c)
+	 */
+	unsigned long cpumask[0];
 };
 
 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
@@ -930,8 +942,17 @@ struct sched_domain {
 	char *name;
 #endif
 
-	/* span of all CPUs in this domain */
-	unsigned long span[];
+	/*
+	 * Span of all CPUs in this domain.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 *
+	 * It can also be embedded into static data structures at build
+	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
+	 */
+	unsigned long span[0];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
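
Both cpumask[] in sched_group and span[] in sched_domain rely on the C idiom the new comments describe: a zero-length trailing array whose real storage comes from over-allocating the enclosing structure. A minimal sketch of that allocation pattern (struct demo and demo_alloc() are hypothetical, and malloc() stands in for the kernel's kzalloc()):

#include <stdlib.h>
#include <string.h>

struct demo {
	int		nbits;
	unsigned long	bits[0];	/* storage attached at allocation time */
};

static struct demo *demo_alloc(int nbits)
{
	/* Round the bit count up to whole unsigned longs. */
	size_t nwords = (nbits + 8 * sizeof(unsigned long) - 1) /
			(8 * sizeof(unsigned long));
	struct demo *d = malloc(sizeof(*d) + nwords * sizeof(unsigned long));

	if (d) {
		d->nbits = nbits;
		memset(d->bits, 0, nwords * sizeof(unsigned long));
	}
	return d;
}

The static embedding mentioned in the comments works because a [0] array contributes no size of its own: a wrapper structure can simply place a full-sized bitmap field directly after it.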
@@ -1216,18 +1237,11 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
-#ifdef CONFIG_X86_PTRACE_BTS
 	/*
 	 * This is the tracer handle for the ptrace BTS extension.
 	 * This field actually belongs to the ptracer task.
 	 */
-	struct bts_tracer *bts;
-	/*
-	 * The buffer to hold the BTS data.
-	 */
-	void *bts_buffer;
-	size_t bts_size;
-#endif /* CONFIG_X86_PTRACE_BTS */
+	struct bts_context *bts;
 
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
@@ -1440,7 +1454,9 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-#endif
+	/* bitmask of trace recursion */
+	unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
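
The new trace_recursion field gives tracers a per-task scratch bitmask for re-entry detection: set a bit on entry, bail out if it was already set, clear it on exit. A minimal sketch of that guard pattern (TRACE_DEMO_BIT and demo_trace_event() are hypothetical, not the actual ftrace usage):

#define TRACE_DEMO_BIT	0

static void demo_trace_event(struct task_struct *curr)
{
	/* Already inside this tracer on this task? Then back off. */
	if (curr->trace_recursion & (1UL << TRACE_DEMO_BIT))
		return;
	curr->trace_recursion |= 1UL << TRACE_DEMO_BIT;

	/* ... record the event; any nested call above returns early ... */

	curr->trace_recursion &= ~(1UL << TRACE_DEMO_BIT);
}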
@@ -2013,8 +2029,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
 {
@@ -2022,7 +2040,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
-#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define next_task(p) \
+	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
 
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
@@ -2061,8 +2080,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
 
 static inline struct task_struct *next_thread(const struct task_struct *p)
 {
-	return list_entry(rcu_dereference(p->thread_group.next),
+	return list_entry_rcu(p->thread_group.next,
 			  struct task_struct, thread_group);
 }
 
 static inline int thread_group_empty(struct task_struct *p)
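
next_task() and next_thread() now go through list_entry_rcu(), which folds the rcu_dereference() into the list accessor (hence the new <linux/rculist.h> include at the top of the file); callers are still expected to hold the RCU read lock across the traversal. A minimal sketch of the intended usage (count_all_tasks() is illustrative, not part of this patch):

static int count_all_tasks(void)
{
	struct task_struct *p;
	int count = 0;

	rcu_read_lock();
	for_each_process(p)		/* RCU-safe via list_entry_rcu() */
		count++;
	rcu_read_unlock();

	return count;
}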