path: root/include/linux/sched.h
author		David S. Miller <davem@davemloft.net>	2009-06-15 06:02:23 -0400
committer	David S. Miller <davem@davemloft.net>	2009-06-15 06:02:23 -0400
commit		9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree		8d104ec2a459346b99413b0b77421ca7b9936c1a /include/linux/sched.h
parent		ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent		45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/scsi/fcoe/fcoe.c
	net/core/drop_monitor.c
	net/core/net-traces.c
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	79
1 file changed, 60 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d853f6bb0baf..fea9d188dbff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -77,6 +77,7 @@ struct sched_param {
 #include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
+#include <linux/rculist.h>
 #include <linux/rtmutex.h>
 
 #include <linux/time.h>
@@ -96,8 +97,9 @@ struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
 struct bio;
-struct bts_tracer;
 struct fs_struct;
+struct bts_context;
+struct perf_counter_context;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -116,6 +118,7 @@ struct fs_struct;
  * 11 bit fractions.
  */
 extern unsigned long avenrun[];		/* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 
 #define FSHIFT		11		/* nr of bits of precision */
 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
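
[Editor's note] For readers decoding these fixed-point values: with FSHIFT = 11, a load average is stored as an integer scaled by FIXED_1 (2048). A minimal sketch of the conversion, following the decoding that fs/proc/loadavg.c performs with its LOAD_INT()/LOAD_FRAC() macros (the sample value below is illustrative, not from this patch):

	/* Integer part: drop the 11 fraction bits. */
	#define LOAD_INT(x)	((x) >> FSHIFT)
	/* Fractional part: low 11 bits, scaled to two decimal digits. */
	#define LOAD_FRAC(x)	LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

	/*
	 * Example: a raw avenrun[] sample of 2560 decodes as "1.25":
	 *   LOAD_INT(2560)  == 2560 >> 11         == 1
	 *   LOAD_FRAC(2560) == (512 * 100) >> 11  == 25
	 */
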
@@ -135,8 +138,9 @@ DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
+extern void calc_global_load(void);
+extern u64 cpu_nr_migrations(int cpu);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -672,6 +676,10 @@ struct user_struct {
 	struct work_struct work;
 #endif
 #endif
+
+#ifdef CONFIG_PERF_COUNTERS
+	atomic_long_t locked_vm;
+#endif
 };
 
 extern int uids_sysfs_init(void);
@@ -838,7 +846,17 @@ struct sched_group {
 	 */
 	u32 reciprocal_cpu_power;
 
-	unsigned long cpumask[];
+	/*
+	 * The CPUs this group covers.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 *
+	 * It may also be embedded into static data structures at build
+	 * time. (See 'struct static_sched_group' in kernel/sched.c)
+	 */
+	unsigned long cpumask[0];
 };
 
 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
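
[Editor's note] The new comment describes the classic over-allocation pattern for a trailing variable-length array. A hedged sketch of what such an allocation looks like (the helper name is hypothetical and not part of this patch; kernel/sched.c does the real work):

	/*
	 * Hypothetical illustration only: storage for the trailing
	 * cpumask[0] member comes from allocating extra bytes past
	 * the nominal end of the structure.
	 */
	static struct sched_group *example_alloc_group(void)
	{
		return kzalloc(sizeof(struct sched_group) + cpumask_size(),
			       GFP_KERNEL);
	}

The span[0] field added to struct sched_domain in the next hunk uses the same trick. The [0] spelling (a GCC extension), rather than the C99 flexible array member [], is what allows these structures to also be embedded inside other statically allocated structures, as the comments note.
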
@@ -924,8 +942,17 @@ struct sched_domain {
 	char *name;
 #endif
 
-	/* span of all CPUs in this domain */
-	unsigned long span[];
+	/*
+	 * Span of all CPUs in this domain.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 *
+	 * It may also be embedded into static data structures at build
+	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
+	 */
+	unsigned long span[0];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
@@ -1052,9 +1079,10 @@ struct sched_entity {
 	u64			last_wakeup;
 	u64			avg_overlap;
 
+	u64			nr_migrations;
+
 	u64			start_runtime;
 	u64			avg_wakeup;
-	u64			nr_migrations;
 
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
@@ -1209,18 +1237,11 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
-#ifdef CONFIG_X86_PTRACE_BTS
 	/*
 	 * This is the tracer handle for the ptrace BTS extension.
 	 * This field actually belongs to the ptracer task.
 	 */
-	struct bts_tracer *bts;
-	/*
-	 * The buffer to hold the BTS data.
-	 */
-	void *bts_buffer;
-	size_t bts_size;
-#endif /* CONFIG_X86_PTRACE_BTS */
+	struct bts_context *bts;
 
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
@@ -1247,7 +1268,9 @@ struct task_struct {
 					 * credentials (COW) */
 	const struct cred *cred;	/* effective (overridable) subjective task
 					 * credentials (COW) */
-	struct mutex cred_exec_mutex;	/* execve vs ptrace cred calculation mutex */
+	struct mutex cred_guard_mutex;	/* guard against foreign influences on
+					 * credential calculations
+					 * (notably, ptrace) */
 
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
@@ -1380,6 +1403,11 @@ struct task_struct {
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
+#ifdef CONFIG_PERF_COUNTERS
+	struct perf_counter_context *perf_counter_ctxp;
+	struct mutex perf_counter_mutex;
+	struct list_head perf_counter_list;
+#endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;
 	short il_next;
@@ -1428,7 +1456,9 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-#endif
+	/* bitmask of trace recursion */
+	unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1885,6 +1915,7 @@ extern void sched_dead(struct task_struct *p);
 
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
+extern void __flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *, int force_default);
 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@ -2001,8 +2032,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
@@ -2010,7 +2043,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
-#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define next_task(p) \
+	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
 
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
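
[Editor's note] A hedged usage sketch for the rewritten macro: list_entry_rcu() folds the rcu_dereference() into the list accessor, but callers still need an RCU read-side critical section around the traversal. The function below is illustrative only, not from this patch:

	/* Illustrative only: walk every process under RCU protection. */
	static void example_print_comms(void)
	{
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			printk(KERN_INFO "%s\n", p->comm);
		rcu_read_unlock();
	}
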
@@ -2049,8 +2083,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
 
 static inline struct task_struct *next_thread(const struct task_struct *p)
 {
-	return list_entry(rcu_dereference(p->thread_group.next),
-			  struct task_struct, thread_group);
+	return list_entry_rcu(p->thread_group.next,
+			      struct task_struct, thread_group);
 }
 
 static inline int thread_group_empty(struct task_struct *p)
@@ -2394,6 +2428,13 @@ static inline void inc_syscw(struct task_struct *tsk)
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
 
+/*
+ * Call the function if the target task is executing on a CPU right now:
+ */
+extern void task_oncpu_function_call(struct task_struct *p,
+				     void (*func) (void *info), void *info);
+
+
 #ifdef CONFIG_MM_OWNER
 extern void mm_update_next_owner(struct mm_struct *mm);
 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
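
[Editor's note] A hedged sketch of how the task_oncpu_function_call() declaration above is meant to be used (the names below are illustrative, not from this patch): per its comment, func is invoked only if p is executing on a CPU right now, with info passed through unchanged.

	/* Illustrative callback: runs on the CPU where 'p' is executing. */
	static void example_func(void *info)
	{
		/* e.g. prod the task's per-task state while it is on-CPU */
	}

	static void example_poke(struct task_struct *p)
	{
		task_oncpu_function_call(p, example_func, NULL);
	}
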