path: root/include/linux/sched.h
author		Ingo Molnar <mingo@elte.hu>	2008-07-15 17:12:58 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-15 17:12:58 -0400
commit		1e09481365ce248dbb4eb06dad70129bb5807037 (patch)
tree		c0cff5bef95c8b5e7486f144718ade9a06c284dc /include/linux/sched.h
parent		3e2f69fdd1b00166e7d589bce56b2d36a9e74374 (diff)
parent		b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)
Merge branch 'linus' into core/softlockup
Conflicts:

	kernel/softlockup.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	77
1 file changed, 48 insertions(+), 29 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 69760a379b6d..7b2a356b97fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -134,7 +134,6 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
@@ -246,6 +245,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
+extern int runqueue_is_locked(void);
+
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
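
The newly exported runqueue_is_locked() lets code that may run in scheduler context (the ftrace tracers are the in-tree motivation) check whether the current CPU's runqueue lock is already held before doing anything that would retake it. A minimal sketch of the pattern, where defer_wakeup() is a hypothetical helper:

	if (runqueue_is_locked())
		defer_wakeup(p);	/* hypothetical: punt to a safe context */
	else
		wake_up_process(p);
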
@@ -785,6 +786,8 @@ struct sched_domain {
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
+	u64 last_update;
+
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
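
The new last_update field timestamps per-domain state so that expensive recalculation (group share updates, governed by the sysctl_sched_shares_ratelimit knob added further down in this diff) can be rate-limited. A hedged sketch of such a check, assuming nanosecond timestamps from sched_clock_cpu() and with refresh_domain_state() as a stand-in for the real update work:

	u64 now = sched_clock_cpu(raw_smp_processor_id());

	if ((s64)(now - sd->last_update) > sysctl_sched_shares_ratelimit) {
		sd->last_update = now;
		refresh_domain_state(sd);	/* stand-in for the real work */
	}
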
@@ -824,23 +827,6 @@ extern int arch_reinit_sched_domains(void);
 
 #endif	/* CONFIG_SMP */
 
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
-	unsigned long cpu;
-
-	for_each_online_cpu(cpu) {
-		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
-			return 1;
-	}
-	return 0;
-}
-
 struct io_context;			/* See blkdev.h */
 #define NGROUPS_SMALL		32
 #define NGROUPS_PER_BLOCK	((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
@@ -922,8 +908,8 @@ struct sched_class {
 	void (*set_cpus_allowed)(struct task_struct *p,
 				 const cpumask_t *newmask);
 
-	void (*join_domain)(struct rq *rq);
-	void (*leave_domain)(struct rq *rq);
+	void (*rq_online)(struct rq *rq);
+	void (*rq_offline)(struct rq *rq);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
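
join_domain/leave_domain become rq_online/rq_offline: the hooks now track a runqueue joining or leaving the set of active CPUs rather than domain construction. A scheduling class wires them up like any other method; a sketch, with names modeled on the RT class and trimmed to the relevant members:

	static void rq_online_rt(struct rq *rq)
	{
		/* e.g. publish this runqueue's highest RT priority */
	}

	static void rq_offline_rt(struct rq *rq)
	{
		/* e.g. retract it from the root domain again */
	}

	static const struct sched_class rt_sched_class = {
		/* ... other methods ... */
		.rq_online	= rq_online_rt,
		.rq_offline	= rq_offline_rt,
	};
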
@@ -1040,6 +1026,7 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
+	unsigned int rt_priority;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
@@ -1123,7 +1110,6 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	unsigned int rt_priority;
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 	cputime_t prev_utime, prev_stime;
@@ -1142,12 +1128,12 @@ struct task_struct {
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	unsigned securebits;
 	struct user_struct *user;
+	unsigned securebits;
 #ifdef CONFIG_KEYS
+	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 	struct key *request_key_auth;	/* assumed request_key authority */
 	struct key *thread_keyring;	/* keyring private to this thread */
-	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
 #endif
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
@@ -1234,8 +1220,8 @@ struct task_struct {
 # define MAX_LOCK_DEPTH 48UL
 	u64 curr_chain_key;
 	int lockdep_depth;
-	struct held_lock held_locks[MAX_LOCK_DEPTH];
 	unsigned int lockdep_recursion;
+	struct held_lock held_locks[MAX_LOCK_DEPTH];
 #endif
 
 /* journalling filesystem info */
@@ -1263,10 +1249,6 @@ struct task_struct {
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
 	cputime_t acct_stimexpd;/* stime since last update */
 #endif
-#ifdef CONFIG_NUMA
-	struct mempolicy *mempolicy;
-	short il_next;
-#endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
@@ -1286,6 +1268,10 @@ struct task_struct {
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
+#ifdef CONFIG_NUMA
+	struct mempolicy *mempolicy;
+	short il_next;
+#endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
 
@@ -1505,6 +1491,7 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
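
PF_THREAD_BOUND marks kernel threads that kthread_bind() has pinned to a single CPU, so that affinity changes can be refused instead of silently breaking the binding. A sketch of the guard a set-affinity path can apply (illustrative, not the literal code from this merge):

	/* Refuse to widen the affinity of a CPU-bound kernel thread. */
	if ((p->flags & PF_THREAD_BOUND) &&
	    !cpus_equal(*new_mask, p->cpus_allowed))
		return -EINVAL;
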
@@ -1574,13 +1561,28 @@ static inline void sched_clock_idle_sleep_event(void)
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
-#else
+
+#ifdef CONFIG_NO_HZ
+static inline void sched_clock_tick_stop(int cpu)
+{
+}
+
+static inline void sched_clock_tick_start(int cpu)
+{
+}
+#endif
+
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 extern void sched_clock_init(void);
 extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#ifdef CONFIG_NO_HZ
+extern void sched_clock_tick_stop(int cpu);
+extern void sched_clock_tick_start(int cpu);
 #endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
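
sched_clock_tick_stop()/sched_clock_tick_start() give the NO_HZ code a way to tell the unstable-sched_clock machinery that the periodic tick is pausing on a CPU; on configurations with a stable clock they collapse into the empty inline stubs above, so call sites need no #ifdef. Illustrative wrappers only (the real callers live in the NO_HZ tick code):

	static void notify_tick_stopped(int cpu)
	{
		sched_clock_tick_stop(cpu);	/* tick is being stopped */
	}

	static void notify_tick_restarted(int cpu)
	{
		sched_clock_tick_start(cpu);	/* tick resumes */
	}
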
@@ -1623,6 +1625,7 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1656,6 +1659,8 @@ extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+				      struct sched_param *);
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
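
sched_setscheduler_nocheck() is the in-kernel twin of sched_setscheduler() that skips the capability and RLIMIT_RTPRIO checks, for kernel threads that must run at real-time priority regardless of the current credentials. Typical use, with kthread_task and the priority value purely illustrative:

	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	/* Promote a kernel-created thread to SCHED_FIFO; no permission
	 * checks, so never expose this path to user-controlled tasks. */
	sched_setscheduler_nocheck(kthread_task, SCHED_FIFO, &param);
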
@@ -2132,6 +2137,18 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
+#ifdef CONFIG_TRACING
+extern void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3);
+#else
+static inline void
+__trace_special(void *__tr, void *__data,
+		unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
+#endif
+
 extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
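
__trace_special() records three arbitrary values into the trace buffer when CONFIG_TRACING is enabled and compiles to an empty inline otherwise, so callers can stay in place unconditionally. A sketch in which every argument is illustrative:

	/* Record the pids around a context switch plus the queue depth;
	 * vanishes entirely on !CONFIG_TRACING builds. */
	__trace_special(NULL, NULL, prev->pid, next->pid, rq->nr_running);
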
@@ -2226,6 +2243,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
 #endif /* __KERNEL__ */
 
 #endif