Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 113 +++++++++++++++++++++++++++++++---------------
 1 file changed, 67 insertions(+), 46 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 781abd137673..aaf71e08222c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -315,7 +315,6 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 				  void __user *buffer,
 				  size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
-extern int softlockup_thresh;
 void lockup_detector_init(void);
 #else
 static inline void touch_softlockup_watchdog(void)
@@ -360,7 +359,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
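
The retyped owner argument pairs with the on_cpu field this patch adds to task_struct further down: a spinning mutex acquirer can watch the owner task directly instead of going through thread_info. A rough sketch of that spin loop, assuming a lock->owner field and a helper name that are illustrative rather than the kernel's exact code:

/*
 * Sketch only: spin while the owner task is still running on its CPU.
 * 'lock->owner' and the function name are assumptions for illustration.
 */
static int mutex_spin_on_owner_sketch(struct mutex *lock,
				      struct task_struct *owner)
{
	while (lock->owner == owner) {
		if (!owner->on_cpu)	/* owner was scheduled out */
			return 0;	/* stop spinning, go to sleep */
		cpu_relax();		/* lower-power busy-wait */
	}
	return 1;			/* lock changed hands; retry acquire */
}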
@@ -653,9 +652,8 @@ struct signal_struct {
  * Bits in flags field of signal_struct.
  */
 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
-#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
-#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
-#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
+#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
+#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
 /*
  * Pending notifications to parent.
  */
@@ -731,10 +729,6 @@ struct sched_info {
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
-#ifdef CONFIG_SCHEDSTATS
-	/* BKL stats */
-	unsigned int bkl_count;
-#endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
@@ -792,17 +786,39 @@ enum cpu_idle_type {
 };
 
 /*
- * sched-domains (multiprocessor balancing) declarations:
+ * Increase resolution of nice-level calculations for 64-bit architectures.
+ * The extra resolution improves shares distribution and load balancing of
+ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
+ * hierarchies, especially on larger systems. This is not a user-visible change
+ * and does not change the user-interface for setting shares/weights.
+ *
+ * We increase resolution only if we have enough bits to allow this increased
+ * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
+ * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
+ * increased costs.
  */
+#if BITS_PER_LONG > 32
+# define SCHED_LOAD_RESOLUTION	10
+# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
+# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
+#else
+# define SCHED_LOAD_RESOLUTION	0
+# define scale_load(w)		(w)
+# define scale_load_down(w)	(w)
+#endif
 
-/*
- * Increase resolution of nice-level calculations:
- */
-#define SCHED_LOAD_SHIFT	10
+#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
 
-#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE
+/*
+ * Increase resolution of cpu_power calculations
+ */
+#define SCHED_POWER_SHIFT	10
+#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
 
+/*
+ * sched-domains (multiprocessor balancing) declarations:
+ */
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
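
On a 64-bit build, then, the kernel's internal weight for a nice-0 task becomes 1024 << 10 while every user-visible interface still sees 1024. A small self-contained sketch of the arithmetic, with the macros copied from the hunk above (the nice-0 weight of 1024 is the scheduler's conventional value):

#include <stdio.h>

#define SCHED_LOAD_RESOLUTION	10
#define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
#define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)

int main(void)
{
	unsigned long nice_0 = 1024;			/* user-visible weight */
	unsigned long internal = scale_load(nice_0);	/* 1048576 internally */

	/* ten extra low-order bits survive divisions of small weights */
	printf("internal=%lu, user=%lu\n", internal, scale_load_down(internal));
	return 0;
}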
@@ -868,6 +884,7 @@ static inline int sd_power_saving_flags(void)
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -882,9 +899,6 @@ struct sched_group {
 	 * NOTE: this field is variable length. (Allocated dynamically
 	 * by attaching extra space to the end of the structure,
 	 * depending on how many CPUs the kernel has booted up with)
-	 *
-	 * It is also be embedded into static data structures at build
-	 * time. (See 'struct static_sched_group' in kernel/sched.c)
 	 */
 	unsigned long cpumask[0];
 };
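
The trailing cpumask[0] is a flexible array: the structure is sized at runtime for however many CPUs booted, in one allocation. A sketch of that allocation pattern, with a hypothetical helper name (the real allocation sites live in kernel/sched.c):

/* sketch: one allocation covers the struct plus the trailing cpumask */
static struct sched_group *alloc_sched_group_sketch(void)
{
	return kzalloc(sizeof(struct sched_group) + cpumask_size(),
		       GFP_KERNEL);
}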
@@ -894,17 +908,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 	return to_cpumask(sg->cpumask);
 }
 
-enum sched_domain_level {
-	SD_LV_NONE = 0,
-	SD_LV_SIBLING,
-	SD_LV_MC,
-	SD_LV_BOOK,
-	SD_LV_CPU,
-	SD_LV_NODE,
-	SD_LV_ALLNODES,
-	SD_LV_MAX
-};
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -913,6 +916,8 @@ struct sched_domain_attr {
 	.relax_domain_level = -1,			\
 }
 
+extern int sched_domain_level_max;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -930,7 +935,7 @@ struct sched_domain {
 	unsigned int forkexec_idx;
 	unsigned int smt_gain;
 	int flags;			/* See SD_* */
-	enum sched_domain_level level;
+	int level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -973,6 +978,10 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+	union {
+		void *private;		/* used during construction */
+		struct rcu_head rcu;	/* used during destruction */
+	};
 
 	unsigned int span_weight;
 	/*
@@ -981,9 +990,6 @@ struct sched_domain {
 	 * NOTE: this field is variable length. (Allocated dynamically
 	 * by attaching extra space to the end of the structure,
 	 * depending on how many CPUs the kernel has booted up with)
-	 *
-	 * It is also be embedded into static data structures at build
-	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
 	 */
 	unsigned long span[0];
 };
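
The private/rcu union added above works because the two members never overlap in time: 'private' is only touched while the domain tree is being built, and 'rcu' only once the domain is being torn down, so RCU-deferred freeing can reuse the same storage. A sketch of the teardown side, with illustrative function names:

static void free_sched_domain_sketch(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	kfree(sd);
}

static void destroy_sched_domain_sketch(struct sched_domain *sd)
{
	/* defer the kfree() until pre-existing RCU readers are done */
	call_rcu(&sd->rcu, free_sched_domain_sketch);
}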
@@ -1048,8 +1054,12 @@ struct sched_domain;
 #define WF_FORK		0x02		/* child wakeup after fork */
 
 #define ENQUEUE_WAKEUP		1
-#define ENQUEUE_WAKING		2
-#define ENQUEUE_HEAD		4
+#define ENQUEUE_HEAD		2
+#ifdef CONFIG_SMP
+#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
+#else
+#define ENQUEUE_WAKING		0
+#endif
 
 #define DEQUEUE_SLEEP		1
 
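
Defining ENQUEUE_WAKING as 0 on !SMP is deliberate: a test of the flag constant-folds to false and the UP build pays nothing for it. A sketch of how an enqueue path might test these flags (the function is illustrative, not a real sched_class method):

static void enqueue_flags_sketch(struct task_struct *p, int flags)
{
	if (flags & ENQUEUE_WAKEUP)
		trace_printk("%d enqueued by wakeup\n", p->pid);

	/* on UP kernels this branch disappears at compile time */
	if (flags & ENQUEUE_WAKING)
		trace_printk("%d: task_waking ran first\n", p->pid);
}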
@@ -1067,12 +1077,11 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			       int sd_flag, int flags);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
@@ -1197,13 +1206,11 @@ struct task_struct {
 	unsigned int flags;	/* per process flags, defined below */
 	unsigned int ptrace;
 
-	int lock_depth;		/* BKL lock depth */
-
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	struct task_struct *wake_entry;
+	int on_cpu;
 #endif
+	int on_rq;
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
@@ -1264,6 +1271,7 @@ struct task_struct {
 	int exit_state;
 	int exit_code, exit_signal;
 	int pdeath_signal;  /*  The signal sent when the parent dies  */
+	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
 	/* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
@@ -1274,6 +1282,7 @@ struct task_struct {
 
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
+	unsigned sched_contributes_to_load:1;
 
 	pid_t pid;
 	pid_t tgid;
@@ -1783,6 +1792,17 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+/*
+ * task->group_stop flags
+ */
+#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
+#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
+#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
+#define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
+#define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */
+
+extern void task_clear_group_stop_pending(struct task_struct *task);
+
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
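
The GROUP_STOP_* layout packs the stop signal number into the low 16 bits and keeps state flags above them, so a single word answers both "is a group stop pending?" and "which signal started it?". A sketch of reading it back (the helper name is illustrative; the real users live in kernel/signal.c):

static int group_stop_signr_sketch(struct task_struct *task)
{
	if (!(task->group_stop & GROUP_STOP_PENDING))
		return 0;				/* no stop pending */
	return task->group_stop & GROUP_STOP_SIGMASK;	/* e.g. SIGSTOP */
}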
@@ -2063,14 +2083,13 @@ extern void xtime_update(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
-extern void wake_up_new_task(struct task_struct *tsk,
-						unsigned long clone_flags);
+extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 extern void proc_caches_init(void);
@@ -2195,8 +2214,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+void scheduler_ipi(void);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void scheduler_ipi(void) { }
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {