path: root/include/linux/sched.h
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  146
1 file changed, 101 insertions, 45 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f1ea4a66957..8af3d249170e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,8 @@
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE		5
+/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
+#define SCHED_RESET_ON_FORK	0x40000000
 
 #ifdef __KERNEL__
 
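The new SCHED_RESET_ON_FORK flag is not a policy of its own: userspace ORs it into the policy argument of sched_setscheduler() so that a realtime parent's children are dropped back to SCHED_NORMAL (and nice 0) when they fork. A minimal userspace sketch, assuming a glibc that exposes the flag under _GNU_SOURCE (older <sched.h> headers may require defining it by hand):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        /* Run realtime, but have forked children revert to SCHED_NORMAL
         * instead of inheriting SCHED_FIFO and the realtime priority. */
        struct sched_param sp = { .sched_priority = 10 };

        if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp) == -1)
            perror("sched_setscheduler");
        return 0;
    }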
@@ -188,6 +190,7 @@ extern unsigned long long time_sync_thresh;
 /* in tsk->state again */
 #define TASK_DEAD		64
 #define TASK_WAKEKILL		128
+#define TASK_WAKING		256
 
 /* Convenience macros for the sake of set_task_state */
 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -796,18 +799,19 @@ enum cpu_idle_type {
 #define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE
 
 #ifdef CONFIG_SMP
-#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
-#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
-#define SD_BALANCE_EXEC		4	/* Balance on exec */
-#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
-#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
-#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
-#define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
-#define SD_SERIALIZE		1024	/* Only a single load balancing instance */
-#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
+#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
+#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
+#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
+#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
+#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
+#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
+#define SD_PREFER_LOCAL		0x0040	/* Prefer to keep tasks local to this domain */
+#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
+#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
+#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
+
+#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
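The renumbering to hexadecimal does not change behaviour: each SD_* value is still a single bit, ORed into sched_domain->flags when the topology is built and tested individually by the balancing code. For illustration only (sd stands for an already-initialized struct sched_domain *):

    /* Pull a waking task into this domain only if the topology enabled
     * load balancing at all and asked for balancing on wakeup. */
    if ((sd->flags & SD_LOAD_BALANCE) && (sd->flags & SD_BALANCE_WAKE)) {
        /* ... look for a less loaded CPU within sched_domain_span(sd) ... */
    }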
@@ -827,7 +831,7 @@ static inline int sd_balance_for_mc_power(void)
 	if (sched_smt_power_savings)
 		return SD_POWERSAVINGS_BALANCE;
 
-	return 0;
+	return SD_PREFER_SIBLING;
 }
 
 static inline int sd_balance_for_package_power(void)
@@ -835,7 +839,7 @@ static inline int sd_balance_for_package_power(void)
 	if (sched_mc_power_savings | sched_smt_power_savings)
 		return SD_POWERSAVINGS_BALANCE;
 
-	return 0;
+	return SD_PREFER_SIBLING;
 }
 
 /*
@@ -857,15 +861,9 @@ struct sched_group {
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU. This is read only (except for setup, hotplug CPU).
-	 * Note : Never change cpu_power without recompute its reciprocal
-	 */
-	unsigned int __cpu_power;
-	/*
-	 * reciprocal value of cpu_power to avoid expensive divides
-	 * (see include/linux/reciprocal_div.h)
+	 * single CPU.
 	 */
-	u32 reciprocal_cpu_power;
+	unsigned int cpu_power;
 
 	/*
 	 * The CPUs this group covers.
@@ -918,6 +916,7 @@ struct sched_domain {
 	unsigned int newidle_idx;
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
+	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	enum sched_domain_level level;
 
@@ -993,6 +992,9 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
 	return 0;
 }
 
+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
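default_scale_freq_power() and default_scale_smt_power() are the hooks an architecture can override when a group's cpu_power is computed; the smt_gain field added to struct sched_domain above feeds the SMT case. A rough sketch of what such an SMT scaling function could look like, offered purely as an illustration under those assumptions rather than the kernel's exact implementation:

    /* Split the per-core smt_gain across the hardware siblings so that
     * N hyperthreads do not count as N full CPUs worth of power. */
    static unsigned long example_scale_smt_power(struct sched_domain *sd, int cpu)
    {
        unsigned long weight = cpumask_weight(sched_domain_span(sd));

        return sd->smt_gain / weight;
    }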
@@ -1004,6 +1006,7 @@ partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 }
 #endif	/* !CONFIG_SMP */
 
+
 struct io_context;			/* See blkdev.h */
 
 
@@ -1021,6 +1024,12 @@ struct uts_namespace;
 struct rq;
 struct sched_domain;
 
+/*
+ * wake flags
+ */
+#define WF_SYNC		0x01		/* waker goes to sleep after wakup */
+#define WF_FORK		0x02		/* child wakeup after fork */
+
 struct sched_class {
 	const struct sched_class *next;
 
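The wake flags replace the old lone "sync" integer that used to be threaded through the wakeup path: callers now build a small flag word, setting WF_SYNC when the waker is about to sleep and WF_FORK when a newly forked child is being woken, and hand it down to the sched_class hooks. A hedged caller-side sketch (sync and fork_wakeup are assumed, hypothetical local variables at the call site):

    /* Illustrative only: build the flag word a wakeup-style caller
     * would pass to the scheduling class hooks further down. */
    int wake_flags = sync ? WF_SYNC : 0;     /* waker is about to sleep   */

    if (fork_wakeup)                         /* hypothetical predicate    */
        wake_flags |= WF_FORK;               /* waking a just-forked task */

    rq->curr->sched_class->check_preempt_curr(rq, p, wake_flags);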
@@ -1028,13 +1037,13 @@ struct sched_class {
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sync);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,
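select_task_rq() now also receives an sd_flag telling the class why a placement decision is being made (one of the SD_BALANCE_* bits defined earlier), while flags carries the WF_* wake flags. Plausible call sites, sketched for illustration only (p is the task being placed, cpu receives the chosen CPU, wake_flags as in the previous sketch):

    /* Illustrative only -- one call per reason for placing a task: */
    cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags); /* wakeup */
    cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);          /* fork   */
    cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);          /* exec   */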
@@ -1045,7 +1054,6 @@ struct sched_class {
 			struct rq *busiest, struct sched_domain *sd,
 			enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-	int (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
@@ -1105,11 +1113,15 @@ struct sched_entity {
 	u64			start_runtime;
 	u64			avg_wakeup;
 
+	u64			avg_running;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
 	u64			wait_count;
 	u64			wait_sum;
+	u64			iowait_count;
+	u64			iowait_sum;
 
 	u64			sleep_start;
 	u64			sleep_max;
@@ -1163,6 +1175,8 @@ struct sched_rt_entity {
 #endif
 };
 
+struct rcu_node;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -1206,10 +1220,12 @@ struct task_struct {
 	unsigned int policy;
 	cpumask_t cpus_allowed;
 
-#ifdef CONFIG_PREEMPT_RCU
+#ifdef CONFIG_TREE_PREEMPT_RCU
 	int rcu_read_lock_nesting;
-	int rcu_flipctr_idx;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+	char rcu_read_unlock_special;
+	struct rcu_node *rcu_blocked_node;
+	struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1230,11 +1246,19 @@ struct task_struct {
 	unsigned did_exec:1;
 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
 				 * execve */
+	unsigned in_iowait:1;
+
+
+	/* Revert to default priority/policy when forking */
+	unsigned sched_reset_on_fork:1;
+
 	pid_t pid;
 	pid_t tgid;
 
+#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
+#endif
 
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
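The new sched_reset_on_fork bit is where the SCHED_RESET_ON_FORK request from the top of this diff ends up being recorded; the fork path can then revert the child before it first runs. A simplified sketch of that revert, assuming p is the freshly copied child (not the exact kernel code):

    /* Simplified: drop a realtime policy and any negative nice value,
     * then clear the one-shot request in the child. */
    if (unlikely(p->sched_reset_on_fork)) {
        if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
            p->policy = SCHED_NORMAL;
        if (PRIO_TO_NICE(p->static_prio) < 0)
            p->static_prio = NICE_TO_PRIO(0);
        p->sched_reset_on_fork = 0;
    }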
@@ -1292,6 +1316,7 @@ struct task_struct {
 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
 					 * credential calculations
 					 * (notably. ptrace) */
+	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
 
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
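replacement_session_keyring is the staging slot used by the new keyctl(KEYCTL_SESSION_TO_PARENT) operation, which lets a process (typically a PAM session helper) install its session keyring into its parent. From userspace, assuming libkeyutils is available, the call is a one-liner (sketch, error handling omitted):

    #include <keyutils.h>

    /* Ask the kernel to make our session keyring the parent's as well;
     * the swap takes effect when the parent next returns to userspace. */
    long ret = keyctl(KEYCTL_SESSION_TO_PARENT);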
@@ -1724,6 +1749,28 @@ extern cputime_t task_gtime(struct task_struct *p);
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_GOT_QS  (1 << 2) /* CPU has responded to RCU core. */
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+	p->rcu_read_lock_nesting = 0;
+	p->rcu_read_unlock_special = 0;
+	p->rcu_blocked_node = NULL;
+	INIT_LIST_HEAD(&p->rcu_node_entry);
+}
+
+#else
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+}
+
+#endif
+
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
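rcu_copy_process() exists so the process-creation code can initialize the new per-task preemptible-RCU fields with one call that compiles away entirely when TREE_PREEMPT_RCU is off; the expected use is a single invocation on the child task while it is being set up, roughly:

    /* In the fork path, once the child's task_struct has been copied
     * from the parent but before it becomes visible to the scheduler: */
    rcu_copy_process(p);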
@@ -1813,11 +1860,12 @@ extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_shares_ratelimit;
 extern unsigned int sysctl_sched_shares_thresh;
-#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_child_runs_first;
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
@@ -2077,7 +2125,7 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
-extern bool is_single_threaded(struct task_struct *);
+extern bool current_is_single_threaded(void);
 
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
@@ -2281,23 +2329,31 @@ static inline int need_resched(void)
  * cond_resched_softirq() will enable bhs before scheduling.
  */
 extern int _cond_resched(void);
-#ifdef CONFIG_PREEMPT_BKL
-static inline int cond_resched(void)
-{
-	return 0;
-}
+
+#define cond_resched() ({			\
+	__might_sleep(__FILE__, __LINE__, 0);	\
+	_cond_resched();			\
+})
+
+extern int __cond_resched_lock(spinlock_t *lock);
+
+#ifdef CONFIG_PREEMPT
+#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
 #else
-static inline int cond_resched(void)
-{
-	return _cond_resched();
-}
+#define PREEMPT_LOCK_OFFSET	0
 #endif
-extern int cond_resched_lock(spinlock_t * lock);
-extern int cond_resched_softirq(void);
-static inline int cond_resched_bkl(void)
-{
-	return _cond_resched();
-}
+
+#define cond_resched_lock(lock) ({				\
+	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__cond_resched_lock(lock);				\
+})
+
+extern int __cond_resched_softirq(void);
+
+#define cond_resched_softirq() ({				\
+	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
+	__cond_resched_softirq();				\
+})
 
 /*
  * Does a critical section need to be broken due to another
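Callers do not change with this rework: cond_resched(), cond_resched_lock() and cond_resched_softirq() keep their names, but as macros each use site now passes its own __FILE__/__LINE__ to __might_sleep(), so might-sleep warnings point at the caller instead of a line inside kernel/sched.c. Typical use stays the familiar pattern below (obj, some_list and process_one() are hypothetical stand-ins):

    /* Walk a long list without monopolizing the CPU on !CONFIG_PREEMPT kernels. */
    list_for_each_entry(obj, &some_list, node) {
        process_one(obj);       /* hypothetical per-item work        */
        cond_resched();         /* may call schedule() if necessary  */
    }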