Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 129
 1 file changed, 86 insertions(+), 43 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f1ea4a66957..f3d74bd04d18 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,8 @@
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE		5
+/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
+#define SCHED_RESET_ON_FORK	0x40000000
 
 #ifdef __KERNEL__
 
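SCHED_RESET_ON_FORK is ORed into the policy argument of sched_setscheduler(); children of the caller then start with SCHED_NORMAL and default priority instead of inheriting a realtime policy. A minimal userspace sketch (illustrative, not part of this patch; the fallback define assumes a libc that does not yet export the flag):

    #include <sched.h>
    #include <stdio.h>

    #ifndef SCHED_RESET_ON_FORK
    #define SCHED_RESET_ON_FORK 0x40000000 /* as defined above */
    #endif

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 10 };

            /* FIFO for this task; forked children revert to SCHED_NORMAL. */
            if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp))
                    perror("sched_setscheduler");
            return 0;
    }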
@@ -796,18 +798,19 @@ enum cpu_idle_type {
 #define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE
 
 #ifdef CONFIG_SMP
-#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
-#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
-#define SD_BALANCE_EXEC		4	/* Balance on exec */
-#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
-#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
-#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
-#define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
-#define SD_SERIALIZE		1024	/* Only a single load balancing instance */
-#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
+#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
+#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
+#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
+#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
+#define SD_WAKE_IDLE		0x0010	/* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
+#define SD_WAKE_BALANCE		0x0040	/* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
+#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
+#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR	0x0800	/* Gain latency sacrificing cache hit */
+#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 
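The move to hexadecimal changes no value (1 becomes 0x0001, ..., 2048 becomes 0x0800); it only makes the bit positions in the sd->flags bitmask explicit, which matters now that SD_PREFER_SIBLING claims the next free bit. A standalone sketch of how such flags are combined and tested (the struct is reduced to the one field needed here and is hypothetical, not the real sched_domain):

    /* Illustrative only: domain flags form a bitmask, queried with bitwise ops. */
    struct sched_domain_flags_demo { int flags; };

    static int demo_balances_on_fork(const struct sched_domain_flags_demo *sd)
    {
            return (sd->flags & (SD_LOAD_BALANCE | SD_BALANCE_FORK))
                    == (SD_LOAD_BALANCE | SD_BALANCE_FORK);
    }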
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
@@ -827,7 +830,7 @@ static inline int sd_balance_for_mc_power(void)
 	if (sched_smt_power_savings)
 		return SD_POWERSAVINGS_BALANCE;
 
-	return 0;
+	return SD_PREFER_SIBLING;
 }
 
 static inline int sd_balance_for_package_power(void)
@@ -835,7 +838,7 @@ static inline int sd_balance_for_package_power(void)
 	if (sched_mc_power_savings | sched_smt_power_savings)
 		return SD_POWERSAVINGS_BALANCE;
 
-	return 0;
+	return SD_PREFER_SIBLING;
 }
 
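With power-saving balancing disabled, both helpers now hand back SD_PREFER_SIBLING instead of 0, so the flag propagates into a domain's flag word wherever the helpers are used. A hedged sketch of that flow (assumed shape; the real initializers live in include/linux/topology.h, and this helper is hypothetical):

    /* Hypothetical: how a multi-core domain's flag word might be assembled. */
    static int demo_mc_domain_flags(void)
    {
            return SD_LOAD_BALANCE
                    | SD_BALANCE_NEWIDLE
                    | SD_WAKE_AFFINE
                    | SD_SHARE_PKG_RESOURCES
                    | sd_balance_for_mc_power();    /* 0x0100 or 0x1000 */
    }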
 /*
@@ -857,15 +860,9 @@ struct sched_group {
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU. This is read only (except for setup, hotplug CPU).
-	 * Note : Never change cpu_power without recompute its reciprocal
+	 * single CPU.
 	 */
-	unsigned int __cpu_power;
-	/*
-	 * reciprocal value of cpu_power to avoid expensive divides
-	 * (see include/linux/reciprocal_div.h)
-	 */
-	u32 reciprocal_cpu_power;
+	unsigned int cpu_power;
 
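Dropping reciprocal_cpu_power means group load is scaled with an ordinary divide rather than reciprocal_divide(), and cpu_power can be updated without keeping a second field in sync. A hedged sketch of the arithmetic that changes (hypothetical helper; the real users are the load-balancer paths in kernel/sched.c):

    /* Hypothetical: SCHED_LOAD_SCALE-relative load of a group. */
    static unsigned long demo_scale_group_load(unsigned long load,
                                               unsigned int cpu_power)
    {
            /* Previously computed via reciprocal_divide() against the
             * removed reciprocal_cpu_power field; now a plain divide by
             * the single authoritative field. */
            return load * SCHED_LOAD_SCALE / cpu_power;
    }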
 	/*
 	 * The CPUs this group covers.
@@ -918,6 +915,7 @@ struct sched_domain {
 	unsigned int newidle_idx;
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
+	unsigned int smt_gain;
 	int flags;			/* See SD_* */
 	enum sched_domain_level level;
 
@@ -1045,7 +1043,6 @@ struct sched_class {
 			struct rq *busiest, struct sched_domain *sd,
 			enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-	int (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
@@ -1110,6 +1107,8 @@ struct sched_entity {
 	u64 wait_max;
 	u64 wait_count;
 	u64 wait_sum;
+	u64 iowait_count;
+	u64 iowait_sum;
 
 	u64 sleep_start;
 	u64 sleep_max;
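iowait_count and iowait_sum extend the per-entity schedstats: when a sleeper that was waiting on I/O is woken, its sleep time can be folded into these counters. A hedged sketch of that accounting (assumed shape, modeled on the sleeper-statistics path in kernel/sched_fair.c; it relies on the in_iowait bit added further down in this patch):

    /* Hypothetical helper: credit a finished sleep of 'delta' ns as iowait. */
    static void demo_account_iowait(struct sched_entity *se,
                                    struct task_struct *tsk, u64 delta)
    {
            if (tsk->in_iowait) {
                    se->iowait_sum += delta;
                    se->iowait_count++;
            }
    }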
@@ -1163,6 +1162,8 @@ struct sched_rt_entity {
 #endif
 };
 
+struct rcu_node;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -1206,10 +1207,12 @@ struct task_struct {
 	unsigned int policy;
 	cpumask_t cpus_allowed;
 
-#ifdef CONFIG_PREEMPT_RCU
+#ifdef CONFIG_TREE_PREEMPT_RCU
 	int rcu_read_lock_nesting;
-	int rcu_flipctr_idx;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+	char rcu_read_unlock_special;
+	struct rcu_node *rcu_blocked_node;
+	struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1230,11 +1233,19 @@ struct task_struct {
 	unsigned did_exec:1;
 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
 				 * execve */
+	unsigned in_iowait:1;
+
+
+	/* Revert to default priority/policy when forking */
+	unsigned sched_reset_on_fork:1;
+
 	pid_t pid;
 	pid_t tgid;
 
+#ifdef CONFIG_CC_STACKPROTECTOR
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
+#endif
 
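The new in_iowait bit marks a task as blocked on I/O for the duration of a sleep, which is what feeds the iowait statistics above. A hedged sketch of the intended use (assumed shape, modeled on io_schedule() in kernel/sched.c; the runqueue bookkeeping around it is elided):

    void demo_io_schedule(void)
    {
            delayacct_blkio_start();
            current->in_iowait = 1;    /* the sleep below is attributable to I/O */
            schedule();
            current->in_iowait = 0;
            delayacct_blkio_end();
    }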
 	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
@@ -1292,6 +1303,7 @@ struct task_struct {
 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
 					 * credential calculations
 					 * (notably. ptrace) */
+	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
 
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
@@ -1724,6 +1736,28 @@ extern cputime_t task_gtime(struct task_struct *p);
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_GOT_QS  (1 << 2) /* CPU has responded to RCU core. */
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+	p->rcu_read_lock_nesting = 0;
+	p->rcu_read_unlock_special = 0;
+	p->rcu_blocked_node = NULL;
+	INIT_LIST_HEAD(&p->rcu_node_entry);
+}
+
+#else
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+}
+
+#endif
+
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
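rcu_copy_process() gives a child task a clean preemptible-RCU state rather than letting it inherit the parent's read-side nesting and blocked-node linkage; the empty !CONFIG_TREE_PREEMPT_RCU variant keeps the call site unconditional. A hedged sketch of the intended call site (assumed shape, modeled on copy_process() in kernel/fork.c; the helper name is hypothetical):

    /* Hypothetical excerpt: early in the child's setup, before it can run. */
    static void demo_fork_init(struct task_struct *p)
    {
            /* Child starts outside any RCU read-side critical section:
             * nesting 0, no special flags, no blocked node, empty entry. */
            rcu_copy_process(p);
    }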
@@ -1813,11 +1847,12 @@ extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_shares_ratelimit;
 extern unsigned int sysctl_sched_shares_thresh;
-#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_child_runs_first;
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
 
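Moving sysctl_sched_child_runs_first above the #ifdef makes the declaration, and the tunable it backs, available on all configurations rather than only CONFIG_SCHED_DEBUG kernels; sysctl_sched_time_avg is new. A small userspace sketch reading the tunable (illustrative only):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/sched_child_runs_first", "r");
            int v;

            if (f && fscanf(f, "%d", &v) == 1)
                    printf("sched_child_runs_first = %d\n", v);
            if (f)
                    fclose(f);
            return 0;
    }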
 int sched_nr_latency_handler(struct ctl_table *table, int write,
@@ -2077,7 +2112,7 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
 
-extern bool is_single_threaded(struct task_struct *);
+extern bool current_is_single_threaded(void);
 
 /*
  * Careful: do_each_thread/while_each_thread is a double loop so
@@ -2281,23 +2316,31 @@ static inline int need_resched(void)
  * cond_resched_softirq() will enable bhs before scheduling.
  */
 extern int _cond_resched(void);
-#ifdef CONFIG_PREEMPT_BKL
-static inline int cond_resched(void)
-{
-	return 0;
-}
+
+#define cond_resched() ({			\
+	__might_sleep(__FILE__, __LINE__, 0);	\
+	_cond_resched();			\
+})
+
+extern int __cond_resched_lock(spinlock_t *lock);
+
+#ifdef CONFIG_PREEMPT
+#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
 #else
-static inline int cond_resched(void)
-{
-	return _cond_resched();
-}
+#define PREEMPT_LOCK_OFFSET	0
 #endif
-extern int cond_resched_lock(spinlock_t * lock);
-extern int cond_resched_softirq(void);
-static inline int cond_resched_bkl(void)
-{
-	return _cond_resched();
-}
+
+#define cond_resched_lock(lock) ({				\
+	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__cond_resched_lock(lock);				\
+})
+
+extern int __cond_resched_softirq(void);
+
+#define cond_resched_softirq() ({				\
+	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
+	__cond_resched_softirq();				\
+})
 
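The rewrite turns cond_resched() and its variants from inline functions into macros so that __might_sleep() reports the caller's own __FILE__/__LINE__, and each variant passes the preempt-count offset it is expected to run at (PREEMPT_LOCK_OFFSET for a held spinlock, SOFTIRQ_OFFSET with bhs disabled). A hedged sketch of typical use (demo_process_one, demo_process_many, and struct demo_item are hypothetical, not from this patch):

    struct demo_item { int payload; };

    static void demo_process_one(struct demo_item *it) { it->payload++; }

    static void demo_process_many(struct demo_item *items, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    demo_process_one(&items[i]);
                    cond_resched();    /* may schedule; also runs the might-sleep check */
            }
    }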
 /*
  * Does a critical section need to be broken due to another