Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 69 insertions(+), 16 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e217d188a102..024d72b47a0c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -61,7 +61,6 @@ struct sched_param {
 #include <linux/mm_types.h>
 
 #include <asm/system.h>
-#include <asm/semaphore.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cputime.h>
@@ -69,7 +68,6 @@ struct sched_param {
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/securebits.h>
 #include <linux/fs_struct.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
@@ -242,6 +240,7 @@ struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
+extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
@@ -703,6 +702,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
 #define BALANCE_FOR_MC_POWER	\
 	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
@@ -732,12 +732,31 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+enum sched_domain_level {
+	SD_LV_NONE = 0,
+	SD_LV_SIBLING,
+	SD_LV_MC,
+	SD_LV_CPU,
+	SD_LV_NODE,
+	SD_LV_ALLNODES,
+	SD_LV_MAX
+};
+
+struct sched_domain_attr {
+	int relax_domain_level;
+};
+
+#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
+	.relax_domain_level = -1,			\
+}
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
+	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
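
Note (illustrative, not part of the patch): the new sched_domain_attr is
meant to be default-initialized through the SD_ATTR_INIT compound literal
above, where -1 presumably means "no relax-level preference". A caller
wanting a specific level might then write something like:

	/* Sketch only: assumes relax_domain_level takes sched_domain_level
	 * values; -1 (the SD_ATTR_INIT default) keeps the default policy. */
	struct sched_domain_attr attr = SD_ATTR_INIT;

	attr.relax_domain_level = SD_LV_MC;	/* hypothetical request */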
@@ -749,6 +768,7 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
+	enum sched_domain_level level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -788,7 +808,9 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+				    struct sched_domain_attr *dattr_new);
+extern int arch_reinit_sched_domains(void);
 
 #endif	/* CONFIG_SMP */
 
@@ -887,7 +909,8 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
-	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+	void (*set_cpus_allowed)(struct task_struct *p,
+				 const cpumask_t *newmask);
 
 	void (*join_domain)(struct rq *rq);
 	void (*leave_domain)(struct rq *rq);
@@ -898,6 +921,10 @@ struct sched_class {
 			     int running);
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			     int oldprio, int running);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	void (*moved_group) (struct task_struct *p);
+#endif
 };
 
 struct load_weight {
@@ -917,6 +944,7 @@ struct load_weight {
 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
+	struct list_head	group_node;
 	unsigned int		on_rq;
 
 	u64			exec_start;
@@ -924,6 +952,9 @@ struct sched_entity {
 	u64			vruntime;
 	u64			prev_sum_exec_runtime;
 
+	u64			last_wakeup;
+	u64			avg_overlap;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -973,6 +1004,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
 
+	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
@@ -1100,7 +1132,7 @@ struct task_struct {
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
-	unsigned keep_capabilities:1;
+	unsigned securebits;
 	struct user_struct *user;
 #ifdef CONFIG_KEYS
 	struct key *request_key_auth;	/* assumed request_key authority */
@@ -1189,7 +1221,7 @@ struct task_struct {
 	int softirq_context;
 #endif
 #ifdef CONFIG_LOCKDEP
-# define MAX_LOCK_DEPTH 30UL
+# define MAX_LOCK_DEPTH 48UL
 	u64 curr_chain_key;
 	int lockdep_depth;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
@@ -1493,15 +1525,21 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+				const cpumask_t *new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+				       const cpumask_t *new_mask)
 {
-	if (!cpu_isset(0, new_mask))
+	if (!cpu_isset(0, *new_mask))
 		return -EINVAL;
 	return 0;
 }
 #endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+	return set_cpus_allowed_ptr(p, &new_mask);
+}
 
 extern unsigned long long sched_clock(void);
 
@@ -1532,19 +1570,20 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+extern void wake_up_idle_cpu(int cpu);
+#else
+static inline void wake_up_idle_cpu(int cpu) { }
+#endif
+
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
-extern unsigned int sysctl_sched_min_bal_int_shares;
-extern unsigned int sysctl_sched_max_bal_int_shares;
-#endif
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
@@ -1553,6 +1592,10 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
+int sched_rt_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
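
Note (illustrative, not part of the patch): sched_rt_handler() has the shape
of a proc sysctl handler, so it is presumably attached to the RT
period/runtime sysctls declared above. An assumed wiring, in the style of a
kernel/sysctl.c table entry (every field value here is an illustration):

	/* Sketch only: hook the handler up to a sysctl knob. */
	static struct ctl_table sched_rt_table[] = {
		{
			.procname	= "sched_rt_period_us",
			.data		= &sysctl_sched_rt_period,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= &sched_rt_handler,
		},
		{ }
	};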
@@ -1754,6 +1797,8 @@ extern void mmput(struct mm_struct *);
 extern struct mm_struct *get_task_mm(struct task_struct *task);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Allocate a new mm structure and copy contents from tsk->mm */
+extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
 extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
 extern void flush_thread(void);
@@ -1882,6 +1927,8 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
 
 #endif
 
+extern void thread_info_cache_init(void);
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
@@ -2020,7 +2067,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
 extern int sched_mc_power_savings, sched_smt_power_savings;
@@ -2030,8 +2077,11 @@ extern void normalize_rt_tasks(void);
 #ifdef CONFIG_GROUP_SCHED
 
 extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+#endif
 
-extern struct task_group *sched_create_group(void);
+extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2042,6 +2092,9 @@ extern unsigned long sched_group_shares(struct task_group *tg);
 extern int sched_group_set_rt_runtime(struct task_group *tg,
 				      long rt_runtime_us);
 extern long sched_group_rt_runtime(struct task_group *tg);
+extern int sched_group_set_rt_period(struct task_group *tg,
+				     long rt_period_us);
+extern long sched_group_rt_period(struct task_group *tg);
 #endif
 #endif
 
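
Note (illustrative, not part of the patch): sched_create_group() now takes
the parent group, making task groups hierarchical. A hedged usage sketch,
creating a child of init_task_group (declared above) and applying the new
RT period/runtime setters; the ERR_PTR return convention and the
microsecond units are assumptions, not taken from this header:

	/* Sketch only: child group with a 1 s RT period and 100 ms of
	 * RT runtime (values assumed to be in microseconds). */
	struct task_group *tg = sched_create_group(&init_task_group);

	if (!IS_ERR(tg)) {
		sched_group_set_rt_period(tg, 1000000);
		sched_group_set_rt_runtime(tg, 100000);
	}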