 include/linux/sched.h | 57 ++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 48 insertions(+), 9 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6a1e7afb099b..311380e5fe89 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -61,7 +61,6 @@ struct sched_param {
 #include <linux/mm_types.h>
 
 #include <asm/system.h>
-#include <asm/semaphore.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cputime.h>
@@ -704,6 +703,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
 #define BALANCE_FOR_MC_POWER	\
 	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
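
The new SD_WAKE_IDLE_FAR bit extends the SD_* flag space and, like the other flags, is tested against sched_domain->flags. A minimal sketch of such a test follows; the helper name is hypothetical, not part of this patch:

	/* Hypothetical helper: nonzero if this domain is willing to trade
	 * cache affinity for lower wakeup latency. */
	static inline int domain_wakes_idle_far(struct sched_domain *sd)
	{
		return sd->flags & SD_WAKE_IDLE_FAR;
	}
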
@@ -733,12 +733,31 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+enum sched_domain_level {
+	SD_LV_NONE = 0,
+	SD_LV_SIBLING,
+	SD_LV_MC,
+	SD_LV_CPU,
+	SD_LV_NODE,
+	SD_LV_ALLNODES,
+	SD_LV_MAX
+};
+
+struct sched_domain_attr {
+	int relax_domain_level;
+};
+
+#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
+	.relax_domain_level = -1,			\
+}
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
+	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -750,6 +769,7 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
+	enum sched_domain_level level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -789,7 +809,8 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
 #endif	/* CONFIG_SMP */
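
Callers of partition_sched_domains() now pass a parallel array of sched_domain_attr, one entry per domain mask; SD_ATTR_INIT yields the default relax_domain_level of -1 ("no request"). A minimal sketch of the updated calling convention, assuming a single partition spanning the online CPUs (the function name is hypothetical, and hotplug locking around the call is omitted for brevity):

	static cpumask_t doms[1];
	static struct sched_domain_attr dattr[1];

	/* Rebuild one domain partition over all online CPUs with the
	 * default relax_domain_level. */
	static void rebuild_one_partition(void)
	{
		doms[0] = cpu_online_map;
		dattr[0] = SD_ATTR_INIT;
		partition_sched_domains(1, doms, dattr);
	}
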
@@ -889,7 +910,8 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
-	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+	void (*set_cpus_allowed)(struct task_struct *p,
+				 const cpumask_t *newmask);
 
 	void (*join_domain)(struct rq *rq);
 	void (*leave_domain)(struct rq *rq);
@@ -923,6 +945,7 @@ struct load_weight {
 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
+	struct list_head	group_node;
 	unsigned int		on_rq;
 
 	u64			exec_start;
@@ -982,6 +1005,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
 
+	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
@@ -1502,15 +1526,21 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+				const cpumask_t *new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+				       const cpumask_t *new_mask)
 {
-	if (!cpu_isset(0, new_mask))
+	if (!cpu_isset(0, *new_mask))
 		return -EINVAL;
 	return 0;
 }
 #endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+	return set_cpus_allowed_ptr(p, &new_mask);
+}
 
 extern unsigned long long sched_clock(void);
 
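
With this hunk, set_cpus_allowed_ptr() becomes the primary interface (taking a const cpumask_t pointer so that large NR_CPUS masks are not copied by value), while set_cpus_allowed() survives as a by-value wrapper around it. A sketch of a caller using the new interface; the helper name is hypothetical:

	/* Hypothetical caller: restrict a task to CPU 2, passing the
	 * stack-local mask by address. */
	static int pin_task_to_cpu2(struct task_struct *p)
	{
		cpumask_t mask = CPU_MASK_NONE;

		cpu_set(2, mask);
		return set_cpus_allowed_ptr(p, &mask);
	}
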
@@ -1551,7 +1581,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
@@ -1564,6 +1593,10 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
+int sched_rt_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
@@ -2031,7 +2064,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
 extern int sched_mc_power_savings, sched_smt_power_savings;
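
sched_setaffinity() likewise switches from a by-value cpumask_t to a const pointer, so in-kernel callers now take the address of their mask, matching sched_getaffinity(). A sketch of an updated caller (helper name hypothetical):

	/* Hypothetical caller: bind the task identified by pid to CPU 0
	 * under the new const-pointer signature. */
	static long affine_pid_to_cpu0(pid_t pid)
	{
		cpumask_t mask = CPU_MASK_NONE;

		cpu_set(0, mask);
		return sched_setaffinity(pid, &mask);
	}
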
@@ -2041,8 +2074,11 @@ extern void normalize_rt_tasks(void);
 #ifdef CONFIG_GROUP_SCHED
 
 extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+#endif
 
-extern struct task_group *sched_create_group(void);
+extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2053,6 +2089,9 @@ extern unsigned long sched_group_shares(struct task_group *tg);
 extern int sched_group_set_rt_runtime(struct task_group *tg,
 				      long rt_runtime_us);
 extern long sched_group_rt_runtime(struct task_group *tg);
+extern int sched_group_set_rt_period(struct task_group *tg,
+				     long rt_period_us);
+extern long sched_group_rt_period(struct task_group *tg);
 #endif
 #endif
 
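The new sched_group_set_rt_period()/sched_group_rt_period() pair complements the existing runtime accessors, so a group's realtime bandwidth is expressed as runtime per period, both in microseconds. A sketch of granting a 95% budget over a one-second period (helper name hypothetical, error handling kept minimal):

	/* Hypothetical helper: allow tg's realtime tasks at most 950 ms
	 * of CPU time in every 1 s period. */
	static int set_group_rt_bandwidth(struct task_group *tg)
	{
		int err;

		err = sched_group_set_rt_period(tg, 1000000);
		if (err)
			return err;
		return sched_group_set_rt_runtime(tg, 950000);
	}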