Diffstat (limited to 'include/linux/sched.h'):

 include/linux/sched.h | 56 ++++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 48 insertions(+), 8 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6a1e7afb099b..be6914014c70 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -704,6 +704,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	512	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
 #define BALANCE_FOR_MC_POWER	\
 	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
@@ -733,12 +734,31 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+enum sched_domain_level {
+	SD_LV_NONE = 0,
+	SD_LV_SIBLING,
+	SD_LV_MC,
+	SD_LV_CPU,
+	SD_LV_NODE,
+	SD_LV_ALLNODES,
+	SD_LV_MAX
+};
+
+struct sched_domain_attr {
+	int relax_domain_level;
+};
+
+#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
+	.relax_domain_level = -1,			\
+}
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	cpumask_t span;			/* span of all CPUs in this domain */
+	int first_cpu;			/* cache of the first cpu in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -750,6 +770,7 @@ struct sched_domain {
 	unsigned int wake_idx;
 	unsigned int forkexec_idx;
 	int flags;			/* See SD_* */
+	enum sched_domain_level level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -789,7 +810,8 @@ struct sched_domain {
 #endif
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
 #endif	/* CONFIG_SMP */
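
An aside on the extended interface: together with the new struct sched_domain_attr above, partition_sched_domains() now takes a per-partition attribute array. A minimal hypothetical caller (names are illustrative; error handling and hotplug locking omitted) might look like:

/* Sketch: rebuild the scheduler domains as a single partition spanning
 * all online CPUs, with default attributes via SD_ATTR_INIT (whose
 * relax_domain_level of -1 means "use the system default"). */
static void example_rebuild_domains(void)
{
	static cpumask_t doms[1];
	static struct sched_domain_attr dattr[1];

	doms[0] = cpu_online_map;	/* span every online CPU */
	dattr[0] = SD_ATTR_INIT;	/* no relaxation override */

	partition_sched_domains(1, doms, dattr);
}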
@@ -889,7 +911,8 @@ struct sched_class {
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
-	void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+	void (*set_cpus_allowed)(struct task_struct *p,
+				 const cpumask_t *newmask);
 
 	void (*join_domain)(struct rq *rq);
 	void (*leave_domain)(struct rq *rq);
@@ -923,6 +946,7 @@ struct load_weight {
 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
+	struct list_head	group_node;
 	unsigned int		on_rq;
 
 	u64			exec_start;
@@ -982,6 +1006,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
 
+	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity	*parent;
 	/* rq on which this entity is (to be) queued: */
@@ -1502,15 +1527,21 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+				const cpumask_t *new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+				       const cpumask_t *new_mask)
 {
-	if (!cpu_isset(0, new_mask))
+	if (!cpu_isset(0, *new_mask))
 		return -EINVAL;
 	return 0;
 }
 #endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+	return set_cpus_allowed_ptr(p, &new_mask);
+}
 
 extern unsigned long long sched_clock(void);
 
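
With large NR_CPUS configurations, passing a cpumask_t by value copies the whole bitmap onto the stack, which is why the pointer-based set_cpus_allowed_ptr() replaces the by-value call and the old name survives only as the inline wrapper above. A hedged sketch of a converted caller (function name illustrative):

/* Pin a task to one CPU through the new pointer-based API; the mask
 * still lives on this frame, but no second copy is made in the callee. */
static int example_pin_task(struct task_struct *p, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	return set_cpus_allowed_ptr(p, &mask);
}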
@@ -1551,7 +1582,6 @@ static inline void wake_up_idle_cpu(int cpu) { }
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
@@ -1564,6 +1594,10 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
+int sched_rt_handler(struct ctl_table *table, int write,
+		struct file *filp, void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
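
sched_rt_handler()'s signature matches the proc_handler callback of this era's ctl_table, so writes to the RT sysctls can be validated in one place. A hypothetical table fragment (these are not the actual kernel/sysctl.c rows; field values are illustrative):

/* Route updates of the global RT bandwidth knobs through
 * sched_rt_handler(), which can reject inconsistent settings. */
static struct ctl_table example_rt_table[] = {
	{
		.procname	= "sched_rt_period_us",
		.data		= &sysctl_sched_rt_period,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &sched_rt_handler,
	},
	{
		.procname	= "sched_rt_runtime_us",
		.data		= &sysctl_sched_rt_runtime,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &sched_rt_handler,
	},
	{ }
};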
@@ -2031,7 +2065,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
 extern int sched_mc_power_savings, sched_smt_power_savings;
@@ -2041,8 +2075,11 @@ extern void normalize_rt_tasks(void);
 #ifdef CONFIG_GROUP_SCHED
 
 extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+#endif
 
-extern struct task_group *sched_create_group(void);
+extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2053,6 +2090,9 @@ extern unsigned long sched_group_shares(struct task_group *tg);
 extern int sched_group_set_rt_runtime(struct task_group *tg,
 				long rt_runtime_us);
 extern long sched_group_rt_runtime(struct task_group *tg);
+extern int sched_group_set_rt_period(struct task_group *tg,
+				long rt_period_us);
+extern long sched_group_rt_period(struct task_group *tg);
 #endif
 #endif
 
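
The new sched_group_set_rt_period()/sched_group_rt_period() pair complements the existing runtime accessors, making a group's RT bandwidth the (runtime, period) tuple. A hedged usage sketch (values illustrative; real callers such as the cgroup cpu controller add their own checks):

/* Cap a task group's realtime tasks at 50% CPU: 500ms of runtime
 * out of every 1s period, both expressed in microseconds. */
static int example_set_group_rt_bandwidth(struct task_group *tg)
{
	int ret;

	ret = sched_group_set_rt_period(tg, 1000000);
	if (ret)
		return ret;

	return sched_group_set_rt_runtime(tg, 500000);
}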