Diffstat (limited to 'include/linux/sched.h')

 include/linux/sched.h | 92 ++++++++++++++++++++++++++++--------
 1 file changed, 72 insertions(+), 20 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b475d4db8053..38a3f4b15394 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -250,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
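
The hunk above converts nohz_cpu_mask from a fixed-size cpumask_t to a cpumask_var_t, which becomes a pointer when CONFIG_CPUMASK_OFFSTACK=y and therefore has to be allocated before use. A minimal sketch of the allocation pattern a caller would follow (hypothetical function, not part of this patch):

/* Hypothetical illustration only: snapshot nohz_cpu_mask via the
 * cpumask_var_t API.  With CONFIG_CPUMASK_OFFSTACK=y the mask lives
 * off-stack, so allocation can fail and must be checked. */
static int example_snapshot_nohz_mask(void)
{
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, nohz_cpu_mask);	/* copy by pointer, not by value */
	/* ... inspect tmp ... */
	free_cpumask_var(tmp);
	return 0;
}
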
@@ -757,20 +757,51 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		1024	/* Only a single load balancing instance */
 #define SD_WAKE_IDLE_FAR	2048	/* Gain latency sacrificing cache hit */
 
-#define BALANCE_FOR_MC_POWER	\
-	(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
+	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
+					 * first for long running threads
+					 */
+	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
+					 * cpu package for power savings
+					 */
+	MAX_POWERSAVINGS_BALANCE_LEVELS
+};
 
-#define BALANCE_FOR_PKG_POWER	\
-	((sched_mc_power_savings || sched_smt_power_savings) ? \
-	 SD_POWERSAVINGS_BALANCE : 0)
+extern int sched_mc_power_savings, sched_smt_power_savings;
 
-#define test_sd_parent(sd, flag)	((sd->parent &&		\
-					 (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_mc_power(void)
+{
+	if (sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
 
+	return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_POWERSAVINGS_BALANCE;
+
+	return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+	if (sched_mc_power_savings | sched_smt_power_savings)
+		return SD_BALANCE_NEWIDLE;
+
+	return 0;
+}
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
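
The BALANCE_FOR_{MC,PKG}_POWER macros become typed inline helpers, and the extern for the two sysfs knobs moves up next to them. For illustration, a sketch of how a topology-level sched_domain initializer might fold the helpers into a domain's flags (hypothetical initializer; the real consumers are the SD_*_INIT templates in the arch topology headers):

/* Hypothetical illustration only: composing sched_domain flags from the
 * new helpers.  Each helper returns either its flag or 0, so this
 * degrades to the default flags when both power-savings knobs are 0. */
static inline int example_cpu_domain_flags(void)
{
	return SD_LOAD_BALANCE
		| SD_BALANCE_EXEC
		| SD_WAKE_AFFINE
		| sd_balance_for_package_power()	/* SD_POWERSAVINGS_BALANCE or 0 */
		| sd_power_saving_flags();		/* SD_BALANCE_NEWIDLE or 0 */
}
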
@@ -783,8 +814,15 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
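
The group mask becomes a flexible array member at the end of struct sched_group, reached only through sched_group_cpus(). A sketch of a caller iterating a group's CPUs through the accessor (hypothetical function, not part of this patch):

/* Hypothetical illustration only: walk a group's CPUs via the new
 * accessor rather than the removed cpumask_t member. */
static unsigned int example_group_weight(struct sched_group *sg)
{
	int cpu;
	unsigned int count = 0;

	for_each_cpu(cpu, sched_group_cpus(sg))
		count++;

	return count;	/* same result as cpumask_weight(sched_group_cpus(sg)) */
}
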
@@ -808,7 +846,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -863,18 +900,35 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
 
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+	if (sd->parent && (sd->parent->flags & flag))
+		return 1;
+
+	return 0;
+}
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			struct sched_domain_attr *dattr_new)
 {
 }
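
struct sched_domain gets the same treatment: the span moves to a trailing flexible array behind sched_domain_span(), and test_sd_parent() turns from a macro into a typed inline. A sketch combining the two (hypothetical function, not part of this patch):

/* Hypothetical illustration only: check that a cpu belongs to a domain
 * and that its parent domain does power-savings balancing. */
static int example_wants_powersave_balance(struct sched_domain *sd, int cpu)
{
	if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
		return 0;

	return test_sd_parent(sd, SD_POWERSAVINGS_BALANCE);
}
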
@@ -925,7 +979,7 @@ struct sched_class {
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const cpumask_t *newmask);
+				 const struct cpumask *newmask);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1578,12 +1632,12 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
-				const cpumask_t *new_mask);
+				const struct cpumask *new_mask);
 #else
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
-				       const cpumask_t *new_mask)
+				       const struct cpumask *new_mask)
 {
-	if (!cpu_isset(0, *new_mask))
+	if (!cpumask_test_cpu(0, new_mask))
 		return -EINVAL;
 	return 0;
 }
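
cpu_isset() took a cpumask_t by value, so the !SMP stub had to dereference the pointer; cpumask_test_cpu() takes the pointer directly. A sketch of a caller under the pointer-based API (hypothetical function; cpumask_of() is assumed available from the same cpumask rework):

/* Hypothetical illustration only: pin a task to one cpu.  cpumask_of()
 * yields a const struct cpumask *, which now matches the prototype. */
static int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}
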
@@ -2194,10 +2248,8 @@ __trace_special(void *__tr, void *__data,
 }
 #endif
 
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
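
Finally, the kernel-side affinity helpers take struct cpumask pointers, and the sched_mc/smt_power_savings extern is dropped here, having moved up next to its new inline users. A sketch of a kernel-internal caller of the converted sched_getaffinity() (hypothetical function, not part of this patch):

/* Hypothetical illustration only: fetch a task's affinity through the
 * converted prototype, using the cpumask_var_t alloc/free pattern. */
static long example_report_affinity(pid_t pid)
{
	cpumask_var_t mask;
	long ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (!ret)
		printk(KERN_INFO "pid %d may run on %u cpu(s)\n",
		       pid, cpumask_weight(mask));

	free_cpumask_var(mask);
	return ret;
}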