diff options
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 93 |
1 files changed, 72 insertions, 21 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 8395e715809d..38a3f4b15394 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -250,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle); | |||
250 | extern int runqueue_is_locked(void); | 250 | extern int runqueue_is_locked(void); |
251 | extern void task_rq_unlock_wait(struct task_struct *p); | 251 | extern void task_rq_unlock_wait(struct task_struct *p); |
252 | 252 | ||
253 | extern cpumask_t nohz_cpu_mask; | 253 | extern cpumask_var_t nohz_cpu_mask; |
254 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) | 254 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) |
255 | extern int select_nohz_load_balancer(int cpu); | 255 | extern int select_nohz_load_balancer(int cpu); |
256 | #else | 256 | #else |
@@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout); | |||
284 | 284 | ||
285 | extern void cpu_init (void); | 285 | extern void cpu_init (void); |
286 | extern void trap_init(void); | 286 | extern void trap_init(void); |
287 | extern void account_process_tick(struct task_struct *task, int user); | ||
288 | extern void update_process_times(int user); | 287 | extern void update_process_times(int user); |
289 | extern void scheduler_tick(void); | 288 | extern void scheduler_tick(void); |
290 | 289 | ||
@@ -758,20 +757,51 @@ enum cpu_idle_type { | |||
758 | #define SD_SERIALIZE 1024 /* Only a single load balancing instance */ | 757 | #define SD_SERIALIZE 1024 /* Only a single load balancing instance */ |
759 | #define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ | 758 | #define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ |
760 | 759 | ||
761 | #define BALANCE_FOR_MC_POWER \ | 760 | enum powersavings_balance_level { |
762 | (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0) | 761 | POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ |
762 | POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package | ||
763 | * first for long running threads | ||
764 | */ | ||
765 | POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle | ||
766 | * cpu package for power savings | ||
767 | */ | ||
768 | MAX_POWERSAVINGS_BALANCE_LEVELS | ||
769 | }; | ||
763 | 770 | ||
764 | #define BALANCE_FOR_PKG_POWER \ | 771 | extern int sched_mc_power_savings, sched_smt_power_savings; |
765 | ((sched_mc_power_savings || sched_smt_power_savings) ? \ | ||
766 | SD_POWERSAVINGS_BALANCE : 0) | ||
767 | 772 | ||
768 | #define test_sd_parent(sd, flag) ((sd->parent && \ | 773 | static inline int sd_balance_for_mc_power(void) |
769 | (sd->parent->flags & flag)) ? 1 : 0) | 774 | { |
775 | if (sched_smt_power_savings) | ||
776 | return SD_POWERSAVINGS_BALANCE; | ||
770 | 777 | ||
778 | return 0; | ||
779 | } | ||
780 | |||
781 | static inline int sd_balance_for_package_power(void) | ||
782 | { | ||
783 | if (sched_mc_power_savings | sched_smt_power_savings) | ||
784 | return SD_POWERSAVINGS_BALANCE; | ||
785 | |||
786 | return 0; | ||
787 | } | ||
788 | |||
789 | /* | ||
790 | * Optimise SD flags for power savings: | ||
791 | * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. | ||
792 | * Keep default SD flags if sched_{smt,mc}_power_saving=0 | ||
793 | */ | ||
794 | |||
795 | static inline int sd_power_saving_flags(void) | ||
796 | { | ||
797 | if (sched_mc_power_savings | sched_smt_power_savings) | ||
798 | return SD_BALANCE_NEWIDLE; | ||
799 | |||
800 | return 0; | ||
801 | } | ||
771 | 802 | ||
772 | struct sched_group { | 803 | struct sched_group { |
773 | struct sched_group *next; /* Must be a circular list */ | 804 | struct sched_group *next; /* Must be a circular list */ |
774 | cpumask_t cpumask; | ||
775 | 805 | ||
776 | /* | 806 | /* |
777 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a | 807 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a |
@@ -784,8 +814,15 @@ struct sched_group { | |||
784 | * (see include/linux/reciprocal_div.h) | 814 | * (see include/linux/reciprocal_div.h) |
785 | */ | 815 | */ |
786 | u32 reciprocal_cpu_power; | 816 | u32 reciprocal_cpu_power; |
817 | |||
818 | unsigned long cpumask[]; | ||
787 | }; | 819 | }; |
788 | 820 | ||
821 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | ||
822 | { | ||
823 | return to_cpumask(sg->cpumask); | ||
824 | } | ||
825 | |||
789 | enum sched_domain_level { | 826 | enum sched_domain_level { |
790 | SD_LV_NONE = 0, | 827 | SD_LV_NONE = 0, |
791 | SD_LV_SIBLING, | 828 | SD_LV_SIBLING, |
@@ -809,7 +846,6 @@ struct sched_domain { | |||
809 | struct sched_domain *parent; /* top domain must be null terminated */ | 846 | struct sched_domain *parent; /* top domain must be null terminated */ |
810 | struct sched_domain *child; /* bottom domain must be null terminated */ | 847 | struct sched_domain *child; /* bottom domain must be null terminated */ |
811 | struct sched_group *groups; /* the balancing groups of the domain */ | 848 | struct sched_group *groups; /* the balancing groups of the domain */ |
812 | cpumask_t span; /* span of all CPUs in this domain */ | ||
813 | unsigned long min_interval; /* Minimum balance interval ms */ | 849 | unsigned long min_interval; /* Minimum balance interval ms */ |
814 | unsigned long max_interval; /* Maximum balance interval ms */ | 850 | unsigned long max_interval; /* Maximum balance interval ms */ |
815 | unsigned int busy_factor; /* less balancing by factor if busy */ | 851 | unsigned int busy_factor; /* less balancing by factor if busy */ |
@@ -864,18 +900,35 @@ struct sched_domain { | |||
864 | #ifdef CONFIG_SCHED_DEBUG | 900 | #ifdef CONFIG_SCHED_DEBUG |
865 | char *name; | 901 | char *name; |
866 | #endif | 902 | #endif |
903 | |||
904 | /* span of all CPUs in this domain */ | ||
905 | unsigned long span[]; | ||
867 | }; | 906 | }; |
868 | 907 | ||
869 | extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 908 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) |
909 | { | ||
910 | return to_cpumask(sd->span); | ||
911 | } | ||
912 | |||
913 | extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
870 | struct sched_domain_attr *dattr_new); | 914 | struct sched_domain_attr *dattr_new); |
871 | extern int arch_reinit_sched_domains(void); | 915 | extern int arch_reinit_sched_domains(void); |
872 | 916 | ||
917 | /* Test a flag in parent sched domain */ | ||
918 | static inline int test_sd_parent(struct sched_domain *sd, int flag) | ||
919 | { | ||
920 | if (sd->parent && (sd->parent->flags & flag)) | ||
921 | return 1; | ||
922 | |||
923 | return 0; | ||
924 | } | ||
925 | |||
873 | #else /* CONFIG_SMP */ | 926 | #else /* CONFIG_SMP */ |
874 | 927 | ||
875 | struct sched_domain_attr; | 928 | struct sched_domain_attr; |
876 | 929 | ||
877 | static inline void | 930 | static inline void |
878 | partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 931 | partition_sched_domains(int ndoms_new, struct cpumask *doms_new, |
879 | struct sched_domain_attr *dattr_new) | 932 | struct sched_domain_attr *dattr_new) |
880 | { | 933 | { |
881 | } | 934 | } |
@@ -926,7 +979,7 @@ struct sched_class { | |||
926 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); | 979 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); |
927 | 980 | ||
928 | void (*set_cpus_allowed)(struct task_struct *p, | 981 | void (*set_cpus_allowed)(struct task_struct *p, |
929 | const cpumask_t *newmask); | 982 | const struct cpumask *newmask); |
930 | 983 | ||
931 | void (*rq_online)(struct rq *rq); | 984 | void (*rq_online)(struct rq *rq); |
932 | void (*rq_offline)(struct rq *rq); | 985 | void (*rq_offline)(struct rq *rq); |
@@ -1579,12 +1632,12 @@ extern cputime_t task_gtime(struct task_struct *p); | |||
1579 | 1632 | ||
1580 | #ifdef CONFIG_SMP | 1633 | #ifdef CONFIG_SMP |
1581 | extern int set_cpus_allowed_ptr(struct task_struct *p, | 1634 | extern int set_cpus_allowed_ptr(struct task_struct *p, |
1582 | const cpumask_t *new_mask); | 1635 | const struct cpumask *new_mask); |
1583 | #else | 1636 | #else |
1584 | static inline int set_cpus_allowed_ptr(struct task_struct *p, | 1637 | static inline int set_cpus_allowed_ptr(struct task_struct *p, |
1585 | const cpumask_t *new_mask) | 1638 | const struct cpumask *new_mask) |
1586 | { | 1639 | { |
1587 | if (!cpu_isset(0, *new_mask)) | 1640 | if (!cpumask_test_cpu(0, new_mask)) |
1588 | return -EINVAL; | 1641 | return -EINVAL; |
1589 | return 0; | 1642 | return 0; |
1590 | } | 1643 | } |
@@ -2195,10 +2248,8 @@ __trace_special(void *__tr, void *__data, | |||
2195 | } | 2248 | } |
2196 | #endif | 2249 | #endif |
2197 | 2250 | ||
2198 | extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); | 2251 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
2199 | extern long sched_getaffinity(pid_t pid, cpumask_t *mask); | 2252 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); |
2200 | |||
2201 | extern int sched_mc_power_savings, sched_smt_power_savings; | ||
2202 | 2253 | ||
2203 | extern void normalize_rt_tasks(void); | 2254 | extern void normalize_rt_tasks(void); |
2204 | 2255 | ||