author	Li Zefan <lizefan@huawei.com>	2013-03-05 03:06:23 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-03-06 05:24:31 -0500
commit	5e6521eaa1ee581a13b904f35b80c5efeb2baccb (patch)
tree	4a8e82ba57da872636ff432edc036914163249e5
parent	cc1f4b1f3faed9f2040eff2a75f510b424b3cf18 (diff)
sched: Move struct sched_group to kernel/sched/sched.h
Move struct sched_group_power and struct sched_group, together with the
related inline functions, to kernel/sched/sched.h, as they are used
internally only.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/5135A77F.2010705@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	include/linux/sched.h	58
-rw-r--r--	kernel/sched/sched.h	56
2 files changed, 58 insertions(+), 56 deletions(-)
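
The interesting part of the change is the two-line addition to include/linux/sched.h: a bare forward declaration, struct sched_group;, is all the public header still needs, because struct sched_domain only stores pointers to groups and never looks inside them. A minimal userspace sketch of that opaque-pointer pattern (all names below are illustrative, not kernel code):

#include <stdio.h>

/* "Public header" side: a forward declaration is enough, because
 * domain-like code only stores pointers to groups. */
struct sched_group;

struct sched_domain_like {
	struct sched_domain_like *parent;
	struct sched_group *groups;	/* size of *groups unknown here: fine */
};

/* "Private header" side: the full definition lives next to the code
 * that actually dereferences the pointer. */
struct sched_group {
	struct sched_group *next;	/* circular list, as in the kernel */
	unsigned int group_weight;
};

int main(void)
{
	struct sched_group sg = { .next = &sg, .group_weight = 4 };
	struct sched_domain_like sd = { .parent = NULL, .groups = &sg };

	printf("group_weight = %u\n", sd.groups->group_weight);
	return 0;
}

Only translation units that dereference the pointer need the full definition, which is exactly why the struct bodies can migrate into the scheduler-private kernel/sched/sched.h.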
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f8826d04fb12..0d641304c0ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -780,62 +780,6 @@ enum cpu_idle_type {
 
 extern int __weak arch_sd_sibiling_asym_packing(void);
 
-struct sched_group_power {
-	atomic_t ref;
-	/*
-	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-	 * single CPU.
-	 */
-	unsigned int power, power_orig;
-	unsigned long next_update;
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
-
-	unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
-	atomic_t ref;
-
-	unsigned int group_weight;
-	struct sched_group_power *sgp;
-
-	/*
-	 * The CPUs this group covers.
-	 *
-	 * NOTE: this field is variable length. (Allocated dynamically
-	 * by attaching extra space to the end of the structure,
-	 * depending on how many CPUs the kernel has booted up with)
-	 */
-	unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
-	return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
-	return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_cpus(group));
-}
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -846,6 +790,8 @@ struct sched_domain_attr {
 
 extern int sched_domain_level_max;
 
+struct sched_group;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
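
Both structures end in a variable-length cpumask, declared with the pre-C99 zero-length-array idiom cpumask[0]; as the NOTE in the diff says, the mask is allocated by attaching extra space to the end of the structure, sized by how many CPUs the kernel booted with. A rough userspace sketch of that allocation pattern, using the C99 flexible-array-member spelling (the struct and helper names here are illustrative, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative analogue of sched_group's trailing cpumask[0]:
 * the array contributes no size of its own; callers allocate
 * sizeof(struct group) plus however many mask words they need. */
struct group {
	struct group *next;
	unsigned int weight;
	unsigned long cpumask[];	/* C99 flexible array member */
};

static struct group *group_alloc(size_t mask_words)
{
	struct group *g = malloc(sizeof(*g) +
				 mask_words * sizeof(unsigned long));
	if (g)
		memset(g->cpumask, 0, mask_words * sizeof(unsigned long));
	return g;
}

int main(void)
{
	/* e.g. enough words to cover 128 CPUs on a 64-bit machine */
	size_t words = 128 / (8 * sizeof(unsigned long));
	struct group *g = group_alloc(words);

	if (!g)
		return 1;
	g->cpumask[0] |= 1UL << 3;	/* mark CPU 3 as a member */
	printf("word 0 of mask: %#lx\n", g->cpumask[0]);
	free(g);
	return 0;
}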
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 709a30cdfd85..1a4a2b19c2f4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -572,6 +572,62 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_id);
 
+struct sched_group_power {
+	atomic_t ref;
+	/*
+	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
+	 * single CPU.
+	 */
+	unsigned int power, power_orig;
+	unsigned long next_update;
+	/*
+	 * Number of busy cpus in this group.
+	 */
+	atomic_t nr_busy_cpus;
+
+	unsigned long cpumask[0]; /* iteration mask */
+};
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
+
+	unsigned int group_weight;
+	struct sched_group_power *sgp;
+
+	/*
+	 * The CPUs this group covers.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 */
+	unsigned long cpumask[0];
+};
+
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
+/*
+ * cpumask masking which cpus in the group are allowed to iterate up the domain
+ * tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+	return to_cpumask(sg->sgp->cpumask);
+}
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+	return cpumask_first(sched_group_cpus(group));
+}
+
 extern int group_balance_cpu(struct sched_group *sg);
 
 #endif /* CONFIG_SMP */
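
Finally, note the comment on sched_group->next: the groups of a domain form a circular list, so iteration starts at any group and follows ->next until it returns to the starting point. A hedged userspace sketch of that traversal (names are illustrative, not kernel code):

#include <stdio.h>

/* Illustrative analogue of walking sched_group's circular list:
 * start anywhere, follow ->next, stop when back at the start. */
struct group {
	struct group *next;	/* must form a circular list */
	unsigned int weight;
};

static unsigned int total_weight(struct group *start)
{
	struct group *g = start;
	unsigned int sum = 0;

	do {
		sum += g->weight;
		g = g->next;
	} while (g != start);

	return sum;
}

int main(void)
{
	struct group a, b, c;

	a = (struct group){ .next = &b, .weight = 2 };
	b = (struct group){ .next = &c, .weight = 4 };
	c = (struct group){ .next = &a, .weight = 8 };

	printf("total weight: %u\n", total_weight(&a));	/* prints 14 */
	return 0;
}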