author		Li Zefan <lizefan@huawei.com>	2013-03-05 03:06:23 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-03-06 05:24:31 -0500
commit		5e6521eaa1ee581a13b904f35b80c5efeb2baccb (patch)
tree		4a8e82ba57da872636ff432edc036914163249e5 /kernel/sched/sched.h
parent		cc1f4b1f3faed9f2040eff2a75f510b424b3cf18 (diff)
sched: Move struct sched_group to kernel/sched/sched.h
Move struct sched_group_power and sched_group and related inline
functions to kernel/sched/sched.h, as they are used internally
only.
Signed-off-by: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/5135A77F.2010705@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	56
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 709a30cdfd85..1a4a2b19c2f4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -572,6 +572,62 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_id);
 
+struct sched_group_power {
+	atomic_t ref;
+	/*
+	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
+	 * single CPU.
+	 */
+	unsigned int power, power_orig;
+	unsigned long next_update;
+	/*
+	 * Number of busy cpus in this group.
+	 */
+	atomic_t nr_busy_cpus;
+
+	unsigned long cpumask[0]; /* iteration mask */
+};
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
+
+	unsigned int group_weight;
+	struct sched_group_power *sgp;
+
+	/*
+	 * The CPUs this group covers.
+	 *
+	 * NOTE: this field is variable length. (Allocated dynamically
+	 * by attaching extra space to the end of the structure,
+	 * depending on how many CPUs the kernel has booted up with)
+	 */
+	unsigned long cpumask[0];
+};
+
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return to_cpumask(sg->cpumask);
+}
+
+/*
+ * cpumask masking which cpus in the group are allowed to iterate up the domain
+ * tree.
+ */
+static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+{
+	return to_cpumask(sg->sgp->cpumask);
+}
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+	return cpumask_first(sched_group_cpus(group));
+}
+
 extern int group_balance_cpu(struct sched_group *sg);
 
 #endif /* CONFIG_SMP */
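
The two C idioms in the structures above are the zero-length trailing cpumask[0], sized by attaching extra space at allocation time, and the circular next list. Below is a minimal userspace sketch of both patterns; it is not kernel code, and struct group, make_group, and the mask values are hypothetical stand-ins for struct sched_group and its allocator:

	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Userspace analogue of struct sched_group (illustrative only):
	 * a circular list node whose trailing mask is sized when the
	 * object is allocated, mirroring the NOTE in the diff about
	 * attaching extra space to the end of the structure.
	 */
	struct group {
		struct group *next;	/* must form a circular list */
		unsigned int weight;
		unsigned long mask[];	/* variable length, like cpumask[0] */
	};

	static struct group *make_group(size_t mask_words)
	{
		/* One allocation covers the struct plus the trailing mask. */
		struct group *g = calloc(1, sizeof(*g) +
					 mask_words * sizeof(unsigned long));
		if (g)
			g->next = g;	/* a lone node is its own circular list */
		return g;
	}

	int main(void)
	{
		struct group *a = make_group(1);
		struct group *b = make_group(1);

		if (!a || !b)
			return 1;

		/* Link the two nodes into a circle, as sched_group requires. */
		a->next = b;
		b->next = a;
		a->mask[0] = 0x3;	/* pretend CPUs 0-1 */
		b->mask[0] = 0xc;	/* pretend CPUs 2-3 */

		/*
		 * Walk the circle exactly once: start anywhere and stop on
		 * returning to the start -- the same shape as the scheduler's
		 * do/while walks over sg->next.
		 */
		struct group *g = a;
		do {
			printf("group weight=%u mask[0]=%#lx\n",
			       g->weight, g->mask[0]);
			g = g->next;
		} while (g != a);

		free(a);
		free(b);
		return 0;
	}

In the kernel itself the matching allocation is a kzalloc() of sizeof(struct sched_group) plus cpumask_size(), and the circular-list requirement exists precisely so that do/while walks starting from any group terminate after one full lap.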