about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2008-11-24 11:05:04 -0500
committerIngo Molnar <mingo@elte.hu>2008-11-24 11:50:57 -0500
commit6c99e9ad47d9c082bd096f42fb49e397b05d58a8 (patch)
tree7b0adff66f353b173a3adc05b03b3279bef63c40 /include/linux
parent758b2cdc6f6a22c702bd8f2344382fb1270b2161 (diff)
sched: convert struct sched_group/sched_domain cpumask_ts to variable bitmaps
Impact: (future) size reduction for large NR_CPUS. We move the 'cpumask' member of sched_group to the end, so when we kmalloc it we can do a minimal allocation: this saves space for small nr_cpu_ids but big CONFIG_NR_CPUS. A similar trick is used for 'span' in sched_domain. This isn't quite as good as converting to a cpumask_var_t, as some sched_groups are actually static, but it's safer: we don't have to figure out where to call alloc_cpumask_var/free_cpumask_var.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/sched.h11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b95aa9f779b..c5be6c6bc741 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -771,7 +771,6 @@ enum cpu_idle_type {
771 771
772struct sched_group { 772struct sched_group {
773 struct sched_group *next; /* Must be a circular list */ 773 struct sched_group *next; /* Must be a circular list */
774 cpumask_t cpumask;
775 774
776 /* 775 /*
777 * CPU power of this group, SCHED_LOAD_SCALE being max power for a 776 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -784,11 +783,13 @@ struct sched_group {
784 * (see include/linux/reciprocal_div.h) 783 * (see include/linux/reciprocal_div.h)
785 */ 784 */
786 u32 reciprocal_cpu_power; 785 u32 reciprocal_cpu_power;
786
787 unsigned long cpumask[];
787}; 788};
788 789
789static inline struct cpumask *sched_group_cpus(struct sched_group *sg) 790static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
790{ 791{
791 return &sg->cpumask; 792 return to_cpumask(sg->cpumask);
792} 793}
793 794
794enum sched_domain_level { 795enum sched_domain_level {
@@ -814,7 +815,6 @@ struct sched_domain {
814 struct sched_domain *parent; /* top domain must be null terminated */ 815 struct sched_domain *parent; /* top domain must be null terminated */
815 struct sched_domain *child; /* bottom domain must be null terminated */ 816 struct sched_domain *child; /* bottom domain must be null terminated */
816 struct sched_group *groups; /* the balancing groups of the domain */ 817 struct sched_group *groups; /* the balancing groups of the domain */
817 cpumask_t span; /* span of all CPUs in this domain */
818 unsigned long min_interval; /* Minimum balance interval ms */ 818 unsigned long min_interval; /* Minimum balance interval ms */
819 unsigned long max_interval; /* Maximum balance interval ms */ 819 unsigned long max_interval; /* Maximum balance interval ms */
820 unsigned int busy_factor; /* less balancing by factor if busy */ 820 unsigned int busy_factor; /* less balancing by factor if busy */
@@ -869,11 +869,14 @@ struct sched_domain {
869#ifdef CONFIG_SCHED_DEBUG 869#ifdef CONFIG_SCHED_DEBUG
870 char *name; 870 char *name;
871#endif 871#endif
872
873 /* span of all CPUs in this domain */
874 unsigned long span[];
872}; 875};
873 876
874static inline struct cpumask *sched_domain_span(struct sched_domain *sd) 877static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
875{ 878{
876 return &sd->span; 879 return to_cpumask(sd->span);
877} 880}
878 881
879extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 882extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,