-rw-r--r--  include/linux/sched.h  11
-rw-r--r--  kernel/sched.c         65
2 files changed, 48 insertions, 28 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b95aa9f779b..c5be6c6bc741 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -771,7 +771,6 @@ enum cpu_idle_type {
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
-	cpumask_t cpumask;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -784,11 +783,13 @@ struct sched_group {
 	 * (see include/linux/reciprocal_div.h)
 	 */
 	u32 reciprocal_cpu_power;
+
+	unsigned long cpumask[];
 };
 
 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 {
-	return &sg->cpumask;
+	return to_cpumask(sg->cpumask);
 }
 
 enum sched_domain_level {
@@ -814,7 +815,6 @@ struct sched_domain {
 	struct sched_domain *parent;	/* top domain must be null terminated */
 	struct sched_domain *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
-	cpumask_t span;			/* span of all CPUs in this domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
@@ -869,11 +869,14 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+
+	/* span of all CPUs in this domain */
+	unsigned long span[];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
 {
-	return &sd->span;
+	return to_cpumask(sd->span);
 }
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
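The include/linux/sched.h side of the change drops the fixed-size cpumask_t members and leaves a flexible array member at the end of each structure, with the accessors returning a pointer to that trailing storage. A minimal userspace sketch of the same layout, assuming illustrative names (toy_group, TOY_NR_CPUS, toy_mask_size) that are not part of the kernel patch:

#include <limits.h>
#include <stdlib.h>
#include <string.h>

#define TOY_NR_CPUS	64
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

struct toy_group {
	struct toy_group *next;
	unsigned int power;
	unsigned long cpumask[];	/* storage hangs off the end */
};

/* bytes needed to hold TOY_NR_CPUS bits, rounded up to whole longs */
static size_t toy_mask_size(void)
{
	return ((TOY_NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG) *
		sizeof(unsigned long);
}

/* counterpart of sched_group_cpus(): expose the trailing bitmap */
static unsigned long *toy_group_cpus(struct toy_group *tg)
{
	return tg->cpumask;
}

/* counterpart of kmalloc_node(sizeof(struct sched_group) + cpumask_size(), ...) */
static struct toy_group *toy_group_alloc(void)
{
	struct toy_group *tg = malloc(sizeof(*tg) + toy_mask_size());

	if (tg)
		memset(toy_group_cpus(tg), 0, toy_mask_size());
	return tg;
}

int main(void)
{
	struct toy_group *tg = toy_group_alloc();

	if (!tg)
		return 1;
	toy_group_cpus(tg)[0] |= 1UL;	/* mark CPU 0 as a member */
	free(tg);
	return 0;
}

Routing every user through sched_group_cpus()/sched_domain_span() means callers no longer care whether the mask lives in a kmalloc'd tail, as above, or in the static wrappers added in kernel/sched.c below.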
diff --git a/kernel/sched.c b/kernel/sched.c
index 575f38acf4da..6b9606a6cabf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7006,18 +7006,33 @@ static void sched_domain_node_span(int node, cpumask_t *span)
 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 
 /*
+ * The cpus mask in sched_group and sched_domain hangs off the end.
+ * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
+ * for nr_cpu_ids < CONFIG_NR_CPUS.
+ */
+struct static_sched_group {
+	struct sched_group sg;
+	DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
+};
+
+struct static_sched_domain {
+	struct sched_domain sd;
+	DECLARE_BITMAP(span, CONFIG_NR_CPUS);
+};
+
+/*
  * SMT sched-domains:
  */
 #ifdef CONFIG_SCHED_SMT
-static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
 
 static int
 cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 		 cpumask_t *unused)
 {
 	if (sg)
-		*sg = &per_cpu(sched_group_cpus, cpu);
+		*sg = &per_cpu(sched_group_cpus, cpu).sg;
 	return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -7026,8 +7041,8 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
  * multi-core sched-domains:
  */
 #ifdef CONFIG_SCHED_MC
-static DEFINE_PER_CPU(struct sched_domain, core_domains);
-static DEFINE_PER_CPU(struct sched_group, sched_group_core);
+static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
 #endif /* CONFIG_SCHED_MC */
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
@@ -7041,7 +7056,7 @@ cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 	cpus_and(*mask, *mask, *cpu_map);
 	group = first_cpu(*mask);
 	if (sg)
-		*sg = &per_cpu(sched_group_core, group);
+		*sg = &per_cpu(sched_group_core, group).sg;
 	return group;
 }
 #elif defined(CONFIG_SCHED_MC)
@@ -7050,13 +7065,13 @@ cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 		 cpumask_t *unused)
 {
 	if (sg)
-		*sg = &per_cpu(sched_group_core, cpu);
+		*sg = &per_cpu(sched_group_core, cpu).sg;
 	return cpu;
 }
 #endif
 
-static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
+static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
 
 static int
 cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
@@ -7075,7 +7090,7 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 	group = cpu;
 #endif
 	if (sg)
-		*sg = &per_cpu(sched_group_phys, group);
+		*sg = &per_cpu(sched_group_phys, group).sg;
 	return group;
 }
 
@@ -7089,7 +7104,7 @@ static DEFINE_PER_CPU(struct sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
-static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
 				 struct sched_group **sg, cpumask_t *nodemask)
@@ -7101,7 +7116,7 @@ static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
 	group = first_cpu(*nodemask);
 
 	if (sg)
-		*sg = &per_cpu(sched_group_allnodes, group);
+		*sg = &per_cpu(sched_group_allnodes, group).sg;
 	return group;
 }
 
@@ -7116,7 +7131,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 		for_each_cpu(j, sched_group_cpus(sg)) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(phys_domains, j);
+			sd = &per_cpu(phys_domains, j).sd;
 			if (j != cpumask_first(sched_group_cpus(sd->groups))) {
 				/*
 				 * Only add "power" once for each
@@ -7385,7 +7400,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 		p = sd;
-		sd = &per_cpu(phys_domains, i);
+		sd = &per_cpu(phys_domains, i).sd;
 		SD_INIT(sd, CPU);
 		set_domain_attribute(sd, attr);
 		cpumask_copy(sched_domain_span(sd), nodemask);
@@ -7396,7 +7411,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 		p = sd;
-		sd = &per_cpu(core_domains, i);
+		sd = &per_cpu(core_domains, i).sd;
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
 		*sched_domain_span(sd) = cpu_coregroup_map(i);
@@ -7409,7 +7424,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 		p = sd;
-		sd = &per_cpu(cpu_domains, i);
+		sd = &per_cpu(cpu_domains, i).sd;
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
 		cpumask_and(sched_domain_span(sd),
@@ -7485,7 +7500,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sched_domain_node_span(i, domainspan);
 		cpus_and(*domainspan, *domainspan, *cpu_map);
 
-		sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
+		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+				  GFP_KERNEL, i);
 		if (!sg) {
 			printk(KERN_WARNING "Can not alloc domain group for "
 				"node %d\n", i);
@@ -7518,7 +7534,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		if (cpus_empty(*tmpmask))
 			continue;
 
-		sg = kmalloc_node(sizeof(struct sched_group),
+		sg = kmalloc_node(sizeof(struct sched_group) +
+				  cpumask_size(),
 				  GFP_KERNEL, i);
 		if (!sg) {
 			printk(KERN_WARNING
@@ -7538,21 +7555,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
 	for_each_cpu(i, cpu_map) {
-		struct sched_domain *sd = &per_cpu(cpu_domains, i);
+		struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
 	for_each_cpu(i, cpu_map) {
-		struct sched_domain *sd = &per_cpu(core_domains, i);
+		struct sched_domain *sd = &per_cpu(core_domains, i).sd;
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
 	for_each_cpu(i, cpu_map) {
-		struct sched_domain *sd = &per_cpu(phys_domains, i);
+		struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
 
 		init_sched_groups_power(i, sd);
 	}
@@ -7574,11 +7591,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
-		sd = &per_cpu(cpu_domains, i);
+		sd = &per_cpu(cpu_domains, i).sd;
 #elif defined(CONFIG_SCHED_MC)
-		sd = &per_cpu(core_domains, i);
+		sd = &per_cpu(core_domains, i).sd;
 #else
-		sd = &per_cpu(phys_domains, i);
+		sd = &per_cpu(phys_domains, i).sd;
 #endif
 		cpu_attach_domain(sd, rd, i);
 	}
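For the statically allocated per-CPU domains and groups, the trailing bitmap cannot come from an allocator, so the patch wraps each structure in a container whose next member is a DECLARE_BITMAP() of CONFIG_NR_CPUS bits; the flexible span[]/cpumask[] members then resolve to that adjacent storage. A standalone sketch of the same trick, assuming illustrative names (toy_domain, toy_static_domain, TOY_NR_CPUS) and relying, as the kernel does, on the GNU C extension that lets a struct ending in a flexible array member be embedded in another struct:

#include <limits.h>
#include <stdio.h>

#define TOY_NR_CPUS	64
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define TOY_MASK_LONGS	((TOY_NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct toy_domain {
	int level;
	unsigned long span[];	/* span of all CPUs in this domain */
};

/* counterpart of struct static_sched_domain: bitmap storage follows the struct */
struct toy_static_domain {
	struct toy_domain sd;
	unsigned long span_storage[TOY_MASK_LONGS];	/* backs sd.span[] */
};

static unsigned long *toy_domain_span(struct toy_domain *sd)
{
	return sd->span;	/* analogous to to_cpumask(sd->span) */
}

int main(void)
{
	/* static storage, analogous to DEFINE_PER_CPU(struct static_sched_domain, ...) */
	static struct toy_static_domain d;

	/* mark CPU 3 as part of this domain's span */
	toy_domain_span(&d.sd)[3 / BITS_PER_LONG] |= 1UL << (3 % BITS_PER_LONG);
	printf("span word 0: %#lx\n", toy_domain_span(&d.sd)[0]);
	return 0;
}

Code that only ever receives a struct sched_domain * or struct sched_group * keeps working unchanged; only the definition sites need the .sd/.sg member, which is what the kernel/sched.c hunks above adjust.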