---
 include/linux/sched.h |  8 +-
 kernel/cpuset.c       | 19 +-
 kernel/sched.c        | 68 +-
 3 files changed, 61 insertions(+), 34 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dfc21fb76bf1..78ba664474f3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1009,9 +1009,13 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
 	return to_cpumask(sd->span);
 }
 
-extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 				    struct sched_domain_attr *dattr_new);
 
+/* Allocate an array of sched domains, for partition_sched_domains(). */
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
+
 /* Test a flag in parent sched domain */
 static inline int test_sd_parent(struct sched_domain *sd, int flag)
 {
@@ -1029,7 +1033,7 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 struct sched_domain_attr;
 
 static inline void
-partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
 {
 }
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d247381e7371..3cf2183b472d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -537,8 +537,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * element of the partition (one sched domain) to be passed to
  * partition_sched_domains().
  */
-/* FIXME: see the FIXME in partition_sched_domains() */
-static int generate_sched_domains(struct cpumask **domains,
+static int generate_sched_domains(cpumask_var_t **domains,
 			struct sched_domain_attr **attributes)
 {
 	LIST_HEAD(q);		/* queue of cpusets to be scanned */
@@ -546,7 +545,7 @@ static int generate_sched_domains(struct cpumask **domains,
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
 	int i, j, k;		/* indices for partition finding loops */
-	struct cpumask *doms;	/* resulting partition; i.e. sched domains */
+	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 	int ndoms = 0;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] struct cpumask slot */
@@ -557,7 +556,8 @@
 
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
-		doms = kmalloc(cpumask_size(), GFP_KERNEL);
+		ndoms = 1;
+		doms = alloc_sched_domains(ndoms);
 		if (!doms)
 			goto done;
 
@@ -566,9 +566,8 @@
 			*dattr = SD_ATTR_INIT;
 			update_domain_attr_tree(dattr, &top_cpuset);
 		}
-		cpumask_copy(doms, top_cpuset.cpus_allowed);
+		cpumask_copy(doms[0], top_cpuset.cpus_allowed);
 
-		ndoms = 1;
 		goto done;
 	}
 
@@ -636,7 +635,7 @@ restart:
 	 * Now we know how many domains to create.
 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 	 */
-	doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL);
+	doms = alloc_sched_domains(ndoms);
 	if (!doms)
 		goto done;
 
@@ -656,7 +655,7 @@ restart:
 			continue;
 		}
 
-		dp = doms + nslot;
+		dp = doms[nslot];
 
 		if (nslot == ndoms) {
 			static int warnings = 10;
@@ -718,7 +717,7 @@ done:
 static void do_rebuild_sched_domains(struct work_struct *unused)
 {
 	struct sched_domain_attr *attr;
-	struct cpumask *doms;
+	cpumask_var_t *doms;
 	int ndoms;
 
 	get_online_cpus();
@@ -2052,7 +2051,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 		unsigned long phase, void *unused_cpu)
 {
 	struct sched_domain_attr *attr;
-	struct cpumask *doms;
+	cpumask_var_t *doms;
 	int ndoms;
 
 	switch (phase) {
diff --git a/kernel/sched.c b/kernel/sched.c
index 30fd0ba5f603..ae026aad145b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8846,7 +8846,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
 	return __build_sched_domains(cpu_map, NULL);
 }
 
-static struct cpumask *doms_cur;	/* current sched domains */
+static cpumask_var_t *doms_cur;	/* current sched domains */
 static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
 static struct sched_domain_attr *dattr_cur;
 				/* attribues of custom domains in 'doms_cur' */
@@ -8868,6 +8868,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
 	return 0;
 }
 
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+	int i;
+	cpumask_var_t *doms;
+
+	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+	if (!doms)
+		return NULL;
+	for (i = 0; i < ndoms; i++) {
+		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+			free_sched_domains(doms, i);
+			return NULL;
+		}
+	}
+	return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+	unsigned int i;
+	for (i = 0; i < ndoms; i++)
+		free_cpumask_var(doms[i]);
+	kfree(doms);
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -8879,12 +8904,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
 
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
-	doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
+	doms_cur = alloc_sched_domains(ndoms_cur);
 	if (!doms_cur)
-		doms_cur = fallback_doms;
-	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
+		doms_cur = &fallback_doms;
+	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
-	err = build_sched_domains(doms_cur);
+	err = build_sched_domains(doms_cur[0]);
 	register_sched_domain_sysctl();
 
 	return err;
@@ -8934,19 +8959,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
- * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
  * The masks don't intersect (don't overlap.) We should setup one
  * sched domain for each mask. CPUs not in any of the cpumasks will
  * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL &&
- * ndoms_new == 1, and partition_sched_domains() will fallback to
- * the single partition 'fallback_doms', it also forces the domains
- * to be rebuilt.
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains.  This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms', it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8954,8 +8979,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
 *
 * Call with hotplug lock held
 */
-/* FIXME: Change to struct cpumask *doms_new[] */
-void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
@@ -8974,40 +8998,40 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
 		for (j = 0; j < n && !new_topology; j++) {
-			if (cpumask_equal(&doms_cur[i], &doms_new[j])
+			if (cpumask_equal(doms_cur[i], doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
 		}
 		/* no match - a current sched domain not in new doms_new[] */
-		detach_destroy_domains(doms_cur + i);
+		detach_destroy_domains(doms_cur[i]);
 match1:
 		;
 	}
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		doms_new = fallback_doms;
-		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
+		doms_new = &fallback_doms;
+		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < ndoms_cur && !new_topology; j++) {
-			if (cpumask_equal(&doms_new[i], &doms_cur[j])
+			if (cpumask_equal(doms_new[i], doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
 		}
 		/* no match - add a new doms_new */
-		__build_sched_domains(doms_new + i,
+		__build_sched_domains(doms_new[i],
 				       dattr_new ? dattr_new + i : NULL);
 match2:
 		;
 	}
 
 	/* Remember the new sched domains */
-	if (doms_cur != fallback_doms)
-		kfree(doms_cur);
+	if (doms_cur != &fallback_doms)
+		free_sched_domains(doms_cur, ndoms_cur);
 	kfree(dattr_cur);	/* kfree(NULL) is safe */
 	doms_cur = doms_new;
 	dattr_cur = dattr_new;