author		Rusty Russell <rusty@rustcorp.com.au>	2009-11-02 23:23:40 -0500
committer	Ingo Molnar <mingo@elte.hu>		2009-11-04 07:16:40 -0500
commit		acc3f5d7cabbfd6cec71f0c1f9900621fa2d6ae7
tree		672ed45f6df2f52e8f3cced2ee11ad29a1533890 /kernel/sched.c
parent		e2c880630438f80b474378d5487b511b07665051
cpumask: Partition_sched_domains takes array of cpumask_var_t
Currently partition_sched_domains() takes a 'struct cpumask
*doms_new' which is a kmalloc'ed array of cpumask_t. You can't
have such an array if 'struct cpumask' is undefined, as we plan
for CONFIG_CPUMASK_OFFSTACK=y.
So, we make this an array of cpumask_var_t instead: this is the
same for the CONFIG_CPUMASK_OFFSTACK=n case, but requires
multiple allocations for the CONFIG_CPUMASK_OFFSTACK=y case.
Hence we add alloc_sched_domains() and free_sched_domains()
functions.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <200911031453.40668.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
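[Editor's note: for context, the reason the array form is unchanged for CONFIG_CPUMASK_OFFSTACK=n but needs per-entry allocations for =y comes down to the two definitions of cpumask_var_t in include/linux/cpumask.h. A sketch of both, not part of this diff:

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Each array entry is a pointer; the mask itself lives on the heap. */
typedef struct cpumask *cpumask_var_t;
#else
/* Each array entry embeds a full mask, so an array of cpumask_var_t
 * has the same layout as an array of struct cpumask. */
typedef struct cpumask cpumask_var_t[1];
#endif

With OFFSTACK=n, alloc_cpumask_var() is a no-op that always succeeds, so alloc_sched_domains() below degenerates to the old single kmalloc of ndoms masks; with OFFSTACK=y, each entry gets its own heap allocation, which is what the new helpers encapsulate.]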
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	68
1 file changed, 46 insertions, 22 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 30fd0ba5f603..ae026aad145b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8846,7 +8846,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
 	return __build_sched_domains(cpu_map, NULL);
 }
 
-static struct cpumask *doms_cur;	/* current sched domains */
+static cpumask_var_t *doms_cur;	/* current sched domains */
 static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
 static struct sched_domain_attr *dattr_cur;
 				/* attribues of custom domains in 'doms_cur' */
@@ -8868,6 +8868,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
 	return 0;
 }
 
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+	int i;
+	cpumask_var_t *doms;
+
+	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+	if (!doms)
+		return NULL;
+	for (i = 0; i < ndoms; i++) {
+		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+			free_sched_domains(doms, i);
+			return NULL;
+		}
+	}
+	return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+	unsigned int i;
+	for (i = 0; i < ndoms; i++)
+		free_cpumask_var(doms[i]);
+	kfree(doms);
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -8879,12 +8904,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
 
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
-	doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
+	doms_cur = alloc_sched_domains(ndoms_cur);
 	if (!doms_cur)
-		doms_cur = fallback_doms;
-	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
+		doms_cur = &fallback_doms;
+	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
-	err = build_sched_domains(doms_cur);
+	err = build_sched_domains(doms_cur[0]);
 	register_sched_domain_sysctl();
 
 	return err;
@@ -8934,19 +8959,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
- * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
  * The masks don't intersect (don't overlap.) We should setup one
  * sched domain for each mask. CPUs not in any of the cpumasks will
  * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL &&
- * ndoms_new == 1, and partition_sched_domains() will fallback to
- * the single partition 'fallback_doms', it also forces the domains
- * to be rebuilt.
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains.  This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms', it also forces the domains to be rebuilt.
  *
  * If doms_new == NULL it will be replaced with cpu_online_mask.
  * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8954,8 +8979,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * Call with hotplug lock held
  */
-/* FIXME: Change to struct cpumask *doms_new[] */
-void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
@@ -8974,40 +8998,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
 		for (j = 0; j < n && !new_topology; j++) {
-			if (cpumask_equal(&doms_cur[i], &doms_new[j])
+			if (cpumask_equal(doms_cur[i], doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
 		}
 		/* no match - a current sched domain not in new doms_new[] */
-		detach_destroy_domains(doms_cur + i);
+		detach_destroy_domains(doms_cur[i]);
 match1:
 		;
 	}
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		doms_new = fallback_doms;
-		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
+		doms_new = &fallback_doms;
+		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < ndoms_cur && !new_topology; j++) {
-			if (cpumask_equal(&doms_new[i], &doms_cur[j])
+			if (cpumask_equal(doms_new[i], doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
 		}
 		/* no match - add a new doms_new */
-		__build_sched_domains(doms_new + i,
+		__build_sched_domains(doms_new[i],
 				       dattr_new ? dattr_new + i : NULL);
match2:
 		;
 	}
 
 	/* Remember the new sched domains */
-	if (doms_cur != fallback_doms)
-		kfree(doms_cur);
+	if (doms_cur != &fallback_doms)
+		free_sched_domains(doms_cur, ndoms_cur);
 	kfree(dattr_cur);	/* kfree(NULL) is safe */
 	doms_cur = doms_new;
 	dattr_cur = dattr_new;
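[Editor's note: a minimal, hypothetical caller sketch of the new interface. Nothing below is from this commit; the function name example_repartition and the chosen masks are invented for illustration. It assumes only the helpers added above and the documented doms_new == NULL && ndoms_new == 1 fallback:

/* Hypothetical example, not from this commit. The caller must hold
 * the hotplug lock, per the comment on partition_sched_domains(). */
static int example_repartition(void)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* Documented fallback: NULL && ndoms_new == 1 forces a
		 * rebuild using the single partition 'fallback_doms'. */
		partition_sched_domains(1, NULL, NULL);
		return -ENOMEM;
	}

	cpumask_copy(doms[0], cpumask_of(0));		   /* CPU 0 alone */
	cpumask_andnot(doms[1], cpu_online_mask, doms[0]); /* the rest */

	/* partition_sched_domains() takes ownership of 'doms' and will
	 * free_sched_domains() it when the partitioning is replaced. */
	partition_sched_domains(2, doms, NULL);
	return 0;
}

Note the ownership handoff: the caller never frees 'doms' itself, which is what lets partition_sched_domains() keep the array as the new doms_cur.]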