author     Rusty Russell <rusty@rustcorp.com.au>      2008-11-24 11:05:12 -0500
committer  Ingo Molnar <mingo@elte.hu>                2008-11-24 11:52:15 -0500
commit     4212823fb459eacc8098dd420bb68ebb9917989d (patch)
tree       4ef0e34966cbdbb25ac99b4be797ce985eee796a /kernel
parent     dcc30a35f71bcf51f1e9b336dc5e41923071509a (diff)
sched: convert fallback_doms to cpumask_var_t.
Impact: (future) size reduction for large NR_CPUS.
Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK is set) saves
space when nr_cpu_ids is small but CONFIG_NR_CPUS is large. For
!CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is just a struct cpumask.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
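For readers unfamiliar with the API: cpumask_var_t is declared in <linux/cpumask.h>. With CONFIG_CPUMASK_OFFSTACK=y it is a pointer that must be set up with alloc_cpumask_var() and released with free_cpumask_var(); with CONFIG_CPUMASK_OFFSTACK=n it is a one-element array of struct cpumask, and alloc_cpumask_var() always reports success without allocating anything. A minimal sketch of that pattern follows; the helper demo_cpumask_pattern() is illustrative only and is not part of this patch:

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/errno.h>

/*
 * Hypothetical helper, for illustration only: allocate a cpumask_var_t,
 * fill it from cpu_online_mask, count the bits, and free it again.
 * Under CONFIG_CPUMASK_OFFSTACK the mask is kmalloc'd; otherwise it
 * occupies local storage here and alloc_cpumask_var() is a no-op that
 * reports success.
 */
static int demo_cpumask_pattern(void)
{
	cpumask_var_t mask;
	int weight;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);
	weight = cpumask_weight(mask);

	free_cpumask_var(mask);
	return weight;
}

In both configurations a cpumask_var_t can be passed where a struct cpumask pointer is expected (it is either already a pointer or an array that decays to one), which is why the hunks below drop the '&' in assignments and comparisons involving fallback_doms.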
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 526618fe4a78..42588ad93b25 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7697,10 +7697,10 @@ static struct sched_domain_attr *dattr_cur;
 
 /*
  * Special case: If a kmalloc of a doms_cur partition (array of
- * cpumask_t) fails, then fallback to a single sched domain,
- * as determined by the single cpumask_t fallback_doms.
+ * cpumask) fails, then fallback to a single sched domain,
+ * as determined by the single cpumask fallback_doms.
  */
-static cpumask_t fallback_doms;
+static cpumask_var_t fallback_doms;
 
 void __attribute__((weak)) arch_update_cpu_topology(void)
 {
@@ -7719,7 +7719,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 	ndoms_cur = 1;
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
-		doms_cur = &fallback_doms;
+		doms_cur = fallback_doms;
 	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
 	err = build_sched_domains(doms_cur);
@@ -7818,7 +7818,7 @@ match1:
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		doms_new = &fallback_doms;
+		doms_new = fallback_doms;
 		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
@@ -7838,7 +7838,7 @@ match2:
 	}
 
 	/* Remember the new sched domains */
-	if (doms_cur != &fallback_doms)
+	if (doms_cur != fallback_doms)
 		kfree(doms_cur);
 	kfree(dattr_cur);	/* kfree(NULL) is safe */
 	doms_cur = doms_new;
@@ -8011,6 +8011,8 @@ void __init sched_init_smp(void)
 		BUG();
 	sched_init_granularity();
 	free_cpumask_var(non_isolated_cpus);
+
+	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 }
 #else
 void __init sched_init_smp(void)