author     Rusty Russell <rusty@rustcorp.com.au>   2008-11-24 11:05:12 -0500
committer  Ingo Molnar <mingo@elte.hu>             2008-11-24 11:52:10 -0500
commit     dcc30a35f71bcf51f1e9b336dc5e41923071509a (patch)
tree       37334b442315285779614990aef82022c11e1b72 /kernel/sched.c
parent     d5dd3db1dce73cdd5c45c5a3498c51bd21b8864b (diff)
sched: convert cpu_isolated_map to cpumask_var_t.
Impact: stack usage reduction, (future) size reduction, cleanup
Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK is set) saves
space when nr_cpu_ids is small but CONFIG_NR_CPUS is big. cpumask_var_t
is just a struct cpumask for !CONFIG_CPUMASK_OFFSTACK.
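
For readers unfamiliar with the idiom, here is a minimal sketch of the two
definitions behind cpumask_var_t (simplified from <linux/cpumask.h> of this
era; the example() caller is illustrative, not part of the patch):

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Off-stack: the mask is a pointer to a separately allocated cpumask,
 * sized for the runtime nr_cpu_ids rather than CONFIG_NR_CPUS. */
typedef struct cpumask *cpumask_var_t;
#else
/* On-stack: an array-of-one, so the same code works via pointer decay. */
typedef struct cpumask cpumask_var_t[1];
#endif

/* Illustrative caller showing the alloc/use/free discipline: */
static void example(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;			/* can only fail when OFFSTACK */
	cpumask_clear(mask);
	/* ... use mask via the cpumask_*() pointer API ... */
	free_cpumask_var(mask);		/* no-op when !OFFSTACK */
}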
We can also use cpulist_parse() instead of parsing the string manually in
isolated_cpu_setup(). The before/after is sketched here:
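
Both fragments below are taken from the first hunk further down; note that
cpulist_parse() additionally understands range syntax such as "1-3,8":

/* Before: collect integers with get_options() and set bits one by one. */
static int __initdata ints[NR_CPUS];
int i;

str = get_options(str, ARRAY_SIZE(ints), ints);
cpus_clear(cpu_isolated_map);
for (i = 1; i <= ints[0]; i++)
	if (ints[i] < NR_CPUS)
		cpu_set(ints[i], cpu_isolated_map);

/* After: one call parses the whole CPU list into the mask. */
cpulist_parse(str, *cpu_isolated_map);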
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 24012c2a8892..526618fe4a78 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6917,19 +6917,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 }
 
 /* cpus with isolated domains */
-static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_var_t cpu_isolated_map;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
-	static int __initdata ints[NR_CPUS];
-	int i;
-
-	str = get_options(str, ARRAY_SIZE(ints), ints);
-	cpus_clear(cpu_isolated_map);
-	for (i = 1; i <= ints[0]; i++)
-		if (ints[i] < NR_CPUS)
-			cpu_set(ints[i], cpu_isolated_map);
+	cpulist_parse(str, *cpu_isolated_map);
 	return 1;
 }
 
@@ -7727,7 +7720,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
 		doms_cur = &fallback_doms;
-	cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
 	err = build_sched_domains(doms_cur);
 	register_sched_domain_sysctl();
@@ -7826,7 +7819,7 @@ match1:
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
-		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
@@ -7985,7 +7978,9 @@ static int update_runtime(struct notifier_block *nfb,
 
 void __init sched_init_smp(void)
 {
-	cpumask_t non_isolated_cpus;
+	cpumask_var_t non_isolated_cpus;
+
+	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
 
 #if defined(CONFIG_NUMA)
 	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -7994,10 +7989,10 @@ void __init sched_init_smp(void)
 #endif
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
-	arch_init_sched_domains(&cpu_online_map);
-	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
-	if (cpus_empty(non_isolated_cpus))
-		cpu_set(smp_processor_id(), non_isolated_cpus);
+	arch_init_sched_domains(cpu_online_mask);
+	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+	if (cpumask_empty(non_isolated_cpus))
+		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 
@@ -8012,9 +8007,10 @@ void __init sched_init_smp(void)
 	init_hrtick();
 
 	/* Move init over to a non-isolated CPU */
-	if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
+	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
 		BUG();
 	sched_init_granularity();
+	free_cpumask_var(non_isolated_cpus);
 }
 #else
 void __init sched_init_smp(void)
@@ -8334,6 +8330,7 @@ void __init sched_init(void)
 #ifdef CONFIG_NO_HZ
 	alloc_bootmem_cpumask_var(&nohz.cpu_mask);
 #endif
+	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 
 	scheduler_running = 1;
 }
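
One allocation detail worth noting: cpu_isolated_map is set up in
sched_init(), which runs before the slab allocator is available, hence
alloc_bootmem_cpumask_var() in the final hunk rather than
alloc_cpumask_var(). After the conversion, readers of the mask go through
the pointer-based cpumask API; a hypothetical caller (not part of this
patch) would look like:

/* Hypothetical helper, for illustration only: with cpu_isolated_map now
 * a cpumask_var_t, it is passed to the cpumask_*() API as a pointer. */
static int cpu_is_isolated_example(int cpu)
{
	return cpumask_test_cpu(cpu, cpu_isolated_map);
}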