diff options
author | Rusty Russell <rusty@rustcorp.com.au> | 2008-11-24 11:05:05 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-24 11:51:18 -0500 |
commit | c6c4927b22a3514c6660f0e72c78716226bd3cc8 (patch) | |
tree | ee7fc9031eb14ac1c9d2f0732950b4faae2b6245 /kernel/sched.c | |
parent | 6a7b3dc3440f7b5a9b67594af01ed562cdeb41e4 (diff) |
sched: convert struct root_domain to cpumask_var_t.
Impact: (future) size reduction for large NR_CPUS.
Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
space for small nr_cpu_ids but big CONFIG_NR_CPUS. cpumask_var_t
is just a struct cpumask for !CONFIG_CPUMASK_OFFSTACK.
def_root_domain is static, and so its masks are initialized with
alloc_bootmem_cpumask_var. After that, alloc_cpumask_var is used.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 69 |
1 file changed, 51 insertions, 18 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 2723d7a4a42d..93309c3034de 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -487,14 +487,14 @@ struct rt_rq { | |||
487 | */ | 487 | */ |
488 | struct root_domain { | 488 | struct root_domain { |
489 | atomic_t refcount; | 489 | atomic_t refcount; |
490 | cpumask_t span; | 490 | cpumask_var_t span; |
491 | cpumask_t online; | 491 | cpumask_var_t online; |
492 | 492 | ||
493 | /* | 493 | /* |
494 | * The "RT overload" flag: it gets set if a CPU has more than | 494 | * The "RT overload" flag: it gets set if a CPU has more than |
495 | * one runnable RT task. | 495 | * one runnable RT task. |
496 | */ | 496 | */ |
497 | cpumask_t rto_mask; | 497 | cpumask_var_t rto_mask; |
498 | atomic_t rto_count; | 498 | atomic_t rto_count; |
499 | #ifdef CONFIG_SMP | 499 | #ifdef CONFIG_SMP |
500 | struct cpupri cpupri; | 500 | struct cpupri cpupri; |
@@ -6444,7 +6444,7 @@ static void set_rq_online(struct rq *rq) | |||
6444 | if (!rq->online) { | 6444 | if (!rq->online) { |
6445 | const struct sched_class *class; | 6445 | const struct sched_class *class; |
6446 | 6446 | ||
6447 | cpu_set(rq->cpu, rq->rd->online); | 6447 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
6448 | rq->online = 1; | 6448 | rq->online = 1; |
6449 | 6449 | ||
6450 | for_each_class(class) { | 6450 | for_each_class(class) { |
@@ -6464,7 +6464,7 @@ static void set_rq_offline(struct rq *rq) | |||
6464 | class->rq_offline(rq); | 6464 | class->rq_offline(rq); |
6465 | } | 6465 | } |
6466 | 6466 | ||
6467 | cpu_clear(rq->cpu, rq->rd->online); | 6467 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
6468 | rq->online = 0; | 6468 | rq->online = 0; |
6469 | } | 6469 | } |
6470 | } | 6470 | } |
@@ -6505,7 +6505,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6505 | rq = cpu_rq(cpu); | 6505 | rq = cpu_rq(cpu); |
6506 | spin_lock_irqsave(&rq->lock, flags); | 6506 | spin_lock_irqsave(&rq->lock, flags); |
6507 | if (rq->rd) { | 6507 | if (rq->rd) { |
6508 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6508 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6509 | 6509 | ||
6510 | set_rq_online(rq); | 6510 | set_rq_online(rq); |
6511 | } | 6511 | } |
@@ -6567,7 +6567,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6567 | rq = cpu_rq(cpu); | 6567 | rq = cpu_rq(cpu); |
6568 | spin_lock_irqsave(&rq->lock, flags); | 6568 | spin_lock_irqsave(&rq->lock, flags); |
6569 | if (rq->rd) { | 6569 | if (rq->rd) { |
6570 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6570 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6571 | set_rq_offline(rq); | 6571 | set_rq_offline(rq); |
6572 | } | 6572 | } |
6573 | spin_unlock_irqrestore(&rq->lock, flags); | 6573 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -6768,6 +6768,14 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6768 | return 1; | 6768 | return 1; |
6769 | } | 6769 | } |
6770 | 6770 | ||
6771 | static void free_rootdomain(struct root_domain *rd) | ||
6772 | { | ||
6773 | free_cpumask_var(rd->rto_mask); | ||
6774 | free_cpumask_var(rd->online); | ||
6775 | free_cpumask_var(rd->span); | ||
6776 | kfree(rd); | ||
6777 | } | ||
6778 | |||
6771 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) | 6779 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
6772 | { | 6780 | { |
6773 | unsigned long flags; | 6781 | unsigned long flags; |
@@ -6777,38 +6785,60 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6777 | if (rq->rd) { | 6785 | if (rq->rd) { |
6778 | struct root_domain *old_rd = rq->rd; | 6786 | struct root_domain *old_rd = rq->rd; |
6779 | 6787 | ||
6780 | if (cpu_isset(rq->cpu, old_rd->online)) | 6788 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
6781 | set_rq_offline(rq); | 6789 | set_rq_offline(rq); |
6782 | 6790 | ||
6783 | cpu_clear(rq->cpu, old_rd->span); | 6791 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
6784 | 6792 | ||
6785 | if (atomic_dec_and_test(&old_rd->refcount)) | 6793 | if (atomic_dec_and_test(&old_rd->refcount)) |
6786 | kfree(old_rd); | 6794 | free_rootdomain(old_rd); |
6787 | } | 6795 | } |
6788 | 6796 | ||
6789 | atomic_inc(&rd->refcount); | 6797 | atomic_inc(&rd->refcount); |
6790 | rq->rd = rd; | 6798 | rq->rd = rd; |
6791 | 6799 | ||
6792 | cpu_set(rq->cpu, rd->span); | 6800 | cpumask_set_cpu(rq->cpu, rd->span); |
6793 | if (cpu_isset(rq->cpu, cpu_online_map)) | 6801 | if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) |
6794 | set_rq_online(rq); | 6802 | set_rq_online(rq); |
6795 | 6803 | ||
6796 | spin_unlock_irqrestore(&rq->lock, flags); | 6804 | spin_unlock_irqrestore(&rq->lock, flags); |
6797 | } | 6805 | } |
6798 | 6806 | ||
6799 | static void init_rootdomain(struct root_domain *rd) | 6807 | static int init_rootdomain(struct root_domain *rd, bool bootmem) |
6800 | { | 6808 | { |
6801 | memset(rd, 0, sizeof(*rd)); | 6809 | memset(rd, 0, sizeof(*rd)); |
6802 | 6810 | ||
6803 | cpus_clear(rd->span); | 6811 | if (bootmem) { |
6804 | cpus_clear(rd->online); | 6812 | alloc_bootmem_cpumask_var(&def_root_domain.span); |
6813 | alloc_bootmem_cpumask_var(&def_root_domain.online); | ||
6814 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); | ||
6815 | cpupri_init(&rd->cpupri); | ||
6816 | return 0; | ||
6817 | } | ||
6818 | |||
6819 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) | ||
6820 | goto free_rd; | ||
6821 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | ||
6822 | goto free_span; | ||
6823 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | ||
6824 | goto free_online; | ||
6805 | 6825 | ||
6806 | cpupri_init(&rd->cpupri); | 6826 | cpupri_init(&rd->cpupri); |
6827 | return 0; | ||
6828 | |||
6829 | free_online: | ||
6830 | free_cpumask_var(rd->online); | ||
6831 | free_span: | ||
6832 | free_cpumask_var(rd->span); | ||
6833 | free_rd: | ||
6834 | kfree(rd); | ||
6835 | return -ENOMEM; | ||
6807 | } | 6836 | } |
6808 | 6837 | ||
6809 | static void init_defrootdomain(void) | 6838 | static void init_defrootdomain(void) |
6810 | { | 6839 | { |
6811 | init_rootdomain(&def_root_domain); | 6840 | init_rootdomain(&def_root_domain, true); |
6841 | |||
6812 | atomic_set(&def_root_domain.refcount, 1); | 6842 | atomic_set(&def_root_domain.refcount, 1); |
6813 | } | 6843 | } |
6814 | 6844 | ||
@@ -6820,7 +6850,10 @@ static struct root_domain *alloc_rootdomain(void) | |||
6820 | if (!rd) | 6850 | if (!rd) |
6821 | return NULL; | 6851 | return NULL; |
6822 | 6852 | ||
6823 | init_rootdomain(rd); | 6853 | if (init_rootdomain(rd, false) != 0) { |
6854 | kfree(rd); | ||
6855 | return NULL; | ||
6856 | } | ||
6824 | 6857 | ||
6825 | return rd; | 6858 | return rd; |
6826 | } | 6859 | } |
@@ -7632,7 +7665,7 @@ free_sched_groups: | |||
7632 | #ifdef CONFIG_NUMA | 7665 | #ifdef CONFIG_NUMA |
7633 | error: | 7666 | error: |
7634 | free_sched_groups(cpu_map, tmpmask); | 7667 | free_sched_groups(cpu_map, tmpmask); |
7635 | kfree(rd); | 7668 | free_rootdomain(rd); |
7636 | goto free_tmpmask; | 7669 | goto free_tmpmask; |
7637 | #endif | 7670 | #endif |
7638 | } | 7671 | } |