author     Rusty Russell <rusty@rustcorp.com.au>    2008-11-24 11:05:09 -0500
committer  Ingo Molnar <mingo@elte.hu>              2008-11-24 11:51:24 -0500
commit     7d1e6a9b95e3edeac91888bc683ae62f18519432 (patch)
tree       2459258c480790b6f12beb7db39e6f5345aa1b9c /kernel
parent     c6c4927b22a3514c6660f0e72c78716226bd3cc8 (diff)
sched: convert nohz struct to cpumask_var_t.
Impact: (future) size reduction for large NR_CPUS.
Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
space for small nr_cpu_ids but big CONFIG_NR_CPUS. cpumask_var_t
is just a struct cpumask for !CONFIG_CPUMASK_OFFSTACK.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
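
As background for the conversion, a rough sketch of the cpumask_var_t lifecycle the patch moves to (illustrative only, not part of the patch; the function name, GFP flag, and error handling here are assumptions):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int example_use_cpumask(void)
{
        cpumask_var_t mask;

        /* With CONFIG_CPUMASK_OFFSTACK this allocates a struct cpumask;
         * otherwise it is effectively a no-op that returns true. */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_clear(mask);
        cpumask_set_cpu(0, mask);

        if (cpumask_test_cpu(0, mask))
                printk(KERN_INFO "weight=%u\n", cpumask_weight(mask));

        free_cpumask_var(mask);
        return 0;
}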
Diffstat (limited to 'kernel')
 -rw-r--r-- kernel/sched.c | 29 ++++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 93309c3034de..2f8ea99df16a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3758,10 +3758,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 #ifdef CONFIG_NO_HZ
 static struct {
 	atomic_t load_balancer;
-	cpumask_t cpu_mask;
+	cpumask_var_t cpu_mask;
 } nohz ____cacheline_aligned = {
 	.load_balancer = ATOMIC_INIT(-1),
-	.cpu_mask = CPU_MASK_NONE,
 };
 
 /*
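
Note why the static .cpu_mask = CPU_MASK_NONE initializer disappears: once the member is a cpumask_var_t it may be a bare pointer that only becomes valid after the allocation added to sched_init() at the end of this patch. Roughly, the two forms of the type look like this (a simplified sketch of the <linux/cpumask.h> definitions of this era):

#ifdef CONFIG_CPUMASK_OFFSTACK
/* A pointer: storage is allocated separately at runtime, so it cannot
 * be statically initialised with CPU_MASK_NONE. */
typedef struct cpumask *cpumask_var_t;
#else
/* A one-element array: behaves like a plain struct cpumask in static
 * data or on the stack, and decays to a pointer when passed around. */
typedef struct cpumask cpumask_var_t[1];
#endif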
@@ -3789,7 +3788,7 @@ int select_nohz_load_balancer(int stop_tick)
 	int cpu = smp_processor_id();
 
 	if (stop_tick) {
-		cpu_set(cpu, nohz.cpu_mask);
+		cpumask_set_cpu(cpu, nohz.cpu_mask);
 		cpu_rq(cpu)->in_nohz_recently = 1;
 
 		/*
@@ -3803,7 +3802,7 @@ int select_nohz_load_balancer(int stop_tick)
 		}
 
 		/* time for ilb owner also to sleep */
-		if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
+		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
 				atomic_set(&nohz.load_balancer, -1);
 			return 0;
@@ -3816,10 +3815,10 @@ int select_nohz_load_balancer(int stop_tick)
 		} else if (atomic_read(&nohz.load_balancer) == cpu)
 			return 1;
 	} else {
-		if (!cpu_isset(cpu, nohz.cpu_mask))
+		if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
 			return 0;
 
-		cpu_clear(cpu, nohz.cpu_mask);
+		cpumask_clear_cpu(cpu, nohz.cpu_mask);
 
 		if (atomic_read(&nohz.load_balancer) == cpu)
 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
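
The edits in select_nohz_load_balancer() are a mechanical switch from the old by-name operators (cpu_set, cpu_clear, cpu_isset, cpus_weight) to the pointer-based helpers (cpumask_set_cpu, cpumask_clear_cpu, cpumask_test_cpu, cpumask_weight). A sketch of the same bookkeeping pattern with the new helpers (illustrative names, not the scheduler's actual code; idle_mask is assumed to be allocated at init):

#include <linux/cpumask.h>
#include <linux/types.h>

static cpumask_var_t idle_mask;		/* assumed allocated at init */

static int mark_cpu_idle(int cpu, bool idle)
{
        if (idle)
                cpumask_set_cpu(cpu, idle_mask);
        else if (cpumask_test_cpu(cpu, idle_mask))
                cpumask_clear_cpu(cpu, idle_mask);

        /* All online CPUs idle? (mirrors the num_online_cpus() test above) */
        return cpumask_weight(idle_mask) == num_online_cpus();
}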
@@ -3930,12 +3929,13 @@ static void run_rebalance_domains(struct softirq_action *h)
 	 */
 	if (this_rq->idle_at_tick &&
 	    atomic_read(&nohz.load_balancer) == this_cpu) {
-		cpumask_t cpus = nohz.cpu_mask;
 		struct rq *rq;
 		int balance_cpu;
 
-		cpu_clear(this_cpu, cpus);
-		for_each_cpu(balance_cpu, &cpus) {
+		for_each_cpu(balance_cpu, nohz.cpu_mask) {
+			if (balance_cpu == this_cpu)
+				continue;
+
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
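
The run_rebalance_domains() hunk also drops the on-stack copy "cpumask_t cpus = nohz.cpu_mask", exactly the kind of stack usage the conversion targets (a cpumask_t is 512 bytes with NR_CPUS=4096). Instead, the shared mask is walked directly and the current CPU is skipped inside the loop. A minimal sketch of that pattern (the helper name is illustrative):

/* Walk a shared mask directly instead of copying it to the stack;
 * skip the current CPU inside the loop. */
static void visit_other_cpus(const struct cpumask *mask, int this_cpu)
{
        int cpu;

        for_each_cpu(cpu, mask) {
                if (cpu == this_cpu)
                        continue;

                /* ... per-cpu work for 'cpu' goes here ... */
        }
}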
@@ -3973,7 +3973,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 		rq->in_nohz_recently = 0;
 
 		if (atomic_read(&nohz.load_balancer) == cpu) {
-			cpu_clear(cpu, nohz.cpu_mask);
+			cpumask_clear_cpu(cpu, nohz.cpu_mask);
 			atomic_set(&nohz.load_balancer, -1);
 		}
 
@@ -3986,7 +3986,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 			 * TBD: Traverse the sched domains and nominate
 			 * the nearest cpu in the nohz.cpu_mask.
 			 */
-			int ilb = first_cpu(nohz.cpu_mask);
+			int ilb = cpumask_first(nohz.cpu_mask);
 
 			if (ilb < nr_cpu_ids)
 				resched_cpu(ilb);
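
first_cpu() and its replacement cpumask_first() both return a value >= nr_cpu_ids when the mask is empty, which is why the existing "ilb < nr_cpu_ids" check needs no change. A small sketch of that idiom (the helper name is hypothetical):

#include <linux/cpumask.h>

/* Lowest-numbered CPU in a mask, or -1 if the mask is empty. */
static int lowest_cpu_or_none(const struct cpumask *mask)
{
        int cpu = cpumask_first(mask);

        return cpu < nr_cpu_ids ? cpu : -1;
}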
@@ -3998,7 +3998,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 	 * cpus with ticks stopped, is it time for that to stop?
 	 */
 	if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
-	    cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
+	    cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 		resched_cpu(cpu);
 		return;
 	}
@@ -4008,7 +4008,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 	 * someone else, then no need raise the SCHED_SOFTIRQ
 	 */
 	if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
-	    cpu_isset(cpu, nohz.cpu_mask))
+	    cpumask_test_cpu(cpu, nohz.cpu_mask))
 		return;
 #endif
 	if (time_after_eq(jiffies, rq->next_balance))
@@ -8309,6 +8309,9 @@ void __init sched_init(void)
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
 	alloc_bootmem_cpumask_var(&nohz_cpu_mask);
+#ifdef CONFIG_NO_HZ
+	alloc_bootmem_cpumask_var(&nohz.cpu_mask);
+#endif
 
 	scheduler_running = 1;
 }
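
Finally, sched_init() gains the allocation that backs nohz.cpu_mask. It uses alloc_bootmem_cpumask_var() rather than alloc_cpumask_var(), presumably because sched_init() runs before the slab allocator is usable; with !CONFIG_CPUMASK_OFFSTACK both calls do nothing. A sketch of the early-init pattern (the surrounding function and variable are illustrative, not from this patch):

#include <linux/cpumask.h>
#include <linux/init.h>

static cpumask_var_t example_mask;	/* illustrative */

void __init example_early_init(void)
{
        /* Bootmem-backed allocation, usable before kmalloc()/slab is up;
         * a no-op when cpumask_var_t is not off-stack. */
        alloc_bootmem_cpumask_var(&example_mask);
}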