author		Rusty Russell <rusty@rustcorp.com.au>	2008-12-31 18:42:26 -0500
committer	Rusty Russell <rusty@rustcorp.com.au>	2008-12-31 18:42:26 -0500
commit		bd232f97b30f6bb630efa136a777647545db3039 (patch)
tree		0dd55c07abfee9e1f4c83f9e8cbf853f817ff226 /kernel/rcupreempt.c
parent		d036e67b40f52bdd95392390108defbac7e53837 (diff)
cpumask: convert RCU implementations
Impact: use new cpumask API.
rcu_ctrlblk contains a cpumask, and it's highly optimized, so I don't want
a cpumask_var_t (i.e. a pointer) for the CONFIG_CPUMASK_OFFSTACK case. It
could use a dangling bitmap allocated in __rcu_init to save memory, but
for the moment we use a statically-declared bitmap.
(Eventually 'struct cpumask' will be undefined for CONFIG_CPUMASK_OFFSTACK,
so we use a bitmap here to show we really mean it).
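A minimal sketch of that bitmap pattern, assuming the usual <linux/cpumask.h>
helpers; mark_cpu_online() is a made-up wrapper for illustration, not code
from this patch:

#include <linux/cpumask.h>	/* DECLARE_BITMAP, to_cpumask(), CPU_BITS_NONE */

/* NR_CPUS bits of static storage: no pointer indirection, even when
 * CONFIG_CPUMASK_OFFSTACK turns cpumask_var_t into a pointer. */
static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
	= CPU_BITS_NONE;

/* Hypothetical wrapper showing the access pattern used throughout the
 * patch: to_cpumask() wraps the raw bitmap as a struct cpumask * so the
 * ordinary cpumask API applies to it. */
static void mark_cpu_online(int cpu)
{
	cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
}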
We remove the on-stack cpumasks, using cpumask_var_t for
rcu_torture_shuffle_tasks() and for_each_cpu_and() in force_quiescent_state().
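Those removals follow the standard cpumask_var_t idiom: allocate the mask
once at init time, reuse it, free it on teardown. A minimal sketch under
that assumption; the function names here are illustrative, not the ones
touched by the patch:

#include <linux/cpumask.h>
#include <linux/errno.h>	/* ENOMEM */
#include <linux/gfp.h>		/* GFP_KERNEL */
#include <linux/init.h>		/* __init, __exit */

static cpumask_var_t tmp_mask;	/* replaces a cpumask_t local variable */

static int __init example_init(void)
{
	/* Off-stack configs kmalloc() the mask here; on-stack configs
	 * make this a no-op over a one-element array. */
	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static void example_scan(const struct cpumask *candidates)
{
	int cpu;

	/* for_each_cpu_and() iterates the intersection of two masks
	 * directly, so no temporary mask is needed at all. */
	for_each_cpu_and(cpu, candidates, cpu_online_mask)
		cpumask_set_cpu(cpu, tmp_mask);	/* illustrative work */
}

static void __exit example_exit(void)
{
	free_cpumask_var(tmp_mask);
}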
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'kernel/rcupreempt.c')
-rw-r--r--	kernel/rcupreempt.c	19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 04982659875a..f9dc8f3720f6 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -164,7 +164,8 @@ static char *rcu_try_flip_state_names[] =
 	{ "idle", "waitack", "waitzero", "waitmb" };
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;
+static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
+	= CPU_BITS_NONE;
 
 /*
  * Enum and per-CPU flag to determine when each CPU has seen
@@ -758,7 +759,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -776,7 +777,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -808,7 +809,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -823,7 +824,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb();  /*  ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -843,7 +844,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
@@ -1032,7 +1033,7 @@ void rcu_offline_cpu(int cpu)
 	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
 	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
 
-	cpu_clear(cpu, rcu_cpu_online_map);
+	cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
 
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 
@@ -1072,7 +1073,7 @@ void __cpuinit rcu_online_cpu(int cpu)
 	struct rcu_data *rdp;
 
 	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
-	cpu_set(cpu, rcu_cpu_online_map);
+	cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 
 	/*
@@ -1430,7 +1431,7 @@ void __init __rcu_init(void)
	 * We don't need protection against CPU-Hotplug here
	 * since
	 * a) If a CPU comes online while we are iterating over the
-	 *    cpu_online_map below, we would only end up making a
+	 *    cpu_online_mask below, we would only end up making a
	 *    duplicate call to rcu_online_cpu() which sets the corresponding
	 *    CPU's mask in the rcu_cpu_online_map.
	 *