Diffstat (limited to 'kernel/rcupreempt.c')

 kernel/rcupreempt.c | 40 ++++++++++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 10 deletions(-)
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 59236e8b9daa..33cfc50781f9 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -164,7 +164,8 @@ static char *rcu_try_flip_state_names[] =
 	{ "idle", "waitack", "waitzero", "waitmb" };
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE;
+static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
+	= CPU_BITS_NONE;
 
 /*
  * Enum and per-CPU flag to determine when each CPU has seen
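
Background on the conversion above: DECLARE_BITMAP(name, bits) declares an array of unsigned long large enough to hold one bit per CPU, and to_cpumask() reinterprets that array as a struct cpumask * for the cpumask API. Below is a minimal userspace sketch of the layout; NR_CPUS and the sketch_* helpers are illustrative stand-ins, not the kernel's actual implementation.

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 128	/* illustrative; the kernel takes this from Kconfig */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

/* Static storage is zero-initialized, which is all CPU_BITS_NONE means. */
static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS);

/* Non-atomic stand-ins for cpumask_set_cpu()/cpumask_clear_cpu()/cpumask_test_cpu(). */
static void sketch_set_cpu(int cpu)
{
	rcu_cpu_online_map[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static void sketch_clear_cpu(int cpu)
{
	rcu_cpu_online_map[cpu / BITS_PER_LONG] &= ~(1UL << (cpu % BITS_PER_LONG));
}

static int sketch_test_cpu(int cpu)
{
	return (rcu_cpu_online_map[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

int main(void)
{
	sketch_set_cpu(3);
	printf("cpu 3 online: %d\n", sketch_test_cpu(3));	/* prints 1 */
	sketch_clear_cpu(3);
	printf("cpu 3 online: %d\n", sketch_test_cpu(3));	/* prints 0 */
	return 0;
}

The cpumask_set_cpu()/cpumask_clear_cpu() calls introduced in the hotplug hunks further down are the kernel's atomic counterparts of these helpers.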
@@ -551,6 +552,16 @@ void rcu_irq_exit(void)
 	}
 }
 
+void rcu_nmi_enter(void)
+{
+	rcu_irq_enter();
+}
+
+void rcu_nmi_exit(void)
+{
+	rcu_irq_exit();
+}
+
 static void dyntick_save_progress_counter(int cpu)
 {
 	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
@@ -748,7 +759,7 @@ rcu_try_flip_idle(void)
 
 	/* Now ask each CPU for acknowledgement of the flip. */
 
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
 		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
 		dyntick_save_progress_counter(cpu);
 	}
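
The for_each_cpu() conversion in this and the following hunks is mechanical: the iterator simply visits each set bit of the mask. A hedged sketch of what that amounts to, reusing the helpers from the sketch above (the real iterator is built on find_next_bit(), which skips whole zero words instead of testing every bit):

/* Illustrative linear scan; not the kernel's find_next_bit() implementation. */
static void sketch_for_each_online_cpu(void (*fn)(int cpu))
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (sketch_test_cpu(cpu))
			fn(cpu);	/* loop body runs once per online CPU */
}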
@@ -766,7 +777,7 @@ rcu_try_flip_waitack(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		if (rcu_try_flip_waitack_needed(cpu) &&
 		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -798,7 +809,7 @@ rcu_try_flip_waitzero(void)
 	/* Check to see if the sum of the "last" counters is zero. */
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
 	if (sum != 0) {
 		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -813,7 +824,7 @@ rcu_try_flip_waitzero(void)
 	smp_mb();  /*  ^^^^^^^^^^^^ */
 
 	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
 		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
 		dyntick_save_progress_counter(cpu);
 	}
@@ -833,7 +844,7 @@ rcu_try_flip_waitmb(void)
 	int cpu;
 
 	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
+	for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
 		if (rcu_try_flip_waitmb_needed(cpu) &&
 		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
 			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
@@ -1022,7 +1033,7 @@ void rcu_offline_cpu(int cpu)
 	RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
 	RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
 
-	cpu_clear(cpu, rcu_cpu_online_map);
+	cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
 
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 
@@ -1062,7 +1073,7 @@ void __cpuinit rcu_online_cpu(int cpu)
 	struct rcu_data *rdp;
 
 	spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
-	cpu_set(cpu, rcu_cpu_online_map);
+	cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
 	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
 
 	/*
@@ -1166,7 +1177,16 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
 * in -rt this does -not- necessarily result in all currently executing
 * interrupt -handlers- having completed.
 */
-synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched)
+void __synchronize_sched(void)
+{
+	struct rcu_synchronize rcu;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
 EXPORT_SYMBOL_GPL(__synchronize_sched);
 
 /*
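
The open-coded __synchronize_sched() above replaces the old synchronize_rcu_xxx() macro with the usual completion handshake: queue a callback (wakeme_after_rcu) that fires once a grace period has elapsed, then sleep until it runs. As a rough userspace analogue of struct completion, assuming pthreads (all sketch_* names are illustrative):

#include <pthread.h>

struct sketch_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

/* Analogue of init_completion(). */
static void sketch_init_completion(struct sketch_completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

/* Analogue of complete(), which is what wakeme_after_rcu() ends up calling. */
static void sketch_complete(struct sketch_completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Analogue of wait_for_completion(): block until the callback has fired. */
static void sketch_wait_for_completion(struct sketch_completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}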
@@ -1420,7 +1440,7 @@ void __init __rcu_init(void)
 	 * We don't need protection against CPU-Hotplug here
 	 * since
 	 * a) If a CPU comes online while we are iterating over the
-	 *    cpu_online_map below, we would only end up making a
+	 *    cpu_online_mask below, we would only end up making a
 	 *    duplicate call to rcu_online_cpu() which sets the corresponding
 	 *    CPU's mask in the rcu_cpu_online_map.
 	 *