Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r--	kernel/rcupdate.c	188
1 file changed, 52 insertions(+), 136 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index bd5d5c8e5140..400183346ad2 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -19,7 +19,7 @@
  *
  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  *	    Manfred Spraul <manfred@colorfullife.com>
  *
  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
@@ -27,7 +27,7 @@
  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  *		http://lse.sourceforge.net/locking/rcupdate.html
  *
  */
 #include <linux/types.h>
@@ -46,22 +46,15 @@
 #include <linux/module.h>
 #include <linux/kernel_stat.h>

-enum rcu_barrier {
-	RCU_BARRIER_STD,
-	RCU_BARRIER_BH,
-	RCU_BARRIER_SCHED,
-};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif

-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;

-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
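For context on the rcu_lock_map added above: under CONFIG_DEBUG_LOCK_ALLOC, the RCU read-side primitives report this map to lockdep so that rcu_read_lock() sections show up in held-lock and deadlock diagnostics. A rough sketch of the consumer side, modelled from memory on include/linux/rcupdate.h of this era (the exact lock_acquire()/lock_release() arguments are recalled, not part of this diff):

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
/* Tell lockdep we entered/left an RCU read-side critical section. */
# define rcu_read_acquire() \
		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif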
@@ -74,6 +67,8 @@ void wakeme_after_rcu(struct rcu_head *head)
 	complete(&rcu->completion);
 }

+#ifdef CONFIG_TREE_PREEMPT_RCU
+
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
@@ -87,7 +82,7 @@ void synchronize_rcu(void)
 {
 	struct rcu_synchronize rcu;

-	if (rcu_blocking_is_gp())
+	if (!rcu_scheduler_active)
 		return;

 	init_completion(&rcu.completion);
@@ -98,6 +93,46 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);

+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed.  These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns.  However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API.  In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
+ */
+void synchronize_sched(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_sched);
+
 /**
  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  *
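The kerneldoc added above states the guarantee; a minimal writer-side sketch shows how a caller typically relies on it. This is illustrative only: struct foo, foo_list, foo_lock and foo_remove() are invented names, not anything in this patch.

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	int data;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

static void foo_remove(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);		/* readers may still hold a reference */
	spin_unlock(&foo_lock);

	/*
	 * Wait until every preempt_disable()/rcu_read_lock_sched()
	 * section that might still see the old element has finished.
	 */
	synchronize_sched();
	kfree(p);			/* no reader can reach the element now */
}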
@@ -122,129 +157,10 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);

-static void rcu_barrier_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-}
-
-/*
- * Called with preemption disabled, and from cross-cpu IRQ context.
- */
-static void rcu_barrier_func(void *type)
-{
-	int cpu = smp_processor_id();
-	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-
-	atomic_inc(&rcu_barrier_cpu_count);
-	switch ((enum rcu_barrier)type) {
-	case RCU_BARRIER_STD:
-		call_rcu(head, rcu_barrier_callback);
-		break;
-	case RCU_BARRIER_BH:
-		call_rcu_bh(head, rcu_barrier_callback);
-		break;
-	case RCU_BARRIER_SCHED:
-		call_rcu_sched(head, rcu_barrier_callback);
-		break;
-	}
-}
-
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-	smp_mb(); /* In case we didn't sleep. */
-}
-
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
-static void _rcu_barrier(enum rcu_barrier type)
-{
-	BUG_ON(in_interrupt());
-	/* Take cpucontrol mutex to protect against CPU hotplug */
-	mutex_lock(&rcu_barrier_mutex);
-	init_completion(&rcu_barrier_completion);
-	/*
-	 * Initialize rcu_barrier_cpu_count to 1, then invoke
-	 * rcu_barrier_func() on each CPU, so that each CPU also has
-	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
-	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
-	 * might complete its grace period before all of the other CPUs
-	 * did their increment, causing this function to return too
-	 * early.
-	 */
-	atomic_set(&rcu_barrier_cpu_count, 1);
-	on_each_cpu(rcu_barrier_func, (void *)type, 1);
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-	wait_for_completion(&rcu_barrier_completion);
-	mutex_unlock(&rcu_barrier_mutex);
-	wait_migrated_callbacks();
-}
-
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- */
-void rcu_barrier(void)
-{
-	_rcu_barrier(RCU_BARRIER_STD);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
-/**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-void rcu_barrier_bh(void)
-{
-	_rcu_barrier(RCU_BARRIER_BH);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- */
-void rcu_barrier_sched(void)
-{
-	_rcu_barrier(RCU_BARRIER_SCHED);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
-static void rcu_migrate_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_migrate_type_count))
-		wake_up(&rcu_migrate_wq);
-}
-
-extern int rcu_cpu_notify(struct notifier_block *self,
-			  unsigned long action, void *hcpu);
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
-	rcu_cpu_notify(self, action, hcpu);
-	if (action == CPU_DYING) {
-		/*
-		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
-		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
-		 * returns, all online cpus have queued rcu_barrier_func(),
-		 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
-		 *
-		 * These callbacks ensure _rcu_barrier() waits for all
-		 * RCU callbacks of the specified type to complete.
-		 */
-		atomic_set(&rcu_migrate_type_count, 3);
-		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
-		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
-		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
-	} else if (action == CPU_DOWN_PREPARE) {
-		/* Don't need to wait until next removal operation. */
-		/* rcu_migrate_head is protected by cpu_add_remove_lock */
-		wait_migrated_callbacks();
-	}
-
-	return NOTIFY_OK;
+	return rcu_cpu_notify(self, action, hcpu);
 }

 void __init rcu_init(void)
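The rcu_barrier() family removed from this file above exists so that pending call_rcu() callbacks can be flushed before the memory or code they reference goes away. A usage sketch of the classic caller, a module exit path; my_cache and my_exit are invented names and are not part of this patch:

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *my_cache;	/* objects freed via call_rcu() */

static void __exit my_exit(void)
{
	/* ...stop queueing new call_rcu() callbacks first... */
	rcu_barrier();			/* wait for every in-flight callback */
	kmem_cache_destroy(my_cache);	/* no callback can touch the cache now */
}
module_exit(my_exit);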