Diffstat (limited to 'kernel/rcupdate.c'):
 -rw-r--r--  kernel/rcupdate.c | 182
 1 file changed, 8 insertions(+), 174 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a967c9feb90a..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -19,7 +19,7 @@
  *
  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  *	    Manfred Spraul <manfred@colorfullife.com>
  *
  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
@@ -27,7 +27,7 @@
  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  *		http://lse.sourceforge.net/locking/rcupdate.html
  *
  */
 #include <linux/types.h>
@@ -44,23 +44,13 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
-#include <linux/kernel_stat.h>
-
-enum rcu_barrier {
-	RCU_BARRIER_STD,
-	RCU_BARRIER_BH,
-	RCU_BARRIER_SCHED,
-};
 
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
-int rcu_scheduler_active __read_mostly;
-
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
 
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
@@ -73,159 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
 	rcu = container_of(head, struct rcu_synchronize, head);
 	complete(&rcu->completion);
 }
-
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-void synchronize_rcu(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
-static void rcu_barrier_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-}
-
-/*
- * Called with preemption disabled, and from cross-cpu IRQ context.
- */
-static void rcu_barrier_func(void *type)
-{
-	int cpu = smp_processor_id();
-	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-
-	atomic_inc(&rcu_barrier_cpu_count);
-	switch ((enum rcu_barrier)type) {
-	case RCU_BARRIER_STD:
-		call_rcu(head, rcu_barrier_callback);
-		break;
-	case RCU_BARRIER_BH:
-		call_rcu_bh(head, rcu_barrier_callback);
-		break;
-	case RCU_BARRIER_SCHED:
-		call_rcu_sched(head, rcu_barrier_callback);
-		break;
-	}
-}
-
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-}
-
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
-static void _rcu_barrier(enum rcu_barrier type)
-{
-	BUG_ON(in_interrupt());
-	/* Take cpucontrol mutex to protect against CPU hotplug */
-	mutex_lock(&rcu_barrier_mutex);
-	init_completion(&rcu_barrier_completion);
-	/*
-	 * Initialize rcu_barrier_cpu_count to 1, then invoke
-	 * rcu_barrier_func() on each CPU, so that each CPU also has
-	 * incremented rcu_barrier_cpu_count. Only then is it safe to
-	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
-	 * might complete its grace period before all of the other CPUs
-	 * did their increment, causing this function to return too
-	 * early.
-	 */
-	atomic_set(&rcu_barrier_cpu_count, 1);
-	on_each_cpu(rcu_barrier_func, (void *)type, 1);
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-	wait_for_completion(&rcu_barrier_completion);
-	mutex_unlock(&rcu_barrier_mutex);
-	wait_migrated_callbacks();
-}
-
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- */
-void rcu_barrier(void)
-{
-	_rcu_barrier(RCU_BARRIER_STD);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
-/**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-void rcu_barrier_bh(void)
-{
-	_rcu_barrier(RCU_BARRIER_BH);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- */
-void rcu_barrier_sched(void)
-{
-	_rcu_barrier(RCU_BARRIER_SCHED);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
-static void rcu_migrate_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_migrate_type_count))
-		wake_up(&rcu_migrate_wq);
-}
-
-static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
-		unsigned long action, void *hcpu)
-{
-	if (action == CPU_DYING) {
-		/*
-		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
-		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
-		 * returns, all online cpus have queued rcu_barrier_func(),
-		 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
-		 *
-		 * These callbacks ensure _rcu_barrier() waits for all
-		 * RCU callbacks of the specified type to complete.
-		 */
-		atomic_set(&rcu_migrate_type_count, 3);
-		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
-		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
-		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
-	} else if (action == CPU_POST_DEAD) {
-		/* rcu_migrate_head is protected by cpu_add_remove_lock */
-		wait_migrated_callbacks();
-	}
-
-	return NOTIFY_OK;
-}
-
-void __init rcu_init(void)
-{
-	__rcu_init();
-	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
-}
-
-void rcu_scheduler_starting(void)
-{
-	WARN_ON(num_online_cpus() != 1);
-	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
-}
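
The block comment in the removed _rcu_barrier() explains why rcu_barrier_cpu_count starts at 1: the waiter keeps one extra reference until every CPU has queued its callback, so the count cannot reach zero before all registrations are done. The stand-alone user-space sketch below reproduces that counting pattern with pthreads; it is illustrative only, and every name in it (barrier_count, deferred_callback, and so on) is hypothetical rather than part of this patch.

/*
 * Sketch of the _rcu_barrier() counting pattern in user space.
 * Threads stand in for per-CPU callbacks; names are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCALLBACKS 4

static atomic_int barrier_count;
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
static int done;

/* Analogue of rcu_barrier_callback(): the final decrement signals completion. */
static void barrier_callback(void)
{
	if (atomic_fetch_sub(&barrier_count, 1) == 1) {
		pthread_mutex_lock(&done_lock);
		done = 1;
		pthread_cond_signal(&done_cond);
		pthread_mutex_unlock(&done_lock);
	}
}

/* Each registered "callback" runs some time later, like call_rcu() after a grace period. */
static void *deferred_callback(void *unused)
{
	usleep(1000);
	barrier_callback();
	return NULL;
}

int main(void)
{
	pthread_t tid[NCALLBACKS];
	int i;

	/* Start at 1: the extra reference belongs to the waiter itself. */
	atomic_store(&barrier_count, 1);

	/*
	 * Registration phase, analogous to on_each_cpu(rcu_barrier_func, ...):
	 * every increment happens before the waiter drops its own reference.
	 */
	for (i = 0; i < NCALLBACKS; i++) {
		atomic_fetch_add(&barrier_count, 1);
		pthread_create(&tid[i], NULL, deferred_callback, NULL);
	}

	/* Drop the initial reference, then wait, as _rcu_barrier() does. */
	barrier_callback();
	pthread_mutex_lock(&done_lock);
	while (!done)
		pthread_cond_wait(&done_cond, &done_lock);
	pthread_mutex_unlock(&done_lock);

	for (i = 0; i < NCALLBACKS; i++)
		pthread_join(tid[i], NULL);
	printf("all deferred callbacks have completed\n");
	return 0;
}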