Diffstat (limited to 'kernel/rcupdate.c')
 -rw-r--r--  kernel/rcupdate.c | 190
 1 file changed, 73 insertions(+), 117 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a967c9feb90a..400183346ad2 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -19,7 +19,7 @@
  *
  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  *	    Manfred Spraul <manfred@colorfullife.com>
- * 
+ *
  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
@@ -27,7 +27,7 @@
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
- * 		http://lse.sourceforge.net/locking/rcupdate.html
+ *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
 #include <linux/types.h>
@@ -46,22 +46,15 @@
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
 
-enum rcu_barrier {
-	RCU_BARRIER_STD,
-	RCU_BARRIER_BH,
-	RCU_BARRIER_SCHED,
-};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
 
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
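The lockdep map added in this hunk is what lets the lock validator treat rcu_read_lock() as a lock class. A minimal sketch of how a lockdep_map such as rcu_lock_map is consumed, modeled on the rcu_read_acquire()/rcu_read_release() hooks in this era's include/linux/rcupdate.h; the exact lock_acquire() argument values are an assumption drawn from that header, not part of this diff:

#include <linux/lockdep.h>

/* Sketch: wiring rcu_lock_map into the read-side primitives.  The
 * lock_acquire() arguments (read-recursive, no trylock) follow the
 * era's rcupdate.h and are assumptions, not part of this patch.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define rcu_read_acquire() \
		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release() \
		lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif

With the map in place, lockdep can warn about, for example, blocking while inside an RCU read-side critical section, instead of the bug surfacing as a use-after-free in the field.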
@@ -74,6 +67,8 @@ void wakeme_after_rcu(struct rcu_head *head)
 	complete(&rcu->completion);
 }
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
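Everything in this file funnels through one small pattern: a wrapper that pairs an rcu_head with a completion, so a synchronous caller can sleep until its grace-period callback runs. This is the struct rcu_synchronize behind wakeme_after_rcu(); the sketch below mirrors this era's include/linux/rcupdate.h and kernel/rcupdate.c:

#include <linux/rcupdate.h>
#include <linux/completion.h>

/* Pair an RCU callback with a completion; the synchronous caller
 * queues ->head with call_rcu*() and sleeps on ->completion.
 */
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* Grace-period callback: recover the enclosing rcu_synchronize from
 * its rcu_head and wake the sleeping caller.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}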
@@ -87,7 +82,7 @@ void synchronize_rcu(void)
 {
 	struct rcu_synchronize rcu;
 
-	if (rcu_blocking_is_gp())
+	if (!rcu_scheduler_active)
 		return;
 
 	init_completion(&rcu.completion);
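The rcu_scheduler_active test above only short-circuits early boot, when there can be no concurrent readers; otherwise synchronize_rcu() genuinely blocks for a full grace period. A hedged sketch of the updater-side pattern it serves, with hypothetical names (struct foo, foo_list, foo_lock, and foo_del() are illustrative, not from this file):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical RCU-protected list element. */
struct foo {
	struct list_head list;
	int key;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Remove-then-free: unpublish the element, wait out all pre-existing
 * rcu_read_lock() readers, and only then release the memory.
 */
static void foo_del(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);	/* readers can no longer find it */
	spin_unlock(&foo_lock);
	synchronize_rcu();		/* wait for a full grace period */
	kfree(fp);			/* no reader can still hold fp */
}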
@@ -98,129 +93,90 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static void rcu_barrier_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-}
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-/*
- * Called with preemption disabled, and from cross-cpu IRQ context.
+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed.  These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns.  However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API.  In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
  */
-static void rcu_barrier_func(void *type)
-{
-	int cpu = smp_processor_id();
-	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-
-	atomic_inc(&rcu_barrier_cpu_count);
-	switch ((enum rcu_barrier)type) {
-	case RCU_BARRIER_STD:
-		call_rcu(head, rcu_barrier_callback);
-		break;
-	case RCU_BARRIER_BH:
-		call_rcu_bh(head, rcu_barrier_callback);
-		break;
-	case RCU_BARRIER_SCHED:
-		call_rcu_sched(head, rcu_barrier_callback);
-		break;
-	}
-}
-
-static inline void wait_migrated_callbacks(void)
+void synchronize_sched(void)
 {
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-}
+	struct rcu_synchronize rcu;
 
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
-static void _rcu_barrier(enum rcu_barrier type)
-{
-	BUG_ON(in_interrupt());
-	/* Take cpucontrol mutex to protect against CPU hotplug */
-	mutex_lock(&rcu_barrier_mutex);
-	init_completion(&rcu_barrier_completion);
-	/*
-	 * Initialize rcu_barrier_cpu_count to 1, then invoke
-	 * rcu_barrier_func() on each CPU, so that each CPU also has
-	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
-	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
-	 * might complete its grace period before all of the other CPUs
-	 * did their increment, causing this function to return too
-	 * early.
-	 */
-	atomic_set(&rcu_barrier_cpu_count, 1);
-	on_each_cpu(rcu_barrier_func, (void *)type, 1);
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-	wait_for_completion(&rcu_barrier_completion);
-	mutex_unlock(&rcu_barrier_mutex);
-	wait_migrated_callbacks();
-}
+	if (rcu_blocking_is_gp())
+		return;
 
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- */
-void rcu_barrier(void)
-{
-	_rcu_barrier(RCU_BARRIER_STD);
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
 }
-EXPORT_SYMBOL_GPL(rcu_barrier);
+EXPORT_SYMBOL_GPL(synchronize_sched);
 
 /**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
  */
-void rcu_barrier_bh(void)
+void synchronize_rcu_bh(void)
 {
-	_rcu_barrier(RCU_BARRIER_BH);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+	struct rcu_synchronize rcu;
 
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- */
-void rcu_barrier_sched(void)
-{
-	_rcu_barrier(RCU_BARRIER_SCHED);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+	if (rcu_blocking_is_gp())
+		return;
 
-static void rcu_migrate_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_migrate_type_count))
-		wake_up(&rcu_migrate_wq);
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_bh(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
 }
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
-	if (action == CPU_DYING) {
-		/*
-		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
-		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
-		 * returns, all online cpus have queued rcu_barrier_func(),
-		 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
-		 *
-		 * These callbacks ensure _rcu_barrier() waits for all
-		 * RCU callbacks of the specified type to complete.
-		 */
-		atomic_set(&rcu_migrate_type_count, 3);
-		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
-		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
-		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
-	} else if (action == CPU_POST_DEAD) {
-		/* rcu_migrate_head is protected by cpu_add_remove_lock */
-		wait_migrated_callbacks();
-	}
-
-	return NOTIFY_OK;
+	return rcu_cpu_notify(self, action, hcpu);
 }
 
 void __init rcu_init(void)
 {
+	int i;
+
 	__rcu_init();
-	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
+	cpu_notifier(rcu_barrier_cpu_hotplug, 0);
+
+	/*
+	 * We don't need protection against CPU-hotplug here because
+	 * this is called early in boot, before either interrupts
+	 * or the scheduler are operational.
+	 */
+	for_each_online_cpu(i)
+		rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
 }
 
 void rcu_scheduler_starting(void)
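The rcu_barrier() machinery deleted above moves into the tree-RCU implementation rather than disappearing (the destination is outside this rcupdate.c-only diffstat); callers still need it whenever queued callbacks, not just readers, must drain. A hedged sketch of its canonical use in a module exit path, with hypothetical names (struct bar, bar_free(), and bar_exit() are illustrative):

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
	struct rcu_head rcu;
	/* ... payload ... */
};

/* call_rcu() callback whose code lives in module text. */
static void bar_free(struct rcu_head *head)
{
	kfree(container_of(head, struct bar, rcu));
}

static void __exit bar_exit(void)
{
	/* ... stop queueing new call_rcu(&b->rcu, bar_free) callbacks ... */

	/*
	 * synchronize_rcu() only waits for readers.  rcu_barrier()
	 * waits until every already-queued call_rcu() callback, such
	 * as bar_free(), has been invoked, so unloading this module
	 * cannot leave a pending callback pointing into freed text.
	 */
	rcu_barrier();
}
module_exit(bar_exit);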