author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 16:20:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 16:20:18 -0400
commit     eee2775d9924b22643bd89b2e568cc5eed7e8a04 (patch)
tree       095ad7851895c5d39596f3ff7ee1e078235a2501 /kernel
parent     53e16fbd30005905168d9b75555fdc7e0a2eac58 (diff)
parent     7db905e636f08ea5bc9825c1f73d77802e8ccad5 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (28 commits)
rcu: Move end of special early-boot RCU operation earlier
rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
rcu: Create rcutree plugins to handle hotplug CPU for multi-level trees
rcu: Remove lockdep annotations from RCU's _notrace() API members
rcu: Add #ifdef to suppress __rcu_offline_cpu() warning in !HOTPLUG_CPU builds
rcu: Add CPU-offline processing for single-node configurations
rcu: Add "notrace" to RCU function headers used by ftrace
rcu: Remove CONFIG_PREEMPT_RCU
rcu: Merge preemptable-RCU functionality into hierarchical RCU
rcu: Simplify rcu_pending()/rcu_check_callbacks() API
rcu: Use debugfs_remove_recursive() simplify code.
rcu: Merge per-RCU-flavor initialization into pre-existing macro
rcu: Fix online/offline indication for rcudata.csv trace file
rcu: Consolidate sparse and lockdep declarations in include/linux/rcupdate.h
rcu: Renamings to increase RCU clarity
rcu: Move private definitions from include/linux/rcutree.h to kernel/rcutree.h
rcu: Expunge lingering references to CONFIG_CLASSIC_RCU, optimize on !SMP
rcu: Delay rcu_barrier() wait until beginning of next CPU-hotunplug operation.
rcu: Fix typo in rcu_irq_exit() comment header
rcu: Make rcupreempt_trace.c look at offline CPUs
...
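The API that callers see is untouched by this consolidation; only the implementations behind it (classic, preemptable, hierarchical) are merged and pruned. For orientation, here is a minimal reader/updater sketch of that unchanged API. The demo_cfg structure, cur_cfg pointer, cfg_lock and demo_* functions are invented for illustration only; the RCU primitives themselves (rcu_read_lock(), rcu_dereference(), rcu_assign_pointer(), call_rcu()) are the standard ones.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical RCU-protected structure, used only for illustration. */
struct demo_cfg {
	int value;
	struct rcu_head rcu;
};

static struct demo_cfg *cur_cfg;	/* read under rcu_read_lock() */
static DEFINE_SPINLOCK(cfg_lock);	/* serializes updaters */

/* Reader side: delimited by rcu_read_lock()/rcu_read_unlock(). */
static int demo_read(void)
{
	struct demo_cfg *cfg;
	int val = -1;

	rcu_read_lock();
	cfg = rcu_dereference(cur_cfg);
	if (cfg)
		val = cfg->value;
	rcu_read_unlock();
	return val;
}

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_cfg, rcu));
}

/* Update side: publish a new version, defer freeing the old one. */
static void demo_update(int value)
{
	struct demo_cfg *new, *old;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return;
	new->value = value;

	spin_lock(&cfg_lock);
	old = cur_cfg;
	rcu_assign_pointer(cur_cfg, new);
	spin_unlock(&cfg_lock);

	if (old)
		call_rcu(&old->rcu, demo_free_rcu);	/* freed after a grace period */
}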
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile            |    4
-rw-r--r--  kernel/exit.c              |    1
-rw-r--r--  kernel/fork.c              |    5
-rw-r--r--  kernel/rcuclassic.c        |  807
-rw-r--r--  kernel/rcupdate.c          |   44
-rw-r--r--  kernel/rcupreempt.c        | 1539
-rw-r--r--  kernel/rcupreempt_trace.c  |  334
-rw-r--r--  kernel/rcutorture.c        |  202
-rw-r--r--  kernel/rcutree.c           |  273
-rw-r--r--  kernel/rcutree.h           |  253
-rw-r--r--  kernel/rcutree_plugin.h    |  532
-rw-r--r--  kernel/rcutree_trace.c     |   88
-rw-r--r--  kernel/sched.c             |  131
-rw-r--r--  kernel/softirq.c           |    4
-rw-r--r--  kernel/timer.c             |    3
15 files changed, 1293 insertions, 2927 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 2093a691f1c2..b833bd5cc127 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -80,11 +80,9 @@ obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
-obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
 obj-$(CONFIG_TREE_RCU) += rcutree.o
-obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
+obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
-obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/exit.c b/kernel/exit.c
index c98ff7a8025f..ae5d8660ddff 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1014,6 +1014,7 @@ NORET_TYPE void do_exit(long code)
 	validate_creds_for_do_exit(tsk);
 
 	preempt_disable();
+	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
 	schedule();
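The one-line addition calls exit_rcu(), a hook that lets preemptible RCU clean up after a task that exits while still inside an RCU read-side critical section (something TREE_PREEMPT_RCU allows). Its body is not part of this hunk; the following is only a hedged sketch of what such a hook has to accomplish, not the kernel's actual implementation. The field name rcu_read_lock_nesting is taken from the preemptible-RCU code removed elsewhere in this diff; everything else is assumed.

#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Sketch only: if the exiting task is still inside an RCU read-side
 * critical section, force the outermost rcu_read_unlock() so a dead
 * task cannot stall the grace-period machinery.
 */
static void exit_rcu_sketch(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;				/* not in a read-side critical section */
	t->rcu_read_lock_nesting = 1;		/* collapse any nesting to the outermost level */
	rcu_read_unlock();			/* report the quiescent state */
}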
diff --git a/kernel/fork.c b/kernel/fork.c
index aab8579c6093..bfee931ee3fb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1007,10 +1007,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	copy_flags(clone_flags, p);
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
-#ifdef CONFIG_PREEMPT_RCU
-	p->rcu_read_lock_nesting = 0;
-	p->rcu_flipctr_idx = 0;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+	rcu_copy_process(p);
 	p->vfork_done = NULL;
 	spin_lock_init(&p->alloc_lock);
 
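copy_process() no longer open-codes per-task RCU initialization; a single helper, rcu_copy_process(), now owns it so that each RCU flavor can set up whatever per-task state it needs. Below is a sketch of the shape such a helper might take, under the assumption that TREE_PREEMPT_RCU keeps a per-task nesting count plus some blocked-reader bookkeeping; the rcu_read_unlock_special and rcu_node_entry fields are assumptions for illustration, not quotes from the kernel.

#include <linux/list.h>
#include <linux/sched.h>

/* Sketch of a per-task RCU initializer as copy_process() might call it. */
static inline void rcu_copy_process_sketch(struct task_struct *p)
{
	p->rcu_read_lock_nesting = 0;		/* child starts outside any read-side section */
#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_read_unlock_special = 0;		/* assumed: no deferred unlock work pending */
	INIT_LIST_HEAD(&p->rcu_node_entry);	/* assumed: not queued as a blocked reader */
#endif
}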
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
deleted file mode 100644
index 0f2b0b311304..000000000000
--- a/kernel/rcuclassic.c
+++ /dev/null
@@ -1,807 +0,0 @@
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2001 | ||
19 | * | ||
20 | * Authors: Dipankar Sarma <dipankar@in.ibm.com> | ||
21 | * Manfred Spraul <manfred@colorfullife.com> | ||
22 | * | ||
23 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | ||
24 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
25 | * Papers: | ||
26 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | ||
27 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | ||
28 | * | ||
29 | * For detailed explanation of Read-Copy Update mechanism see - | ||
30 | * Documentation/RCU | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/smp.h> | ||
38 | #include <linux/rcupdate.h> | ||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/sched.h> | ||
41 | #include <asm/atomic.h> | ||
42 | #include <linux/bitops.h> | ||
43 | #include <linux/module.h> | ||
44 | #include <linux/completion.h> | ||
45 | #include <linux/moduleparam.h> | ||
46 | #include <linux/percpu.h> | ||
47 | #include <linux/notifier.h> | ||
48 | #include <linux/cpu.h> | ||
49 | #include <linux/mutex.h> | ||
50 | #include <linux/time.h> | ||
51 | |||
52 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
53 | static struct lock_class_key rcu_lock_key; | ||
54 | struct lockdep_map rcu_lock_map = | ||
55 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); | ||
56 | EXPORT_SYMBOL_GPL(rcu_lock_map); | ||
57 | #endif | ||
58 | |||
59 | |||
60 | /* Definition for rcupdate control block. */ | ||
61 | static struct rcu_ctrlblk rcu_ctrlblk = { | ||
62 | .cur = -300, | ||
63 | .completed = -300, | ||
64 | .pending = -300, | ||
65 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), | ||
66 | .cpumask = CPU_BITS_NONE, | ||
67 | }; | ||
68 | |||
69 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | ||
70 | .cur = -300, | ||
71 | .completed = -300, | ||
72 | .pending = -300, | ||
73 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), | ||
74 | .cpumask = CPU_BITS_NONE, | ||
75 | }; | ||
76 | |||
77 | static DEFINE_PER_CPU(struct rcu_data, rcu_data); | ||
78 | static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | ||
79 | |||
80 | /* | ||
81 | * Increment the quiescent state counter. | ||
82 | * The counter is a bit degenerated: We do not need to know | ||
83 | * how many quiescent states passed, just if there was at least | ||
84 | * one since the start of the grace period. Thus just a flag. | ||
85 | */ | ||
86 | void rcu_qsctr_inc(int cpu) | ||
87 | { | ||
88 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
89 | rdp->passed_quiesc = 1; | ||
90 | } | ||
91 | |||
92 | void rcu_bh_qsctr_inc(int cpu) | ||
93 | { | ||
94 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | ||
95 | rdp->passed_quiesc = 1; | ||
96 | } | ||
97 | |||
98 | static int blimit = 10; | ||
99 | static int qhimark = 10000; | ||
100 | static int qlowmark = 100; | ||
101 | |||
102 | #ifdef CONFIG_SMP | ||
103 | static void force_quiescent_state(struct rcu_data *rdp, | ||
104 | struct rcu_ctrlblk *rcp) | ||
105 | { | ||
106 | int cpu; | ||
107 | unsigned long flags; | ||
108 | |||
109 | set_need_resched(); | ||
110 | spin_lock_irqsave(&rcp->lock, flags); | ||
111 | if (unlikely(!rcp->signaled)) { | ||
112 | rcp->signaled = 1; | ||
113 | /* | ||
114 | * Don't send IPI to itself. With irqs disabled, | ||
115 | * rdp->cpu is the current cpu. | ||
116 | * | ||
117 | * cpu_online_mask is updated by the _cpu_down() | ||
118 | * using __stop_machine(). Since we're in irqs disabled | ||
119 | * section, __stop_machine() is not exectuting, hence | ||
120 | * the cpu_online_mask is stable. | ||
121 | * | ||
122 | * However, a cpu might have been offlined _just_ before | ||
123 | * we disabled irqs while entering here. | ||
124 | * And rcu subsystem might not yet have handled the CPU_DEAD | ||
125 | * notification, leading to the offlined cpu's bit | ||
126 | * being set in the rcp->cpumask. | ||
127 | * | ||
128 | * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent | ||
129 | * sending smp_reschedule() to an offlined CPU. | ||
130 | */ | ||
131 | for_each_cpu_and(cpu, | ||
132 | to_cpumask(rcp->cpumask), cpu_online_mask) { | ||
133 | if (cpu != rdp->cpu) | ||
134 | smp_send_reschedule(cpu); | ||
135 | } | ||
136 | } | ||
137 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
138 | } | ||
139 | #else | ||
140 | static inline void force_quiescent_state(struct rcu_data *rdp, | ||
141 | struct rcu_ctrlblk *rcp) | ||
142 | { | ||
143 | set_need_resched(); | ||
144 | } | ||
145 | #endif | ||
146 | |||
147 | static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp, | ||
148 | struct rcu_data *rdp) | ||
149 | { | ||
150 | long batch; | ||
151 | |||
152 | head->next = NULL; | ||
153 | smp_mb(); /* Read of rcu->cur must happen after any change by caller. */ | ||
154 | |||
155 | /* | ||
156 | * Determine the batch number of this callback. | ||
157 | * | ||
158 | * Using ACCESS_ONCE to avoid the following error when gcc eliminates | ||
159 | * local variable "batch" and emits codes like this: | ||
160 | * 1) rdp->batch = rcp->cur + 1 # gets old value | ||
161 | * ...... | ||
162 | * 2)rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value | ||
163 | * then [*nxttail[0], *nxttail[1]) may contain callbacks | ||
164 | * that batch# = rdp->batch, see the comment of struct rcu_data. | ||
165 | */ | ||
166 | batch = ACCESS_ONCE(rcp->cur) + 1; | ||
167 | |||
168 | if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) { | ||
169 | /* process callbacks */ | ||
170 | rdp->nxttail[0] = rdp->nxttail[1]; | ||
171 | rdp->nxttail[1] = rdp->nxttail[2]; | ||
172 | if (rcu_batch_after(batch - 1, rdp->batch)) | ||
173 | rdp->nxttail[0] = rdp->nxttail[2]; | ||
174 | } | ||
175 | |||
176 | rdp->batch = batch; | ||
177 | *rdp->nxttail[2] = head; | ||
178 | rdp->nxttail[2] = &head->next; | ||
179 | |||
180 | if (unlikely(++rdp->qlen > qhimark)) { | ||
181 | rdp->blimit = INT_MAX; | ||
182 | force_quiescent_state(rdp, &rcu_ctrlblk); | ||
183 | } | ||
184 | } | ||
185 | |||
186 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
187 | |||
188 | static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp) | ||
189 | { | ||
190 | rcp->gp_start = jiffies; | ||
191 | rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK; | ||
192 | } | ||
193 | |||
194 | static void print_other_cpu_stall(struct rcu_ctrlblk *rcp) | ||
195 | { | ||
196 | int cpu; | ||
197 | long delta; | ||
198 | unsigned long flags; | ||
199 | |||
200 | /* Only let one CPU complain about others per time interval. */ | ||
201 | |||
202 | spin_lock_irqsave(&rcp->lock, flags); | ||
203 | delta = jiffies - rcp->jiffies_stall; | ||
204 | if (delta < 2 || rcp->cur != rcp->completed) { | ||
205 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
206 | return; | ||
207 | } | ||
208 | rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | ||
209 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
210 | |||
211 | /* OK, time to rat on our buddy... */ | ||
212 | |||
213 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | ||
214 | for_each_possible_cpu(cpu) { | ||
215 | if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask))) | ||
216 | printk(" %d", cpu); | ||
217 | } | ||
218 | printk(" (detected by %d, t=%ld jiffies)\n", | ||
219 | smp_processor_id(), (long)(jiffies - rcp->gp_start)); | ||
220 | } | ||
221 | |||
222 | static void print_cpu_stall(struct rcu_ctrlblk *rcp) | ||
223 | { | ||
224 | unsigned long flags; | ||
225 | |||
226 | printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n", | ||
227 | smp_processor_id(), jiffies, | ||
228 | jiffies - rcp->gp_start); | ||
229 | dump_stack(); | ||
230 | spin_lock_irqsave(&rcp->lock, flags); | ||
231 | if ((long)(jiffies - rcp->jiffies_stall) >= 0) | ||
232 | rcp->jiffies_stall = | ||
233 | jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | ||
234 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
235 | set_need_resched(); /* kick ourselves to get things going. */ | ||
236 | } | ||
237 | |||
238 | static void check_cpu_stall(struct rcu_ctrlblk *rcp) | ||
239 | { | ||
240 | long delta; | ||
241 | |||
242 | delta = jiffies - rcp->jiffies_stall; | ||
243 | if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) && | ||
244 | delta >= 0) { | ||
245 | |||
246 | /* We haven't checked in, so go dump stack. */ | ||
247 | print_cpu_stall(rcp); | ||
248 | |||
249 | } else if (rcp->cur != rcp->completed && delta >= 2) { | ||
250 | |||
251 | /* They had two seconds to dump stack, so complain. */ | ||
252 | print_other_cpu_stall(rcp); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
257 | |||
258 | static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp) | ||
259 | { | ||
260 | } | ||
261 | |||
262 | static inline void check_cpu_stall(struct rcu_ctrlblk *rcp) | ||
263 | { | ||
264 | } | ||
265 | |||
266 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
267 | |||
268 | /** | ||
269 | * call_rcu - Queue an RCU callback for invocation after a grace period. | ||
270 | * @head: structure to be used for queueing the RCU updates. | ||
271 | * @func: actual update function to be invoked after the grace period | ||
272 | * | ||
273 | * The update function will be invoked some time after a full grace | ||
274 | * period elapses, in other words after all currently executing RCU | ||
275 | * read-side critical sections have completed. RCU read-side critical | ||
276 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
277 | * and may be nested. | ||
278 | */ | ||
279 | void call_rcu(struct rcu_head *head, | ||
280 | void (*func)(struct rcu_head *rcu)) | ||
281 | { | ||
282 | unsigned long flags; | ||
283 | |||
284 | head->func = func; | ||
285 | local_irq_save(flags); | ||
286 | __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data)); | ||
287 | local_irq_restore(flags); | ||
288 | } | ||
289 | EXPORT_SYMBOL_GPL(call_rcu); | ||
290 | |||
291 | /** | ||
292 | * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. | ||
293 | * @head: structure to be used for queueing the RCU updates. | ||
294 | * @func: actual update function to be invoked after the grace period | ||
295 | * | ||
296 | * The update function will be invoked some time after a full grace | ||
297 | * period elapses, in other words after all currently executing RCU | ||
298 | * read-side critical sections have completed. call_rcu_bh() assumes | ||
299 | * that the read-side critical sections end on completion of a softirq | ||
300 | * handler. This means that read-side critical sections in process | ||
301 | * context must not be interrupted by softirqs. This interface is to be | ||
302 | * used when most of the read-side critical sections are in softirq context. | ||
303 | * RCU read-side critical sections are delimited by rcu_read_lock() and | ||
304 | * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh() | ||
305 | * and rcu_read_unlock_bh(), if in process context. These may be nested. | ||
306 | */ | ||
307 | void call_rcu_bh(struct rcu_head *head, | ||
308 | void (*func)(struct rcu_head *rcu)) | ||
309 | { | ||
310 | unsigned long flags; | ||
311 | |||
312 | head->func = func; | ||
313 | local_irq_save(flags); | ||
314 | __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); | ||
315 | local_irq_restore(flags); | ||
316 | } | ||
317 | EXPORT_SYMBOL_GPL(call_rcu_bh); | ||
318 | |||
319 | /* | ||
320 | * Return the number of RCU batches processed thus far. Useful | ||
321 | * for debug and statistics. | ||
322 | */ | ||
323 | long rcu_batches_completed(void) | ||
324 | { | ||
325 | return rcu_ctrlblk.completed; | ||
326 | } | ||
327 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | ||
328 | |||
329 | /* | ||
330 | * Return the number of RCU batches processed thus far. Useful | ||
331 | * for debug and statistics. | ||
332 | */ | ||
333 | long rcu_batches_completed_bh(void) | ||
334 | { | ||
335 | return rcu_bh_ctrlblk.completed; | ||
336 | } | ||
337 | EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); | ||
338 | |||
339 | /* Raises the softirq for processing rcu_callbacks. */ | ||
340 | static inline void raise_rcu_softirq(void) | ||
341 | { | ||
342 | raise_softirq(RCU_SOFTIRQ); | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * Invoke the completed RCU callbacks. They are expected to be in | ||
347 | * a per-cpu list. | ||
348 | */ | ||
349 | static void rcu_do_batch(struct rcu_data *rdp) | ||
350 | { | ||
351 | unsigned long flags; | ||
352 | struct rcu_head *next, *list; | ||
353 | int count = 0; | ||
354 | |||
355 | list = rdp->donelist; | ||
356 | while (list) { | ||
357 | next = list->next; | ||
358 | prefetch(next); | ||
359 | list->func(list); | ||
360 | list = next; | ||
361 | if (++count >= rdp->blimit) | ||
362 | break; | ||
363 | } | ||
364 | rdp->donelist = list; | ||
365 | |||
366 | local_irq_save(flags); | ||
367 | rdp->qlen -= count; | ||
368 | local_irq_restore(flags); | ||
369 | if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) | ||
370 | rdp->blimit = blimit; | ||
371 | |||
372 | if (!rdp->donelist) | ||
373 | rdp->donetail = &rdp->donelist; | ||
374 | else | ||
375 | raise_rcu_softirq(); | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * Grace period handling: | ||
380 | * The grace period handling consists out of two steps: | ||
381 | * - A new grace period is started. | ||
382 | * This is done by rcu_start_batch. The start is not broadcasted to | ||
383 | * all cpus, they must pick this up by comparing rcp->cur with | ||
384 | * rdp->quiescbatch. All cpus are recorded in the | ||
385 | * rcu_ctrlblk.cpumask bitmap. | ||
386 | * - All cpus must go through a quiescent state. | ||
387 | * Since the start of the grace period is not broadcasted, at least two | ||
388 | * calls to rcu_check_quiescent_state are required: | ||
389 | * The first call just notices that a new grace period is running. The | ||
390 | * following calls check if there was a quiescent state since the beginning | ||
391 | * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If | ||
392 | * the bitmap is empty, then the grace period is completed. | ||
393 | * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace | ||
394 | * period (if necessary). | ||
395 | */ | ||
396 | |||
397 | /* | ||
398 | * Register a new batch of callbacks, and start it up if there is currently no | ||
399 | * active batch and the batch to be registered has not already occurred. | ||
400 | * Caller must hold rcu_ctrlblk.lock. | ||
401 | */ | ||
402 | static void rcu_start_batch(struct rcu_ctrlblk *rcp) | ||
403 | { | ||
404 | if (rcp->cur != rcp->pending && | ||
405 | rcp->completed == rcp->cur) { | ||
406 | rcp->cur++; | ||
407 | record_gp_stall_check_time(rcp); | ||
408 | |||
409 | /* | ||
410 | * Accessing nohz_cpu_mask before incrementing rcp->cur needs a | ||
411 | * Barrier Otherwise it can cause tickless idle CPUs to be | ||
412 | * included in rcp->cpumask, which will extend graceperiods | ||
413 | * unnecessarily. | ||
414 | */ | ||
415 | smp_mb(); | ||
416 | cpumask_andnot(to_cpumask(rcp->cpumask), | ||
417 | cpu_online_mask, nohz_cpu_mask); | ||
418 | |||
419 | rcp->signaled = 0; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * cpu went through a quiescent state since the beginning of the grace period. | ||
425 | * Clear it from the cpu mask and complete the grace period if it was the last | ||
426 | * cpu. Start another grace period if someone has further entries pending | ||
427 | */ | ||
428 | static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) | ||
429 | { | ||
430 | cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask)); | ||
431 | if (cpumask_empty(to_cpumask(rcp->cpumask))) { | ||
432 | /* batch completed ! */ | ||
433 | rcp->completed = rcp->cur; | ||
434 | rcu_start_batch(rcp); | ||
435 | } | ||
436 | } | ||
437 | |||
438 | /* | ||
439 | * Check if the cpu has gone through a quiescent state (say context | ||
440 | * switch). If so and if it already hasn't done so in this RCU | ||
441 | * quiescent cycle, then indicate that it has done so. | ||
442 | */ | ||
443 | static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | ||
444 | struct rcu_data *rdp) | ||
445 | { | ||
446 | unsigned long flags; | ||
447 | |||
448 | if (rdp->quiescbatch != rcp->cur) { | ||
449 | /* start new grace period: */ | ||
450 | rdp->qs_pending = 1; | ||
451 | rdp->passed_quiesc = 0; | ||
452 | rdp->quiescbatch = rcp->cur; | ||
453 | return; | ||
454 | } | ||
455 | |||
456 | /* Grace period already completed for this cpu? | ||
457 | * qs_pending is checked instead of the actual bitmap to avoid | ||
458 | * cacheline trashing. | ||
459 | */ | ||
460 | if (!rdp->qs_pending) | ||
461 | return; | ||
462 | |||
463 | /* | ||
464 | * Was there a quiescent state since the beginning of the grace | ||
465 | * period? If no, then exit and wait for the next call. | ||
466 | */ | ||
467 | if (!rdp->passed_quiesc) | ||
468 | return; | ||
469 | rdp->qs_pending = 0; | ||
470 | |||
471 | spin_lock_irqsave(&rcp->lock, flags); | ||
472 | /* | ||
473 | * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync | ||
474 | * during cpu startup. Ignore the quiescent state. | ||
475 | */ | ||
476 | if (likely(rdp->quiescbatch == rcp->cur)) | ||
477 | cpu_quiet(rdp->cpu, rcp); | ||
478 | |||
479 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
480 | } | ||
481 | |||
482 | |||
483 | #ifdef CONFIG_HOTPLUG_CPU | ||
484 | |||
485 | /* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing | ||
486 | * locking requirements, the list it's pulling from has to belong to a cpu | ||
487 | * which is dead and hence not processing interrupts. | ||
488 | */ | ||
489 | static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, | ||
490 | struct rcu_head **tail, long batch) | ||
491 | { | ||
492 | unsigned long flags; | ||
493 | |||
494 | if (list) { | ||
495 | local_irq_save(flags); | ||
496 | this_rdp->batch = batch; | ||
497 | *this_rdp->nxttail[2] = list; | ||
498 | this_rdp->nxttail[2] = tail; | ||
499 | local_irq_restore(flags); | ||
500 | } | ||
501 | } | ||
502 | |||
503 | static void __rcu_offline_cpu(struct rcu_data *this_rdp, | ||
504 | struct rcu_ctrlblk *rcp, struct rcu_data *rdp) | ||
505 | { | ||
506 | unsigned long flags; | ||
507 | |||
508 | /* | ||
509 | * if the cpu going offline owns the grace period | ||
510 | * we can block indefinitely waiting for it, so flush | ||
511 | * it here | ||
512 | */ | ||
513 | spin_lock_irqsave(&rcp->lock, flags); | ||
514 | if (rcp->cur != rcp->completed) | ||
515 | cpu_quiet(rdp->cpu, rcp); | ||
516 | rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1); | ||
517 | rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1); | ||
518 | spin_unlock(&rcp->lock); | ||
519 | |||
520 | this_rdp->qlen += rdp->qlen; | ||
521 | local_irq_restore(flags); | ||
522 | } | ||
523 | |||
524 | static void rcu_offline_cpu(int cpu) | ||
525 | { | ||
526 | struct rcu_data *this_rdp = &get_cpu_var(rcu_data); | ||
527 | struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data); | ||
528 | |||
529 | __rcu_offline_cpu(this_rdp, &rcu_ctrlblk, | ||
530 | &per_cpu(rcu_data, cpu)); | ||
531 | __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, | ||
532 | &per_cpu(rcu_bh_data, cpu)); | ||
533 | put_cpu_var(rcu_data); | ||
534 | put_cpu_var(rcu_bh_data); | ||
535 | } | ||
536 | |||
537 | #else | ||
538 | |||
539 | static void rcu_offline_cpu(int cpu) | ||
540 | { | ||
541 | } | ||
542 | |||
543 | #endif | ||
544 | |||
545 | /* | ||
546 | * This does the RCU processing work from softirq context. | ||
547 | */ | ||
548 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, | ||
549 | struct rcu_data *rdp) | ||
550 | { | ||
551 | unsigned long flags; | ||
552 | long completed_snap; | ||
553 | |||
554 | if (rdp->nxtlist) { | ||
555 | local_irq_save(flags); | ||
556 | completed_snap = ACCESS_ONCE(rcp->completed); | ||
557 | |||
558 | /* | ||
559 | * move the other grace-period-completed entries to | ||
560 | * [rdp->nxtlist, *rdp->nxttail[0]) temporarily | ||
561 | */ | ||
562 | if (!rcu_batch_before(completed_snap, rdp->batch)) | ||
563 | rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2]; | ||
564 | else if (!rcu_batch_before(completed_snap, rdp->batch - 1)) | ||
565 | rdp->nxttail[0] = rdp->nxttail[1]; | ||
566 | |||
567 | /* | ||
568 | * the grace period for entries in | ||
569 | * [rdp->nxtlist, *rdp->nxttail[0]) has completed and | ||
570 | * move these entries to donelist | ||
571 | */ | ||
572 | if (rdp->nxttail[0] != &rdp->nxtlist) { | ||
573 | *rdp->donetail = rdp->nxtlist; | ||
574 | rdp->donetail = rdp->nxttail[0]; | ||
575 | rdp->nxtlist = *rdp->nxttail[0]; | ||
576 | *rdp->donetail = NULL; | ||
577 | |||
578 | if (rdp->nxttail[1] == rdp->nxttail[0]) | ||
579 | rdp->nxttail[1] = &rdp->nxtlist; | ||
580 | if (rdp->nxttail[2] == rdp->nxttail[0]) | ||
581 | rdp->nxttail[2] = &rdp->nxtlist; | ||
582 | rdp->nxttail[0] = &rdp->nxtlist; | ||
583 | } | ||
584 | |||
585 | local_irq_restore(flags); | ||
586 | |||
587 | if (rcu_batch_after(rdp->batch, rcp->pending)) { | ||
588 | unsigned long flags2; | ||
589 | |||
590 | /* and start it/schedule start if it's a new batch */ | ||
591 | spin_lock_irqsave(&rcp->lock, flags2); | ||
592 | if (rcu_batch_after(rdp->batch, rcp->pending)) { | ||
593 | rcp->pending = rdp->batch; | ||
594 | rcu_start_batch(rcp); | ||
595 | } | ||
596 | spin_unlock_irqrestore(&rcp->lock, flags2); | ||
597 | } | ||
598 | } | ||
599 | |||
600 | rcu_check_quiescent_state(rcp, rdp); | ||
601 | if (rdp->donelist) | ||
602 | rcu_do_batch(rdp); | ||
603 | } | ||
604 | |||
605 | static void rcu_process_callbacks(struct softirq_action *unused) | ||
606 | { | ||
607 | /* | ||
608 | * Memory references from any prior RCU read-side critical sections | ||
609 | * executed by the interrupted code must be see before any RCU | ||
610 | * grace-period manupulations below. | ||
611 | */ | ||
612 | |||
613 | smp_mb(); /* See above block comment. */ | ||
614 | |||
615 | __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); | ||
616 | __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); | ||
617 | |||
618 | /* | ||
619 | * Memory references from any later RCU read-side critical sections | ||
620 | * executed by the interrupted code must be see after any RCU | ||
621 | * grace-period manupulations above. | ||
622 | */ | ||
623 | |||
624 | smp_mb(); /* See above block comment. */ | ||
625 | } | ||
626 | |||
627 | static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) | ||
628 | { | ||
629 | /* Check for CPU stalls, if enabled. */ | ||
630 | check_cpu_stall(rcp); | ||
631 | |||
632 | if (rdp->nxtlist) { | ||
633 | long completed_snap = ACCESS_ONCE(rcp->completed); | ||
634 | |||
635 | /* | ||
636 | * This cpu has pending rcu entries and the grace period | ||
637 | * for them has completed. | ||
638 | */ | ||
639 | if (!rcu_batch_before(completed_snap, rdp->batch)) | ||
640 | return 1; | ||
641 | if (!rcu_batch_before(completed_snap, rdp->batch - 1) && | ||
642 | rdp->nxttail[0] != rdp->nxttail[1]) | ||
643 | return 1; | ||
644 | if (rdp->nxttail[0] != &rdp->nxtlist) | ||
645 | return 1; | ||
646 | |||
647 | /* | ||
648 | * This cpu has pending rcu entries and the new batch | ||
649 | * for then hasn't been started nor scheduled start | ||
650 | */ | ||
651 | if (rcu_batch_after(rdp->batch, rcp->pending)) | ||
652 | return 1; | ||
653 | } | ||
654 | |||
655 | /* This cpu has finished callbacks to invoke */ | ||
656 | if (rdp->donelist) | ||
657 | return 1; | ||
658 | |||
659 | /* The rcu core waits for a quiescent state from the cpu */ | ||
660 | if (rdp->quiescbatch != rcp->cur || rdp->qs_pending) | ||
661 | return 1; | ||
662 | |||
663 | /* nothing to do */ | ||
664 | return 0; | ||
665 | } | ||
666 | |||
667 | /* | ||
668 | * Check to see if there is any immediate RCU-related work to be done | ||
669 | * by the current CPU, returning 1 if so. This function is part of the | ||
670 | * RCU implementation; it is -not- an exported member of the RCU API. | ||
671 | */ | ||
672 | int rcu_pending(int cpu) | ||
673 | { | ||
674 | return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || | ||
675 | __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * Check to see if any future RCU-related work will need to be done | ||
680 | * by the current CPU, even if none need be done immediately, returning | ||
681 | * 1 if so. This function is part of the RCU implementation; it is -not- | ||
682 | * an exported member of the RCU API. | ||
683 | */ | ||
684 | int rcu_needs_cpu(int cpu) | ||
685 | { | ||
686 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
687 | struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); | ||
688 | |||
689 | return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu); | ||
690 | } | ||
691 | |||
692 | /* | ||
693 | * Top-level function driving RCU grace-period detection, normally | ||
694 | * invoked from the scheduler-clock interrupt. This function simply | ||
695 | * increments counters that are read only from softirq by this same | ||
696 | * CPU, so there are no memory barriers required. | ||
697 | */ | ||
698 | void rcu_check_callbacks(int cpu, int user) | ||
699 | { | ||
700 | if (user || | ||
701 | (idle_cpu(cpu) && rcu_scheduler_active && | ||
702 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | ||
703 | |||
704 | /* | ||
705 | * Get here if this CPU took its interrupt from user | ||
706 | * mode or from the idle loop, and if this is not a | ||
707 | * nested interrupt. In this case, the CPU is in | ||
708 | * a quiescent state, so count it. | ||
709 | * | ||
710 | * Also do a memory barrier. This is needed to handle | ||
711 | * the case where writes from a preempt-disable section | ||
712 | * of code get reordered into schedule() by this CPU's | ||
713 | * write buffer. The memory barrier makes sure that | ||
714 | * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are see | ||
715 | * by other CPUs to happen after any such write. | ||
716 | */ | ||
717 | |||
718 | smp_mb(); /* See above block comment. */ | ||
719 | rcu_qsctr_inc(cpu); | ||
720 | rcu_bh_qsctr_inc(cpu); | ||
721 | |||
722 | } else if (!in_softirq()) { | ||
723 | |||
724 | /* | ||
725 | * Get here if this CPU did not take its interrupt from | ||
726 | * softirq, in other words, if it is not interrupting | ||
727 | * a rcu_bh read-side critical section. This is an _bh | ||
728 | * critical section, so count it. The memory barrier | ||
729 | * is needed for the same reason as is the above one. | ||
730 | */ | ||
731 | |||
732 | smp_mb(); /* See above block comment. */ | ||
733 | rcu_bh_qsctr_inc(cpu); | ||
734 | } | ||
735 | raise_rcu_softirq(); | ||
736 | } | ||
737 | |||
738 | static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, | ||
739 | struct rcu_data *rdp) | ||
740 | { | ||
741 | unsigned long flags; | ||
742 | |||
743 | spin_lock_irqsave(&rcp->lock, flags); | ||
744 | memset(rdp, 0, sizeof(*rdp)); | ||
745 | rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist; | ||
746 | rdp->donetail = &rdp->donelist; | ||
747 | rdp->quiescbatch = rcp->completed; | ||
748 | rdp->qs_pending = 0; | ||
749 | rdp->cpu = cpu; | ||
750 | rdp->blimit = blimit; | ||
751 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
752 | } | ||
753 | |||
754 | static void __cpuinit rcu_online_cpu(int cpu) | ||
755 | { | ||
756 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
757 | struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu); | ||
758 | |||
759 | rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp); | ||
760 | rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp); | ||
761 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
762 | } | ||
763 | |||
764 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | ||
765 | unsigned long action, void *hcpu) | ||
766 | { | ||
767 | long cpu = (long)hcpu; | ||
768 | |||
769 | switch (action) { | ||
770 | case CPU_UP_PREPARE: | ||
771 | case CPU_UP_PREPARE_FROZEN: | ||
772 | rcu_online_cpu(cpu); | ||
773 | break; | ||
774 | case CPU_DEAD: | ||
775 | case CPU_DEAD_FROZEN: | ||
776 | rcu_offline_cpu(cpu); | ||
777 | break; | ||
778 | default: | ||
779 | break; | ||
780 | } | ||
781 | return NOTIFY_OK; | ||
782 | } | ||
783 | |||
784 | static struct notifier_block __cpuinitdata rcu_nb = { | ||
785 | .notifier_call = rcu_cpu_notify, | ||
786 | }; | ||
787 | |||
788 | /* | ||
789 | * Initializes rcu mechanism. Assumed to be called early. | ||
790 | * That is before local timer(SMP) or jiffie timer (uniproc) is setup. | ||
791 | * Note that rcu_qsctr and friends are implicitly | ||
792 | * initialized due to the choice of ``0'' for RCU_CTR_INVALID. | ||
793 | */ | ||
794 | void __init __rcu_init(void) | ||
795 | { | ||
796 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
797 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | ||
798 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
799 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, | ||
800 | (void *)(long)smp_processor_id()); | ||
801 | /* Register notifier for non-boot CPUs */ | ||
802 | register_cpu_notifier(&rcu_nb); | ||
803 | } | ||
804 | |||
805 | module_param(blimit, int, 0); | ||
806 | module_param(qhimark, int, 0); | ||
807 | module_param(qlowmark, int, 0); | ||
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a967c9feb90a..bd5d5c8e5140 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -98,6 +98,30 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
+/**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
+ */
+void synchronize_rcu_bh(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_bh(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
 static void rcu_barrier_callback(struct rcu_head *notused)
 {
 	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -129,6 +153,7 @@ static void rcu_barrier_func(void *type)
 static inline void wait_migrated_callbacks(void)
 {
 	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+	smp_mb(); /* In case we didn't sleep. */
 }
 
 /*
@@ -192,9 +217,13 @@ static void rcu_migrate_callback(struct rcu_head *notused)
 	wake_up(&rcu_migrate_wq);
 }
 
+extern int rcu_cpu_notify(struct notifier_block *self,
+			  unsigned long action, void *hcpu);
+
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 					     unsigned long action, void *hcpu)
 {
+	rcu_cpu_notify(self, action, hcpu);
 	if (action == CPU_DYING) {
 		/*
 		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
@@ -209,7 +238,8 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
 		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
 		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
-	} else if (action == CPU_POST_DEAD) {
+	} else if (action == CPU_DOWN_PREPARE) {
+		/* Don't need to wait until next removal operation. */
 		/* rcu_migrate_head is protected by cpu_add_remove_lock */
 		wait_migrated_callbacks();
 	}
@@ -219,8 +249,18 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 
 void __init rcu_init(void)
 {
+	int i;
+
 	__rcu_init();
-	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
+	cpu_notifier(rcu_barrier_cpu_hotplug, 0);
+
+	/*
+	 * We don't need protection against CPU-hotplug here because
+	 * this is called early in boot, before either interrupts
+	 * or the scheduler are operational.
+	 */
+	for_each_online_cpu(i)
+		rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
 }
 
 void rcu_scheduler_starting(void)
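The new synchronize_rcu_bh() gives rcu_bh users a blocking counterpart to call_rcu_bh(): it returns only after every rcu_read_lock_bh() section that was running when it was called has finished. A minimal illustrative caller follows; the demo_entry type, demo_list, demo_lock and demo_* functions are invented for the example, while the list and RCU primitives are the standard kernel ones.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical element type, protected by RCU-bh on the read side. */
struct demo_entry {
	struct list_head list;
	int key;
};

static LIST_HEAD(demo_list);		/* readers traverse under rcu_read_lock_bh() */
static DEFINE_SPINLOCK(demo_lock);	/* serializes updaters */

/* Reader, typically running in softirq context. */
static int demo_lookup(int key)
{
	struct demo_entry *e;
	int found = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(e, &demo_list, list) {
		if (e->key == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock_bh();
	return found;
}

/* Updater: unlink, wait for all current _bh readers, then free. */
static void demo_remove(struct demo_entry *e)
{
	spin_lock_bh(&demo_lock);
	list_del_rcu(&e->list);
	spin_unlock_bh(&demo_lock);

	synchronize_rcu_bh();	/* all pre-existing rcu_read_lock_bh() readers are done */
	kfree(e);
}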
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
deleted file mode 100644
index beb0e659adcc..000000000000
--- a/kernel/rcupreempt.c
+++ /dev/null
@@ -1,1539 +0,0 @@
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion, realtime implementation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2006 | ||
19 | * | ||
20 | * Authors: Paul E. McKenney <paulmck@us.ibm.com> | ||
21 | * With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar | ||
22 | * for pushing me away from locks and towards counters, and | ||
23 | * to Suparna Bhattacharya for pushing me completely away | ||
24 | * from atomic instructions on the read side. | ||
25 | * | ||
26 | * - Added handling of Dynamic Ticks | ||
27 | * Copyright 2007 - Paul E. Mckenney <paulmck@us.ibm.com> | ||
28 | * - Steven Rostedt <srostedt@redhat.com> | ||
29 | * | ||
30 | * Papers: http://www.rdrop.com/users/paulmck/RCU | ||
31 | * | ||
32 | * Design Document: http://lwn.net/Articles/253651/ | ||
33 | * | ||
34 | * For detailed explanation of Read-Copy Update mechanism see - | ||
35 | * Documentation/RCU/ *.txt | ||
36 | * | ||
37 | */ | ||
38 | #include <linux/types.h> | ||
39 | #include <linux/kernel.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/spinlock.h> | ||
42 | #include <linux/smp.h> | ||
43 | #include <linux/rcupdate.h> | ||
44 | #include <linux/interrupt.h> | ||
45 | #include <linux/sched.h> | ||
46 | #include <asm/atomic.h> | ||
47 | #include <linux/bitops.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/kthread.h> | ||
50 | #include <linux/completion.h> | ||
51 | #include <linux/moduleparam.h> | ||
52 | #include <linux/percpu.h> | ||
53 | #include <linux/notifier.h> | ||
54 | #include <linux/cpu.h> | ||
55 | #include <linux/random.h> | ||
56 | #include <linux/delay.h> | ||
57 | #include <linux/cpumask.h> | ||
58 | #include <linux/rcupreempt_trace.h> | ||
59 | #include <asm/byteorder.h> | ||
60 | |||
61 | /* | ||
62 | * PREEMPT_RCU data structures. | ||
63 | */ | ||
64 | |||
65 | /* | ||
66 | * GP_STAGES specifies the number of times the state machine has | ||
67 | * to go through the all the rcu_try_flip_states (see below) | ||
68 | * in a single Grace Period. | ||
69 | * | ||
70 | * GP in GP_STAGES stands for Grace Period ;) | ||
71 | */ | ||
72 | #define GP_STAGES 2 | ||
73 | struct rcu_data { | ||
74 | spinlock_t lock; /* Protect rcu_data fields. */ | ||
75 | long completed; /* Number of last completed batch. */ | ||
76 | int waitlistcount; | ||
77 | struct rcu_head *nextlist; | ||
78 | struct rcu_head **nexttail; | ||
79 | struct rcu_head *waitlist[GP_STAGES]; | ||
80 | struct rcu_head **waittail[GP_STAGES]; | ||
81 | struct rcu_head *donelist; /* from waitlist & waitschedlist */ | ||
82 | struct rcu_head **donetail; | ||
83 | long rcu_flipctr[2]; | ||
84 | struct rcu_head *nextschedlist; | ||
85 | struct rcu_head **nextschedtail; | ||
86 | struct rcu_head *waitschedlist; | ||
87 | struct rcu_head **waitschedtail; | ||
88 | int rcu_sched_sleeping; | ||
89 | #ifdef CONFIG_RCU_TRACE | ||
90 | struct rcupreempt_trace trace; | ||
91 | #endif /* #ifdef CONFIG_RCU_TRACE */ | ||
92 | }; | ||
93 | |||
94 | /* | ||
95 | * States for rcu_try_flip() and friends. | ||
96 | */ | ||
97 | |||
98 | enum rcu_try_flip_states { | ||
99 | |||
100 | /* | ||
101 | * Stay here if nothing is happening. Flip the counter if somthing | ||
102 | * starts happening. Denoted by "I" | ||
103 | */ | ||
104 | rcu_try_flip_idle_state, | ||
105 | |||
106 | /* | ||
107 | * Wait here for all CPUs to notice that the counter has flipped. This | ||
108 | * prevents the old set of counters from ever being incremented once | ||
109 | * we leave this state, which in turn is necessary because we cannot | ||
110 | * test any individual counter for zero -- we can only check the sum. | ||
111 | * Denoted by "A". | ||
112 | */ | ||
113 | rcu_try_flip_waitack_state, | ||
114 | |||
115 | /* | ||
116 | * Wait here for the sum of the old per-CPU counters to reach zero. | ||
117 | * Denoted by "Z". | ||
118 | */ | ||
119 | rcu_try_flip_waitzero_state, | ||
120 | |||
121 | /* | ||
122 | * Wait here for each of the other CPUs to execute a memory barrier. | ||
123 | * This is necessary to ensure that these other CPUs really have | ||
124 | * completed executing their RCU read-side critical sections, despite | ||
125 | * their CPUs wildly reordering memory. Denoted by "M". | ||
126 | */ | ||
127 | rcu_try_flip_waitmb_state, | ||
128 | }; | ||
129 | |||
130 | /* | ||
131 | * States for rcu_ctrlblk.rcu_sched_sleep. | ||
132 | */ | ||
133 | |||
134 | enum rcu_sched_sleep_states { | ||
135 | rcu_sched_not_sleeping, /* Not sleeping, callbacks need GP. */ | ||
136 | rcu_sched_sleep_prep, /* Thinking of sleeping, rechecking. */ | ||
137 | rcu_sched_sleeping, /* Sleeping, awaken if GP needed. */ | ||
138 | }; | ||
139 | |||
140 | struct rcu_ctrlblk { | ||
141 | spinlock_t fliplock; /* Protect state-machine transitions. */ | ||
142 | long completed; /* Number of last completed batch. */ | ||
143 | enum rcu_try_flip_states rcu_try_flip_state; /* The current state of | ||
144 | the rcu state machine */ | ||
145 | spinlock_t schedlock; /* Protect rcu_sched sleep state. */ | ||
146 | enum rcu_sched_sleep_states sched_sleep; /* rcu_sched state. */ | ||
147 | wait_queue_head_t sched_wq; /* Place for rcu_sched to sleep. */ | ||
148 | }; | ||
149 | |||
150 | struct rcu_dyntick_sched { | ||
151 | int dynticks; | ||
152 | int dynticks_snap; | ||
153 | int sched_qs; | ||
154 | int sched_qs_snap; | ||
155 | int sched_dynticks_snap; | ||
156 | }; | ||
157 | |||
158 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = { | ||
159 | .dynticks = 1, | ||
160 | }; | ||
161 | |||
162 | void rcu_qsctr_inc(int cpu) | ||
163 | { | ||
164 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
165 | |||
166 | rdssp->sched_qs++; | ||
167 | } | ||
168 | |||
169 | #ifdef CONFIG_NO_HZ | ||
170 | |||
171 | void rcu_enter_nohz(void) | ||
172 | { | ||
173 | static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); | ||
174 | |||
175 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | ||
176 | __get_cpu_var(rcu_dyntick_sched).dynticks++; | ||
177 | WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs); | ||
178 | } | ||
179 | |||
180 | void rcu_exit_nohz(void) | ||
181 | { | ||
182 | static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); | ||
183 | |||
184 | __get_cpu_var(rcu_dyntick_sched).dynticks++; | ||
185 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | ||
186 | WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), | ||
187 | &rs); | ||
188 | } | ||
189 | |||
190 | #endif /* CONFIG_NO_HZ */ | ||
191 | |||
192 | |||
193 | static DEFINE_PER_CPU(struct rcu_data, rcu_data); | ||
194 | |||
195 | static struct rcu_ctrlblk rcu_ctrlblk = { | ||
196 | .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock), | ||
197 | .completed = 0, | ||
198 | .rcu_try_flip_state = rcu_try_flip_idle_state, | ||
199 | .schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock), | ||
200 | .sched_sleep = rcu_sched_not_sleeping, | ||
201 | .sched_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_ctrlblk.sched_wq), | ||
202 | }; | ||
203 | |||
204 | static struct task_struct *rcu_sched_grace_period_task; | ||
205 | |||
206 | #ifdef CONFIG_RCU_TRACE | ||
207 | static char *rcu_try_flip_state_names[] = | ||
208 | { "idle", "waitack", "waitzero", "waitmb" }; | ||
209 | #endif /* #ifdef CONFIG_RCU_TRACE */ | ||
210 | |||
211 | static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly | ||
212 | = CPU_BITS_NONE; | ||
213 | |||
214 | /* | ||
215 | * Enum and per-CPU flag to determine when each CPU has seen | ||
216 | * the most recent counter flip. | ||
217 | */ | ||
218 | |||
219 | enum rcu_flip_flag_values { | ||
220 | rcu_flip_seen, /* Steady/initial state, last flip seen. */ | ||
221 | /* Only GP detector can update. */ | ||
222 | rcu_flipped /* Flip just completed, need confirmation. */ | ||
223 | /* Only corresponding CPU can update. */ | ||
224 | }; | ||
225 | static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag) | ||
226 | = rcu_flip_seen; | ||
227 | |||
228 | /* | ||
229 | * Enum and per-CPU flag to determine when each CPU has executed the | ||
230 | * needed memory barrier to fence in memory references from its last RCU | ||
231 | * read-side critical section in the just-completed grace period. | ||
232 | */ | ||
233 | |||
234 | enum rcu_mb_flag_values { | ||
235 | rcu_mb_done, /* Steady/initial state, no mb()s required. */ | ||
236 | /* Only GP detector can update. */ | ||
237 | rcu_mb_needed /* Flip just completed, need an mb(). */ | ||
238 | /* Only corresponding CPU can update. */ | ||
239 | }; | ||
240 | static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag) | ||
241 | = rcu_mb_done; | ||
242 | |||
243 | /* | ||
244 | * RCU_DATA_ME: find the current CPU's rcu_data structure. | ||
245 | * RCU_DATA_CPU: find the specified CPU's rcu_data structure. | ||
246 | */ | ||
247 | #define RCU_DATA_ME() (&__get_cpu_var(rcu_data)) | ||
248 | #define RCU_DATA_CPU(cpu) (&per_cpu(rcu_data, cpu)) | ||
249 | |||
250 | /* | ||
251 | * Helper macro for tracing when the appropriate rcu_data is not | ||
252 | * cached in a local variable, but where the CPU number is so cached. | ||
253 | */ | ||
254 | #define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace)); | ||
255 | |||
256 | /* | ||
257 | * Helper macro for tracing when the appropriate rcu_data is not | ||
258 | * cached in a local variable. | ||
259 | */ | ||
260 | #define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace)); | ||
261 | |||
262 | /* | ||
263 | * Helper macro for tracing when the appropriate rcu_data is pointed | ||
264 | * to by a local variable. | ||
265 | */ | ||
266 | #define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace)); | ||
267 | |||
268 | #define RCU_SCHED_BATCH_TIME (HZ / 50) | ||
269 | |||
270 | /* | ||
271 | * Return the number of RCU batches processed thus far. Useful | ||
272 | * for debug and statistics. | ||
273 | */ | ||
274 | long rcu_batches_completed(void) | ||
275 | { | ||
276 | return rcu_ctrlblk.completed; | ||
277 | } | ||
278 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | ||
279 | |||
280 | void __rcu_read_lock(void) | ||
281 | { | ||
282 | int idx; | ||
283 | struct task_struct *t = current; | ||
284 | int nesting; | ||
285 | |||
286 | nesting = ACCESS_ONCE(t->rcu_read_lock_nesting); | ||
287 | if (nesting != 0) { | ||
288 | |||
289 | /* An earlier rcu_read_lock() covers us, just count it. */ | ||
290 | |||
291 | t->rcu_read_lock_nesting = nesting + 1; | ||
292 | |||
293 | } else { | ||
294 | unsigned long flags; | ||
295 | |||
296 | /* | ||
297 | * We disable interrupts for the following reasons: | ||
298 | * - If we get scheduling clock interrupt here, and we | ||
299 | * end up acking the counter flip, it's like a promise | ||
300 | * that we will never increment the old counter again. | ||
301 | * Thus we will break that promise if that | ||
302 | * scheduling clock interrupt happens between the time | ||
303 | * we pick the .completed field and the time that we | ||
304 | * increment our counter. | ||
305 | * | ||
306 | * - We don't want to be preempted out here. | ||
307 | * | ||
308 | * NMIs can still occur, of course, and might themselves | ||
309 | * contain rcu_read_lock(). | ||
310 | */ | ||
311 | |||
312 | local_irq_save(flags); | ||
313 | |||
314 | /* | ||
315 | * Outermost nesting of rcu_read_lock(), so increment | ||
316 | * the current counter for the current CPU. Use volatile | ||
317 | * casts to prevent the compiler from reordering. | ||
318 | */ | ||
319 | |||
320 | idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1; | ||
321 | ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++; | ||
322 | |||
323 | /* | ||
324 | * Now that the per-CPU counter has been incremented, we | ||
325 | * are protected from races with rcu_read_lock() invoked | ||
326 | * from NMI handlers on this CPU. We can therefore safely | ||
327 | * increment the nesting counter, relieving further NMIs | ||
328 | * of the need to increment the per-CPU counter. | ||
329 | */ | ||
330 | |||
331 | ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1; | ||
332 | |||
333 | /* | ||
334 | * Now that we have preventing any NMIs from storing | ||
335 | * to the ->rcu_flipctr_idx, we can safely use it to | ||
336 | * remember which counter to decrement in the matching | ||
337 | * rcu_read_unlock(). | ||
338 | */ | ||
339 | |||
340 | ACCESS_ONCE(t->rcu_flipctr_idx) = idx; | ||
341 | local_irq_restore(flags); | ||
342 | } | ||
343 | } | ||
344 | EXPORT_SYMBOL_GPL(__rcu_read_lock); | ||
345 | |||
346 | void __rcu_read_unlock(void) | ||
347 | { | ||
348 | int idx; | ||
349 | struct task_struct *t = current; | ||
350 | int nesting; | ||
351 | |||
352 | nesting = ACCESS_ONCE(t->rcu_read_lock_nesting); | ||
353 | if (nesting > 1) { | ||
354 | |||
355 | /* | ||
356 | * We are still protected by the enclosing rcu_read_lock(), | ||
357 | * so simply decrement the counter. | ||
358 | */ | ||
359 | |||
360 | t->rcu_read_lock_nesting = nesting - 1; | ||
361 | |||
362 | } else { | ||
363 | unsigned long flags; | ||
364 | |||
365 | /* | ||
366 | * Disable local interrupts to prevent the grace-period | ||
367 | * detection state machine from seeing us half-done. | ||
368 | * NMIs can still occur, of course, and might themselves | ||
369 | * contain rcu_read_lock() and rcu_read_unlock(). | ||
370 | */ | ||
371 | |||
372 | local_irq_save(flags); | ||
373 | |||
374 | /* | ||
375 | * Outermost nesting of rcu_read_unlock(), so we must | ||
376 | * decrement the current counter for the current CPU. | ||
377 | * This must be done carefully, because NMIs can | ||
378 | * occur at any point in this code, and any rcu_read_lock() | ||
379 | * and rcu_read_unlock() pairs in the NMI handlers | ||
380 | * must interact non-destructively with this code. | ||
381 | * Lots of volatile casts, and -very- careful ordering. | ||
382 | * | ||
383 | * Changes to this code, including this one, must be | ||
384 | * inspected, validated, and tested extremely carefully!!! | ||
385 | */ | ||
386 | |||
387 | /* | ||
388 | * First, pick up the index. | ||
389 | */ | ||
390 | |||
391 | idx = ACCESS_ONCE(t->rcu_flipctr_idx); | ||
392 | |||
393 | /* | ||
394 | * Now that we have fetched the counter index, it is | ||
395 | * safe to decrement the per-task RCU nesting counter. | ||
396 | * After this, any interrupts or NMIs will increment and | ||
397 | * decrement the per-CPU counters. | ||
398 | */ | ||
399 | ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1; | ||
400 | |||
401 | /* | ||
402 | * It is now safe to decrement this task's nesting count. | ||
403 | * NMIs that occur after this statement will route their | ||
404 | * rcu_read_lock() calls through this "else" clause, and | ||
405 | * will thus start incrementing the per-CPU counter on | ||
406 | * their own. They will also clobber ->rcu_flipctr_idx, | ||
407 | * but that is OK, since we have already fetched it. | ||
408 | */ | ||
409 | |||
410 | ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--; | ||
411 | local_irq_restore(flags); | ||
412 | } | ||
413 | } | ||
414 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); | ||
415 | |||
416 | /* | ||
417 | * If a global counter flip has occurred since the last time that we | ||
418 | * advanced callbacks, advance them. Hardware interrupts must be | ||
419 | * disabled when calling this function. | ||
420 | */ | ||
421 | static void __rcu_advance_callbacks(struct rcu_data *rdp) | ||
422 | { | ||
423 | int cpu; | ||
424 | int i; | ||
425 | int wlc = 0; | ||
426 | |||
427 | if (rdp->completed != rcu_ctrlblk.completed) { | ||
428 | if (rdp->waitlist[GP_STAGES - 1] != NULL) { | ||
429 | *rdp->donetail = rdp->waitlist[GP_STAGES - 1]; | ||
430 | rdp->donetail = rdp->waittail[GP_STAGES - 1]; | ||
431 | RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp); | ||
432 | } | ||
433 | for (i = GP_STAGES - 2; i >= 0; i--) { | ||
434 | if (rdp->waitlist[i] != NULL) { | ||
435 | rdp->waitlist[i + 1] = rdp->waitlist[i]; | ||
436 | rdp->waittail[i + 1] = rdp->waittail[i]; | ||
437 | wlc++; | ||
438 | } else { | ||
439 | rdp->waitlist[i + 1] = NULL; | ||
440 | rdp->waittail[i + 1] = | ||
441 | &rdp->waitlist[i + 1]; | ||
442 | } | ||
443 | } | ||
444 | if (rdp->nextlist != NULL) { | ||
445 | rdp->waitlist[0] = rdp->nextlist; | ||
446 | rdp->waittail[0] = rdp->nexttail; | ||
447 | wlc++; | ||
448 | rdp->nextlist = NULL; | ||
449 | rdp->nexttail = &rdp->nextlist; | ||
450 | RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp); | ||
451 | } else { | ||
452 | rdp->waitlist[0] = NULL; | ||
453 | rdp->waittail[0] = &rdp->waitlist[0]; | ||
454 | } | ||
455 | rdp->waitlistcount = wlc; | ||
456 | rdp->completed = rcu_ctrlblk.completed; | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * Check to see if this CPU needs to report that it has seen | ||
461 | * the most recent counter flip, thereby declaring that all | ||
462 | * subsequent rcu_read_lock() invocations will respect this flip. | ||
463 | */ | ||
464 | |||
465 | cpu = raw_smp_processor_id(); | ||
466 | if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) { | ||
467 | smp_mb(); /* Subsequent counter accesses must see new value */ | ||
468 | per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen; | ||
469 | smp_mb(); /* Subsequent RCU read-side critical sections */ | ||
470 | /* seen -after- acknowledgement. */ | ||
471 | } | ||
472 | } | ||
473 | |||
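__rcu_advance_callbacks() above shuffles whole callback lists between stages by moving a head pointer together with a pointer to the list's terminating NULL link, the same idiom used by the waitlist/waittail and donelist/donetail pairs throughout this file. A minimal user-space sketch of that idiom follows; struct cblist and its helpers are hypothetical names, not kernel APIs.

#include <stdio.h>
#include <stddef.h>

struct cb {
        struct cb *next;
        int id;
};

/* A list is a head pointer plus a pointer to the terminating NULL link. */
struct cblist {
        struct cb *head;
        struct cb **tail;
};

static void cblist_init(struct cblist *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
        c->next = NULL;
        *l->tail = c;                   /* append in O(1) */
        l->tail = &c->next;
}

/* Splice all of src onto the end of dst, leaving src empty. */
static void cblist_splice(struct cblist *dst, struct cblist *src)
{
        if (src->head == NULL)
                return;
        *dst->tail = src->head;
        dst->tail = src->tail;
        cblist_init(src);
}

int main(void)
{
        struct cblist next, wait, done;
        struct cb a = { .id = 1 }, b = { .id = 2 };
        struct cb *p;

        cblist_init(&next);
        cblist_init(&wait);
        cblist_init(&done);

        cblist_enqueue(&next, &a);
        cblist_enqueue(&next, &b);

        /* One "stage" of advancement: wait -> done, then next -> wait. */
        cblist_splice(&done, &wait);
        cblist_splice(&wait, &next);

        for (p = wait.head; p; p = p->next)
                printf("waiting: %d\n", p->id);
        return 0;
}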
474 | #ifdef CONFIG_NO_HZ | ||
475 | static DEFINE_PER_CPU(int, rcu_update_flag); | ||
476 | |||
477 | /** | ||
478 | * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI. | ||
479 | * | ||
480 | * If the CPU was idle with dynamic ticks active, this updates the | ||
481 | * rcu_dyntick_sched.dynticks to let the RCU handling know that the | ||
482 | * CPU is active. | ||
483 | */ | ||
484 | void rcu_irq_enter(void) | ||
485 | { | ||
486 | int cpu = smp_processor_id(); | ||
487 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
488 | |||
489 | if (per_cpu(rcu_update_flag, cpu)) | ||
490 | per_cpu(rcu_update_flag, cpu)++; | ||
491 | |||
492 | /* | ||
493 | * Only update if we are coming from a stopped ticks mode | ||
494 | * (rcu_dyntick_sched.dynticks is even). | ||
495 | */ | ||
496 | if (!in_interrupt() && | ||
497 | (rdssp->dynticks & 0x1) == 0) { | ||
498 | /* | ||
499 | * The following might seem like we could have a race | ||
500 | * with NMI/SMIs. But this really isn't a problem. | ||
501 | * Here we do a read/modify/write, and the race happens | ||
502 | * when an NMI/SMI comes in after the read and before | ||
503 | * the write. But NMI/SMIs will increment this counter | ||
504 | * twice before returning, so the zero bit will not | ||
505 | * be corrupted by the NMI/SMI which is the most important | ||
506 | * part. | ||
507 | * | ||
508 | * The only thing is that we would bring back the counter | ||
509 | * to the position it was in during the NMI/SMI. | ||
510 | * But the zero bit would be set, so the rest of the | ||
511 | * counter would again be ignored. | ||
512 | * | ||
513 | * On return from the IRQ, the zero bit may be 0 and the | ||
514 | * counter may hold the same value as it did on return from | ||
515 | * the NMI/SMI. If the state machine is unlucky enough to | ||
516 | * see that, it still doesn't matter, since all | ||
517 | * RCU read-side critical sections on this CPU would | ||
518 | * have already completed. | ||
519 | */ | ||
520 | rdssp->dynticks++; | ||
521 | /* | ||
522 | * The following memory barrier ensures that any | ||
523 | * rcu_read_lock() primitives in the irq handler | ||
524 | * are seen by other CPUs to follow the above | ||
525 | * increment to rcu_dyntick_sched.dynticks. This is | ||
526 | * required in order for other CPUs to correctly | ||
527 | * determine when it is safe to advance the RCU | ||
528 | * grace-period state machine. | ||
529 | */ | ||
530 | smp_mb(); /* see above block comment. */ | ||
531 | /* | ||
532 | * Since we can't determine the dynamic tick mode from | ||
533 | * the rcu_dyntick_sched.dynticks after this routine, | ||
534 | * we use a second flag to acknowledge that we came | ||
535 | * from an idle state with ticks stopped. | ||
536 | */ | ||
537 | per_cpu(rcu_update_flag, cpu)++; | ||
538 | /* | ||
539 | * If we take an NMI/SMI now, they will also increment | ||
540 | * the rcu_update_flag, and will not update the | ||
541 | * rcu_dyntick_sched.dynticks on exit. That is for | ||
542 | * this IRQ to do. | ||
543 | */ | ||
544 | } | ||
545 | } | ||
546 | |||
547 | /** | ||
548 | * rcu_irq_exit - Called from exiting Hard irq context. | ||
549 | * | ||
550 | * If the CPU was idle with dynamic ticks active, update the | ||
551 | * rcu_dyntick_sched.dynticks to let the RCU handling know | ||
552 | * that the CPU is going back to idle with no ticks. | ||
553 | */ | ||
554 | void rcu_irq_exit(void) | ||
555 | { | ||
556 | int cpu = smp_processor_id(); | ||
557 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
558 | |||
559 | /* | ||
560 | * rcu_update_flag is set if we interrupted the CPU | ||
561 | * when it was idle with ticks stopped. | ||
562 | * Once this occurs, we keep track of interrupt nesting | ||
563 | * because an NMI/SMI could also come in, and we still | ||
564 | * only want the IRQ that started the increment of the | ||
565 | * rcu_dyntick_sched.dynticks to be the one that modifies | ||
566 | * it on exit. | ||
567 | */ | ||
568 | if (per_cpu(rcu_update_flag, cpu)) { | ||
569 | if (--per_cpu(rcu_update_flag, cpu)) | ||
570 | return; | ||
571 | |||
572 | /* This must match the interrupt nesting */ | ||
573 | WARN_ON(in_interrupt()); | ||
574 | |||
575 | /* | ||
576 | * If an NMI/SMI happens now we are still | ||
577 | * protected by the rcu_dyntick_sched.dynticks being odd. | ||
578 | */ | ||
579 | |||
580 | /* | ||
581 | * The following memory barrier ensures that any | ||
582 | * rcu_read_unlock() primitives in the irq handler | ||
583 | * are seen by other CPUs to precede the following | ||
584 | * increment to rcu_dyntick_sched.dynticks. This | ||
585 | * is required in order for other CPUs to determine | ||
586 | * when it is safe to advance the RCU grace-period | ||
587 | * state machine. | ||
588 | */ | ||
589 | smp_mb(); /* see above block comment. */ | ||
590 | rdssp->dynticks++; | ||
591 | WARN_ON(rdssp->dynticks & 0x1); | ||
592 | } | ||
593 | } | ||
594 | |||
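rcu_irq_enter() and rcu_irq_exit() above keep rcu_dyntick_sched.dynticks even while the CPU sits in dynticks-idle and odd otherwise, with rcu_update_flag counting the nesting so that only the outermost exit makes the counter even again. The following user-space model is a hedged sketch of that protocol, not kernel code; it collapses the in_interrupt() test into the flag check and ignores SMP and memory ordering.

#include <stdio.h>
#include <assert.h>

/* Hypothetical single-CPU model: even = in dynticks-idle, odd = not idle. */
static long dynticks;
static int update_flag;         /* irq/NMI nesting since leaving dynticks-idle */

static void model_irq_enter(void)
{
        if (update_flag)
                update_flag++;                  /* nested irq/NMI: just count it */
        else if ((dynticks & 0x1) == 0) {
                dynticks++;                     /* leaving dynticks-idle: make it odd */
                update_flag++;
        }
}

static void model_irq_exit(void)
{
        if (update_flag && --update_flag == 0) {
                dynticks++;                     /* outermost exit: make it even again */
                assert((dynticks & 0x1) == 0);
        }
}

int main(void)
{
        model_irq_enter();              /* irq from idle: counter goes odd */
        model_irq_enter();              /* nested NMI: counter untouched */
        model_irq_exit();
        model_irq_exit();               /* outermost exit: counter even again */
        printf("dynticks=%ld update_flag=%d\n", dynticks, update_flag);
        return 0;
}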
595 | void rcu_nmi_enter(void) | ||
596 | { | ||
597 | rcu_irq_enter(); | ||
598 | } | ||
599 | |||
600 | void rcu_nmi_exit(void) | ||
601 | { | ||
602 | rcu_irq_exit(); | ||
603 | } | ||
604 | |||
605 | static void dyntick_save_progress_counter(int cpu) | ||
606 | { | ||
607 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
608 | |||
609 | rdssp->dynticks_snap = rdssp->dynticks; | ||
610 | } | ||
611 | |||
612 | static inline int | ||
613 | rcu_try_flip_waitack_needed(int cpu) | ||
614 | { | ||
615 | long curr; | ||
616 | long snap; | ||
617 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
618 | |||
619 | curr = rdssp->dynticks; | ||
620 | snap = rdssp->dynticks_snap; | ||
621 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | ||
622 | |||
623 | /* | ||
624 | * If the CPU remained in dynticks mode for the entire time | ||
625 | * and didn't take any interrupts, NMIs, SMIs, or whatever, | ||
626 | * then it cannot be in the middle of an rcu_read_lock(), so | ||
627 | * the next rcu_read_lock() it executes must use the new value | ||
628 | * of the counter. So we can safely pretend that this CPU | ||
629 | * already acknowledged the counter. | ||
630 | */ | ||
631 | |||
632 | if ((curr == snap) && ((curr & 0x1) == 0)) | ||
633 | return 0; | ||
634 | |||
635 | /* | ||
636 | * If the CPU passed through or entered a dynticks idle phase with | ||
637 | * no active irq handlers, then, as above, we can safely pretend | ||
638 | * that this CPU already acknowledged the counter. | ||
639 | */ | ||
640 | |||
641 | if ((curr - snap) > 2 || (curr & 0x1) == 0) | ||
642 | return 0; | ||
643 | |||
644 | /* We need this CPU to explicitly acknowledge the counter flip. */ | ||
645 | |||
646 | return 1; | ||
647 | } | ||
648 | |||
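rcu_try_flip_waitack_needed() above compares the current dynticks value with the snapshot taken when the flip was requested: an unchanged even value means the CPU stayed idle, while a currently even value or a change greater than two means it entered or passed through idle since the snapshot; in all of those cases no explicit acknowledgement is required. The tiny harness below (hypothetical, for illustration only) exercises that decision logic on plain values.

#include <stdio.h>

/* Same decision logic as rcu_try_flip_waitack_needed(), on plain values. */
static int ack_needed(long snap, long curr)
{
        if (curr == snap && (curr & 0x1) == 0)
                return 0;               /* idle the whole time */
        if (curr - snap > 2 || (curr & 0x1) == 0)
                return 0;               /* passed through or entered idle */
        return 1;                       /* must acknowledge explicitly */
}

int main(void)
{
        printf("%d\n", ack_needed(4, 4));       /* 0: stayed in dynticks idle */
        printf("%d\n", ack_needed(4, 8));       /* 0: went through idle since snap */
        printf("%d\n", ack_needed(5, 5));       /* 1: busy (non-idle) throughout */
        return 0;
}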
649 | static inline int | ||
650 | rcu_try_flip_waitmb_needed(int cpu) | ||
651 | { | ||
652 | long curr; | ||
653 | long snap; | ||
654 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
655 | |||
656 | curr = rdssp->dynticks; | ||
657 | snap = rdssp->dynticks_snap; | ||
658 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | ||
659 | |||
660 | /* | ||
661 | * If the CPU remained in dynticks mode for the entire time | ||
662 | * and didn't take any interrupts, NMIs, SMIs, or whatever, | ||
663 | * then it cannot have executed an RCU read-side critical section | ||
664 | * during that time, so there is no need for it to execute a | ||
665 | * memory barrier. | ||
666 | */ | ||
667 | |||
668 | if ((curr == snap) && ((curr & 0x1) == 0)) | ||
669 | return 0; | ||
670 | |||
671 | /* | ||
672 | * If the CPU either entered or exited an outermost interrupt, | ||
673 | * SMI, NMI, or whatever handler, then we know that it executed | ||
674 | * a memory barrier when doing so. So we don't need another one. | ||
675 | */ | ||
676 | if (curr != snap) | ||
677 | return 0; | ||
678 | |||
679 | /* We need the CPU to execute a memory barrier. */ | ||
680 | |||
681 | return 1; | ||
682 | } | ||
683 | |||
684 | static void dyntick_save_progress_counter_sched(int cpu) | ||
685 | { | ||
686 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
687 | |||
688 | rdssp->sched_dynticks_snap = rdssp->dynticks; | ||
689 | } | ||
690 | |||
691 | static int rcu_qsctr_inc_needed_dyntick(int cpu) | ||
692 | { | ||
693 | long curr; | ||
694 | long snap; | ||
695 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
696 | |||
697 | curr = rdssp->dynticks; | ||
698 | snap = rdssp->sched_dynticks_snap; | ||
699 | smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ | ||
700 | |||
701 | /* | ||
702 | * If the CPU remained in dynticks mode for the entire time | ||
703 | * and didn't take any interrupts, NMIs, SMIs, or whatever, | ||
704 | * then it cannot be in the middle of an rcu_read_lock(), so | ||
705 | * the next rcu_read_lock() it executes must use the new value | ||
706 | * of the counter. Therefore, this CPU has been in a quiescent | ||
707 | * state the entire time, and we don't need to wait for it. | ||
708 | */ | ||
709 | |||
710 | if ((curr == snap) && ((curr & 0x1) == 0)) | ||
711 | return 0; | ||
712 | |||
713 | /* | ||
714 | * If the CPU passed through or entered a dynticks idle phase with | ||
715 | * no active irq handlers, then, as above, this CPU has already | ||
716 | * passed through a quiescent state. | ||
717 | */ | ||
718 | |||
719 | if ((curr - snap) > 2 || (snap & 0x1) == 0) | ||
720 | return 0; | ||
721 | |||
722 | /* We need this CPU to go through a quiescent state. */ | ||
723 | |||
724 | return 1; | ||
725 | } | ||
726 | |||
727 | #else /* !CONFIG_NO_HZ */ | ||
728 | |||
729 | # define dyntick_save_progress_counter(cpu) do { } while (0) | ||
730 | # define rcu_try_flip_waitack_needed(cpu) (1) | ||
731 | # define rcu_try_flip_waitmb_needed(cpu) (1) | ||
732 | |||
733 | # define dyntick_save_progress_counter_sched(cpu) do { } while (0) | ||
734 | # define rcu_qsctr_inc_needed_dyntick(cpu) (1) | ||
735 | |||
736 | #endif /* CONFIG_NO_HZ */ | ||
737 | |||
738 | static void save_qsctr_sched(int cpu) | ||
739 | { | ||
740 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
741 | |||
742 | rdssp->sched_qs_snap = rdssp->sched_qs; | ||
743 | } | ||
744 | |||
745 | static inline int rcu_qsctr_inc_needed(int cpu) | ||
746 | { | ||
747 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
748 | |||
749 | /* | ||
750 | * If there has been a quiescent state, no more need to wait | ||
751 | * on this CPU. | ||
752 | */ | ||
753 | |||
754 | if (rdssp->sched_qs != rdssp->sched_qs_snap) { | ||
755 | smp_mb(); /* force ordering with cpu entering schedule(). */ | ||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | /* We need this CPU to go through a quiescent state. */ | ||
760 | |||
761 | return 1; | ||
762 | } | ||
763 | |||
764 | /* | ||
765 | * Get here when RCU is idle. Decide whether we need to | ||
766 | * move out of idle state, and return non-zero if so. | ||
767 | * "Straightforward" approach for the moment, might later | ||
768 | * use callback-list lengths, grace-period duration, or | ||
769 | * some such to determine when to exit idle state. | ||
770 | * Might also need a pre-idle test that does not acquire | ||
771 | * the lock, but let's get the simple case working first... | ||
772 | */ | ||
773 | |||
774 | static int | ||
775 | rcu_try_flip_idle(void) | ||
776 | { | ||
777 | int cpu; | ||
778 | |||
779 | RCU_TRACE_ME(rcupreempt_trace_try_flip_i1); | ||
780 | if (!rcu_pending(smp_processor_id())) { | ||
781 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1); | ||
782 | return 0; | ||
783 | } | ||
784 | |||
785 | /* | ||
786 | * Do the flip. | ||
787 | */ | ||
788 | |||
789 | RCU_TRACE_ME(rcupreempt_trace_try_flip_g1); | ||
790 | rcu_ctrlblk.completed++; /* stands in for rcu_try_flip_g2 */ | ||
791 | |||
792 | /* | ||
793 | * Need a memory barrier so that other CPUs see the new | ||
794 | * counter value before they see the subsequent change of all | ||
795 | * the rcu_flip_flag instances to rcu_flipped. | ||
796 | */ | ||
797 | |||
798 | smp_mb(); /* see above block comment. */ | ||
799 | |||
800 | /* Now ask each CPU for acknowledgement of the flip. */ | ||
801 | |||
802 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) { | ||
803 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; | ||
804 | dyntick_save_progress_counter(cpu); | ||
805 | } | ||
806 | |||
807 | return 1; | ||
808 | } | ||
809 | |||
810 | /* | ||
811 | * Wait for CPUs to acknowledge the flip. | ||
812 | */ | ||
813 | |||
814 | static int | ||
815 | rcu_try_flip_waitack(void) | ||
816 | { | ||
817 | int cpu; | ||
818 | |||
819 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); | ||
820 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) | ||
821 | if (rcu_try_flip_waitack_needed(cpu) && | ||
822 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { | ||
823 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); | ||
824 | return 0; | ||
825 | } | ||
826 | |||
827 | /* | ||
828 | * Make sure our checks above don't bleed into subsequent | ||
829 | * waiting for the sum of the counters to reach zero. | ||
830 | */ | ||
831 | |||
832 | smp_mb(); /* see above block comment. */ | ||
833 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a2); | ||
834 | return 1; | ||
835 | } | ||
836 | |||
837 | /* | ||
838 | * Wait for collective ``last'' counter to reach zero, | ||
839 | * then tell all CPUs to do an end-of-grace-period memory barrier. | ||
840 | */ | ||
841 | |||
842 | static int | ||
843 | rcu_try_flip_waitzero(void) | ||
844 | { | ||
845 | int cpu; | ||
846 | int lastidx = !(rcu_ctrlblk.completed & 0x1); | ||
847 | int sum = 0; | ||
848 | |||
849 | /* Check to see if the sum of the "last" counters is zero. */ | ||
850 | |||
851 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); | ||
852 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) | ||
853 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; | ||
854 | if (sum != 0) { | ||
855 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); | ||
856 | return 0; | ||
857 | } | ||
858 | |||
859 | /* | ||
860 | * This ensures that the other CPUs see the call for | ||
861 | * memory barriers -after- the zero sum has been | ||
862 | * detected here. | ||
863 | */ | ||
864 | smp_mb(); /* ^^^^^^^^^^^^ */ | ||
865 | |||
866 | /* Call for a memory barrier from each CPU. */ | ||
867 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) { | ||
868 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; | ||
869 | dyntick_save_progress_counter(cpu); | ||
870 | } | ||
871 | |||
872 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z2); | ||
873 | return 1; | ||
874 | } | ||
875 | |||
876 | /* | ||
877 | * Wait for all CPUs to do their end-of-grace-period memory barrier. | ||
878 | * Return non-zero once all CPUs have done so. | ||
879 | */ | ||
880 | |||
881 | static int | ||
882 | rcu_try_flip_waitmb(void) | ||
883 | { | ||
884 | int cpu; | ||
885 | |||
886 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); | ||
887 | for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) | ||
888 | if (rcu_try_flip_waitmb_needed(cpu) && | ||
889 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { | ||
890 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); | ||
891 | return 0; | ||
892 | } | ||
893 | |||
894 | smp_mb(); /* Ensure that the above checks precede any following flip. */ | ||
895 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m2); | ||
896 | return 1; | ||
897 | } | ||
898 | |||
899 | /* | ||
900 | * Attempt a single flip of the counters. Remember, a single flip does | ||
901 | * -not- constitute a grace period. Instead, the interval between | ||
902 | * at least GP_STAGES consecutive flips is a grace period. | ||
903 | * | ||
904 | * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation | ||
905 | * on a large SMP, they might want to use a hierarchical organization of | ||
906 | * the per-CPU-counter pairs. | ||
907 | */ | ||
908 | static void rcu_try_flip(void) | ||
909 | { | ||
910 | unsigned long flags; | ||
911 | |||
912 | RCU_TRACE_ME(rcupreempt_trace_try_flip_1); | ||
913 | if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) { | ||
914 | RCU_TRACE_ME(rcupreempt_trace_try_flip_e1); | ||
915 | return; | ||
916 | } | ||
917 | |||
918 | /* | ||
919 | * Take the next transition(s) through the RCU grace-period | ||
920 | * flip-counter state machine. | ||
921 | */ | ||
922 | |||
923 | switch (rcu_ctrlblk.rcu_try_flip_state) { | ||
924 | case rcu_try_flip_idle_state: | ||
925 | if (rcu_try_flip_idle()) | ||
926 | rcu_ctrlblk.rcu_try_flip_state = | ||
927 | rcu_try_flip_waitack_state; | ||
928 | break; | ||
929 | case rcu_try_flip_waitack_state: | ||
930 | if (rcu_try_flip_waitack()) | ||
931 | rcu_ctrlblk.rcu_try_flip_state = | ||
932 | rcu_try_flip_waitzero_state; | ||
933 | break; | ||
934 | case rcu_try_flip_waitzero_state: | ||
935 | if (rcu_try_flip_waitzero()) | ||
936 | rcu_ctrlblk.rcu_try_flip_state = | ||
937 | rcu_try_flip_waitmb_state; | ||
938 | break; | ||
939 | case rcu_try_flip_waitmb_state: | ||
940 | if (rcu_try_flip_waitmb()) | ||
941 | rcu_ctrlblk.rcu_try_flip_state = | ||
942 | rcu_try_flip_idle_state; | ||
943 | } | ||
944 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); | ||
945 | } | ||
946 | |||
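rcu_try_flip() above drives a four-state grace-period machine (idle, wait-for-acknowledgement, wait-for-zero, wait-for-memory-barrier), advancing at most one state per call and using a trylock so only one CPU pushes the machine forward at a time. A hedged user-space sketch of that stepping structure follows; the enum values and stage_done() are hypothetical stand-ins for the real per-stage checks.

#include <stdio.h>

enum flip_state {
        FLIP_IDLE,
        FLIP_WAITACK,
        FLIP_WAITZERO,
        FLIP_WAITMB,
};

static const char * const state_name[] = {
        "idle", "waitack", "waitzero", "waitmb",
};

static enum flip_state state = FLIP_IDLE;

/* Stand-in for the real per-stage checks; pretend each stage completes. */
static int stage_done(enum flip_state s)
{
        (void)s;
        return 1;
}

/* One call advances the machine by at most one stage, as in rcu_try_flip(). */
static void try_flip_step(void)
{
        switch (state) {
        case FLIP_IDLE:
                if (stage_done(FLIP_IDLE))
                        state = FLIP_WAITACK;
                break;
        case FLIP_WAITACK:
                if (stage_done(FLIP_WAITACK))
                        state = FLIP_WAITZERO;
                break;
        case FLIP_WAITZERO:
                if (stage_done(FLIP_WAITZERO))
                        state = FLIP_WAITMB;
                break;
        case FLIP_WAITMB:
                if (stage_done(FLIP_WAITMB))
                        state = FLIP_IDLE;
                break;
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++) {
                try_flip_step();
                printf("after step %d: %s\n", i + 1, state_name[state]);
        }
        return 0;
}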
947 | /* | ||
948 | * Check to see if this CPU needs to do a memory barrier in order to | ||
949 | * ensure that any prior RCU read-side critical sections have committed | ||
950 | * their counter manipulations and critical-section memory references | ||
951 | * before declaring the grace period to be completed. | ||
952 | */ | ||
953 | static void rcu_check_mb(int cpu) | ||
954 | { | ||
955 | if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) { | ||
956 | smp_mb(); /* Ensure RCU read-side accesses are visible. */ | ||
957 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_done; | ||
958 | } | ||
959 | } | ||
960 | |||
961 | void rcu_check_callbacks(int cpu, int user) | ||
962 | { | ||
963 | unsigned long flags; | ||
964 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); | ||
965 | |||
966 | /* | ||
967 | * If this CPU took its interrupt from user mode or from the | ||
968 | * idle loop, and this is not a nested interrupt, then | ||
969 | * this CPU has to have exited all prior preempt-disable | ||
970 | * sections of code. So increment the counter to note this. | ||
971 | * | ||
972 | * The memory barrier is needed to handle the case where | ||
973 | * writes from a preempt-disable section of code get reordered | ||
974 | * into schedule() by this CPU's write buffer. So the memory | ||
975 | * barrier makes sure that the rcu_qsctr_inc() is seen by other | ||
976 | * CPUs to happen after any such write. | ||
977 | */ | ||
978 | |||
979 | if (user || | ||
980 | (idle_cpu(cpu) && !in_softirq() && | ||
981 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | ||
982 | smp_mb(); /* Guard against aggressive schedule(). */ | ||
983 | rcu_qsctr_inc(cpu); | ||
984 | } | ||
985 | |||
986 | rcu_check_mb(cpu); | ||
987 | if (rcu_ctrlblk.completed == rdp->completed) | ||
988 | rcu_try_flip(); | ||
989 | spin_lock_irqsave(&rdp->lock, flags); | ||
990 | RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp); | ||
991 | __rcu_advance_callbacks(rdp); | ||
992 | if (rdp->donelist == NULL) { | ||
993 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
994 | } else { | ||
995 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
996 | raise_softirq(RCU_SOFTIRQ); | ||
997 | } | ||
998 | } | ||
999 | |||
1000 | /* | ||
1001 | * Needed by dynticks, to make sure all RCU processing has finished | ||
1002 | * when we go idle: | ||
1003 | */ | ||
1004 | void rcu_advance_callbacks(int cpu, int user) | ||
1005 | { | ||
1006 | unsigned long flags; | ||
1007 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); | ||
1008 | |||
1009 | if (rcu_ctrlblk.completed == rdp->completed) { | ||
1010 | rcu_try_flip(); | ||
1011 | if (rcu_ctrlblk.completed == rdp->completed) | ||
1012 | return; | ||
1013 | } | ||
1014 | spin_lock_irqsave(&rdp->lock, flags); | ||
1015 | RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp); | ||
1016 | __rcu_advance_callbacks(rdp); | ||
1017 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1018 | } | ||
1019 | |||
1020 | #ifdef CONFIG_HOTPLUG_CPU | ||
1021 | #define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \ | ||
1022 | *dsttail = srclist; \ | ||
1023 | if (srclist != NULL) { \ | ||
1024 | dsttail = srctail; \ | ||
1025 | srclist = NULL; \ | ||
1026 | srctail = &srclist;\ | ||
1027 | } \ | ||
1028 | } while (0) | ||
1029 | |||
1030 | void rcu_offline_cpu(int cpu) | ||
1031 | { | ||
1032 | int i; | ||
1033 | struct rcu_head *list = NULL; | ||
1034 | unsigned long flags; | ||
1035 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); | ||
1036 | struct rcu_head *schedlist = NULL; | ||
1037 | struct rcu_head **schedtail = &schedlist; | ||
1038 | struct rcu_head **tail = &list; | ||
1039 | |||
1040 | /* | ||
1041 | * Remove all callbacks from the newly dead CPU, retaining order. | ||
1042 | * Otherwise rcu_barrier() will fail. | ||
1043 | */ | ||
1044 | |||
1045 | spin_lock_irqsave(&rdp->lock, flags); | ||
1046 | rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail); | ||
1047 | for (i = GP_STAGES - 1; i >= 0; i--) | ||
1048 | rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i], | ||
1049 | list, tail); | ||
1050 | rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail); | ||
1051 | rcu_offline_cpu_enqueue(rdp->waitschedlist, rdp->waitschedtail, | ||
1052 | schedlist, schedtail); | ||
1053 | rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail, | ||
1054 | schedlist, schedtail); | ||
1055 | rdp->rcu_sched_sleeping = 0; | ||
1056 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1057 | rdp->waitlistcount = 0; | ||
1058 | |||
1059 | /* Disengage the newly dead CPU from the grace-period computation. */ | ||
1060 | |||
1061 | spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); | ||
1062 | rcu_check_mb(cpu); | ||
1063 | if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) { | ||
1064 | smp_mb(); /* Subsequent counter accesses must see new value */ | ||
1065 | per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen; | ||
1066 | smp_mb(); /* Subsequent RCU read-side critical sections */ | ||
1067 | /* seen -after- acknowledgement. */ | ||
1068 | } | ||
1069 | |||
1070 | RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0]; | ||
1071 | RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1]; | ||
1072 | |||
1073 | RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; | ||
1074 | RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; | ||
1075 | |||
1076 | cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map)); | ||
1077 | |||
1078 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); | ||
1079 | |||
1080 | /* | ||
1081 | * Place the removed callbacks on the current CPU's queue. | ||
1082 | * Make them all start a new grace period: simple approach, | ||
1083 | * in theory could starve a given set of callbacks, but | ||
1084 | * you would need to be doing some serious CPU hotplugging | ||
1085 | * to make this happen. If this becomes a problem, adding | ||
1086 | * a synchronize_rcu() to the hotplug path would be a simple | ||
1087 | * fix. | ||
1088 | */ | ||
1089 | |||
1090 | local_irq_save(flags); /* disable preempt till we know what lock. */ | ||
1091 | rdp = RCU_DATA_ME(); | ||
1092 | spin_lock(&rdp->lock); | ||
1093 | *rdp->nexttail = list; | ||
1094 | if (list) | ||
1095 | rdp->nexttail = tail; | ||
1096 | *rdp->nextschedtail = schedlist; | ||
1097 | if (schedlist) | ||
1098 | rdp->nextschedtail = schedtail; | ||
1099 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1100 | } | ||
1101 | |||
1102 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1103 | |||
1104 | void rcu_offline_cpu(int cpu) | ||
1105 | { | ||
1106 | } | ||
1107 | |||
1108 | #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ | ||
1109 | |||
1110 | void __cpuinit rcu_online_cpu(int cpu) | ||
1111 | { | ||
1112 | unsigned long flags; | ||
1113 | struct rcu_data *rdp; | ||
1114 | |||
1115 | spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); | ||
1116 | cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map)); | ||
1117 | spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); | ||
1118 | |||
1119 | /* | ||
1120 | * The rcu_sched grace-period processing might have bypassed | ||
1121 | * this CPU, given that it was not in the rcu_cpu_online_map | ||
1122 | * when the grace-period scan started. This means that the | ||
1123 | * grace-period task might sleep. So make sure that if this | ||
1124 | * should happen, the first callback posted to this CPU will | ||
1125 | * wake up the grace-period task if need be. | ||
1126 | */ | ||
1127 | |||
1128 | rdp = RCU_DATA_CPU(cpu); | ||
1129 | spin_lock_irqsave(&rdp->lock, flags); | ||
1130 | rdp->rcu_sched_sleeping = 1; | ||
1131 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1132 | } | ||
1133 | |||
1134 | static void rcu_process_callbacks(struct softirq_action *unused) | ||
1135 | { | ||
1136 | unsigned long flags; | ||
1137 | struct rcu_head *next, *list; | ||
1138 | struct rcu_data *rdp; | ||
1139 | |||
1140 | local_irq_save(flags); | ||
1141 | rdp = RCU_DATA_ME(); | ||
1142 | spin_lock(&rdp->lock); | ||
1143 | list = rdp->donelist; | ||
1144 | if (list == NULL) { | ||
1145 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1146 | return; | ||
1147 | } | ||
1148 | rdp->donelist = NULL; | ||
1149 | rdp->donetail = &rdp->donelist; | ||
1150 | RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp); | ||
1151 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1152 | while (list) { | ||
1153 | next = list->next; | ||
1154 | list->func(list); | ||
1155 | list = next; | ||
1156 | RCU_TRACE_ME(rcupreempt_trace_invoke); | ||
1157 | } | ||
1158 | } | ||
1159 | |||
1160 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
1161 | { | ||
1162 | unsigned long flags; | ||
1163 | struct rcu_data *rdp; | ||
1164 | |||
1165 | head->func = func; | ||
1166 | head->next = NULL; | ||
1167 | local_irq_save(flags); | ||
1168 | rdp = RCU_DATA_ME(); | ||
1169 | spin_lock(&rdp->lock); | ||
1170 | __rcu_advance_callbacks(rdp); | ||
1171 | *rdp->nexttail = head; | ||
1172 | rdp->nexttail = &head->next; | ||
1173 | RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp); | ||
1174 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1175 | } | ||
1176 | EXPORT_SYMBOL_GPL(call_rcu); | ||
1177 | |||
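call_rcu() above only queues the caller-supplied rcu_head on the per-CPU next list; it is the caller that embeds the rcu_head inside its own structure and recovers the enclosing object in the callback, typically with container_of(). A minimal hedged sketch of that calling convention follows; struct my_node and the two helper functions are hypothetical, and the update-side publication/unlinking code is omitted.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected object: the rcu_head lives inside it. */
struct my_node {
        int key;
        struct rcu_head rcu;
};

/* RCU callback: recover the enclosing object and free it. */
static void my_node_free_rcu(struct rcu_head *head)
{
        struct my_node *node = container_of(head, struct my_node, rcu);

        kfree(node);
}

/* Call after @node has been unlinked from the reader-visible structure. */
static void my_node_retire(struct my_node *node)
{
        call_rcu(&node->rcu, my_node_free_rcu);
}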
1178 | void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
1179 | { | ||
1180 | unsigned long flags; | ||
1181 | struct rcu_data *rdp; | ||
1182 | int wake_gp = 0; | ||
1183 | |||
1184 | head->func = func; | ||
1185 | head->next = NULL; | ||
1186 | local_irq_save(flags); | ||
1187 | rdp = RCU_DATA_ME(); | ||
1188 | spin_lock(&rdp->lock); | ||
1189 | *rdp->nextschedtail = head; | ||
1190 | rdp->nextschedtail = &head->next; | ||
1191 | if (rdp->rcu_sched_sleeping) { | ||
1192 | |||
1193 | /* Grace-period processing might be sleeping... */ | ||
1194 | |||
1195 | rdp->rcu_sched_sleeping = 0; | ||
1196 | wake_gp = 1; | ||
1197 | } | ||
1198 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1199 | if (wake_gp) { | ||
1200 | |||
1201 | /* Wake up grace-period processing, unless someone beat us. */ | ||
1202 | |||
1203 | spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags); | ||
1204 | if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping) | ||
1205 | wake_gp = 0; | ||
1206 | rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping; | ||
1207 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); | ||
1208 | if (wake_gp) | ||
1209 | wake_up_interruptible(&rcu_ctrlblk.sched_wq); | ||
1210 | } | ||
1211 | } | ||
1212 | EXPORT_SYMBOL_GPL(call_rcu_sched); | ||
1213 | |||
1214 | /* | ||
1215 | * Wait until all currently running preempt_disable() code segments | ||
1216 | * (including hardware-irq-disable segments) complete. Note that | ||
1217 | * in -rt this does -not- necessarily result in all currently executing | ||
1218 | * interrupt -handlers- having completed. | ||
1219 | */ | ||
1220 | void __synchronize_sched(void) | ||
1221 | { | ||
1222 | struct rcu_synchronize rcu; | ||
1223 | |||
1224 | if (num_online_cpus() == 1) | ||
1225 | return; /* blocking is a grace period if only one CPU! */ | ||
1226 | |||
1227 | init_completion(&rcu.completion); | ||
1228 | /* Will wake me after RCU finished. */ | ||
1229 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
1230 | /* Wait for it. */ | ||
1231 | wait_for_completion(&rcu.completion); | ||
1232 | } | ||
1233 | EXPORT_SYMBOL_GPL(__synchronize_sched); | ||
1234 | |||
1235 | /* | ||
1236 | * kthread function that manages call_rcu_sched grace periods. | ||
1237 | */ | ||
1238 | static int rcu_sched_grace_period(void *arg) | ||
1239 | { | ||
1240 | int couldsleep; /* might sleep after current pass. */ | ||
1241 | int couldsleepnext = 0; /* might sleep after next pass. */ | ||
1242 | int cpu; | ||
1243 | unsigned long flags; | ||
1244 | struct rcu_data *rdp; | ||
1245 | int ret; | ||
1246 | |||
1247 | /* | ||
1248 | * Each pass through the following loop handles one | ||
1249 | * rcu_sched grace period cycle. | ||
1250 | */ | ||
1251 | do { | ||
1252 | /* Save each CPU's current state. */ | ||
1253 | |||
1254 | for_each_online_cpu(cpu) { | ||
1255 | dyntick_save_progress_counter_sched(cpu); | ||
1256 | save_qsctr_sched(cpu); | ||
1257 | } | ||
1258 | |||
1259 | /* | ||
1260 | * Sleep for about an RCU grace-period's worth to | ||
1261 | * allow better batching and to consume less CPU. | ||
1262 | */ | ||
1263 | schedule_timeout_interruptible(RCU_SCHED_BATCH_TIME); | ||
1264 | |||
1265 | /* | ||
1266 | * If there was nothing to do last time, prepare to | ||
1267 | * sleep at the end of the current grace period cycle. | ||
1268 | */ | ||
1269 | couldsleep = couldsleepnext; | ||
1270 | couldsleepnext = 1; | ||
1271 | if (couldsleep) { | ||
1272 | spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags); | ||
1273 | rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep; | ||
1274 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); | ||
1275 | } | ||
1276 | |||
1277 | /* | ||
1278 | * Wait on each CPU in turn to have either visited | ||
1279 | * a quiescent state or been in dynticks-idle mode. | ||
1280 | */ | ||
1281 | for_each_online_cpu(cpu) { | ||
1282 | while (rcu_qsctr_inc_needed(cpu) && | ||
1283 | rcu_qsctr_inc_needed_dyntick(cpu)) { | ||
1284 | /* resched_cpu(cpu); @@@ */ | ||
1285 | schedule_timeout_interruptible(1); | ||
1286 | } | ||
1287 | } | ||
1288 | |||
1289 | /* Advance callbacks for each CPU. */ | ||
1290 | |||
1291 | for_each_online_cpu(cpu) { | ||
1292 | |||
1293 | rdp = RCU_DATA_CPU(cpu); | ||
1294 | spin_lock_irqsave(&rdp->lock, flags); | ||
1295 | |||
1296 | /* | ||
1297 | * We are running on this CPU irq-disabled, so no | ||
1298 | * CPU can go offline until we re-enable irqs. | ||
1299 | * The current CPU might have already gone | ||
1300 | * offline (between the for_each_online_cpu and | ||
1301 | * the spin_lock_irqsave), but in that case all its | ||
1302 | * callback lists will be empty, so no harm done. | ||
1303 | * | ||
1304 | * Advance the callbacks! We share normal RCU's | ||
1305 | * donelist, since callbacks are invoked the | ||
1306 | * same way in either case. | ||
1307 | */ | ||
1308 | if (rdp->waitschedlist != NULL) { | ||
1309 | *rdp->donetail = rdp->waitschedlist; | ||
1310 | rdp->donetail = rdp->waitschedtail; | ||
1311 | |||
1312 | /* | ||
1313 | * Next rcu_check_callbacks() will | ||
1314 | * do the required raise_softirq(). | ||
1315 | */ | ||
1316 | } | ||
1317 | if (rdp->nextschedlist != NULL) { | ||
1318 | rdp->waitschedlist = rdp->nextschedlist; | ||
1319 | rdp->waitschedtail = rdp->nextschedtail; | ||
1320 | couldsleep = 0; | ||
1321 | couldsleepnext = 0; | ||
1322 | } else { | ||
1323 | rdp->waitschedlist = NULL; | ||
1324 | rdp->waitschedtail = &rdp->waitschedlist; | ||
1325 | } | ||
1326 | rdp->nextschedlist = NULL; | ||
1327 | rdp->nextschedtail = &rdp->nextschedlist; | ||
1328 | |||
1329 | /* Mark sleep intention. */ | ||
1330 | |||
1331 | rdp->rcu_sched_sleeping = couldsleep; | ||
1332 | |||
1333 | spin_unlock_irqrestore(&rdp->lock, flags); | ||
1334 | } | ||
1335 | |||
1336 | /* If we saw callbacks on the last scan, go deal with them. */ | ||
1337 | |||
1338 | if (!couldsleep) | ||
1339 | continue; | ||
1340 | |||
1341 | /* Attempt to block... */ | ||
1342 | |||
1343 | spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags); | ||
1344 | if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) { | ||
1345 | |||
1346 | /* | ||
1347 | * Someone posted a callback after we scanned. | ||
1348 | * Go take care of it. | ||
1349 | */ | ||
1350 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); | ||
1351 | couldsleepnext = 0; | ||
1352 | continue; | ||
1353 | } | ||
1354 | |||
1355 | /* Block until the next person posts a callback. */ | ||
1356 | |||
1357 | rcu_ctrlblk.sched_sleep = rcu_sched_sleeping; | ||
1358 | spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags); | ||
1359 | ret = 0; /* unused */ | ||
1360 | __wait_event_interruptible(rcu_ctrlblk.sched_wq, | ||
1361 | rcu_ctrlblk.sched_sleep != rcu_sched_sleeping, | ||
1362 | ret); | ||
1363 | |||
1364 | couldsleepnext = 0; | ||
1365 | |||
1366 | } while (!kthread_should_stop()); | ||
1367 | |||
1368 | return (0); | ||
1369 | } | ||
1370 | |||
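The kthread loop above blocks only after two consecutive passes have found no new rcu_sched callbacks: couldsleepnext records whether the current pass was idle, and couldsleep (the previous pass's value) gates the attempt to sleep. The following stand-alone sketch (hypothetical helper name; the kernel locking and wakeups are omitted) captures just that two-pass heuristic.

#include <stdio.h>

/* Returns 1 only after two consecutive idle passes, as in the kthread above. */
static int should_sleep(int work_this_pass, int *couldsleepnext)
{
        int couldsleep = *couldsleepnext;       /* previous pass's verdict */

        *couldsleepnext = 1;                    /* assume this pass is idle... */
        if (work_this_pass) {
                *couldsleepnext = 0;            /* ...it was not: reset */
                return 0;
        }
        return couldsleep;
}

int main(void)
{
        int couldsleepnext = 0;
        int work[] = { 1, 0, 0, 0 };            /* callbacks seen on each pass */
        int i;

        for (i = 0; i < 4; i++)
                printf("pass %d: sleep=%d\n", i,
                       should_sleep(work[i], &couldsleepnext));
        return 0;
}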
1371 | /* | ||
1372 | * Check to see if any future RCU-related work will need to be done | ||
1373 | * by the current CPU, even if none need be done immediately, returning | ||
1374 | * 1 if so. Assumes that notifiers would take care of handling any | ||
1375 | * outstanding requests from the RCU core. | ||
1376 | * | ||
1377 | * This function is part of the RCU implementation; it is -not- | ||
1378 | * an exported member of the RCU API. | ||
1379 | */ | ||
1380 | int rcu_needs_cpu(int cpu) | ||
1381 | { | ||
1382 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); | ||
1383 | |||
1384 | return (rdp->donelist != NULL || | ||
1385 | !!rdp->waitlistcount || | ||
1386 | rdp->nextlist != NULL || | ||
1387 | rdp->nextschedlist != NULL || | ||
1388 | rdp->waitschedlist != NULL); | ||
1389 | } | ||
1390 | |||
1391 | int rcu_pending(int cpu) | ||
1392 | { | ||
1393 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); | ||
1394 | |||
1395 | /* The CPU has at least one callback queued somewhere. */ | ||
1396 | |||
1397 | if (rdp->donelist != NULL || | ||
1398 | !!rdp->waitlistcount || | ||
1399 | rdp->nextlist != NULL || | ||
1400 | rdp->nextschedlist != NULL || | ||
1401 | rdp->waitschedlist != NULL) | ||
1402 | return 1; | ||
1403 | |||
1404 | /* The RCU core needs an acknowledgement from this CPU. */ | ||
1405 | |||
1406 | if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) || | ||
1407 | (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed)) | ||
1408 | return 1; | ||
1409 | |||
1410 | /* This CPU has fallen behind the global grace-period number. */ | ||
1411 | |||
1412 | if (rdp->completed != rcu_ctrlblk.completed) | ||
1413 | return 1; | ||
1414 | |||
1415 | /* Nothing needed from this CPU. */ | ||
1416 | |||
1417 | return 0; | ||
1418 | } | ||
1419 | |||
1420 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | ||
1421 | unsigned long action, void *hcpu) | ||
1422 | { | ||
1423 | long cpu = (long)hcpu; | ||
1424 | |||
1425 | switch (action) { | ||
1426 | case CPU_UP_PREPARE: | ||
1427 | case CPU_UP_PREPARE_FROZEN: | ||
1428 | rcu_online_cpu(cpu); | ||
1429 | break; | ||
1430 | case CPU_UP_CANCELED: | ||
1431 | case CPU_UP_CANCELED_FROZEN: | ||
1432 | case CPU_DEAD: | ||
1433 | case CPU_DEAD_FROZEN: | ||
1434 | rcu_offline_cpu(cpu); | ||
1435 | break; | ||
1436 | default: | ||
1437 | break; | ||
1438 | } | ||
1439 | return NOTIFY_OK; | ||
1440 | } | ||
1441 | |||
1442 | static struct notifier_block __cpuinitdata rcu_nb = { | ||
1443 | .notifier_call = rcu_cpu_notify, | ||
1444 | }; | ||
1445 | |||
1446 | void __init __rcu_init(void) | ||
1447 | { | ||
1448 | int cpu; | ||
1449 | int i; | ||
1450 | struct rcu_data *rdp; | ||
1451 | |||
1452 | printk(KERN_NOTICE "Preemptible RCU implementation.\n"); | ||
1453 | for_each_possible_cpu(cpu) { | ||
1454 | rdp = RCU_DATA_CPU(cpu); | ||
1455 | spin_lock_init(&rdp->lock); | ||
1456 | rdp->completed = 0; | ||
1457 | rdp->waitlistcount = 0; | ||
1458 | rdp->nextlist = NULL; | ||
1459 | rdp->nexttail = &rdp->nextlist; | ||
1460 | for (i = 0; i < GP_STAGES; i++) { | ||
1461 | rdp->waitlist[i] = NULL; | ||
1462 | rdp->waittail[i] = &rdp->waitlist[i]; | ||
1463 | } | ||
1464 | rdp->donelist = NULL; | ||
1465 | rdp->donetail = &rdp->donelist; | ||
1466 | rdp->rcu_flipctr[0] = 0; | ||
1467 | rdp->rcu_flipctr[1] = 0; | ||
1468 | rdp->nextschedlist = NULL; | ||
1469 | rdp->nextschedtail = &rdp->nextschedlist; | ||
1470 | rdp->waitschedlist = NULL; | ||
1471 | rdp->waitschedtail = &rdp->waitschedlist; | ||
1472 | rdp->rcu_sched_sleeping = 0; | ||
1473 | } | ||
1474 | register_cpu_notifier(&rcu_nb); | ||
1475 | |||
1476 | /* | ||
1477 | * We don't need protection against CPU-Hotplug here | ||
1478 | * since | ||
1479 | * a) If a CPU comes online while we are iterating over the | ||
1480 | * cpu_online_mask below, we would only end up making a | ||
1481 | * duplicate call to rcu_online_cpu() which sets the corresponding | ||
1482 | * CPU's bit in the rcu_cpu_online_map. | ||
1483 | * | ||
1484 | * b) A CPU cannot go offline at this point in time since the user | ||
1485 | * does not have access to the sysfs interface, nor do we | ||
1486 | * suspend the system. | ||
1487 | */ | ||
1488 | for_each_online_cpu(cpu) | ||
1489 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu); | ||
1490 | |||
1491 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
1492 | } | ||
1493 | |||
1494 | /* | ||
1495 | * Late-boot-time RCU initialization that must wait until after scheduler | ||
1496 | * has been initialized. | ||
1497 | */ | ||
1498 | void __init rcu_init_sched(void) | ||
1499 | { | ||
1500 | rcu_sched_grace_period_task = kthread_run(rcu_sched_grace_period, | ||
1501 | NULL, | ||
1502 | "rcu_sched_grace_period"); | ||
1503 | WARN_ON(IS_ERR(rcu_sched_grace_period_task)); | ||
1504 | } | ||
1505 | |||
1506 | #ifdef CONFIG_RCU_TRACE | ||
1507 | long *rcupreempt_flipctr(int cpu) | ||
1508 | { | ||
1509 | return &RCU_DATA_CPU(cpu)->rcu_flipctr[0]; | ||
1510 | } | ||
1511 | EXPORT_SYMBOL_GPL(rcupreempt_flipctr); | ||
1512 | |||
1513 | int rcupreempt_flip_flag(int cpu) | ||
1514 | { | ||
1515 | return per_cpu(rcu_flip_flag, cpu); | ||
1516 | } | ||
1517 | EXPORT_SYMBOL_GPL(rcupreempt_flip_flag); | ||
1518 | |||
1519 | int rcupreempt_mb_flag(int cpu) | ||
1520 | { | ||
1521 | return per_cpu(rcu_mb_flag, cpu); | ||
1522 | } | ||
1523 | EXPORT_SYMBOL_GPL(rcupreempt_mb_flag); | ||
1524 | |||
1525 | char *rcupreempt_try_flip_state_name(void) | ||
1526 | { | ||
1527 | return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state]; | ||
1528 | } | ||
1529 | EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name); | ||
1530 | |||
1531 | struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu) | ||
1532 | { | ||
1533 | struct rcu_data *rdp = RCU_DATA_CPU(cpu); | ||
1534 | |||
1535 | return &rdp->trace; | ||
1536 | } | ||
1537 | EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu); | ||
1538 | |||
1539 | #endif /* #ifdef CONFIG_RCU_TRACE */ | ||
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c deleted file mode 100644 index 7c2665cac172..000000000000 --- a/kernel/rcupreempt_trace.c +++ /dev/null | |||
@@ -1,334 +0,0 @@ | |||
1 | /* | ||
2 | * Read-Copy Update tracing for realtime implementation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2006 | ||
19 | * | ||
20 | * Papers: http://www.rdrop.com/users/paulmck/RCU | ||
21 | * | ||
22 | * For detailed explanation of Read-Copy Update mechanism see - | ||
23 | * Documentation/RCU/ *.txt | ||
24 | * | ||
25 | */ | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <linux/smp.h> | ||
31 | #include <linux/rcupdate.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <asm/atomic.h> | ||
35 | #include <linux/bitops.h> | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/completion.h> | ||
38 | #include <linux/moduleparam.h> | ||
39 | #include <linux/percpu.h> | ||
40 | #include <linux/notifier.h> | ||
41 | #include <linux/cpu.h> | ||
42 | #include <linux/mutex.h> | ||
43 | #include <linux/rcupreempt_trace.h> | ||
44 | #include <linux/debugfs.h> | ||
45 | |||
46 | static struct mutex rcupreempt_trace_mutex; | ||
47 | static char *rcupreempt_trace_buf; | ||
48 | #define RCUPREEMPT_TRACE_BUF_SIZE 4096 | ||
49 | |||
50 | void rcupreempt_trace_move2done(struct rcupreempt_trace *trace) | ||
51 | { | ||
52 | trace->done_length += trace->wait_length; | ||
53 | trace->done_add += trace->wait_length; | ||
54 | trace->wait_length = 0; | ||
55 | } | ||
56 | void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace) | ||
57 | { | ||
58 | trace->wait_length += trace->next_length; | ||
59 | trace->wait_add += trace->next_length; | ||
60 | trace->next_length = 0; | ||
61 | } | ||
62 | void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace) | ||
63 | { | ||
64 | atomic_inc(&trace->rcu_try_flip_1); | ||
65 | } | ||
66 | void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace) | ||
67 | { | ||
68 | atomic_inc(&trace->rcu_try_flip_e1); | ||
69 | } | ||
70 | void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace) | ||
71 | { | ||
72 | trace->rcu_try_flip_i1++; | ||
73 | } | ||
74 | void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace) | ||
75 | { | ||
76 | trace->rcu_try_flip_ie1++; | ||
77 | } | ||
78 | void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace) | ||
79 | { | ||
80 | trace->rcu_try_flip_g1++; | ||
81 | } | ||
82 | void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace) | ||
83 | { | ||
84 | trace->rcu_try_flip_a1++; | ||
85 | } | ||
86 | void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace) | ||
87 | { | ||
88 | trace->rcu_try_flip_ae1++; | ||
89 | } | ||
90 | void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace) | ||
91 | { | ||
92 | trace->rcu_try_flip_a2++; | ||
93 | } | ||
94 | void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace) | ||
95 | { | ||
96 | trace->rcu_try_flip_z1++; | ||
97 | } | ||
98 | void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace) | ||
99 | { | ||
100 | trace->rcu_try_flip_ze1++; | ||
101 | } | ||
102 | void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace) | ||
103 | { | ||
104 | trace->rcu_try_flip_z2++; | ||
105 | } | ||
106 | void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace) | ||
107 | { | ||
108 | trace->rcu_try_flip_m1++; | ||
109 | } | ||
110 | void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace) | ||
111 | { | ||
112 | trace->rcu_try_flip_me1++; | ||
113 | } | ||
114 | void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace) | ||
115 | { | ||
116 | trace->rcu_try_flip_m2++; | ||
117 | } | ||
118 | void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace) | ||
119 | { | ||
120 | trace->rcu_check_callbacks++; | ||
121 | } | ||
122 | void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace) | ||
123 | { | ||
124 | trace->done_remove += trace->done_length; | ||
125 | trace->done_length = 0; | ||
126 | } | ||
127 | void rcupreempt_trace_invoke(struct rcupreempt_trace *trace) | ||
128 | { | ||
129 | atomic_inc(&trace->done_invoked); | ||
130 | } | ||
131 | void rcupreempt_trace_next_add(struct rcupreempt_trace *trace) | ||
132 | { | ||
133 | trace->next_add++; | ||
134 | trace->next_length++; | ||
135 | } | ||
136 | |||
137 | static void rcupreempt_trace_sum(struct rcupreempt_trace *sp) | ||
138 | { | ||
139 | struct rcupreempt_trace *cp; | ||
140 | int cpu; | ||
141 | |||
142 | memset(sp, 0, sizeof(*sp)); | ||
143 | for_each_possible_cpu(cpu) { | ||
144 | cp = rcupreempt_trace_cpu(cpu); | ||
145 | sp->next_length += cp->next_length; | ||
146 | sp->next_add += cp->next_add; | ||
147 | sp->wait_length += cp->wait_length; | ||
148 | sp->wait_add += cp->wait_add; | ||
149 | sp->done_length += cp->done_length; | ||
150 | sp->done_add += cp->done_add; | ||
151 | sp->done_remove += cp->done_remove; | ||
152 | atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked); | ||
153 | sp->rcu_check_callbacks += cp->rcu_check_callbacks; | ||
154 | atomic_add(atomic_read(&cp->rcu_try_flip_1), | ||
155 | &sp->rcu_try_flip_1); | ||
156 | atomic_add(atomic_read(&cp->rcu_try_flip_e1), | ||
157 | &sp->rcu_try_flip_e1); | ||
158 | sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1; | ||
159 | sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1; | ||
160 | sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1; | ||
161 | sp->rcu_try_flip_a1 += cp->rcu_try_flip_a1; | ||
162 | sp->rcu_try_flip_ae1 += cp->rcu_try_flip_ae1; | ||
163 | sp->rcu_try_flip_a2 += cp->rcu_try_flip_a2; | ||
164 | sp->rcu_try_flip_z1 += cp->rcu_try_flip_z1; | ||
165 | sp->rcu_try_flip_ze1 += cp->rcu_try_flip_ze1; | ||
166 | sp->rcu_try_flip_z2 += cp->rcu_try_flip_z2; | ||
167 | sp->rcu_try_flip_m1 += cp->rcu_try_flip_m1; | ||
168 | sp->rcu_try_flip_me1 += cp->rcu_try_flip_me1; | ||
169 | sp->rcu_try_flip_m2 += cp->rcu_try_flip_m2; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | static ssize_t rcustats_read(struct file *filp, char __user *buffer, | ||
174 | size_t count, loff_t *ppos) | ||
175 | { | ||
176 | struct rcupreempt_trace trace; | ||
177 | ssize_t bcount; | ||
178 | int cnt = 0; | ||
179 | |||
180 | rcupreempt_trace_sum(&trace); | ||
181 | mutex_lock(&rcupreempt_trace_mutex); | ||
182 | snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt, | ||
183 | "ggp=%ld rcc=%ld\n", | ||
184 | rcu_batches_completed(), | ||
185 | trace.rcu_check_callbacks); | ||
186 | snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt, | ||
187 | "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n" | ||
188 | "1=%d e1=%d i1=%ld ie1=%ld g1=%ld a1=%ld ae1=%ld a2=%ld\n" | ||
189 | "z1=%ld ze1=%ld z2=%ld m1=%ld me1=%ld m2=%ld\n", | ||
190 | |||
191 | trace.next_add, trace.next_length, | ||
192 | trace.wait_add, trace.wait_length, | ||
193 | trace.done_add, trace.done_length, | ||
194 | trace.done_remove, atomic_read(&trace.done_invoked), | ||
195 | atomic_read(&trace.rcu_try_flip_1), | ||
196 | atomic_read(&trace.rcu_try_flip_e1), | ||
197 | trace.rcu_try_flip_i1, trace.rcu_try_flip_ie1, | ||
198 | trace.rcu_try_flip_g1, | ||
199 | trace.rcu_try_flip_a1, trace.rcu_try_flip_ae1, | ||
200 | trace.rcu_try_flip_a2, | ||
201 | trace.rcu_try_flip_z1, trace.rcu_try_flip_ze1, | ||
202 | trace.rcu_try_flip_z2, | ||
203 | trace.rcu_try_flip_m1, trace.rcu_try_flip_me1, | ||
204 | trace.rcu_try_flip_m2); | ||
205 | bcount = simple_read_from_buffer(buffer, count, ppos, | ||
206 | rcupreempt_trace_buf, strlen(rcupreempt_trace_buf)); | ||
207 | mutex_unlock(&rcupreempt_trace_mutex); | ||
208 | return bcount; | ||
209 | } | ||
210 | |||
211 | static ssize_t rcugp_read(struct file *filp, char __user *buffer, | ||
212 | size_t count, loff_t *ppos) | ||
213 | { | ||
214 | long oldgp = rcu_batches_completed(); | ||
215 | ssize_t bcount; | ||
216 | |||
217 | mutex_lock(&rcupreempt_trace_mutex); | ||
218 | synchronize_rcu(); | ||
219 | snprintf(rcupreempt_trace_buf, RCUPREEMPT_TRACE_BUF_SIZE, | ||
220 | "oldggp=%ld newggp=%ld\n", oldgp, rcu_batches_completed()); | ||
221 | bcount = simple_read_from_buffer(buffer, count, ppos, | ||
222 | rcupreempt_trace_buf, strlen(rcupreempt_trace_buf)); | ||
223 | mutex_unlock(&rcupreempt_trace_mutex); | ||
224 | return bcount; | ||
225 | } | ||
226 | |||
227 | static ssize_t rcuctrs_read(struct file *filp, char __user *buffer, | ||
228 | size_t count, loff_t *ppos) | ||
229 | { | ||
230 | int cnt = 0; | ||
231 | int cpu; | ||
232 | int f = rcu_batches_completed() & 0x1; | ||
233 | ssize_t bcount; | ||
234 | |||
235 | mutex_lock(&rcupreempt_trace_mutex); | ||
236 | |||
237 | cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE, | ||
238 | "CPU last cur F M\n"); | ||
239 | for_each_online_cpu(cpu) { | ||
240 | long *flipctr = rcupreempt_flipctr(cpu); | ||
241 | cnt += snprintf(&rcupreempt_trace_buf[cnt], | ||
242 | RCUPREEMPT_TRACE_BUF_SIZE - cnt, | ||
243 | "%3d %4ld %3ld %d %d\n", | ||
244 | cpu, | ||
245 | flipctr[!f], | ||
246 | flipctr[f], | ||
247 | rcupreempt_flip_flag(cpu), | ||
248 | rcupreempt_mb_flag(cpu)); | ||
249 | } | ||
250 | cnt += snprintf(&rcupreempt_trace_buf[cnt], | ||
251 | RCUPREEMPT_TRACE_BUF_SIZE - cnt, | ||
252 | "ggp = %ld, state = %s\n", | ||
253 | rcu_batches_completed(), | ||
254 | rcupreempt_try_flip_state_name()); | ||
255 | cnt += snprintf(&rcupreempt_trace_buf[cnt], | ||
256 | RCUPREEMPT_TRACE_BUF_SIZE - cnt, | ||
257 | "\n"); | ||
258 | bcount = simple_read_from_buffer(buffer, count, ppos, | ||
259 | rcupreempt_trace_buf, strlen(rcupreempt_trace_buf)); | ||
260 | mutex_unlock(&rcupreempt_trace_mutex); | ||
261 | return bcount; | ||
262 | } | ||
263 | |||
264 | static struct file_operations rcustats_fops = { | ||
265 | .owner = THIS_MODULE, | ||
266 | .read = rcustats_read, | ||
267 | }; | ||
268 | |||
269 | static struct file_operations rcugp_fops = { | ||
270 | .owner = THIS_MODULE, | ||
271 | .read = rcugp_read, | ||
272 | }; | ||
273 | |||
274 | static struct file_operations rcuctrs_fops = { | ||
275 | .owner = THIS_MODULE, | ||
276 | .read = rcuctrs_read, | ||
277 | }; | ||
278 | |||
279 | static struct dentry *rcudir, *statdir, *ctrsdir, *gpdir; | ||
280 | static int rcupreempt_debugfs_init(void) | ||
281 | { | ||
282 | rcudir = debugfs_create_dir("rcu", NULL); | ||
283 | if (!rcudir) | ||
284 | goto out; | ||
285 | statdir = debugfs_create_file("rcustats", 0444, rcudir, | ||
286 | NULL, &rcustats_fops); | ||
287 | if (!statdir) | ||
288 | goto free_out; | ||
289 | |||
290 | gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); | ||
291 | if (!gpdir) | ||
292 | goto free_out; | ||
293 | |||
294 | ctrsdir = debugfs_create_file("rcuctrs", 0444, rcudir, | ||
295 | NULL, &rcuctrs_fops); | ||
296 | if (!ctrsdir) | ||
297 | goto free_out; | ||
298 | return 0; | ||
299 | free_out: | ||
300 | if (statdir) | ||
301 | debugfs_remove(statdir); | ||
302 | if (gpdir) | ||
303 | debugfs_remove(gpdir); | ||
304 | debugfs_remove(rcudir); | ||
305 | out: | ||
306 | return 1; | ||
307 | } | ||
308 | |||
309 | static int __init rcupreempt_trace_init(void) | ||
310 | { | ||
311 | int ret; | ||
312 | |||
313 | mutex_init(&rcupreempt_trace_mutex); | ||
314 | rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL); | ||
315 | if (!rcupreempt_trace_buf) | ||
316 | return 1; | ||
317 | ret = rcupreempt_debugfs_init(); | ||
318 | if (ret) | ||
319 | kfree(rcupreempt_trace_buf); | ||
320 | return ret; | ||
321 | } | ||
322 | |||
323 | static void __exit rcupreempt_trace_cleanup(void) | ||
324 | { | ||
325 | debugfs_remove(statdir); | ||
326 | debugfs_remove(gpdir); | ||
327 | debugfs_remove(ctrsdir); | ||
328 | debugfs_remove(rcudir); | ||
329 | kfree(rcupreempt_trace_buf); | ||
330 | } | ||
331 | |||
332 | |||
333 | module_init(rcupreempt_trace_init); | ||
334 | module_exit(rcupreempt_trace_cleanup); | ||
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 9b4a975a4b4a..b33db539a8ad 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -257,14 +257,14 @@ struct rcu_torture_ops { | |||
257 | void (*init)(void); | 257 | void (*init)(void); |
258 | void (*cleanup)(void); | 258 | void (*cleanup)(void); |
259 | int (*readlock)(void); | 259 | int (*readlock)(void); |
260 | void (*readdelay)(struct rcu_random_state *rrsp); | 260 | void (*read_delay)(struct rcu_random_state *rrsp); |
261 | void (*readunlock)(int idx); | 261 | void (*readunlock)(int idx); |
262 | int (*completed)(void); | 262 | int (*completed)(void); |
263 | void (*deferredfree)(struct rcu_torture *p); | 263 | void (*deferred_free)(struct rcu_torture *p); |
264 | void (*sync)(void); | 264 | void (*sync)(void); |
265 | void (*cb_barrier)(void); | 265 | void (*cb_barrier)(void); |
266 | int (*stats)(char *page); | 266 | int (*stats)(char *page); |
267 | int irqcapable; | 267 | int irq_capable; |
268 | char *name; | 268 | char *name; |
269 | }; | 269 | }; |
270 | static struct rcu_torture_ops *cur_ops = NULL; | 270 | static struct rcu_torture_ops *cur_ops = NULL; |
@@ -320,7 +320,7 @@ rcu_torture_cb(struct rcu_head *p) | |||
320 | rp->rtort_mbtest = 0; | 320 | rp->rtort_mbtest = 0; |
321 | rcu_torture_free(rp); | 321 | rcu_torture_free(rp); |
322 | } else | 322 | } else |
323 | cur_ops->deferredfree(rp); | 323 | cur_ops->deferred_free(rp); |
324 | } | 324 | } |
325 | 325 | ||
326 | static void rcu_torture_deferred_free(struct rcu_torture *p) | 326 | static void rcu_torture_deferred_free(struct rcu_torture *p) |
@@ -329,18 +329,18 @@ static void rcu_torture_deferred_free(struct rcu_torture *p) | |||
329 | } | 329 | } |
330 | 330 | ||
331 | static struct rcu_torture_ops rcu_ops = { | 331 | static struct rcu_torture_ops rcu_ops = { |
332 | .init = NULL, | 332 | .init = NULL, |
333 | .cleanup = NULL, | 333 | .cleanup = NULL, |
334 | .readlock = rcu_torture_read_lock, | 334 | .readlock = rcu_torture_read_lock, |
335 | .readdelay = rcu_read_delay, | 335 | .read_delay = rcu_read_delay, |
336 | .readunlock = rcu_torture_read_unlock, | 336 | .readunlock = rcu_torture_read_unlock, |
337 | .completed = rcu_torture_completed, | 337 | .completed = rcu_torture_completed, |
338 | .deferredfree = rcu_torture_deferred_free, | 338 | .deferred_free = rcu_torture_deferred_free, |
339 | .sync = synchronize_rcu, | 339 | .sync = synchronize_rcu, |
340 | .cb_barrier = rcu_barrier, | 340 | .cb_barrier = rcu_barrier, |
341 | .stats = NULL, | 341 | .stats = NULL, |
342 | .irqcapable = 1, | 342 | .irq_capable = 1, |
343 | .name = "rcu" | 343 | .name = "rcu" |
344 | }; | 344 | }; |
345 | 345 | ||
346 | static void rcu_sync_torture_deferred_free(struct rcu_torture *p) | 346 | static void rcu_sync_torture_deferred_free(struct rcu_torture *p) |
@@ -370,18 +370,18 @@ static void rcu_sync_torture_init(void) | |||
370 | } | 370 | } |
371 | 371 | ||
372 | static struct rcu_torture_ops rcu_sync_ops = { | 372 | static struct rcu_torture_ops rcu_sync_ops = { |
373 | .init = rcu_sync_torture_init, | 373 | .init = rcu_sync_torture_init, |
374 | .cleanup = NULL, | 374 | .cleanup = NULL, |
375 | .readlock = rcu_torture_read_lock, | 375 | .readlock = rcu_torture_read_lock, |
376 | .readdelay = rcu_read_delay, | 376 | .read_delay = rcu_read_delay, |
377 | .readunlock = rcu_torture_read_unlock, | 377 | .readunlock = rcu_torture_read_unlock, |
378 | .completed = rcu_torture_completed, | 378 | .completed = rcu_torture_completed, |
379 | .deferredfree = rcu_sync_torture_deferred_free, | 379 | .deferred_free = rcu_sync_torture_deferred_free, |
380 | .sync = synchronize_rcu, | 380 | .sync = synchronize_rcu, |
381 | .cb_barrier = NULL, | 381 | .cb_barrier = NULL, |
382 | .stats = NULL, | 382 | .stats = NULL, |
383 | .irqcapable = 1, | 383 | .irq_capable = 1, |
384 | .name = "rcu_sync" | 384 | .name = "rcu_sync" |
385 | }; | 385 | }; |
386 | 386 | ||
387 | /* | 387 | /* |
@@ -432,33 +432,33 @@ static void rcu_bh_torture_synchronize(void) | |||
432 | } | 432 | } |
433 | 433 | ||
434 | static struct rcu_torture_ops rcu_bh_ops = { | 434 | static struct rcu_torture_ops rcu_bh_ops = { |
435 | .init = NULL, | 435 | .init = NULL, |
436 | .cleanup = NULL, | 436 | .cleanup = NULL, |
437 | .readlock = rcu_bh_torture_read_lock, | 437 | .readlock = rcu_bh_torture_read_lock, |
438 | .readdelay = rcu_read_delay, /* just reuse rcu's version. */ | 438 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
439 | .readunlock = rcu_bh_torture_read_unlock, | 439 | .readunlock = rcu_bh_torture_read_unlock, |
440 | .completed = rcu_bh_torture_completed, | 440 | .completed = rcu_bh_torture_completed, |
441 | .deferredfree = rcu_bh_torture_deferred_free, | 441 | .deferred_free = rcu_bh_torture_deferred_free, |
442 | .sync = rcu_bh_torture_synchronize, | 442 | .sync = rcu_bh_torture_synchronize, |
443 | .cb_barrier = rcu_barrier_bh, | 443 | .cb_barrier = rcu_barrier_bh, |
444 | .stats = NULL, | 444 | .stats = NULL, |
445 | .irqcapable = 1, | 445 | .irq_capable = 1, |
446 | .name = "rcu_bh" | 446 | .name = "rcu_bh" |
447 | }; | 447 | }; |
448 | 448 | ||
449 | static struct rcu_torture_ops rcu_bh_sync_ops = { | 449 | static struct rcu_torture_ops rcu_bh_sync_ops = { |
450 | .init = rcu_sync_torture_init, | 450 | .init = rcu_sync_torture_init, |
451 | .cleanup = NULL, | 451 | .cleanup = NULL, |
452 | .readlock = rcu_bh_torture_read_lock, | 452 | .readlock = rcu_bh_torture_read_lock, |
453 | .readdelay = rcu_read_delay, /* just reuse rcu's version. */ | 453 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
454 | .readunlock = rcu_bh_torture_read_unlock, | 454 | .readunlock = rcu_bh_torture_read_unlock, |
455 | .completed = rcu_bh_torture_completed, | 455 | .completed = rcu_bh_torture_completed, |
456 | .deferredfree = rcu_sync_torture_deferred_free, | 456 | .deferred_free = rcu_sync_torture_deferred_free, |
457 | .sync = rcu_bh_torture_synchronize, | 457 | .sync = rcu_bh_torture_synchronize, |
458 | .cb_barrier = NULL, | 458 | .cb_barrier = NULL, |
459 | .stats = NULL, | 459 | .stats = NULL, |
460 | .irqcapable = 1, | 460 | .irq_capable = 1, |
461 | .name = "rcu_bh_sync" | 461 | .name = "rcu_bh_sync" |
462 | }; | 462 | }; |
463 | 463 | ||
464 | /* | 464 | /* |
@@ -530,17 +530,17 @@ static int srcu_torture_stats(char *page) | |||
530 | } | 530 | } |
531 | 531 | ||
532 | static struct rcu_torture_ops srcu_ops = { | 532 | static struct rcu_torture_ops srcu_ops = { |
533 | .init = srcu_torture_init, | 533 | .init = srcu_torture_init, |
534 | .cleanup = srcu_torture_cleanup, | 534 | .cleanup = srcu_torture_cleanup, |
535 | .readlock = srcu_torture_read_lock, | 535 | .readlock = srcu_torture_read_lock, |
536 | .readdelay = srcu_read_delay, | 536 | .read_delay = srcu_read_delay, |
537 | .readunlock = srcu_torture_read_unlock, | 537 | .readunlock = srcu_torture_read_unlock, |
538 | .completed = srcu_torture_completed, | 538 | .completed = srcu_torture_completed, |
539 | .deferredfree = rcu_sync_torture_deferred_free, | 539 | .deferred_free = rcu_sync_torture_deferred_free, |
540 | .sync = srcu_torture_synchronize, | 540 | .sync = srcu_torture_synchronize, |
541 | .cb_barrier = NULL, | 541 | .cb_barrier = NULL, |
542 | .stats = srcu_torture_stats, | 542 | .stats = srcu_torture_stats, |
543 | .name = "srcu" | 543 | .name = "srcu" |
544 | }; | 544 | }; |
545 | 545 | ||
546 | /* | 546 | /* |
@@ -574,32 +574,49 @@ static void sched_torture_synchronize(void) | |||
574 | } | 574 | } |
575 | 575 | ||
576 | static struct rcu_torture_ops sched_ops = { | 576 | static struct rcu_torture_ops sched_ops = { |
577 | .init = rcu_sync_torture_init, | 577 | .init = rcu_sync_torture_init, |
578 | .cleanup = NULL, | 578 | .cleanup = NULL, |
579 | .readlock = sched_torture_read_lock, | 579 | .readlock = sched_torture_read_lock, |
580 | .readdelay = rcu_read_delay, /* just reuse rcu's version. */ | 580 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
581 | .readunlock = sched_torture_read_unlock, | 581 | .readunlock = sched_torture_read_unlock, |
582 | .completed = sched_torture_completed, | 582 | .completed = sched_torture_completed, |
583 | .deferredfree = rcu_sched_torture_deferred_free, | 583 | .deferred_free = rcu_sched_torture_deferred_free, |
584 | .sync = sched_torture_synchronize, | 584 | .sync = sched_torture_synchronize, |
585 | .cb_barrier = rcu_barrier_sched, | 585 | .cb_barrier = rcu_barrier_sched, |
586 | .stats = NULL, | 586 | .stats = NULL, |
587 | .irqcapable = 1, | 587 | .irq_capable = 1, |
588 | .name = "sched" | 588 | .name = "sched" |
589 | }; | 589 | }; |
590 | 590 | ||
591 | static struct rcu_torture_ops sched_ops_sync = { | 591 | static struct rcu_torture_ops sched_ops_sync = { |
592 | .init = rcu_sync_torture_init, | 592 | .init = rcu_sync_torture_init, |
593 | .cleanup = NULL, | 593 | .cleanup = NULL, |
594 | .readlock = sched_torture_read_lock, | 594 | .readlock = sched_torture_read_lock, |
595 | .readdelay = rcu_read_delay, /* just reuse rcu's version. */ | 595 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
596 | .readunlock = sched_torture_read_unlock, | 596 | .readunlock = sched_torture_read_unlock, |
597 | .completed = sched_torture_completed, | 597 | .completed = sched_torture_completed, |
598 | .deferredfree = rcu_sync_torture_deferred_free, | 598 | .deferred_free = rcu_sync_torture_deferred_free, |
599 | .sync = sched_torture_synchronize, | 599 | .sync = sched_torture_synchronize, |
600 | .cb_barrier = NULL, | 600 | .cb_barrier = NULL, |
601 | .stats = NULL, | 601 | .stats = NULL, |
602 | .name = "sched_sync" | 602 | .name = "sched_sync" |
603 | }; | ||
604 | |||
605 | extern int rcu_expedited_torture_stats(char *page); | ||
606 | |||
607 | static struct rcu_torture_ops sched_expedited_ops = { | ||
608 | .init = rcu_sync_torture_init, | ||
609 | .cleanup = NULL, | ||
610 | .readlock = sched_torture_read_lock, | ||
611 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
612 | .readunlock = sched_torture_read_unlock, | ||
613 | .completed = sched_torture_completed, | ||
614 | .deferred_free = rcu_sync_torture_deferred_free, | ||
615 | .sync = synchronize_sched_expedited, | ||
616 | .cb_barrier = NULL, | ||
617 | .stats = rcu_expedited_torture_stats, | ||
618 | .irq_capable = 1, | ||
619 | .name = "sched_expedited" | ||
603 | }; | 620 | }; |
604 | 621 | ||
605 | /* | 622 | /* |
@@ -635,7 +652,7 @@ rcu_torture_writer(void *arg) | |||
635 | i = RCU_TORTURE_PIPE_LEN; | 652 | i = RCU_TORTURE_PIPE_LEN; |
636 | atomic_inc(&rcu_torture_wcount[i]); | 653 | atomic_inc(&rcu_torture_wcount[i]); |
637 | old_rp->rtort_pipe_count++; | 654 | old_rp->rtort_pipe_count++; |
638 | cur_ops->deferredfree(old_rp); | 655 | cur_ops->deferred_free(old_rp); |
639 | } | 656 | } |
640 | rcu_torture_current_version++; | 657 | rcu_torture_current_version++; |
641 | oldbatch = cur_ops->completed(); | 658 | oldbatch = cur_ops->completed(); |
@@ -700,7 +717,7 @@ static void rcu_torture_timer(unsigned long unused) | |||
700 | if (p->rtort_mbtest == 0) | 717 | if (p->rtort_mbtest == 0) |
701 | atomic_inc(&n_rcu_torture_mberror); | 718 | atomic_inc(&n_rcu_torture_mberror); |
702 | spin_lock(&rand_lock); | 719 | spin_lock(&rand_lock); |
703 | cur_ops->readdelay(&rand); | 720 | cur_ops->read_delay(&rand); |
704 | n_rcu_torture_timers++; | 721 | n_rcu_torture_timers++; |
705 | spin_unlock(&rand_lock); | 722 | spin_unlock(&rand_lock); |
706 | preempt_disable(); | 723 | preempt_disable(); |
@@ -738,11 +755,11 @@ rcu_torture_reader(void *arg) | |||
738 | 755 | ||
739 | VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); | 756 | VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); |
740 | set_user_nice(current, 19); | 757 | set_user_nice(current, 19); |
741 | if (irqreader && cur_ops->irqcapable) | 758 | if (irqreader && cur_ops->irq_capable) |
742 | setup_timer_on_stack(&t, rcu_torture_timer, 0); | 759 | setup_timer_on_stack(&t, rcu_torture_timer, 0); |
743 | 760 | ||
744 | do { | 761 | do { |
745 | if (irqreader && cur_ops->irqcapable) { | 762 | if (irqreader && cur_ops->irq_capable) { |
746 | if (!timer_pending(&t)) | 763 | if (!timer_pending(&t)) |
747 | mod_timer(&t, 1); | 764 | mod_timer(&t, 1); |
748 | } | 765 | } |
@@ -757,7 +774,7 @@ rcu_torture_reader(void *arg) | |||
757 | } | 774 | } |
758 | if (p->rtort_mbtest == 0) | 775 | if (p->rtort_mbtest == 0) |
759 | atomic_inc(&n_rcu_torture_mberror); | 776 | atomic_inc(&n_rcu_torture_mberror); |
760 | cur_ops->readdelay(&rand); | 777 | cur_ops->read_delay(&rand); |
761 | preempt_disable(); | 778 | preempt_disable(); |
762 | pipe_count = p->rtort_pipe_count; | 779 | pipe_count = p->rtort_pipe_count; |
763 | if (pipe_count > RCU_TORTURE_PIPE_LEN) { | 780 | if (pipe_count > RCU_TORTURE_PIPE_LEN) { |
@@ -778,7 +795,7 @@ rcu_torture_reader(void *arg) | |||
778 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 795 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); |
779 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); | 796 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); |
780 | rcutorture_shutdown_absorb("rcu_torture_reader"); | 797 | rcutorture_shutdown_absorb("rcu_torture_reader"); |
781 | if (irqreader && cur_ops->irqcapable) | 798 | if (irqreader && cur_ops->irq_capable) |
782 | del_timer_sync(&t); | 799 | del_timer_sync(&t); |
783 | while (!kthread_should_stop()) | 800 | while (!kthread_should_stop()) |
784 | schedule_timeout_uninterruptible(1); | 801 | schedule_timeout_uninterruptible(1); |
@@ -1078,6 +1095,7 @@ rcu_torture_init(void) | |||
1078 | int firsterr = 0; | 1095 | int firsterr = 0; |
1079 | static struct rcu_torture_ops *torture_ops[] = | 1096 | static struct rcu_torture_ops *torture_ops[] = |
1080 | { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, | 1097 | { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, |
1098 | &sched_expedited_ops, | ||
1081 | &srcu_ops, &sched_ops, &sched_ops_sync, }; | 1099 | &srcu_ops, &sched_ops, &sched_ops_sync, }; |
1082 | 1100 | ||
1083 | mutex_lock(&fullstop_mutex); | 1101 | mutex_lock(&fullstop_mutex); |
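Adding sched_expedited_ops to torture_ops[] is all that is needed for the new flavor to become selectable: rcu_torture_init() walks the array and matches the requested torture type against each ->name. A rough userspace sketch of that lookup (structure and names simplified; "sched_expedited" stands in for the module parameter value):

        #include <stdio.h>
        #include <string.h>

        struct ops { const char *name; };

        static struct ops rcu_ops             = { "rcu" };
        static struct ops sched_expedited_ops = { "sched_expedited" };
        static struct ops srcu_ops            = { "srcu" };

        int main(void)
        {
                /* Mirrors the torture_ops[] table: a new flavor is just a new entry. */
                struct ops *table[] = { &rcu_ops, &sched_expedited_ops, &srcu_ops };
                const char *requested = "sched_expedited";
                struct ops *cur = NULL;
                size_t i;

                for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                        if (strcmp(table[i]->name, requested) == 0) {
                                cur = table[i];
                                break;
                        }
                if (cur)
                        printf("selected %s\n", cur->name);
                else
                        printf("no such flavor: %s\n", requested);
                return 0;
        }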
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 9c5fa9fc57ec..6b11b07cfe7f 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -47,6 +47,8 @@ | |||
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/time.h> | 48 | #include <linux/time.h> |
49 | 49 | ||
50 | #include "rcutree.h" | ||
51 | |||
50 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 52 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
51 | static struct lock_class_key rcu_lock_key; | 53 | static struct lock_class_key rcu_lock_key; |
52 | struct lockdep_map rcu_lock_map = | 54 | struct lockdep_map rcu_lock_map = |
@@ -73,30 +75,59 @@ EXPORT_SYMBOL_GPL(rcu_lock_map); | |||
73 | .n_force_qs_ngp = 0, \ | 75 | .n_force_qs_ngp = 0, \ |
74 | } | 76 | } |
75 | 77 | ||
76 | struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state); | 78 | struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); |
77 | DEFINE_PER_CPU(struct rcu_data, rcu_data); | 79 | DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); |
78 | 80 | ||
79 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 81 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
80 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 82 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
81 | 83 | ||
84 | extern long rcu_batches_completed_sched(void); | ||
85 | static struct rcu_node *rcu_get_root(struct rcu_state *rsp); | ||
86 | static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, | ||
87 | struct rcu_node *rnp, unsigned long flags); | ||
88 | static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags); | ||
89 | #ifdef CONFIG_HOTPLUG_CPU | ||
90 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp); | ||
91 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
92 | static void __rcu_process_callbacks(struct rcu_state *rsp, | ||
93 | struct rcu_data *rdp); | ||
94 | static void __call_rcu(struct rcu_head *head, | ||
95 | void (*func)(struct rcu_head *rcu), | ||
96 | struct rcu_state *rsp); | ||
97 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp); | ||
98 | static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp, | ||
99 | int preemptable); | ||
100 | |||
101 | #include "rcutree_plugin.h" | ||
102 | |||
82 | /* | 103 | /* |
83 | * Increment the quiescent state counter. | 104 | * Note a quiescent state. Because we do not need to know |
84 | * The counter is a bit degenerated: We do not need to know | ||
85 | * how many quiescent states passed, just if there was at least | 105 | * how many quiescent states passed, just if there was at least |
86 | * one since the start of the grace period. Thus just a flag. | 106 | * one since the start of the grace period, this just sets a flag. |
87 | */ | 107 | */ |
88 | void rcu_qsctr_inc(int cpu) | 108 | void rcu_sched_qs(int cpu) |
89 | { | 109 | { |
90 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | 110 | unsigned long flags; |
111 | struct rcu_data *rdp; | ||
112 | |||
113 | local_irq_save(flags); | ||
114 | rdp = &per_cpu(rcu_sched_data, cpu); | ||
91 | rdp->passed_quiesc = 1; | 115 | rdp->passed_quiesc = 1; |
92 | rdp->passed_quiesc_completed = rdp->completed; | 116 | rdp->passed_quiesc_completed = rdp->completed; |
117 | rcu_preempt_qs(cpu); | ||
118 | local_irq_restore(flags); | ||
93 | } | 119 | } |
94 | 120 | ||
95 | void rcu_bh_qsctr_inc(int cpu) | 121 | void rcu_bh_qs(int cpu) |
96 | { | 122 | { |
97 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | 123 | unsigned long flags; |
124 | struct rcu_data *rdp; | ||
125 | |||
126 | local_irq_save(flags); | ||
127 | rdp = &per_cpu(rcu_bh_data, cpu); | ||
98 | rdp->passed_quiesc = 1; | 128 | rdp->passed_quiesc = 1; |
99 | rdp->passed_quiesc_completed = rdp->completed; | 129 | rdp->passed_quiesc_completed = rdp->completed; |
130 | local_irq_restore(flags); | ||
100 | } | 131 | } |
101 | 132 | ||
102 | #ifdef CONFIG_NO_HZ | 133 | #ifdef CONFIG_NO_HZ |
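As the reworded comment above notes, the core never counts quiescent states; it only needs to know whether at least one occurred since the current grace period began, so a per-CPU flag plus a snapshot of the completed-GP number is enough. A toy userspace model of that idea (field names borrowed from rcu_data, the rest hypothetical):

        #include <stdio.h>
        #include <stdbool.h>

        /* Per-CPU view: one flag and the GP number it was recorded against. */
        struct cpu_qs {
                bool passed_quiesc;
                long passed_quiesc_completed;
        };

        static long completed;          /* number of the last completed grace period */

        static void note_qs(struct cpu_qs *c)
        {
                /* Noting a second QS in the same GP is harmless: "at least one" suffices. */
                c->passed_quiesc = true;
                c->passed_quiesc_completed = completed;
        }

        static bool counts_for_gp_after(const struct cpu_qs *c, long gp)
        {
                /* A flag recorded against an older GP must not satisfy a newer one. */
                return c->passed_quiesc && c->passed_quiesc_completed >= gp;
        }

        int main(void)
        {
                struct cpu_qs c = { false, -1 };

                completed = 4;
                note_qs(&c);
                note_qs(&c);    /* second QS in the same GP changes nothing */
                printf("counts for GP started after 4? %d\n", counts_for_gp_after(&c, 4));
                printf("counts for GP started after 5? %d\n", counts_for_gp_after(&c, 5));
                return 0;
        }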
@@ -111,15 +142,16 @@ static int qhimark = 10000; /* If this many pending, ignore blimit. */ | |||
111 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ | 142 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ |
112 | 143 | ||
113 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); | 144 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); |
145 | static int rcu_pending(int cpu); | ||
114 | 146 | ||
115 | /* | 147 | /* |
116 | * Return the number of RCU batches processed thus far for debug & stats. | 148 | * Return the number of RCU-sched batches processed thus far for debug & stats. |
117 | */ | 149 | */ |
118 | long rcu_batches_completed(void) | 150 | long rcu_batches_completed_sched(void) |
119 | { | 151 | { |
120 | return rcu_state.completed; | 152 | return rcu_sched_state.completed; |
121 | } | 153 | } |
122 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | 154 | EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); |
123 | 155 | ||
124 | /* | 156 | /* |
125 | * Return the number of RCU BH batches processed thus far for debug & stats. | 157 | * Return the number of RCU BH batches processed thus far for debug & stats. |
@@ -182,6 +214,10 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp) | |||
182 | return 1; | 214 | return 1; |
183 | } | 215 | } |
184 | 216 | ||
217 | /* If preemptable RCU, no point in sending reschedule IPI. */ | ||
218 | if (rdp->preemptable) | ||
219 | return 0; | ||
220 | |||
185 | /* The CPU is online, so send it a reschedule IPI. */ | 221 | /* The CPU is online, so send it a reschedule IPI. */ |
186 | if (rdp->cpu != smp_processor_id()) | 222 | if (rdp->cpu != smp_processor_id()) |
187 | smp_send_reschedule(rdp->cpu); | 223 | smp_send_reschedule(rdp->cpu); |
@@ -194,7 +230,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp) | |||
194 | #endif /* #ifdef CONFIG_SMP */ | 230 | #endif /* #ifdef CONFIG_SMP */ |
195 | 231 | ||
196 | #ifdef CONFIG_NO_HZ | 232 | #ifdef CONFIG_NO_HZ |
197 | static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5); | ||
198 | 233 | ||
199 | /** | 234 | /** |
200 | * rcu_enter_nohz - inform RCU that current CPU is entering nohz | 235 | * rcu_enter_nohz - inform RCU that current CPU is entering nohz |
@@ -214,7 +249,7 @@ void rcu_enter_nohz(void) | |||
214 | rdtp = &__get_cpu_var(rcu_dynticks); | 249 | rdtp = &__get_cpu_var(rcu_dynticks); |
215 | rdtp->dynticks++; | 250 | rdtp->dynticks++; |
216 | rdtp->dynticks_nesting--; | 251 | rdtp->dynticks_nesting--; |
217 | WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); | 252 | WARN_ON_ONCE(rdtp->dynticks & 0x1); |
218 | local_irq_restore(flags); | 253 | local_irq_restore(flags); |
219 | } | 254 | } |
220 | 255 | ||
@@ -233,7 +268,7 @@ void rcu_exit_nohz(void) | |||
233 | rdtp = &__get_cpu_var(rcu_dynticks); | 268 | rdtp = &__get_cpu_var(rcu_dynticks); |
234 | rdtp->dynticks++; | 269 | rdtp->dynticks++; |
235 | rdtp->dynticks_nesting++; | 270 | rdtp->dynticks_nesting++; |
236 | WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); | 271 | WARN_ON_ONCE(!(rdtp->dynticks & 0x1)); |
237 | local_irq_restore(flags); | 272 | local_irq_restore(flags); |
238 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | 273 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ |
239 | } | 274 | } |
@@ -252,7 +287,7 @@ void rcu_nmi_enter(void) | |||
252 | if (rdtp->dynticks & 0x1) | 287 | if (rdtp->dynticks & 0x1) |
253 | return; | 288 | return; |
254 | rdtp->dynticks_nmi++; | 289 | rdtp->dynticks_nmi++; |
255 | WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs); | 290 | WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1)); |
256 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | 291 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ |
257 | } | 292 | } |
258 | 293 | ||
@@ -271,7 +306,7 @@ void rcu_nmi_exit(void) | |||
271 | return; | 306 | return; |
272 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | 307 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ |
273 | rdtp->dynticks_nmi++; | 308 | rdtp->dynticks_nmi++; |
274 | WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs); | 309 | WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1); |
275 | } | 310 | } |
276 | 311 | ||
277 | /** | 312 | /** |
@@ -287,7 +322,7 @@ void rcu_irq_enter(void) | |||
287 | if (rdtp->dynticks_nesting++) | 322 | if (rdtp->dynticks_nesting++) |
288 | return; | 323 | return; |
289 | rdtp->dynticks++; | 324 | rdtp->dynticks++; |
290 | WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); | 325 | WARN_ON_ONCE(!(rdtp->dynticks & 0x1)); |
291 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | 326 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ |
292 | } | 327 | } |
293 | 328 | ||
@@ -306,10 +341,10 @@ void rcu_irq_exit(void) | |||
306 | return; | 341 | return; |
307 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | 342 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ |
308 | rdtp->dynticks++; | 343 | rdtp->dynticks++; |
309 | WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); | 344 | WARN_ON_ONCE(rdtp->dynticks & 0x1); |
310 | 345 | ||
311 | /* If the interrupt queued a callback, get out of dyntick mode. */ | 346 | /* If the interrupt queued a callback, get out of dyntick mode. */ |
312 | if (__get_cpu_var(rcu_data).nxtlist || | 347 | if (__get_cpu_var(rcu_sched_data).nxtlist || |
313 | __get_cpu_var(rcu_bh_data).nxtlist) | 348 | __get_cpu_var(rcu_bh_data).nxtlist) |
314 | set_need_resched(); | 349 | set_need_resched(); |
315 | } | 350 | } |
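The WARN_ON_RATELIMIT to WARN_ON_ONCE changes above all guard the same invariant: the dynticks counter is incremented on every idle entry and exit, so it is even while the CPU is in dynticks-idle and odd otherwise, and a remote CPU can compare two snapshots to decide whether the CPU was idle (a quiescent state) in between. A small model of that parity check, under those assumptions:

        #include <stdio.h>

        static int dynticks = 1;        /* odd: CPU starts out non-idle */

        static void enter_idle(void) { dynticks++; /* becomes even */ }
        static void exit_idle(void)  { dynticks++; /* becomes odd  */ }

        /* Remote check: was this CPU idle at, or did it pass through idle
         * between, the two snapshots?  Either way counts as a quiescent state. */
        static int saw_quiescent_state(int snap_before, int snap_after)
        {
                return (snap_before & 0x1) == 0 ||      /* idle at the first snapshot */
                       snap_after != snap_before;       /* or it went idle since then */
        }

        int main(void)
        {
                int snap = dynticks;                    /* odd: busy */

                printf("no idle yet: %d\n", saw_quiescent_state(snap, dynticks));
                enter_idle();
                exit_idle();
                printf("went idle:   %d\n", saw_quiescent_state(snap, dynticks));
                return 0;
        }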
@@ -462,6 +497,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
462 | 497 | ||
463 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | 498 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); |
464 | for (; rnp_cur < rnp_end; rnp_cur++) { | 499 | for (; rnp_cur < rnp_end; rnp_cur++) { |
500 | rcu_print_task_stall(rnp); | ||
465 | if (rnp_cur->qsmask == 0) | 501 | if (rnp_cur->qsmask == 0) |
466 | continue; | 502 | continue; |
467 | for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) | 503 | for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) |
@@ -679,6 +715,19 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | |||
679 | } | 715 | } |
680 | 716 | ||
681 | /* | 717 | /* |
718 | * Clean up after the prior grace period and let rcu_start_gp() start up | ||
719 | * the next grace period if one is needed. Note that the caller must | ||
720 | * hold rnp->lock, as required by rcu_start_gp(), which will release it. | ||
721 | */ | ||
722 | static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) | ||
723 | __releases(rnp->lock) | ||
724 | { | ||
725 | rsp->completed = rsp->gpnum; | ||
726 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | ||
727 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ | ||
728 | } | ||
729 | |||
730 | /* | ||
682 | * Similar to cpu_quiet(), for which it is a helper function. Allows | 731 | * Similar to cpu_quiet(), for which it is a helper function. Allows |
683 | * a group of CPUs to be quieted at one go, though all the CPUs in the | 732 | * a group of CPUs to be quieted at one go, though all the CPUs in the |
684 | * group must be represented by the same leaf rcu_node structure. | 733 | * group must be represented by the same leaf rcu_node structure. |
@@ -699,7 +748,7 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
699 | return; | 748 | return; |
700 | } | 749 | } |
701 | rnp->qsmask &= ~mask; | 750 | rnp->qsmask &= ~mask; |
702 | if (rnp->qsmask != 0) { | 751 | if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { |
703 | 752 | ||
704 | /* Other bits still set at this level, so done. */ | 753 | /* Other bits still set at this level, so done. */ |
705 | spin_unlock_irqrestore(&rnp->lock, flags); | 754 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -719,14 +768,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
719 | 768 | ||
720 | /* | 769 | /* |
721 | * Get here if we are the last CPU to pass through a quiescent | 770 | * Get here if we are the last CPU to pass through a quiescent |
722 | * state for this grace period. Clean up and let rcu_start_gp() | 771 | * state for this grace period. Invoke cpu_quiet_msk_finish() |
723 | * start up the next grace period if one is needed. Note that | 772 | * to clean up and start the next grace period if one is needed. |
724 | * we still hold rnp->lock, as required by rcu_start_gp(), which | ||
725 | * will release it. | ||
726 | */ | 773 | */ |
727 | rsp->completed = rsp->gpnum; | 774 | cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ |
728 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | ||
729 | rcu_start_gp(rsp, flags); /* releases rnp->lock. */ | ||
730 | } | 775 | } |
731 | 776 | ||
732 | /* | 777 | /* |
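cpu_quiet_msk() clears the reporting CPU's bit in its leaf rcu_node; only when a node's mask reaches zero (and, with the new check above, no readers remain preempted within it) does the report move up to the parent, and only when the root's mask empties does cpu_quiet_msk_finish() end the grace period. A compact two-level model of that propagation (plain C, no locking or preempted-reader lists, purely illustrative):

        #include <stdio.h>

        struct node {
                unsigned long qsmask;   /* bits still awaited from children/CPUs */
                unsigned long grpmask;  /* this node's bit in its parent's qsmask */
                struct node *parent;
        };

        static struct node root = { 0x3, 0, NULL };     /* waiting on two leaves */
        static struct node leaf[2] = {
                { 0x3, 0x1, &root },    /* leaf 0: CPUs 0-1, bit 0 in root */
                { 0x1, 0x2, &root },    /* leaf 1: CPU 2,    bit 1 in root */
        };

        static void report_qs(struct node *rnp, unsigned long mask)
        {
                for (;;) {
                        rnp->qsmask &= ~mask;
                        if (rnp->qsmask != 0)
                                return;         /* others still pending at this level */
                        if (!rnp->parent) {
                                printf("grace period complete\n");  /* cpu_quiet_msk_finish() */
                                return;
                        }
                        mask = rnp->grpmask;    /* propagate one level up */
                        rnp = rnp->parent;
                }
        }

        int main(void)
        {
                report_qs(&leaf[0], 0x1);       /* CPU 0 */
                report_qs(&leaf[1], 0x1);       /* CPU 2: leaf 1 empties, root bit 1 clears */
                report_qs(&leaf[0], 0x2);       /* CPU 1: last report ends the grace period */
                return 0;
        }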
@@ -833,11 +878,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
833 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 878 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
834 | rnp->qsmaskinit &= ~mask; | 879 | rnp->qsmaskinit &= ~mask; |
835 | if (rnp->qsmaskinit != 0) { | 880 | if (rnp->qsmaskinit != 0) { |
836 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 881 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
837 | break; | 882 | break; |
838 | } | 883 | } |
884 | rcu_preempt_offline_tasks(rsp, rnp); | ||
839 | mask = rnp->grpmask; | 885 | mask = rnp->grpmask; |
840 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 886 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
841 | rnp = rnp->parent; | 887 | rnp = rnp->parent; |
842 | } while (rnp != NULL); | 888 | } while (rnp != NULL); |
843 | lastcomp = rsp->completed; | 889 | lastcomp = rsp->completed; |
@@ -850,7 +896,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
850 | /* | 896 | /* |
851 | * Move callbacks from the outgoing CPU to the running CPU. | 897 | * Move callbacks from the outgoing CPU to the running CPU. |
852 | * Note that the outgoing CPU is now quiescent, so it is now | 898 | * Note that the outgoing CPU is now quiescent, so it is now |

853 | * (uncharacteristically) safe to access it rcu_data structure. | 899 | * (uncharacteristically) safe to access its rcu_data structure. |
854 | * Note also that we must carefully retain the order of the | 900 | * Note also that we must carefully retain the order of the |
855 | * outgoing CPU's callbacks in order for rcu_barrier() to work | 901 | * outgoing CPU's callbacks in order for rcu_barrier() to work |
856 | * correctly. Finally, note that we start all the callbacks | 902 | * correctly. Finally, note that we start all the callbacks |
@@ -881,8 +927,9 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
881 | */ | 927 | */ |
882 | static void rcu_offline_cpu(int cpu) | 928 | static void rcu_offline_cpu(int cpu) |
883 | { | 929 | { |
884 | __rcu_offline_cpu(cpu, &rcu_state); | 930 | __rcu_offline_cpu(cpu, &rcu_sched_state); |
885 | __rcu_offline_cpu(cpu, &rcu_bh_state); | 931 | __rcu_offline_cpu(cpu, &rcu_bh_state); |
932 | rcu_preempt_offline_cpu(cpu); | ||
886 | } | 933 | } |
887 | 934 | ||
888 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | 935 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ |
@@ -968,6 +1015,8 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
968 | */ | 1015 | */ |
969 | void rcu_check_callbacks(int cpu, int user) | 1016 | void rcu_check_callbacks(int cpu, int user) |
970 | { | 1017 | { |
1018 | if (!rcu_pending(cpu)) | ||
1019 | return; /* if nothing for RCU to do. */ | ||
971 | if (user || | 1020 | if (user || |
972 | (idle_cpu(cpu) && rcu_scheduler_active && | 1021 | (idle_cpu(cpu) && rcu_scheduler_active && |
973 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | 1022 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { |
@@ -976,17 +1025,16 @@ void rcu_check_callbacks(int cpu, int user) | |||
976 | * Get here if this CPU took its interrupt from user | 1025 | * Get here if this CPU took its interrupt from user |
977 | * mode or from the idle loop, and if this is not a | 1026 | * mode or from the idle loop, and if this is not a |
978 | * nested interrupt. In this case, the CPU is in | 1027 | * nested interrupt. In this case, the CPU is in |
979 | * a quiescent state, so count it. | 1028 | * a quiescent state, so note it. |
980 | * | 1029 | * |
981 | * No memory barrier is required here because both | 1030 | * No memory barrier is required here because both |
982 | * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference | 1031 | * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local |
983 | * only CPU-local variables that other CPUs neither | 1032 | * variables that other CPUs neither access nor modify, |
984 | * access nor modify, at least not while the corresponding | 1033 | * at least not while the corresponding CPU is online. |
985 | * CPU is online. | ||
986 | */ | 1034 | */ |
987 | 1035 | ||
988 | rcu_qsctr_inc(cpu); | 1036 | rcu_sched_qs(cpu); |
989 | rcu_bh_qsctr_inc(cpu); | 1037 | rcu_bh_qs(cpu); |
990 | 1038 | ||
991 | } else if (!in_softirq()) { | 1039 | } else if (!in_softirq()) { |
992 | 1040 | ||
@@ -994,11 +1042,12 @@ void rcu_check_callbacks(int cpu, int user) | |||
994 | * Get here if this CPU did not take its interrupt from | 1042 | * Get here if this CPU did not take its interrupt from |
995 | * softirq, in other words, if it is not interrupting | 1043 | * softirq, in other words, if it is not interrupting |
996 | * a rcu_bh read-side critical section. This is an _bh | 1044 | * a rcu_bh read-side critical section. This is an _bh |
997 | * critical section, so count it. | 1045 | * critical section, so note it. |
998 | */ | 1046 | */ |
999 | 1047 | ||
1000 | rcu_bh_qsctr_inc(cpu); | 1048 | rcu_bh_qs(cpu); |
1001 | } | 1049 | } |
1050 | rcu_preempt_check_callbacks(cpu); | ||
1002 | raise_softirq(RCU_SOFTIRQ); | 1051 | raise_softirq(RCU_SOFTIRQ); |
1003 | } | 1052 | } |
1004 | 1053 | ||
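rcu_check_callbacks() runs from the scheduling-clock interrupt and classifies what the interrupted context implies: an interrupt taken from user mode or the idle loop is a quiescent state for both rcu_sched and rcu_bh, while an interrupt that merely did not arrive during softirq processing still counts for rcu_bh. A stripped-down model of that decision (boolean flags stand in for the real context tests such as idle_cpu() and in_softirq()):

        #include <stdio.h>
        #include <stdbool.h>

        static void note_sched_qs(void) { printf("  rcu_sched QS\n"); }
        static void note_bh_qs(void)    { printf("  rcu_bh QS\n"); }

        /* user: interrupt taken from user mode or the idle loop.
         * in_softirq: interrupt arrived while softirq (BH) processing was running. */
        static void check_callbacks(bool user, bool in_softirq)
        {
                if (user) {
                        note_sched_qs();        /* outside any rcu or rcu_bh read-side section */
                        note_bh_qs();
                } else if (!in_softirq) {
                        note_bh_qs();           /* at least not inside an rcu_bh section */
                }
                /* (the preemptable flavor adds its own per-task check here) */
        }

        int main(void)
        {
                printf("from user mode:\n");     check_callbacks(true,  false);
                printf("from kernel, no BH:\n"); check_callbacks(false, false);
                printf("from softirq:\n");       check_callbacks(false, true);
                return 0;
        }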
@@ -1137,6 +1186,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1137 | { | 1186 | { |
1138 | unsigned long flags; | 1187 | unsigned long flags; |
1139 | 1188 | ||
1189 | WARN_ON_ONCE(rdp->beenonline == 0); | ||
1190 | |||
1140 | /* | 1191 | /* |
1141 | * If an RCU GP has gone long enough, go check for dyntick | 1192 | * If an RCU GP has gone long enough, go check for dyntick |
1142 | * idle CPUs and, if needed, send resched IPIs. | 1193 | * idle CPUs and, if needed, send resched IPIs. |
@@ -1175,8 +1226,10 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
1175 | */ | 1226 | */ |
1176 | smp_mb(); /* See above block comment. */ | 1227 | smp_mb(); /* See above block comment. */ |
1177 | 1228 | ||
1178 | __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data)); | 1229 | __rcu_process_callbacks(&rcu_sched_state, |
1230 | &__get_cpu_var(rcu_sched_data)); | ||
1179 | __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | 1231 | __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); |
1232 | rcu_preempt_process_callbacks(); | ||
1180 | 1233 | ||
1181 | /* | 1234 | /* |
1182 | * Memory references from any later RCU read-side critical sections | 1235 | * Memory references from any later RCU read-side critical sections |
@@ -1232,13 +1285,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1232 | } | 1285 | } |
1233 | 1286 | ||
1234 | /* | 1287 | /* |
1235 | * Queue an RCU callback for invocation after a grace period. | 1288 | * Queue an RCU-sched callback for invocation after a grace period. |
1236 | */ | 1289 | */ |
1237 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | 1290 | void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
1238 | { | 1291 | { |
1239 | __call_rcu(head, func, &rcu_state); | 1292 | __call_rcu(head, func, &rcu_sched_state); |
1240 | } | 1293 | } |
1241 | EXPORT_SYMBOL_GPL(call_rcu); | 1294 | EXPORT_SYMBOL_GPL(call_rcu_sched); |
1242 | 1295 | ||
1243 | /* | 1296 | /* |
1244 | * Queue an RCU for invocation after a quicker grace period. | 1297 | * Queue an RCU for invocation after a quicker grace period. |
@@ -1310,10 +1363,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1310 | * by the current CPU, returning 1 if so. This function is part of the | 1363 | * by the current CPU, returning 1 if so. This function is part of the |
1311 | * RCU implementation; it is -not- an exported member of the RCU API. | 1364 | * RCU implementation; it is -not- an exported member of the RCU API. |
1312 | */ | 1365 | */ |
1313 | int rcu_pending(int cpu) | 1366 | static int rcu_pending(int cpu) |
1314 | { | 1367 | { |
1315 | return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) || | 1368 | return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || |
1316 | __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)); | 1369 | __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) || |
1370 | rcu_preempt_pending(cpu); | ||
1317 | } | 1371 | } |
1318 | 1372 | ||
1319 | /* | 1373 | /* |
@@ -1325,27 +1379,46 @@ int rcu_pending(int cpu) | |||
1325 | int rcu_needs_cpu(int cpu) | 1379 | int rcu_needs_cpu(int cpu) |
1326 | { | 1380 | { |
1327 | /* RCU callbacks either ready or pending? */ | 1381 | /* RCU callbacks either ready or pending? */ |
1328 | return per_cpu(rcu_data, cpu).nxtlist || | 1382 | return per_cpu(rcu_sched_data, cpu).nxtlist || |
1329 | per_cpu(rcu_bh_data, cpu).nxtlist; | 1383 | per_cpu(rcu_bh_data, cpu).nxtlist || |
1384 | rcu_preempt_needs_cpu(cpu); | ||
1330 | } | 1385 | } |
1331 | 1386 | ||
1332 | /* | 1387 | /* |
1333 | * Initialize a CPU's per-CPU RCU data. We take this "scorched earth" | 1388 | * Do boot-time initialization of a CPU's per-CPU RCU data. |
1334 | * approach so that we don't have to worry about how long the CPU has | ||
1335 | * been gone, or whether it ever was online previously. We do trust the | ||
1336 | * ->mynode field, as it is constant for a given struct rcu_data and | ||
1337 | * initialized during early boot. | ||
1338 | * | ||
1339 | * Note that only one online or offline event can be happening at a given | ||
1340 | * time. Note also that we can accept some slop in the rsp->completed | ||
1341 | * access due to the fact that this CPU cannot possibly have any RCU | ||
1342 | * callbacks in flight yet. | ||
1343 | */ | 1389 | */ |
1344 | static void __cpuinit | 1390 | static void __init |
1345 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | 1391 | rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) |
1346 | { | 1392 | { |
1347 | unsigned long flags; | 1393 | unsigned long flags; |
1348 | int i; | 1394 | int i; |
1395 | struct rcu_data *rdp = rsp->rda[cpu]; | ||
1396 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
1397 | |||
1398 | /* Set up local state, ensuring consistent view of global state. */ | ||
1399 | spin_lock_irqsave(&rnp->lock, flags); | ||
1400 | rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); | ||
1401 | rdp->nxtlist = NULL; | ||
1402 | for (i = 0; i < RCU_NEXT_SIZE; i++) | ||
1403 | rdp->nxttail[i] = &rdp->nxtlist; | ||
1404 | rdp->qlen = 0; | ||
1405 | #ifdef CONFIG_NO_HZ | ||
1406 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); | ||
1407 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
1408 | rdp->cpu = cpu; | ||
1409 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
1410 | } | ||
1411 | |||
1412 | /* | ||
1413 | * Initialize a CPU's per-CPU RCU data. Note that only one online or | ||
1414 | * offline event can be happening at a given time. Note also that we | ||
1415 | * can accept some slop in the rsp->completed access due to the fact | ||
1416 | * that this CPU cannot possibly have any RCU callbacks in flight yet. | ||
1417 | */ | ||
1418 | static void __cpuinit | ||
1419 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | ||
1420 | { | ||
1421 | unsigned long flags; | ||
1349 | long lastcomp; | 1422 | long lastcomp; |
1350 | unsigned long mask; | 1423 | unsigned long mask; |
1351 | struct rcu_data *rdp = rsp->rda[cpu]; | 1424 | struct rcu_data *rdp = rsp->rda[cpu]; |
@@ -1359,17 +1432,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
1359 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | 1432 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ |
1360 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | 1433 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ |
1361 | rdp->beenonline = 1; /* We have now been online. */ | 1434 | rdp->beenonline = 1; /* We have now been online. */ |
1435 | rdp->preemptable = preemptable; | ||
1362 | rdp->passed_quiesc_completed = lastcomp - 1; | 1436 | rdp->passed_quiesc_completed = lastcomp - 1; |
1363 | rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); | ||
1364 | rdp->nxtlist = NULL; | ||
1365 | for (i = 0; i < RCU_NEXT_SIZE; i++) | ||
1366 | rdp->nxttail[i] = &rdp->nxtlist; | ||
1367 | rdp->qlen = 0; | ||
1368 | rdp->blimit = blimit; | 1437 | rdp->blimit = blimit; |
1369 | #ifdef CONFIG_NO_HZ | ||
1370 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); | ||
1371 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
1372 | rdp->cpu = cpu; | ||
1373 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1438 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
1374 | 1439 | ||
1375 | /* | 1440 | /* |
@@ -1410,16 +1475,16 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
1410 | 1475 | ||
1411 | static void __cpuinit rcu_online_cpu(int cpu) | 1476 | static void __cpuinit rcu_online_cpu(int cpu) |
1412 | { | 1477 | { |
1413 | rcu_init_percpu_data(cpu, &rcu_state); | 1478 | rcu_init_percpu_data(cpu, &rcu_sched_state, 0); |
1414 | rcu_init_percpu_data(cpu, &rcu_bh_state); | 1479 | rcu_init_percpu_data(cpu, &rcu_bh_state, 0); |
1415 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 1480 | rcu_preempt_init_percpu_data(cpu); |
1416 | } | 1481 | } |
1417 | 1482 | ||
1418 | /* | 1483 | /* |
1419 | * Handle CPU online/offline notifcation events. | 1484 | * Handle CPU online/offline notification events. |
1420 | */ | 1485 | */ |
1421 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | 1486 | int __cpuinit rcu_cpu_notify(struct notifier_block *self, |
1422 | unsigned long action, void *hcpu) | 1487 | unsigned long action, void *hcpu) |
1423 | { | 1488 | { |
1424 | long cpu = (long)hcpu; | 1489 | long cpu = (long)hcpu; |
1425 | 1490 | ||
@@ -1491,6 +1556,7 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1491 | rnp = rsp->level[i]; | 1556 | rnp = rsp->level[i]; |
1492 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { | 1557 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { |
1493 | spin_lock_init(&rnp->lock); | 1558 | spin_lock_init(&rnp->lock); |
1559 | rnp->gpnum = 0; | ||
1494 | rnp->qsmask = 0; | 1560 | rnp->qsmask = 0; |
1495 | rnp->qsmaskinit = 0; | 1561 | rnp->qsmaskinit = 0; |
1496 | rnp->grplo = j * cpustride; | 1562 | rnp->grplo = j * cpustride; |
@@ -1508,16 +1574,20 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1508 | j / rsp->levelspread[i - 1]; | 1574 | j / rsp->levelspread[i - 1]; |
1509 | } | 1575 | } |
1510 | rnp->level = i; | 1576 | rnp->level = i; |
1577 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); | ||
1578 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | ||
1511 | } | 1579 | } |
1512 | } | 1580 | } |
1513 | } | 1581 | } |
1514 | 1582 | ||
1515 | /* | 1583 | /* |
1516 | * Helper macro for __rcu_init(). To be used nowhere else! | 1584 | * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used |
1517 | * Assigns leaf node pointers into each CPU's rcu_data structure. | 1585 | * nowhere else! Assigns leaf node pointers into each CPU's rcu_data |
1586 | * structure. | ||
1518 | */ | 1587 | */ |
1519 | #define RCU_DATA_PTR_INIT(rsp, rcu_data) \ | 1588 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ |
1520 | do { \ | 1589 | do { \ |
1590 | rcu_init_one(rsp); \ | ||
1521 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ | 1591 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ |
1522 | j = 0; \ | 1592 | j = 0; \ |
1523 | for_each_possible_cpu(i) { \ | 1593 | for_each_possible_cpu(i) { \ |
@@ -1525,32 +1595,43 @@ do { \ | |||
1525 | j++; \ | 1595 | j++; \ |
1526 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ | 1596 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ |
1527 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ | 1597 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ |
1598 | rcu_boot_init_percpu_data(i, rsp); \ | ||
1528 | } \ | 1599 | } \ |
1529 | } while (0) | 1600 | } while (0) |
1530 | 1601 | ||
1531 | static struct notifier_block __cpuinitdata rcu_nb = { | 1602 | #ifdef CONFIG_TREE_PREEMPT_RCU |
1532 | .notifier_call = rcu_cpu_notify, | 1603 | |
1533 | }; | 1604 | void __init __rcu_init_preempt(void) |
1605 | { | ||
1606 | int i; /* All used by RCU_INIT_FLAVOR(). */ | ||
1607 | int j; | ||
1608 | struct rcu_node *rnp; | ||
1609 | |||
1610 | RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); | ||
1611 | } | ||
1612 | |||
1613 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
1614 | |||
1615 | void __init __rcu_init_preempt(void) | ||
1616 | { | ||
1617 | } | ||
1618 | |||
1619 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
1534 | 1620 | ||
1535 | void __init __rcu_init(void) | 1621 | void __init __rcu_init(void) |
1536 | { | 1622 | { |
1537 | int i; /* All used by RCU_DATA_PTR_INIT(). */ | 1623 | int i; /* All used by RCU_INIT_FLAVOR(). */ |
1538 | int j; | 1624 | int j; |
1539 | struct rcu_node *rnp; | 1625 | struct rcu_node *rnp; |
1540 | 1626 | ||
1541 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | 1627 | rcu_bootup_announce(); |
1542 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 1628 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
1543 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | 1629 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); |
1544 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 1630 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
1545 | rcu_init_one(&rcu_state); | 1631 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
1546 | RCU_DATA_PTR_INIT(&rcu_state, rcu_data); | 1632 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); |
1547 | rcu_init_one(&rcu_bh_state); | 1633 | __rcu_init_preempt(); |
1548 | RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data); | 1634 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
1549 | |||
1550 | for_each_online_cpu(i) | ||
1551 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i); | ||
1552 | /* Register notifier for non-boot CPUs */ | ||
1553 | register_cpu_notifier(&rcu_nb); | ||
1554 | } | 1635 | } |
1555 | 1636 | ||
1556 | module_param(blimit, int, 0); | 1637 | module_param(blimit, int, 0); |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 5e872bbf07f5..bf8a6f9f134d 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -1,10 +1,259 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | ||
3 | * Internal non-public definitions. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | * | ||
19 | * Copyright IBM Corporation, 2008 | ||
20 | * | ||
21 | * Author: Ingo Molnar <mingo@elte.hu> | ||
22 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> | ||
23 | */ | ||
24 | |||
25 | #include <linux/cache.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/threads.h> | ||
28 | #include <linux/cpumask.h> | ||
29 | #include <linux/seqlock.h> | ||
30 | |||
31 | /* | ||
32 | * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. | ||
33 | * In theory, it should be possible to add more levels straightforwardly. | ||
34 | * In practice, this has not been tested, so there is probably some | ||
35 | * bug somewhere. | ||
36 | */ | ||
37 | #define MAX_RCU_LVLS 3 | ||
38 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) | ||
39 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) | ||
40 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) | ||
41 | |||
42 | #if NR_CPUS <= RCU_FANOUT | ||
43 | # define NUM_RCU_LVLS 1 | ||
44 | # define NUM_RCU_LVL_0 1 | ||
45 | # define NUM_RCU_LVL_1 (NR_CPUS) | ||
46 | # define NUM_RCU_LVL_2 0 | ||
47 | # define NUM_RCU_LVL_3 0 | ||
48 | #elif NR_CPUS <= RCU_FANOUT_SQ | ||
49 | # define NUM_RCU_LVLS 2 | ||
50 | # define NUM_RCU_LVL_0 1 | ||
51 | # define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) | ||
52 | # define NUM_RCU_LVL_2 (NR_CPUS) | ||
53 | # define NUM_RCU_LVL_3 0 | ||
54 | #elif NR_CPUS <= RCU_FANOUT_CUBE | ||
55 | # define NUM_RCU_LVLS 3 | ||
56 | # define NUM_RCU_LVL_0 1 | ||
57 | # define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) | ||
58 | # define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) | ||
59 | # define NUM_RCU_LVL_3 NR_CPUS | ||
60 | #else | ||
61 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" | ||
62 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ | ||
63 | |||
64 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) | ||
65 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) | ||
66 | |||
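The geometry macros above derive the number of tree levels from NR_CPUS and CONFIG_RCU_FANOUT. As a worked example (values assumed for illustration only): with a fanout of 64 and NR_CPUS = 128, 128 exceeds 64 but not 64*64, so NUM_RCU_LVLS = 2, the root level holds 1 node, the leaf level holds (128+63)/64 = 2 nodes, RCU_SUM = 1 + 2 + 128 = 131, and NUM_RCU_NODES = 131 - 128 = 3. The same arithmetic in a few lines of C:

        #include <stdio.h>

        int main(void)
        {
                /* Assumed example values, not taken from any particular config. */
                const int nr_cpus   = 128;
                const int fanout    = 64;
                const int fanout_sq = fanout * fanout;

                if (nr_cpus <= fanout) {
                        printf("1 level: a single root rcu_node\n");
                } else if (nr_cpus <= fanout_sq) {
                        int lvl1 = (nr_cpus + fanout - 1) / fanout;     /* leaf node count */
                        int sum  = 1 + lvl1 + nr_cpus;                  /* RCU_SUM */
                        printf("2 levels: 1 root + %d leaves = %d rcu_node structures\n",
                               lvl1, sum - nr_cpus);                    /* NUM_RCU_NODES */
                } else {
                        printf("3 levels needed\n");
                }
                return 0;
        }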
67 | /* | ||
68 | * Dynticks per-CPU state. | ||
69 | */ | ||
70 | struct rcu_dynticks { | ||
71 | int dynticks_nesting; /* Track nesting level, sort of. */ | ||
72 | int dynticks; /* Even value for dynticks-idle, else odd. */ | ||
73 | int dynticks_nmi; /* Even value for either dynticks-idle or */ | ||
74 | /* not in nmi handler, else odd. So this */ | ||
75 | /* remains even for nmi from irq handler. */ | ||
76 | }; | ||
77 | |||
78 | /* | ||
79 | * Definition for node within the RCU grace-period-detection hierarchy. | ||
80 | */ | ||
81 | struct rcu_node { | ||
82 | spinlock_t lock; | ||
83 | long gpnum; /* Current grace period for this node. */ | ||
84 | /* This will either be equal to or one */ | ||
85 | /* behind the root rcu_node's gpnum. */ | ||
86 | unsigned long qsmask; /* CPUs or groups that need to switch in */ | ||
87 | /* order for current grace period to proceed.*/ | ||
88 | unsigned long qsmaskinit; | ||
89 | /* Per-GP initialization for qsmask. */ | ||
90 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ | ||
91 | int grplo; /* lowest-numbered CPU or group here. */ | ||
92 | int grphi; /* highest-numbered CPU or group here. */ | ||
93 | u8 grpnum; /* CPU/group number for next level up. */ | ||
94 | u8 level; /* root is at level 0. */ | ||
95 | struct rcu_node *parent; | ||
96 | struct list_head blocked_tasks[2]; | ||
97 | /* Tasks blocked in RCU read-side critsect. */ | ||
98 | } ____cacheline_internodealigned_in_smp; | ||
99 | |||
100 | /* Index values for nxttail array in struct rcu_data. */ | ||
101 | #define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ | ||
102 | #define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ | ||
103 | #define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ | ||
104 | #define RCU_NEXT_TAIL 3 | ||
105 | #define RCU_NEXT_SIZE 4 | ||
106 | |||
107 | /* Per-CPU data for read-copy update. */ | ||
108 | struct rcu_data { | ||
109 | /* 1) quiescent-state and grace-period handling : */ | ||
110 | long completed; /* Track rsp->completed gp number */ | ||
111 | /* in order to detect GP end. */ | ||
112 | long gpnum; /* Highest gp number that this CPU */ | ||
113 | /* is aware of having started. */ | ||
114 | long passed_quiesc_completed; | ||
115 | /* Value of completed at time of qs. */ | ||
116 | bool passed_quiesc; /* User-mode/idle loop etc. */ | ||
117 | bool qs_pending; /* Core waits for quiesc state. */ | ||
118 | bool beenonline; /* CPU online at least once. */ | ||
119 | bool preemptable; /* Preemptable RCU? */ | ||
120 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ | ||
121 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ | ||
122 | |||
123 | /* 2) batch handling */ | ||
124 | /* | ||
125 | * If nxtlist is not NULL, it is partitioned as follows. | ||
126 | * Any of the partitions might be empty, in which case the | ||
127 | * pointer to that partition will be equal to the pointer for | ||
128 | * the following partition. When the list is empty, all of | ||
129 | * the nxttail elements point to nxtlist, which is NULL. | ||
130 | * | ||
131 | * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]): | ||
132 | * Entries that might have arrived after current GP ended | ||
133 | * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): | ||
134 | * Entries known to have arrived before current GP ended | ||
135 | * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): | ||
136 | * Entries that batch # <= ->completed - 1: waiting for current GP | ||
137 | * [nxtlist, *nxttail[RCU_DONE_TAIL]): | ||
138 | * Entries that batch # <= ->completed | ||
139 | * The grace period for these entries has completed, and | ||
140 | * the other grace-period-completed entries may be moved | ||
141 | * here temporarily in rcu_process_callbacks(). | ||
142 | */ | ||
143 | struct rcu_head *nxtlist; | ||
144 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; | ||
145 | long qlen; /* # of queued callbacks */ | ||
146 | long blimit; /* Upper limit on a processed batch */ | ||
147 | |||
148 | #ifdef CONFIG_NO_HZ | ||
149 | /* 3) dynticks interface. */ | ||
150 | struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ | ||
151 | int dynticks_snap; /* Per-GP tracking for dynticks. */ | ||
152 | int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */ | ||
153 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
154 | |||
155 | /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ | ||
156 | #ifdef CONFIG_NO_HZ | ||
157 | unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ | ||
158 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
159 | unsigned long offline_fqs; /* Kicked due to being offline. */ | ||
160 | unsigned long resched_ipi; /* Sent a resched IPI. */ | ||
161 | |||
162 | /* 5) __rcu_pending() statistics. */ | ||
163 | long n_rcu_pending; /* rcu_pending() calls since boot. */ | ||
164 | long n_rp_qs_pending; | ||
165 | long n_rp_cb_ready; | ||
166 | long n_rp_cpu_needs_gp; | ||
167 | long n_rp_gp_completed; | ||
168 | long n_rp_gp_started; | ||
169 | long n_rp_need_fqs; | ||
170 | long n_rp_need_nothing; | ||
171 | |||
172 | int cpu; | ||
173 | }; | ||
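The nxtlist/nxttail layout documented in the comment above is a singly linked list carved into four segments by tail pointers: when the list is empty every nxttail[] entry points at nxtlist itself, and enqueueing always appends through *nxttail[RCU_NEXT_TAIL]. A small userspace model of that representation (no grace-period logic, just the pointer discipline; segment names abbreviated):

        #include <stdio.h>

        struct cb {
                struct cb *next;
                int id;
        };

        #define SEG_DONE 0      /* models RCU_DONE_TAIL */
        #define SEG_NEXT 3      /* models RCU_NEXT_TAIL */
        #define NSEG     4

        static struct cb *list;                 /* models rdp->nxtlist */
        static struct cb **tail[NSEG];          /* models rdp->nxttail[] */

        static void init_list(void)
        {
                int i;

                list = NULL;
                for (i = 0; i < NSEG; i++)
                        tail[i] = &list;        /* empty list: every tail points at the head */
        }

        static void enqueue(struct cb *p)
        {
                p->next = NULL;
                *tail[SEG_NEXT] = p;            /* append to the "next" segment...      */
                tail[SEG_NEXT] = &p->next;      /* ...and advance that segment's tail   */
        }

        int main(void)
        {
                struct cb a = { NULL, 1 }, b = { NULL, 2 };
                struct cb *p;

                init_list();
                enqueue(&a);
                enqueue(&b);
                /* The "done" segment is still empty: its tail never advanced. */
                printf("done segment empty: %d\n", tail[SEG_DONE] == &list);
                for (p = list; p; p = p->next)
                        printf("queued cb %d\n", p->id);
                return 0;
        }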
174 | |||
175 | /* Values for signaled field in struct rcu_state. */ | ||
176 | #define RCU_GP_INIT 0 /* Grace period being initialized. */ | ||
177 | #define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ | ||
178 | #define RCU_FORCE_QS 2 /* Need to force quiescent state. */ | ||
179 | #ifdef CONFIG_NO_HZ | ||
180 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK | ||
181 | #else /* #ifdef CONFIG_NO_HZ */ | ||
182 | #define RCU_SIGNAL_INIT RCU_FORCE_QS | ||
183 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
184 | |||
185 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ | ||
186 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
187 | #define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */ | ||
188 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */ | ||
189 | #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ | ||
190 | /* to take at least one */ | ||
191 | /* scheduling clock irq */ | ||
192 | /* before ratting on them. */ | ||
193 | |||
194 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
195 | |||
196 | /* | ||
197 | * RCU global state, including node hierarchy. This hierarchy is | ||
198 | * represented in "heap" form in a dense array. The root (first level) | ||
199 | * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second | ||
200 | * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]), | ||
201 | * and the third level in ->node[m+1] and following (->node[m+1] referenced | ||
202 | * by ->level[2]). The number of levels is determined by the number of | ||
203 | * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy" | ||
204 | * consisting of a single rcu_node. | ||
205 | */ | ||
206 | struct rcu_state { | ||
207 | struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ | ||
208 | struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ | ||
209 | u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ | ||
210 | u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ | ||
211 | struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ | ||
212 | |||
213 | /* The following fields are guarded by the root rcu_node's lock. */ | ||
214 | |||
215 | u8 signaled ____cacheline_internodealigned_in_smp; | ||
216 | /* Force QS state. */ | ||
217 | long gpnum; /* Current gp number. */ | ||
218 | long completed; /* # of last completed gp. */ | ||
219 | spinlock_t onofflock; /* exclude on/offline and */ | ||
220 | /* starting new GP. */ | ||
221 | spinlock_t fqslock; /* Only one task forcing */ | ||
222 | /* quiescent states. */ | ||
223 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | ||
224 | /* force_quiescent_state(). */ | ||
225 | unsigned long n_force_qs; /* Number of calls to */ | ||
226 | /* force_quiescent_state(). */ | ||
227 | unsigned long n_force_qs_lh; /* ~Number of calls leaving */ | ||
228 | /* due to lock unavailable. */ | ||
229 | unsigned long n_force_qs_ngp; /* Number of calls leaving */ | ||
230 | /* due to no GP active. */ | ||
231 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
232 | unsigned long gp_start; /* Time at which GP started, */ | ||
233 | /* but in jiffies. */ | ||
234 | unsigned long jiffies_stall; /* Time at which to check */ | ||
235 | /* for CPU stalls. */ | ||
236 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
237 | #ifdef CONFIG_NO_HZ | ||
238 | long dynticks_completed; /* Value of completed @ snap. */ | ||
239 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
240 | }; | ||
241 | |||
242 | #ifdef RCU_TREE_NONCORE | ||
1 | 243 | ||
2 | /* | 244 | /* |
3 | * RCU implementation internal declarations: | 245 | * RCU implementation internal declarations: |
4 | */ | 246 | */ |
5 | extern struct rcu_state rcu_state; | 247 | extern struct rcu_state rcu_sched_state; |
6 | DECLARE_PER_CPU(struct rcu_data, rcu_data); | 248 | DECLARE_PER_CPU(struct rcu_data, rcu_sched_data); |
7 | 249 | ||
8 | extern struct rcu_state rcu_bh_state; | 250 | extern struct rcu_state rcu_bh_state; |
9 | DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); | 251 | DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); |
10 | 252 | ||
253 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
254 | extern struct rcu_state rcu_preempt_state; | ||
255 | DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); | ||
256 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
257 | |||
258 | #endif /* #ifdef RCU_TREE_NONCORE */ | ||
259 | |||
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h new file mode 100644 index 000000000000..47789369ea59 --- /dev/null +++ b/kernel/rcutree_plugin.h | |||
@@ -0,0 +1,532 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | ||
3 | * Internal non-public definitions that provide either classic | ||
4 | * or preemptable semantics. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | * Copyright Red Hat, 2009 | ||
21 | * Copyright IBM Corporation, 2009 | ||
22 | * | ||
23 | * Author: Ingo Molnar <mingo@elte.hu> | ||
24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> | ||
25 | */ | ||
26 | |||
27 | |||
28 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
29 | |||
30 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); | ||
31 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); | ||
32 | |||
33 | /* | ||
34 | * Tell them what RCU they are running. | ||
35 | */ | ||
36 | static inline void rcu_bootup_announce(void) | ||
37 | { | ||
38 | printk(KERN_INFO | ||
39 | "Experimental preemptable hierarchical RCU implementation.\n"); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * Return the number of RCU-preempt batches processed thus far | ||
44 | * for debug and statistics. | ||
45 | */ | ||
46 | long rcu_batches_completed_preempt(void) | ||
47 | { | ||
48 | return rcu_preempt_state.completed; | ||
49 | } | ||
50 | EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt); | ||
51 | |||
52 | /* | ||
53 | * Return the number of RCU batches processed thus far for debug & stats. | ||
54 | */ | ||
55 | long rcu_batches_completed(void) | ||
56 | { | ||
57 | return rcu_batches_completed_preempt(); | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | ||
60 | |||
61 | /* | ||
62 | * Record a preemptable-RCU quiescent state for the specified CPU. Note | ||
63 | * that this just means that the task currently running on the CPU is | ||
64 | * not in a quiescent state. There might be any number of tasks blocked | ||
65 | * while in an RCU read-side critical section. | ||
66 | */ | ||
67 | static void rcu_preempt_qs_record(int cpu) | ||
68 | { | ||
69 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | ||
70 | rdp->passed_quiesc = 1; | ||
71 | rdp->passed_quiesc_completed = rdp->completed; | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * We have entered the scheduler or are between softirqs in ksoftirqd. | ||
76 | * If we are in an RCU read-side critical section, we need to reflect | ||
77 | * that in the state of the rcu_node structure corresponding to this CPU. | ||
78 | * Caller must disable hardirqs. | ||
79 | */ | ||
80 | static void rcu_preempt_qs(int cpu) | ||
81 | { | ||
82 | struct task_struct *t = current; | ||
83 | int phase; | ||
84 | struct rcu_data *rdp; | ||
85 | struct rcu_node *rnp; | ||
86 | |||
87 | if (t->rcu_read_lock_nesting && | ||
88 | (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { | ||
89 | |||
90 | /* Possibly blocking in an RCU read-side critical section. */ | ||
91 | rdp = rcu_preempt_state.rda[cpu]; | ||
92 | rnp = rdp->mynode; | ||
93 | spin_lock(&rnp->lock); | ||
94 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; | ||
95 | t->rcu_blocked_node = rnp; | ||
96 | |||
97 | /* | ||
98 | * If this CPU has already checked in, then this task | ||
99 | * will hold up the next grace period rather than the | ||
100 | * current grace period. Queue the task accordingly. | ||
101 | * If the task is queued for the current grace period | ||
102 | * (i.e., this CPU has not yet passed through a quiescent | ||
103 | * state for the current grace period), then as long | ||
104 | * as that task remains queued, the current grace period | ||
105 | * cannot end. | ||
106 | */ | ||
107 | phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1); | ||
108 | list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); | ||
109 | smp_mb(); /* Ensure later ctxt swtch seen after above. */ | ||
110 | spin_unlock(&rnp->lock); | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Either we were not in an RCU read-side critical section to | ||
115 | * begin with, or we have now recorded that critical section | ||
116 | * globally. Either way, we can now note a quiescent state | ||
117 | * for this CPU. Again, if we were in an RCU read-side critical | ||
118 | * section, and if that critical section was blocking the current | ||
119 | * grace period, then the fact that the task has been enqueued | ||
120 | * means that we continue to block the current grace period. | ||
121 | */ | ||
122 | rcu_preempt_qs_record(cpu); | ||
123 | t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS | | ||
124 | RCU_READ_UNLOCK_GOT_QS); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Tree-preemptable RCU implementation for rcu_read_lock(). | ||
129 | * Just increment ->rcu_read_lock_nesting, shared state will be updated | ||
130 | * if we block. | ||
131 | */ | ||
132 | void __rcu_read_lock(void) | ||
133 | { | ||
134 | ACCESS_ONCE(current->rcu_read_lock_nesting)++; | ||
135 | barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ | ||
136 | } | ||
137 | EXPORT_SYMBOL_GPL(__rcu_read_lock); | ||
138 | |||
139 | static void rcu_read_unlock_special(struct task_struct *t) | ||
140 | { | ||
141 | int empty; | ||
142 | unsigned long flags; | ||
143 | unsigned long mask; | ||
144 | struct rcu_node *rnp; | ||
145 | int special; | ||
146 | |||
147 | /* NMI handlers cannot block and cannot safely manipulate state. */ | ||
148 | if (in_nmi()) | ||
149 | return; | ||
150 | |||
151 | local_irq_save(flags); | ||
152 | |||
153 | /* | ||
154 | * If RCU core is waiting for this CPU to exit critical section, | ||
155 | * let it know that we have done so. | ||
156 | */ | ||
157 | special = t->rcu_read_unlock_special; | ||
158 | if (special & RCU_READ_UNLOCK_NEED_QS) { | ||
159 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | ||
160 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS; | ||
161 | } | ||
162 | |||
163 | /* Hardware IRQ handlers cannot block. */ | ||
164 | if (in_irq()) { | ||
165 | local_irq_restore(flags); | ||
166 | return; | ||
167 | } | ||
168 | |||
169 | /* Clean up if blocked during RCU read-side critical section. */ | ||
170 | if (special & RCU_READ_UNLOCK_BLOCKED) { | ||
171 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED; | ||
172 | |||
173 | /* | ||
174 | * Remove this task from the list it blocked on. The | ||
175 | * task can migrate while we acquire the lock, but at | ||
176 | * most one time. So at most two passes through loop. | ||
177 | */ | ||
178 | for (;;) { | ||
179 | rnp = t->rcu_blocked_node; | ||
180 | spin_lock(&rnp->lock); | ||
181 | if (rnp == t->rcu_blocked_node) | ||
182 | break; | ||
183 | spin_unlock(&rnp->lock); | ||
184 | } | ||
185 | empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); | ||
186 | list_del_init(&t->rcu_node_entry); | ||
187 | t->rcu_blocked_node = NULL; | ||
188 | |||
189 | /* | ||
190 | * If this was the last task on the current list, and if | ||
191 | * we aren't waiting on any CPUs, report the quiescent state. | ||
192 | * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk() | ||
193 | * drop rnp->lock and restore irq. | ||
194 | */ | ||
195 | if (!empty && rnp->qsmask == 0 && | ||
196 | list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) { | ||
197 | t->rcu_read_unlock_special &= | ||
198 | ~(RCU_READ_UNLOCK_NEED_QS | | ||
199 | RCU_READ_UNLOCK_GOT_QS); | ||
200 | if (rnp->parent == NULL) { | ||
201 | /* Only one rcu_node in the tree. */ | ||
202 | cpu_quiet_msk_finish(&rcu_preempt_state, flags); | ||
203 | return; | ||
204 | } | ||
205 | /* Report up the rest of the hierarchy. */ | ||
206 | mask = rnp->grpmask; | ||
207 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
208 | rnp = rnp->parent; | ||
209 | spin_lock_irqsave(&rnp->lock, flags); | ||
210 | cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags); | ||
211 | return; | ||
212 | } | ||
213 | spin_unlock(&rnp->lock); | ||
214 | } | ||
215 | local_irq_restore(flags); | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Tree-preemptable RCU implementation for rcu_read_unlock(). | ||
220 | * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost | ||
221 | * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then | ||
222 | * invoke rcu_read_unlock_special() to clean up after a context switch | ||
223 | * in an RCU read-side critical section and other special cases. | ||
224 | */ | ||
225 | void __rcu_read_unlock(void) | ||
226 | { | ||
227 | struct task_struct *t = current; | ||
228 | |||
229 | barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */ | ||
230 | if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 && | ||
231 | unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) | ||
232 | rcu_read_unlock_special(t); | ||
233 | } | ||
234 | EXPORT_SYMBOL_GPL(__rcu_read_unlock); | ||
235 | |||
236 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
237 | |||
238 | /* | ||
239 | * Scan the current list of tasks blocked within RCU read-side critical | ||
240 | * sections, printing out the tid of each. | ||
241 | */ | ||
242 | static void rcu_print_task_stall(struct rcu_node *rnp) | ||
243 | { | ||
244 | unsigned long flags; | ||
245 | struct list_head *lp; | ||
246 | int phase = rnp->gpnum & 0x1; | ||
247 | struct task_struct *t; | ||
248 | |||
249 | if (!list_empty(&rnp->blocked_tasks[phase])) { | ||
250 | spin_lock_irqsave(&rnp->lock, flags); | ||
251 | phase = rnp->gpnum & 0x1; /* re-read under lock. */ | ||
252 | lp = &rnp->blocked_tasks[phase]; | ||
253 | list_for_each_entry(t, lp, rcu_node_entry) | ||
254 | printk(" P%d", t->pid); | ||
255 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
260 | |||
261 | /* | ||
262 | * Check for preempted RCU readers for the specified rcu_node structure. | ||
263 | * If the caller needs a reliable answer, it must hold the rcu_node's | ||
264 | * ->lock. | ||
265 | */ | ||
266 | static int rcu_preempted_readers(struct rcu_node *rnp) | ||
267 | { | ||
268 | return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); | ||
269 | } | ||
270 | |||
271 | #ifdef CONFIG_HOTPLUG_CPU | ||
272 | |||
273 | /* | ||
274 | * Handle tasklist migration for case in which all CPUs covered by the | ||
275 | * specified rcu_node have gone offline. Move them up to the root | ||
276 | * rcu_node. The reason for not just moving them to the immediate | ||
277 | * parent is to remove the need for rcu_read_unlock_special() to | ||
278 | * make more than two attempts to acquire the target rcu_node's lock. | ||
279 | * | ||
280 | * The caller must hold rnp->lock with irqs disabled. | ||
281 | */ | ||
282 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | ||
283 | struct rcu_node *rnp) | ||
284 | { | ||
285 | int i; | ||
286 | struct list_head *lp; | ||
287 | struct list_head *lp_root; | ||
288 | struct rcu_node *rnp_root = rcu_get_root(rsp); | ||
289 | struct task_struct *tp; | ||
290 | |||
291 | if (rnp == rnp_root) { | ||
292 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | ||
293 | return; /* Shouldn't happen: at least one CPU online. */ | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * Move tasks up to root rcu_node. Rely on the fact that the | ||
298 | * root rcu_node can be at most one ahead of the rest of the | ||
299 | * rcu_nodes in terms of gpnum value. This fact allows us to | ||
300 | * move the blocked_tasks[] array directly, element by element. | ||
301 | */ | ||
302 | for (i = 0; i < 2; i++) { | ||
303 | lp = &rnp->blocked_tasks[i]; | ||
304 | lp_root = &rnp_root->blocked_tasks[i]; | ||
305 | while (!list_empty(lp)) { | ||
306 | tp = list_entry(lp->next, typeof(*tp), rcu_node_entry); | ||
307 | spin_lock(&rnp_root->lock); /* irqs already disabled */ | ||
308 | list_del(&tp->rcu_node_entry); | ||
309 | tp->rcu_blocked_node = rnp_root; | ||
310 | list_add(&tp->rcu_node_entry, lp_root); | ||
311 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ | ||
312 | } | ||
313 | } | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * Do CPU-offline processing for preemptable RCU. | ||
318 | */ | ||
319 | static void rcu_preempt_offline_cpu(int cpu) | ||
320 | { | ||
321 | __rcu_offline_cpu(cpu, &rcu_preempt_state); | ||
322 | } | ||
323 | |||
324 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
325 | |||
326 | /* | ||
327 | * Check for a quiescent state from the current CPU. When a task blocks, | ||
328 | * the task is recorded in the corresponding CPU's rcu_node structure, | ||
329 | * which is checked elsewhere. | ||
330 | * | ||
331 | * Caller must disable hard irqs. | ||
332 | */ | ||
333 | static void rcu_preempt_check_callbacks(int cpu) | ||
334 | { | ||
335 | struct task_struct *t = current; | ||
336 | |||
337 | if (t->rcu_read_lock_nesting == 0) { | ||
338 | t->rcu_read_unlock_special &= | ||
339 | ~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS); | ||
340 | rcu_preempt_qs_record(cpu); | ||
341 | return; | ||
342 | } | ||
343 | if (per_cpu(rcu_preempt_data, cpu).qs_pending) { | ||
344 | if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) { | ||
345 | rcu_preempt_qs_record(cpu); | ||
346 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS; | ||
347 | } else if (!(t->rcu_read_unlock_special & | ||
348 | RCU_READ_UNLOCK_NEED_QS)) { | ||
349 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; | ||
350 | } | ||
351 | } | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * Process callbacks for preemptable RCU. | ||
356 | */ | ||
357 | static void rcu_preempt_process_callbacks(void) | ||
358 | { | ||
359 | __rcu_process_callbacks(&rcu_preempt_state, | ||
360 | &__get_cpu_var(rcu_preempt_data)); | ||
361 | } | ||
362 | |||
363 | /* | ||
364 | * Queue a preemptable-RCU callback for invocation after a grace period. | ||
365 | */ | ||
366 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
367 | { | ||
368 | __call_rcu(head, func, &rcu_preempt_state); | ||
369 | } | ||
370 | EXPORT_SYMBOL_GPL(call_rcu); | ||
371 | |||
372 | /* | ||
373 | * Check to see if there is any immediate preemptable-RCU-related work | ||
374 | * to be done. | ||
375 | */ | ||
376 | static int rcu_preempt_pending(int cpu) | ||
377 | { | ||
378 | return __rcu_pending(&rcu_preempt_state, | ||
379 | &per_cpu(rcu_preempt_data, cpu)); | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * Does preemptable RCU need the CPU to stay out of dynticks mode? | ||
384 | */ | ||
385 | static int rcu_preempt_needs_cpu(int cpu) | ||
386 | { | ||
387 | return !!per_cpu(rcu_preempt_data, cpu).nxtlist; | ||
388 | } | ||
389 | |||
390 | /* | ||
391 | * Initialize preemptable RCU's per-CPU data. | ||
392 | */ | ||
393 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | ||
394 | { | ||
395 | rcu_init_percpu_data(cpu, &rcu_preempt_state, 1); | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * Check for a task exiting while in a preemptable-RCU read-side | ||
400 | * critical section, clean up if so. No need to issue warnings, | ||
401 | * as debug_check_no_locks_held() already does this if lockdep | ||
402 | * is enabled. | ||
403 | */ | ||
404 | void exit_rcu(void) | ||
405 | { | ||
406 | struct task_struct *t = current; | ||
407 | |||
408 | if (t->rcu_read_lock_nesting == 0) | ||
409 | return; | ||
410 | t->rcu_read_lock_nesting = 1; | ||
411 | rcu_read_unlock(); | ||
412 | } | ||
413 | |||
414 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
415 | |||
416 | /* | ||
417 | * Tell them what RCU they are running. | ||
418 | */ | ||
419 | static inline void rcu_bootup_announce(void) | ||
420 | { | ||
421 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | ||
422 | } | ||
423 | |||
424 | /* | ||
425 | * Return the number of RCU batches processed thus far for debug & stats. | ||
426 | */ | ||
427 | long rcu_batches_completed(void) | ||
428 | { | ||
429 | return rcu_batches_completed_sched(); | ||
430 | } | ||
431 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | ||
432 | |||
433 | /* | ||
434 | * Because preemptable RCU does not exist, we never have to check for | ||
435 | * CPUs being in quiescent states. | ||
436 | */ | ||
437 | static void rcu_preempt_qs(int cpu) | ||
438 | { | ||
439 | } | ||
440 | |||
441 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
442 | |||
443 | /* | ||
444 | * Because preemptable RCU does not exist, we never have to check for | ||
445 | * tasks blocked within RCU read-side critical sections. | ||
446 | */ | ||
447 | static void rcu_print_task_stall(struct rcu_node *rnp) | ||
448 | { | ||
449 | } | ||
450 | |||
451 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
452 | |||
453 | /* | ||
454 | * Because preemptable RCU does not exist, there are never any preempted | ||
455 | * RCU readers. | ||
456 | */ | ||
457 | static int rcu_preempted_readers(struct rcu_node *rnp) | ||
458 | { | ||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | #ifdef CONFIG_HOTPLUG_CPU | ||
463 | |||
464 | /* | ||
465 | * Because preemptable RCU does not exist, it never needs to migrate | ||
466 | * tasks that were blocked within RCU read-side critical sections. | ||
467 | */ | ||
468 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | ||
469 | struct rcu_node *rnp) | ||
470 | { | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * Because preemptable RCU does not exist, it never needs CPU-offline | ||
475 | * processing. | ||
476 | */ | ||
477 | static void rcu_preempt_offline_cpu(int cpu) | ||
478 | { | ||
479 | } | ||
480 | |||
481 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
482 | |||
483 | /* | ||
484 | * Because preemptable RCU does not exist, it never has any callbacks | ||
485 | * to check. | ||
486 | */ | ||
487 | void rcu_preempt_check_callbacks(int cpu) | ||
488 | { | ||
489 | } | ||
490 | |||
491 | /* | ||
492 | * Because preemptable RCU does not exist, it never has any callbacks | ||
493 | * to process. | ||
494 | */ | ||
495 | void rcu_preempt_process_callbacks(void) | ||
496 | { | ||
497 | } | ||
498 | |||
499 | /* | ||
500 | * In classic RCU, call_rcu() is just call_rcu_sched(). | ||
501 | */ | ||
502 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
503 | { | ||
504 | call_rcu_sched(head, func); | ||
505 | } | ||
506 | EXPORT_SYMBOL_GPL(call_rcu); | ||
507 | |||
508 | /* | ||
509 | * Because preemptable RCU does not exist, it never has any work to do. | ||
510 | */ | ||
511 | static int rcu_preempt_pending(int cpu) | ||
512 | { | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | /* | ||
517 | * Because preemptable RCU does not exist, it never needs any CPU. | ||
518 | */ | ||
519 | static int rcu_preempt_needs_cpu(int cpu) | ||
520 | { | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | /* | ||
525 | * Because preemptable RCU does not exist, there is no per-CPU | ||
526 | * data to initialize. | ||
527 | */ | ||
528 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | ||
529 | { | ||
530 | } | ||
531 | |||
532 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
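Note on the new rcutree_plugin.h above: a reader preempted inside rcu_read_lock() is queued on one of the two rnp->blocked_tasks[] lists. The phase computation in rcu_preempt_qs() picks the current-grace-period list if this CPU has not yet reported a quiescent state and the next-grace-period list otherwise, and rcu_preempted_readers() consults only the current-GP list. The following is a toy, single-CPU userspace model of that two-list bookkeeping, not the kernel code; all toy_* names are invented.

#include <stdbool.h>
#include <stdio.h>

struct toy_node {
	long gpnum;		/* number of the grace period in flight */
	bool cpu_checked_in;	/* has this CPU already reported a QS?  */
	int  nblocked[2];	/* readers queued per "phase" list      */
};

/* Mirrors: phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1) */
static int toy_phase(const struct toy_node *rnp)
{
	int checked_in = rnp->cpu_checked_in ? 1 : 0;

	return checked_in ^ (int)(rnp->gpnum & 0x1);
}

/* A reader was preempted inside its critical section: queue it. */
static void toy_preempt_reader(struct toy_node *rnp)
{
	rnp->nblocked[toy_phase(rnp)]++;
}

/* Mirrors rcu_preempted_readers(): does anything still block this GP? */
static bool toy_gp_blocked(const struct toy_node *rnp)
{
	return rnp->nblocked[rnp->gpnum & 0x1] != 0;
}

int main(void)
{
	struct toy_node rnp = { .gpnum = 7 };

	toy_preempt_reader(&rnp);	/* before the CPU's QS: blocks GP 7  */
	rnp.cpu_checked_in = true;	/* CPU reports its quiescent state   */
	toy_preempt_reader(&rnp);	/* after the QS: can only block GP 8 */

	printf("GP %ld blocked by %d reader(s); %d reader(s) deferred to GP %ld\n",
	       rnp.gpnum, rnp.nblocked[rnp.gpnum & 0x1],
	       rnp.nblocked[!(rnp.gpnum & 0x1)], rnp.gpnum + 1);
	printf("rcu_preempted_readers() analogue says: %s\n",
	       toy_gp_blocked(&rnp) ? "still blocked" : "clear");
	return 0;
}

Running the model shows one reader still charged to grace period 7 while the reader preempted after the CPU checked in is deferred to grace period 8, which is why the current grace period can end without waiting for it.
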
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index fe1dcdbf1ca3..0ea1bff69727 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/debugfs.h> | 43 | #include <linux/debugfs.h> |
44 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
45 | 45 | ||
46 | #define RCU_TREE_NONCORE | ||
46 | #include "rcutree.h" | 47 | #include "rcutree.h" |
47 | 48 | ||
48 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | 49 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) |
@@ -76,8 +77,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
76 | 77 | ||
77 | static int show_rcudata(struct seq_file *m, void *unused) | 78 | static int show_rcudata(struct seq_file *m, void *unused) |
78 | { | 79 | { |
79 | seq_puts(m, "rcu:\n"); | 80 | #ifdef CONFIG_TREE_PREEMPT_RCU |
80 | PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m); | 81 | seq_puts(m, "rcu_preempt:\n"); |
82 | PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m); | ||
83 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
84 | seq_puts(m, "rcu_sched:\n"); | ||
85 | PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m); | ||
81 | seq_puts(m, "rcu_bh:\n"); | 86 | seq_puts(m, "rcu_bh:\n"); |
82 | PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m); | 87 | PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m); |
83 | return 0; | 88 | return 0; |
@@ -102,7 +107,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) | |||
102 | return; | 107 | return; |
103 | seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d", | 108 | seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d", |
104 | rdp->cpu, | 109 | rdp->cpu, |
105 | cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"", | 110 | cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"", |
106 | rdp->completed, rdp->gpnum, | 111 | rdp->completed, rdp->gpnum, |
107 | rdp->passed_quiesc, rdp->passed_quiesc_completed, | 112 | rdp->passed_quiesc, rdp->passed_quiesc_completed, |
108 | rdp->qs_pending); | 113 | rdp->qs_pending); |
@@ -124,8 +129,12 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) | |||
124 | seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); | 129 | seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); |
125 | #endif /* #ifdef CONFIG_NO_HZ */ | 130 | #endif /* #ifdef CONFIG_NO_HZ */ |
126 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n"); | 131 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n"); |
127 | seq_puts(m, "\"rcu:\"\n"); | 132 | #ifdef CONFIG_TREE_PREEMPT_RCU |
128 | PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m); | 133 | seq_puts(m, "\"rcu_preempt:\"\n"); |
134 | PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); | ||
135 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
136 | seq_puts(m, "\"rcu_sched:\"\n"); | ||
137 | PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m); | ||
129 | seq_puts(m, "\"rcu_bh:\"\n"); | 138 | seq_puts(m, "\"rcu_bh:\"\n"); |
130 | PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m); | 139 | PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m); |
131 | return 0; | 140 | return 0; |
@@ -171,8 +180,12 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | |||
171 | 180 | ||
172 | static int show_rcuhier(struct seq_file *m, void *unused) | 181 | static int show_rcuhier(struct seq_file *m, void *unused) |
173 | { | 182 | { |
174 | seq_puts(m, "rcu:\n"); | 183 | #ifdef CONFIG_TREE_PREEMPT_RCU |
175 | print_one_rcu_state(m, &rcu_state); | 184 | seq_puts(m, "rcu_preempt:\n"); |
185 | print_one_rcu_state(m, &rcu_preempt_state); | ||
186 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
187 | seq_puts(m, "rcu_sched:\n"); | ||
188 | print_one_rcu_state(m, &rcu_sched_state); | ||
176 | seq_puts(m, "rcu_bh:\n"); | 189 | seq_puts(m, "rcu_bh:\n"); |
177 | print_one_rcu_state(m, &rcu_bh_state); | 190 | print_one_rcu_state(m, &rcu_bh_state); |
178 | return 0; | 191 | return 0; |
@@ -193,8 +206,12 @@ static struct file_operations rcuhier_fops = { | |||
193 | 206 | ||
194 | static int show_rcugp(struct seq_file *m, void *unused) | 207 | static int show_rcugp(struct seq_file *m, void *unused) |
195 | { | 208 | { |
196 | seq_printf(m, "rcu: completed=%ld gpnum=%ld\n", | 209 | #ifdef CONFIG_TREE_PREEMPT_RCU |
197 | rcu_state.completed, rcu_state.gpnum); | 210 | seq_printf(m, "rcu_preempt: completed=%ld gpnum=%ld\n", |
211 | rcu_preempt_state.completed, rcu_preempt_state.gpnum); | ||
212 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
213 | seq_printf(m, "rcu_sched: completed=%ld gpnum=%ld\n", | ||
214 | rcu_sched_state.completed, rcu_sched_state.gpnum); | ||
198 | seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n", | 215 | seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n", |
199 | rcu_bh_state.completed, rcu_bh_state.gpnum); | 216 | rcu_bh_state.completed, rcu_bh_state.gpnum); |
200 | return 0; | 217 | return 0; |
@@ -243,8 +260,12 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp) | |||
243 | 260 | ||
244 | static int show_rcu_pending(struct seq_file *m, void *unused) | 261 | static int show_rcu_pending(struct seq_file *m, void *unused) |
245 | { | 262 | { |
246 | seq_puts(m, "rcu:\n"); | 263 | #ifdef CONFIG_TREE_PREEMPT_RCU |
247 | print_rcu_pendings(m, &rcu_state); | 264 | seq_puts(m, "rcu_preempt:\n"); |
265 | print_rcu_pendings(m, &rcu_preempt_state); | ||
266 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
267 | seq_puts(m, "rcu_sched:\n"); | ||
268 | print_rcu_pendings(m, &rcu_sched_state); | ||
248 | seq_puts(m, "rcu_bh:\n"); | 269 | seq_puts(m, "rcu_bh:\n"); |
249 | print_rcu_pendings(m, &rcu_bh_state); | 270 | print_rcu_pendings(m, &rcu_bh_state); |
250 | return 0; | 271 | return 0; |
@@ -264,62 +285,47 @@ static struct file_operations rcu_pending_fops = { | |||
264 | }; | 285 | }; |
265 | 286 | ||
266 | static struct dentry *rcudir; | 287 | static struct dentry *rcudir; |
267 | static struct dentry *datadir; | ||
268 | static struct dentry *datadir_csv; | ||
269 | static struct dentry *gpdir; | ||
270 | static struct dentry *hierdir; | ||
271 | static struct dentry *rcu_pendingdir; | ||
272 | 288 | ||
273 | static int __init rcuclassic_trace_init(void) | 289 | static int __init rcuclassic_trace_init(void) |
274 | { | 290 | { |
291 | struct dentry *retval; | ||
292 | |||
275 | rcudir = debugfs_create_dir("rcu", NULL); | 293 | rcudir = debugfs_create_dir("rcu", NULL); |
276 | if (!rcudir) | 294 | if (!rcudir) |
277 | goto out; | 295 | goto free_out; |
278 | 296 | ||
279 | datadir = debugfs_create_file("rcudata", 0444, rcudir, | 297 | retval = debugfs_create_file("rcudata", 0444, rcudir, |
280 | NULL, &rcudata_fops); | 298 | NULL, &rcudata_fops); |
281 | if (!datadir) | 299 | if (!retval) |
282 | goto free_out; | 300 | goto free_out; |
283 | 301 | ||
284 | datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir, | 302 | retval = debugfs_create_file("rcudata.csv", 0444, rcudir, |
285 | NULL, &rcudata_csv_fops); | 303 | NULL, &rcudata_csv_fops); |
286 | if (!datadir_csv) | 304 | if (!retval) |
287 | goto free_out; | 305 | goto free_out; |
288 | 306 | ||
289 | gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); | 307 | retval = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); |
290 | if (!gpdir) | 308 | if (!retval) |
291 | goto free_out; | 309 | goto free_out; |
292 | 310 | ||
293 | hierdir = debugfs_create_file("rcuhier", 0444, rcudir, | 311 | retval = debugfs_create_file("rcuhier", 0444, rcudir, |
294 | NULL, &rcuhier_fops); | 312 | NULL, &rcuhier_fops); |
295 | if (!hierdir) | 313 | if (!retval) |
296 | goto free_out; | 314 | goto free_out; |
297 | 315 | ||
298 | rcu_pendingdir = debugfs_create_file("rcu_pending", 0444, rcudir, | 316 | retval = debugfs_create_file("rcu_pending", 0444, rcudir, |
299 | NULL, &rcu_pending_fops); | 317 | NULL, &rcu_pending_fops); |
300 | if (!rcu_pendingdir) | 318 | if (!retval) |
301 | goto free_out; | 319 | goto free_out; |
302 | return 0; | 320 | return 0; |
303 | free_out: | 321 | free_out: |
304 | if (datadir) | 322 | debugfs_remove_recursive(rcudir); |
305 | debugfs_remove(datadir); | ||
306 | if (datadir_csv) | ||
307 | debugfs_remove(datadir_csv); | ||
308 | if (gpdir) | ||
309 | debugfs_remove(gpdir); | ||
310 | debugfs_remove(rcudir); | ||
311 | out: | ||
312 | return 1; | 323 | return 1; |
313 | } | 324 | } |
314 | 325 | ||
315 | static void __exit rcuclassic_trace_cleanup(void) | 326 | static void __exit rcuclassic_trace_cleanup(void) |
316 | { | 327 | { |
317 | debugfs_remove(datadir); | 328 | debugfs_remove_recursive(rcudir); |
318 | debugfs_remove(datadir_csv); | ||
319 | debugfs_remove(gpdir); | ||
320 | debugfs_remove(hierdir); | ||
321 | debugfs_remove(rcu_pendingdir); | ||
322 | debugfs_remove(rcudir); | ||
323 | } | 329 | } |
324 | 330 | ||
325 | 331 | ||
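Note on the rcutree_trace.c hunks above: the error path and module exit now rely on a single debugfs_remove_recursive() of the top-level directory instead of five individually tracked dentries. A hedged sketch of the same pattern for a hypothetical module follows; the "mydrv" name, the "stats" file, and all mydrv_* helpers are invented, while the debugfs and seq_file calls are the standard in-kernel APIs used in the hunk.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *mydrv_dir;

static int mydrv_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "hello from mydrv\n");
	return 0;
}

static int mydrv_open(struct inode *inode, struct file *file)
{
	return single_open(file, mydrv_show, NULL);
}

static struct file_operations mydrv_stats_fops = {
	.open    = mydrv_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init mydrv_trace_init(void)
{
	mydrv_dir = debugfs_create_dir("mydrv", NULL);
	if (!mydrv_dir)
		return -ENOMEM;
	if (!debugfs_create_file("stats", 0444, mydrv_dir, NULL,
				 &mydrv_stats_fops))
		goto free_out;
	return 0;
free_out:
	/* One recursive removal replaces per-dentry bookkeeping. */
	debugfs_remove_recursive(mydrv_dir);
	return -ENOMEM;
}

static void __exit mydrv_trace_cleanup(void)
{
	debugfs_remove_recursive(mydrv_dir);
}

module_init(mydrv_trace_init);
module_exit(mydrv_trace_cleanup);
MODULE_LICENSE("GPL");
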
diff --git a/kernel/sched.c b/kernel/sched.c index 2c75f7daa439..4066241ae9f4 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -5325,7 +5325,7 @@ need_resched: | |||
5325 | preempt_disable(); | 5325 | preempt_disable(); |
5326 | cpu = smp_processor_id(); | 5326 | cpu = smp_processor_id(); |
5327 | rq = cpu_rq(cpu); | 5327 | rq = cpu_rq(cpu); |
5328 | rcu_qsctr_inc(cpu); | 5328 | rcu_sched_qs(cpu); |
5329 | prev = rq->curr; | 5329 | prev = rq->curr; |
5330 | switch_count = &prev->nivcsw; | 5330 | switch_count = &prev->nivcsw; |
5331 | 5331 | ||
@@ -7053,6 +7053,11 @@ fail: | |||
7053 | return ret; | 7053 | return ret; |
7054 | } | 7054 | } |
7055 | 7055 | ||
7056 | #define RCU_MIGRATION_IDLE 0 | ||
7057 | #define RCU_MIGRATION_NEED_QS 1 | ||
7058 | #define RCU_MIGRATION_GOT_QS 2 | ||
7059 | #define RCU_MIGRATION_MUST_SYNC 3 | ||
7060 | |||
7056 | /* | 7061 | /* |
7057 | * migration_thread - this is a highprio system thread that performs | 7062 | * migration_thread - this is a highprio system thread that performs |
7058 | * thread migration by bumping thread off CPU then 'pushing' onto | 7063 | * thread migration by bumping thread off CPU then 'pushing' onto |
@@ -7060,6 +7065,7 @@ fail: | |||
7060 | */ | 7065 | */ |
7061 | static int migration_thread(void *data) | 7066 | static int migration_thread(void *data) |
7062 | { | 7067 | { |
7068 | int badcpu; | ||
7063 | int cpu = (long)data; | 7069 | int cpu = (long)data; |
7064 | struct rq *rq; | 7070 | struct rq *rq; |
7065 | 7071 | ||
@@ -7094,8 +7100,17 @@ static int migration_thread(void *data) | |||
7094 | req = list_entry(head->next, struct migration_req, list); | 7100 | req = list_entry(head->next, struct migration_req, list); |
7095 | list_del_init(head->next); | 7101 | list_del_init(head->next); |
7096 | 7102 | ||
7097 | spin_unlock(&rq->lock); | 7103 | if (req->task != NULL) { |
7098 | __migrate_task(req->task, cpu, req->dest_cpu); | 7104 | spin_unlock(&rq->lock); |
7105 | __migrate_task(req->task, cpu, req->dest_cpu); | ||
7106 | } else if (likely(cpu == (badcpu = smp_processor_id()))) { | ||
7107 | req->dest_cpu = RCU_MIGRATION_GOT_QS; | ||
7108 | spin_unlock(&rq->lock); | ||
7109 | } else { | ||
7110 | req->dest_cpu = RCU_MIGRATION_MUST_SYNC; | ||
7111 | spin_unlock(&rq->lock); | ||
7112 | WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); | ||
7113 | } | ||
7099 | local_irq_enable(); | 7114 | local_irq_enable(); |
7100 | 7115 | ||
7101 | complete(&req->done); | 7116 | complete(&req->done); |
@@ -10583,3 +10598,113 @@ struct cgroup_subsys cpuacct_subsys = { | |||
10583 | .subsys_id = cpuacct_subsys_id, | 10598 | .subsys_id = cpuacct_subsys_id, |
10584 | }; | 10599 | }; |
10585 | #endif /* CONFIG_CGROUP_CPUACCT */ | 10600 | #endif /* CONFIG_CGROUP_CPUACCT */ |
10601 | |||
10602 | #ifndef CONFIG_SMP | ||
10603 | |||
10604 | int rcu_expedited_torture_stats(char *page) | ||
10605 | { | ||
10606 | return 0; | ||
10607 | } | ||
10608 | EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats); | ||
10609 | |||
10610 | void synchronize_sched_expedited(void) | ||
10611 | { | ||
10612 | } | ||
10613 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | ||
10614 | |||
10615 | #else /* #ifndef CONFIG_SMP */ | ||
10616 | |||
10617 | static DEFINE_PER_CPU(struct migration_req, rcu_migration_req); | ||
10618 | static DEFINE_MUTEX(rcu_sched_expedited_mutex); | ||
10619 | |||
10620 | #define RCU_EXPEDITED_STATE_POST -2 | ||
10621 | #define RCU_EXPEDITED_STATE_IDLE -1 | ||
10622 | |||
10623 | static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | ||
10624 | |||
10625 | int rcu_expedited_torture_stats(char *page) | ||
10626 | { | ||
10627 | int cnt = 0; | ||
10628 | int cpu; | ||
10629 | |||
10630 | cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state); | ||
10631 | for_each_online_cpu(cpu) { | ||
10632 | cnt += sprintf(&page[cnt], " %d:%d", | ||
10633 | cpu, per_cpu(rcu_migration_req, cpu).dest_cpu); | ||
10634 | } | ||
10635 | cnt += sprintf(&page[cnt], "\n"); | ||
10636 | return cnt; | ||
10637 | } | ||
10638 | EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats); | ||
10639 | |||
10640 | static long synchronize_sched_expedited_count; | ||
10641 | |||
10642 | /* | ||
10643 | * Wait for an rcu-sched grace period to elapse, but use "big hammer" | ||
10644 | * approach to force grace period to end quickly. This consumes | ||
10645 | * significant time on all CPUs, and is thus not recommended for | ||
10646 | * any sort of common-case code. | ||
10647 | * | ||
10648 | * Note that it is illegal to call this function while holding any | ||
10649 | * lock that is acquired by a CPU-hotplug notifier. Failing to | ||
10650 | * observe this restriction will result in deadlock. | ||
10651 | */ | ||
10652 | void synchronize_sched_expedited(void) | ||
10653 | { | ||
10654 | int cpu; | ||
10655 | unsigned long flags; | ||
10656 | bool need_full_sync = 0; | ||
10657 | struct rq *rq; | ||
10658 | struct migration_req *req; | ||
10659 | long snap; | ||
10660 | int trycount = 0; | ||
10661 | |||
10662 | smp_mb(); /* ensure prior mod happens before capturing snap. */ | ||
10663 | snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1; | ||
10664 | get_online_cpus(); | ||
10665 | while (!mutex_trylock(&rcu_sched_expedited_mutex)) { | ||
10666 | put_online_cpus(); | ||
10667 | if (trycount++ < 10) | ||
10668 | udelay(trycount * num_online_cpus()); | ||
10669 | else { | ||
10670 | synchronize_sched(); | ||
10671 | return; | ||
10672 | } | ||
10673 | if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) { | ||
10674 | smp_mb(); /* ensure test happens before caller kfree */ | ||
10675 | return; | ||
10676 | } | ||
10677 | get_online_cpus(); | ||
10678 | } | ||
10679 | rcu_expedited_state = RCU_EXPEDITED_STATE_POST; | ||
10680 | for_each_online_cpu(cpu) { | ||
10681 | rq = cpu_rq(cpu); | ||
10682 | req = &per_cpu(rcu_migration_req, cpu); | ||
10683 | init_completion(&req->done); | ||
10684 | req->task = NULL; | ||
10685 | req->dest_cpu = RCU_MIGRATION_NEED_QS; | ||
10686 | spin_lock_irqsave(&rq->lock, flags); | ||
10687 | list_add(&req->list, &rq->migration_queue); | ||
10688 | spin_unlock_irqrestore(&rq->lock, flags); | ||
10689 | wake_up_process(rq->migration_thread); | ||
10690 | } | ||
10691 | for_each_online_cpu(cpu) { | ||
10692 | rcu_expedited_state = cpu; | ||
10693 | req = &per_cpu(rcu_migration_req, cpu); | ||
10694 | rq = cpu_rq(cpu); | ||
10695 | wait_for_completion(&req->done); | ||
10696 | spin_lock_irqsave(&rq->lock, flags); | ||
10697 | if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) | ||
10698 | need_full_sync = 1; | ||
10699 | req->dest_cpu = RCU_MIGRATION_IDLE; | ||
10700 | spin_unlock_irqrestore(&rq->lock, flags); | ||
10701 | } | ||
10702 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | ||
10703 | mutex_unlock(&rcu_sched_expedited_mutex); | ||
10704 | put_online_cpus(); | ||
10705 | if (need_full_sync) | ||
10706 | synchronize_sched(); | ||
10707 | } | ||
10708 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | ||
10709 | |||
10710 | #endif /* #else #ifndef CONFIG_SMP */ | ||
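Note on the sched.c hunk above: synchronize_sched_expedited() combines a counter snapshot with mutex_trylock(). A contended caller backs off, falls back to synchronize_sched() after about ten attempts, and returns early if the completion counter has advanced past its snapshot, since that means a full expedited pass began after the caller's updates and therefore covers them. Below is a userspace model of that snapshot/trylock idiom only; the names are invented, the increment site is part of the model rather than taken from the hunk, and the fallback is just a message. Compile with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;
static long exp_completed;	/* bumped when an expedited pass finishes */

static long read_completed(void)
{
	return __sync_fetch_and_add(&exp_completed, 0);	/* atomic read */
}

static void expedited_sync(void)
{
	long snap = read_completed() + 1;
	int trycount = 0;

	while (pthread_mutex_trylock(&exp_mutex) != 0) {
		if (trycount++ < 10) {
			usleep(10 * trycount);	/* brief backoff before retry */
		} else {
			puts("fallback: do a full, non-expedited wait instead");
			return;
		}
		if (read_completed() - snap > 0) {
			puts("a pass that began after our snapshot covers us");
			return;
		}
	}
	/* ...force every CPU through a quiescent state here... */
	__sync_fetch_and_add(&exp_completed, 1);
	pthread_mutex_unlock(&exp_mutex);
	puts("ran our own expedited pass");
}

int main(void)
{
	expedited_sync();	/* uncontended: runs its own pass */
	expedited_sync();
	printf("expedited passes completed: %ld\n", exp_completed);
	return 0;
}

The check requires the counter to move strictly past the snapshot because a pass already underway when the snapshot was taken may have started before the caller's updates and so cannot be trusted to cover them.
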
diff --git a/kernel/softirq.c b/kernel/softirq.c index eb5e131a0485..7db25067cd2d 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -227,7 +227,7 @@ restart: | |||
227 | preempt_count() = prev_count; | 227 | preempt_count() = prev_count; |
228 | } | 228 | } |
229 | 229 | ||
230 | rcu_bh_qsctr_inc(cpu); | 230 | rcu_bh_qs(cpu); |
231 | } | 231 | } |
232 | h++; | 232 | h++; |
233 | pending >>= 1; | 233 | pending >>= 1; |
@@ -721,7 +721,7 @@ static int ksoftirqd(void * __bind_cpu) | |||
721 | preempt_enable_no_resched(); | 721 | preempt_enable_no_resched(); |
722 | cond_resched(); | 722 | cond_resched(); |
723 | preempt_disable(); | 723 | preempt_disable(); |
724 | rcu_qsctr_inc((long)__bind_cpu); | 724 | rcu_sched_qs((long)__bind_cpu); |
725 | } | 725 | } |
726 | preempt_enable(); | 726 | preempt_enable(); |
727 | set_current_state(TASK_INTERRUPTIBLE); | 727 | set_current_state(TASK_INTERRUPTIBLE); |
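Note on the softirq.c hunks above: they only rename the quiescent-state hooks (rcu_qsctr_inc() becomes rcu_sched_qs(), rcu_bh_qsctr_inc() becomes rcu_bh_qs()). The hooks stay cheap because they merely stamp per-CPU state that the grace-period machinery inspects later. Below is a toy userspace model of that record-then-check split, loosely patterned on the passed_quiesc/passed_quiesc_completed fields seen earlier in this diff; all toy_* names are invented.

#include <stdbool.h>
#include <stdio.h>

#define NR_TOY_CPUS 4

struct toy_rcu_data {
	bool passed_quiesc;		/* saw a QS since the GP started? */
	long passed_quiesc_completed;	/* ->completed when QS was noted  */
	long completed;			/* last GP this CPU saw complete  */
};

static struct toy_rcu_data toy_data[NR_TOY_CPUS];

/* Called from context-switch / softirq paths: cheap, lock-free. */
static void toy_sched_qs(int cpu)
{
	struct toy_rcu_data *rdp = &toy_data[cpu];

	rdp->passed_quiesc = true;
	rdp->passed_quiesc_completed = rdp->completed;
}

/* Called by the grace-period machinery: was the QS noted for *this* GP? */
static bool toy_qs_counts(int cpu, long gp_completed_at_start)
{
	struct toy_rcu_data *rdp = &toy_data[cpu];

	return rdp->passed_quiesc &&
	       rdp->passed_quiesc_completed == gp_completed_at_start;
}

int main(void)
{
	toy_data[1].completed = 41;	/* CPU 1 last saw GP 41 complete */
	toy_sched_qs(1);		/* context switch on CPU 1       */
	printf("QS counts for the current GP: %s\n",
	       toy_qs_counts(1, 41) ? "yes" : "no");
	printf("QS counts for a later GP:     %s\n",
	       toy_qs_counts(1, 42) ? "yes" : "no");
	return 0;
}
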
diff --git a/kernel/timer.c b/kernel/timer.c index a7f07d5a6241..a3d25f415019 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1156,8 +1156,7 @@ void update_process_times(int user_tick) | |||
1156 | /* Note: this timer irq context must be accounted for as well. */ | 1156 | /* Note: this timer irq context must be accounted for as well. */ |
1157 | account_process_tick(p, user_tick); | 1157 | account_process_tick(p, user_tick); |
1158 | run_local_timers(); | 1158 | run_local_timers(); |
1159 | if (rcu_pending(cpu)) | 1159 | rcu_check_callbacks(cpu, user_tick); |
1160 | rcu_check_callbacks(cpu, user_tick); | ||
1161 | printk_tick(); | 1160 | printk_tick(); |
1162 | scheduler_tick(); | 1161 | scheduler_tick(); |
1163 | run_posix_cpu_timers(p); | 1162 | run_posix_cpu_timers(p); |
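Note on the timer.c hunk above: the rcu_pending() test is removed from update_process_times(); with the simplified API, rcu_check_callbacks() is invoked on every tick and decides internally whether to raise the RCU softirq. A small userspace model of that caller/callee split follows; the toy_* names are invented and this is not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

static bool toy_rcu_pending(int cpu)
{
	(void)cpu;
	return true;		/* pretend this CPU has queued callbacks */
}

static void toy_raise_rcu_softirq(void)
{
	puts("RCU softirq raised");
}

/* New-style entry point: one call from the tick, decision made inside. */
static void toy_rcu_check_callbacks(int cpu, int user_tick)
{
	/* ...note user/idle quiescent states based on user_tick here... */
	(void)user_tick;
	if (toy_rcu_pending(cpu))
		toy_raise_rcu_softirq();
}

static void toy_update_process_times(int user_tick)
{
	toy_rcu_check_callbacks(0, user_tick);	/* unconditional call */
}

int main(void)
{
	toy_update_process_times(1);
	return 0;
}
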