Diffstat (limited to 'kernel')
-rw-r--r--   kernel/rcuclassic.c        331
-rw-r--r--   kernel/rcupreempt.c          8
-rw-r--r--   kernel/rcupreempt_trace.c    7
3 files changed, 251 insertions, 95 deletions
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index aad93cdc9f68..ed15128ca2c9 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -47,6 +47,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/mutex.h>
+#include <linux/time.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -60,12 +61,14 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
+	.pending = -300,
 	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
 	.cpumask = CPU_MASK_NONE,
 };
 static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
+	.pending = -300,
 	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
 	.cpumask = CPU_MASK_NONE,
 };
@@ -83,7 +86,10 @@ static void force_quiescent_state(struct rcu_data *rdp,
 {
 	int cpu;
 	cpumask_t cpumask;
+	unsigned long flags;
+
 	set_need_resched();
+	spin_lock_irqsave(&rcp->lock, flags);
 	if (unlikely(!rcp->signaled)) {
 		rcp->signaled = 1;
 		/*
@@ -109,6 +115,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		for_each_cpu_mask_nr(cpu, cpumask)
 			smp_send_reschedule(cpu);
 	}
+	spin_unlock_irqrestore(&rcp->lock, flags);
 }
 #else
 static inline void force_quiescent_state(struct rcu_data *rdp,
@@ -118,6 +125,45 @@ static inline void force_quiescent_state(struct rcu_data *rdp,
 }
 #endif
 
+static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
+				struct rcu_data *rdp)
+{
+	long batch;
+
+	head->next = NULL;
+	smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
+
+	/*
+	 * Determine the batch number of this callback.
+	 *
+	 * Using ACCESS_ONCE to avoid the following error when gcc eliminates
+	 * local variable "batch" and emits code like this:
+	 *	1) rdp->batch = rcp->cur + 1 # gets old value
+	 *	......
+	 *	2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
+	 * then [*nxttail[0], *nxttail[1]) may contain callbacks
+	 * whose batch# == rdp->batch; see the comment in struct rcu_data.
+	 */
+	batch = ACCESS_ONCE(rcp->cur) + 1;
+
+	if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
+		/* process callbacks */
+		rdp->nxttail[0] = rdp->nxttail[1];
+		rdp->nxttail[1] = rdp->nxttail[2];
+		if (rcu_batch_after(batch - 1, rdp->batch))
+			rdp->nxttail[0] = rdp->nxttail[2];
+	}
+
+	rdp->batch = batch;
+	*rdp->nxttail[2] = head;
+	rdp->nxttail[2] = &head->next;
+
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_ctrlblk);
+	}
+}
+
 /**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
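The __call_rcu() hunk above replaces the old single nxttail pointer with a three-segment callback queue: one nxtlist partitioned by nxttail[0..2] into callbacks whose grace period has already elapsed, callbacks waiting on the current batch, and callbacks assigned to the next batch. The stand-alone C sketch below models only that pointer bookkeeping in user space so the segment shuffle is easier to follow; struct seg_queue, its helper names, and the plain > comparison (which ignores the counter wrap that rcu_batch_after() handles) are illustrative assumptions, not kernel code.

/*
 * Minimal user-space sketch (not kernel code) of the segmented callback
 * queue that __call_rcu() manipulates: one singly linked list (nxtlist)
 * partitioned by three tail pointers nxttail[0..2].
 */
#include <stdio.h>
#include <stdlib.h>

struct cb {
	struct cb *next;
	int id;
};

struct seg_queue {
	struct cb *nxtlist;	/* head of the whole list */
	struct cb **nxttail[3];	/* ends of the three segments */
	long batch;		/* batch number of the newest segment */
};

static void seg_queue_init(struct seg_queue *q)
{
	q->nxtlist = NULL;
	q->nxttail[0] = q->nxttail[1] = q->nxttail[2] = &q->nxtlist;
	q->batch = 0;
}

/* Mirror of the segment shuffling done at the top of __call_rcu(). */
static void seg_queue_advance(struct seg_queue *q, long cur)
{
	long batch = cur + 1;	/* batch this new callback will wait for */

	if (q->nxtlist && batch > q->batch) {
		q->nxttail[0] = q->nxttail[1];
		q->nxttail[1] = q->nxttail[2];
		if (batch - 1 > q->batch)
			q->nxttail[0] = q->nxttail[2];
	}
	q->batch = batch;
}

/* Mirror of the enqueue step: append at the end of segment 2. */
static void seg_queue_add(struct seg_queue *q, struct cb *p, long cur)
{
	p->next = NULL;
	seg_queue_advance(q, cur);
	*q->nxttail[2] = p;
	q->nxttail[2] = &p->next;
}

int main(void)
{
	struct seg_queue q;
	struct cb a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	seg_queue_init(&q);
	seg_queue_add(&q, &a, 0);	/* queued for batch 1 */
	seg_queue_add(&q, &b, 0);	/* same batch, same segment */
	seg_queue_add(&q, &c, 1);	/* batch 2: a and b slide one segment back */

	for (struct cb *p = q.nxtlist; p; p = p->next)
		printf("callback %d%s\n", p->id,
		       &p->next == q.nxttail[1] ?
		       "  <-- end of older-batch segment (nxttail[1])" : "");
	return 0;
}

Running the sketch shows a and b ending up behind nxttail[1] once c arrives with a newer batch number, which is exactly the bookkeeping the real __call_rcu() performs per CPU with interrupts disabled.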
@@ -133,18 +179,10 @@ void call_rcu(struct rcu_head *head,
 		void (*func)(struct rcu_head *rcu))
 {
 	unsigned long flags;
-	struct rcu_data *rdp;
 
 	head->func = func;
-	head->next = NULL;
 	local_irq_save(flags);
-	rdp = &__get_cpu_var(rcu_data);
-	*rdp->nxttail = head;
-	rdp->nxttail = &head->next;
-	if (unlikely(++rdp->qlen > qhimark)) {
-		rdp->blimit = INT_MAX;
-		force_quiescent_state(rdp, &rcu_ctrlblk);
-	}
+	__call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
@@ -169,20 +207,10 @@ void call_rcu_bh(struct rcu_head *head,
 		void (*func)(struct rcu_head *rcu))
 {
 	unsigned long flags;
-	struct rcu_data *rdp;
 
 	head->func = func;
-	head->next = NULL;
 	local_irq_save(flags);
-	rdp = &__get_cpu_var(rcu_bh_data);
-	*rdp->nxttail = head;
-	rdp->nxttail = &head->next;
-
-	if (unlikely(++rdp->qlen > qhimark)) {
-		rdp->blimit = INT_MAX;
-		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
-	}
-
+	__call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -211,12 +239,6 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 static inline void raise_rcu_softirq(void)
 {
 	raise_softirq(RCU_SOFTIRQ);
-	/*
-	 * The smp_mb() here is required to ensure that this cpu's
-	 * __rcu_process_callbacks() reads the most recently updated
-	 * value of rcu->cur.
-	 */
-	smp_mb();
 }
 
 /*
@@ -225,6 +247,7 @@ static inline void raise_rcu_softirq(void)
  */
 static void rcu_do_batch(struct rcu_data *rdp)
 {
+	unsigned long flags;
 	struct rcu_head *next, *list;
 	int count = 0;
 
@@ -239,9 +262,9 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	}
 	rdp->donelist = list;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	rdp->qlen -= count;
-	local_irq_enable();
+	local_irq_restore(flags);
 	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;
 
@@ -269,6 +292,85 @@ static void rcu_do_batch(struct rcu_data *rdp)
  * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
  * period (if necessary).
  */
+
+#ifdef CONFIG_DEBUG_RCU_STALL
+
+static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
+{
+	rcp->gp_check = get_seconds() + 3;
+}
+
+static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+	int cpu;
+	long delta;
+	unsigned long flags;
+
+	/* Only let one CPU complain about others per time interval. */
+
+	spin_lock_irqsave(&rcp->lock, flags);
+	delta = get_seconds() - rcp->gp_check;
+	if (delta < 2L || cpus_empty(rcp->cpumask)) {
+		spin_unlock(&rcp->lock);
+		return;
+	}
+	rcp->gp_check = get_seconds() + 30;
+	spin_unlock_irqrestore(&rcp->lock, flags);
+
+	/* OK, time to rat on our buddy... */
+
+	printk(KERN_ERR "RCU detected CPU stalls:");
+	for_each_cpu_mask(cpu, rcp->cpumask)
+		printk(" %d", cpu);
+	printk(" (detected by %d, t=%lu/%lu)\n",
+	       smp_processor_id(), get_seconds(), rcp->gp_check);
+}
+
+static void print_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+	unsigned long flags;
+
+	printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu)\n",
+			smp_processor_id(), get_seconds(), rcp->gp_check);
+	dump_stack();
+	spin_lock_irqsave(&rcp->lock, flags);
+	if ((long)(get_seconds() - rcp->gp_check) >= 0L)
+		rcp->gp_check = get_seconds() + 30;
+	spin_unlock_irqrestore(&rcp->lock, flags);
+}
+
+static void check_cpu_stall(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+	long delta;
+
+	delta = get_seconds() - rcp->gp_check;
+	if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0L) {
+
+		/* We haven't checked in, so go dump stack. */
+
+		print_cpu_stall(rcp);
+
+	} else {
+		if (!cpus_empty(rcp->cpumask) && delta >= 2L) {
+			/* They had two seconds to dump stack, so complain. */
+			print_other_cpu_stall(rcp);
+		}
+	}
+}
+
+#else /* #ifdef CONFIG_DEBUG_RCU_STALL */
+
+static inline void record_gp_check_time(struct rcu_ctrlblk *rcp)
+{
+}
+
+static inline void
+check_cpu_stall(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_RCU_STALL */
+
 /*
  * Register a new batch of callbacks, and start it up if there is currently no
  * active batch and the batch to be registered has not already occurred.
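The stall-detection hunk above is driven entirely by second-granularity time stamps: rcu_start_batch() records a deadline three seconds ahead via record_gp_check_time(), and check_cpu_stall() compares get_seconds() against it, reporting this CPU if it is still blocking the grace period or, after a further two-second grace, complaining about the other CPUs still in rcp->cpumask. The user-space sketch below reproduces just that arithmetic; struct gp_state and its boolean fields stand in for rcp->cpumask and are illustrative assumptions, not kernel identifiers.

/*
 * User-space sketch (illustrative only) of the stall-check timing used
 * by check_cpu_stall()/record_gp_check_time() above.
 */
#include <stdio.h>
#include <time.h>
#include <stdbool.h>

struct gp_state {
	time_t gp_check;	/* deadline recorded at grace-period start */
	bool self_holding_gp;	/* would this CPU still be in rcp->cpumask? */
	bool others_holding_gp;	/* would any CPU still be in rcp->cpumask? */
};

static void record_gp_check_time(struct gp_state *gp)
{
	gp->gp_check = time(NULL) + 3;	/* complain if the GP outlives this */
}

static void check_cpu_stall(const struct gp_state *gp)
{
	long delta = (long)(time(NULL) - gp->gp_check);

	if (gp->self_holding_gp && delta >= 0L)
		printf("this CPU is stalling the grace period (%lds past deadline)\n",
		       delta);
	else if (gp->others_holding_gp && delta >= 2L)
		printf("another CPU has stalled for %lds past the deadline\n",
		       delta);
	else
		printf("no stall detected (delta=%ld)\n", delta);
}

int main(void)
{
	struct gp_state gp = { .self_holding_gp = false,
			       .others_holding_gp = true };

	record_gp_check_time(&gp);
	gp.gp_check -= 10;	/* pretend the deadline passed 10 seconds ago */
	check_cpu_stall(&gp);
	return 0;
}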
@@ -276,15 +378,10 @@ static void rcu_do_batch(struct rcu_data *rdp)
  */
 static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 {
-	if (rcp->next_pending &&
+	if (rcp->cur != rcp->pending &&
 			rcp->completed == rcp->cur) {
-		rcp->next_pending = 0;
-		/*
-		 * next_pending == 0 must be visible in
-		 * __rcu_process_callbacks() before it can see new value of cur.
-		 */
-		smp_wmb();
 		rcp->cur++;
+		record_gp_check_time(rcp);
 
 		/*
 		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
@@ -322,6 +419,8 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
 static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
 					struct rcu_data *rdp)
 {
+	unsigned long flags;
+
 	if (rdp->quiescbatch != rcp->cur) {
 		/* start new grace period: */
 		rdp->qs_pending = 1;
@@ -345,7 +444,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
 		return;
 	rdp->qs_pending = 0;
 
-	spin_lock(&rcp->lock);
+	spin_lock_irqsave(&rcp->lock, flags);
 	/*
 	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
 	 * during cpu startup. Ignore the quiescent state.
@@ -353,7 +452,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
 	if (likely(rdp->quiescbatch == rcp->cur))
 		cpu_quiet(rdp->cpu, rcp);
 
-	spin_unlock(&rcp->lock);
+	spin_unlock_irqrestore(&rcp->lock, flags);
 }
 
 
@@ -364,33 +463,38 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
  * which is dead and hence not processing interrupts.
  */
 static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
-				struct rcu_head **tail)
+				struct rcu_head **tail, long batch)
 {
-	local_irq_disable();
-	*this_rdp->nxttail = list;
-	if (list)
-		this_rdp->nxttail = tail;
-	local_irq_enable();
+	unsigned long flags;
+
+	if (list) {
+		local_irq_save(flags);
+		this_rdp->batch = batch;
+		*this_rdp->nxttail[2] = list;
+		this_rdp->nxttail[2] = tail;
+		local_irq_restore(flags);
+	}
 }
 
 static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
-	/* if the cpu going offline owns the grace period
+	unsigned long flags;
+
+	/*
+	 * if the cpu going offline owns the grace period
 	 * we can block indefinitely waiting for it, so flush
 	 * it here
 	 */
-	spin_lock_bh(&rcp->lock);
+	spin_lock_irqsave(&rcp->lock, flags);
 	if (rcp->cur != rcp->completed)
 		cpu_quiet(rdp->cpu, rcp);
-	spin_unlock_bh(&rcp->lock);
-	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
-	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
-	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
+	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
+	spin_unlock(&rcp->lock);
 
-	local_irq_disable();
 	this_rdp->qlen += rdp->qlen;
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 static void rcu_offline_cpu(int cpu)
@@ -420,38 +524,52 @@ static void rcu_offline_cpu(int cpu)
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 					struct rcu_data *rdp)
 {
-	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
-		*rdp->donetail = rdp->curlist;
-		rdp->donetail = rdp->curtail;
-		rdp->curlist = NULL;
-		rdp->curtail = &rdp->curlist;
-	}
+	unsigned long flags;
+	long completed_snap;
 
-	if (rdp->nxtlist && !rdp->curlist) {
-		local_irq_disable();
-		rdp->curlist = rdp->nxtlist;
-		rdp->curtail = rdp->nxttail;
-		rdp->nxtlist = NULL;
-		rdp->nxttail = &rdp->nxtlist;
-		local_irq_enable();
+	if (rdp->nxtlist) {
+		local_irq_save(flags);
+		completed_snap = ACCESS_ONCE(rcp->completed);
 
 		/*
-		 * start the next batch of callbacks
+		 * move the other grace-period-completed entries to
+		 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
 		 */
+		if (!rcu_batch_before(completed_snap, rdp->batch))
+			rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
+		else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
+			rdp->nxttail[0] = rdp->nxttail[1];
 
-		/* determine batch number */
-		rdp->batch = rcp->cur + 1;
-		/* see the comment and corresponding wmb() in
-		 * the rcu_start_batch()
+		/*
+		 * the grace period for entries in
+		 * [rdp->nxtlist, *rdp->nxttail[0]) has completed;
+		 * move these entries to the donelist
 		 */
-		smp_rmb();
+		if (rdp->nxttail[0] != &rdp->nxtlist) {
+			*rdp->donetail = rdp->nxtlist;
+			rdp->donetail = rdp->nxttail[0];
+			rdp->nxtlist = *rdp->nxttail[0];
+			*rdp->donetail = NULL;
+
+			if (rdp->nxttail[1] == rdp->nxttail[0])
+				rdp->nxttail[1] = &rdp->nxtlist;
+			if (rdp->nxttail[2] == rdp->nxttail[0])
+				rdp->nxttail[2] = &rdp->nxtlist;
+			rdp->nxttail[0] = &rdp->nxtlist;
+		}
+
+		local_irq_restore(flags);
+
+		if (rcu_batch_after(rdp->batch, rcp->pending)) {
+			unsigned long flags2;
 
-		if (!rcp->next_pending) {
 			/* and start it/schedule start if it's a new batch */
-			spin_lock(&rcp->lock);
-			rcp->next_pending = 1;
-			rcu_start_batch(rcp);
-			spin_unlock(&rcp->lock);
+			spin_lock_irqsave(&rcp->lock, flags2);
+			if (rcu_batch_after(rdp->batch, rcp->pending)) {
+				rcp->pending = rdp->batch;
+				rcu_start_batch(rcp);
+			}
+			spin_unlock_irqrestore(&rcp->lock, flags2);
 		}
 	}
 
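The __rcu_process_callbacks() hunk above no longer shuffles callbacks through separate curlist/donelist stages; instead, once rcp->completed has caught up with a segment's batch number, the prefix [rdp->nxtlist, *rdp->nxttail[0]) is spliced onto the donelist and any tail pointers that pointed into that prefix are pulled back to the new head. The stand-alone sketch below performs the same splice on a hand-built two-callback list; struct cbs and the helper name splice_completed() are illustrative, not kernel identifiers.

/*
 * User-space sketch (illustrative, not kernel code) of the splice that
 * __rcu_process_callbacks() performs when the oldest segment's grace
 * period has completed.
 */
#include <stdio.h>
#include <stddef.h>

struct cb { struct cb *next; int id; };

struct cbs {
	struct cb *nxtlist, **nxttail[3];
	struct cb *donelist, **donetail;
};

static void splice_completed(struct cbs *c)
{
	if (c->nxttail[0] == &c->nxtlist)
		return;			/* oldest segment is empty */

	*c->donetail = c->nxtlist;	/* append prefix to donelist */
	c->donetail = c->nxttail[0];
	c->nxtlist = *c->nxttail[0];	/* new head: first not-yet-done cb */
	*c->donetail = NULL;		/* terminate donelist */

	if (c->nxttail[1] == c->nxttail[0])
		c->nxttail[1] = &c->nxtlist;
	if (c->nxttail[2] == c->nxttail[0])
		c->nxttail[2] = &c->nxtlist;
	c->nxttail[0] = &c->nxtlist;
}

int main(void)
{
	struct cb a = { .id = 1 }, b = { .id = 2 };
	struct cbs c = { .donelist = NULL };

	/* Hand-built state: "a" finished its grace period, "b" has not. */
	c.nxtlist = &a;  a.next = &b;  b.next = NULL;
	c.nxttail[0] = &a.next;		/* end of completed segment */
	c.nxttail[1] = c.nxttail[2] = &b.next;
	c.donetail = &c.donelist;

	splice_completed(&c);

	printf("donelist head: %d, nxtlist head: %d\n",
	       c.donelist ? c.donelist->id : 0, c.nxtlist ? c.nxtlist->id : 0);
	return 0;
}

After the splice, callback 1 sits on the donelist ready for rcu_do_batch() while callback 2 stays on nxtlist, mirroring what the real code does with interrupts disabled.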
@@ -462,21 +580,53 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
+	/*
+	 * Memory references from any prior RCU read-side critical sections
+	 * executed by the interrupted code must be seen before any RCU
+	 * grace-period manipulations below.
+	 */
+
+	smp_mb(); /* See above block comment. */
+
 	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
 	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+
+	/*
+	 * Memory references from any later RCU read-side critical sections
+	 * executed by the interrupted code must be seen after any RCU
+	 * grace-period manipulations above.
+	 */
+
+	smp_mb(); /* See above block comment. */
 }
 
 static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
-	/* This cpu has pending rcu entries and the grace period
-	 * for them has completed.
-	 */
-	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
-		return 1;
+	/* Check for CPU stalls, if enabled. */
+	check_cpu_stall(rcp, rdp);
 
-	/* This cpu has no pending entries, but there are new entries */
-	if (!rdp->curlist && rdp->nxtlist)
-		return 1;
+	if (rdp->nxtlist) {
+		long completed_snap = ACCESS_ONCE(rcp->completed);
+
+		/*
+		 * This cpu has pending rcu entries and the grace period
+		 * for them has completed.
+		 */
+		if (!rcu_batch_before(completed_snap, rdp->batch))
+			return 1;
+		if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
+		    rdp->nxttail[0] != rdp->nxttail[1])
+			return 1;
+		if (rdp->nxttail[0] != &rdp->nxtlist)
+			return 1;
+
+		/*
+		 * This cpu has pending rcu entries and the new batch
+		 * for them hasn't been started or scheduled to start.
+		 */
+		if (rcu_batch_after(rdp->batch, rcp->pending))
+			return 1;
+	}
 
 	/* This cpu has finished callbacks to invoke */
 	if (rdp->donelist)
@@ -512,9 +662,15 @@ int rcu_needs_cpu(int cpu)
 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
 
-	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
+	return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
 }
 
+/*
+ * Top-level function driving RCU grace-period detection, normally
+ * invoked from the scheduler-clock interrupt. This function simply
+ * increments counters that are read only from softirq by this same
+ * CPU, so there are no memory barriers required.
+ */
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
@@ -558,14 +714,17 @@ void rcu_check_callbacks(int cpu, int user)
 static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 						struct rcu_data *rdp)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rcp->lock, flags);
 	memset(rdp, 0, sizeof(*rdp));
-	rdp->curtail = &rdp->curlist;
-	rdp->nxttail = &rdp->nxtlist;
+	rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
 	rdp->donetail = &rdp->donelist;
 	rdp->quiescbatch = rcp->completed;
 	rdp->qs_pending = 0;
 	rdp->cpu = cpu;
 	rdp->blimit = blimit;
+	spin_unlock_irqrestore(&rcp->lock, flags);
 }
 
 static void __cpuinit rcu_online_cpu(int cpu)
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 27827931ca0d..ca4bbbe04aa4 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -59,14 +59,6 @@
 #include <linux/rcupreempt_trace.h>
 
 /*
- * Macro that prevents the compiler from reordering accesses, but does
- * absolutely -nothing- to prevent CPUs from reordering. This is used
- * only to mediate communication between mainline code and hardware
- * interrupt and NMI handlers.
- */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-
-/*
  * PREEMPT_RCU data structures.
  */
 
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 5edf82c34bbc..35c2d3360ecf 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -308,11 +308,16 @@ out:
 
 static int __init rcupreempt_trace_init(void)
 {
+	int ret;
+
 	mutex_init(&rcupreempt_trace_mutex);
 	rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
 	if (!rcupreempt_trace_buf)
 		return 1;
-	return rcupreempt_debugfs_init();
+	ret = rcupreempt_debugfs_init();
+	if (ret)
+		kfree(rcupreempt_trace_buf);
+	return ret;
 }
 
 static void __exit rcupreempt_trace_cleanup(void)
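The rcupreempt_trace_init() change above is a standard unwind-on-failure fix: the buffer allocated by kmalloc() is freed if the subsequent debugfs setup fails, instead of being leaked. A minimal user-space sketch of the same pattern is shown below; the names trace_init, fake_debugfs_init, and BUF_SIZE are made up for illustration.

/*
 * Stand-alone sketch (illustrative names) of the cleanup-on-failure
 * pattern adopted by the rcupreempt_trace_init() change.
 */
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096

static char *trace_buf;

static int fake_debugfs_init(void)
{
	return -1;	/* pretend the second init step failed */
}

static int trace_init(void)
{
	int ret;

	trace_buf = malloc(BUF_SIZE);
	if (!trace_buf)
		return 1;
	ret = fake_debugfs_init();
	if (ret)
		free(trace_buf);	/* undo the earlier allocation on failure */
	return ret;
}

int main(void)
{
	printf("trace_init() returned %d\n", trace_init());
	return 0;
}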