author		Ingo Molnar <mingo@elte.hu>	2011-05-27 06:38:52 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-05-28 11:41:05 -0400
commit		29f742f88a32c9ab8cf6d9ba69e1ea918be5aa58 (patch)
tree		a38aa38c8025e050ec82a7e64d02dca07f90ffc7 /kernel/rcutree.c
parent		f62508f68d04adefc4cf9b0177ba02c8818b3eec (diff)
parent		23b5c8fa01b723c70a20d6e4ef4ff54c7656d6e1 (diff)

Merge branch 'rcu/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu into core/urgent
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	130
1 file changed, 56 insertions(+), 74 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f07d2f03181a..8154a4a3491c 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
         .dynticks_nesting = 1,
-        .dynticks = 1,
+        .dynticks = ATOMIC_INIT(1),
 };
 #endif /* #ifdef CONFIG_NO_HZ */
 
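The ->dynticks counter is now an atomic_t, and an atomic_t must be statically initialized with ATOMIC_INIT() rather than a bare integer, hence the one-line change above. For orientation, a minimal sketch of the per-CPU state this series operates on, with field types inferred from their uses in this diff rather than quoted from the upstream header:

        /* Sketch only; fields inferred from this diff, not the full struct. */
        struct rcu_dynticks {
                int dynticks_nesting;           /* Process/irq nesting depth. */
                int dynticks_nmi_nesting;       /* NMI nesting depth. */
                atomic_t dynticks;              /* Even: idle; odd: non-idle. */
        };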
@@ -322,13 +322,25 @@ void rcu_enter_nohz(void)
         unsigned long flags;
         struct rcu_dynticks *rdtp;
 
-        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
         local_irq_save(flags);
         rdtp = &__get_cpu_var(rcu_dynticks);
-        rdtp->dynticks++;
-        rdtp->dynticks_nesting--;
-        WARN_ON_ONCE(rdtp->dynticks & 0x1);
+        if (--rdtp->dynticks_nesting) {
+                local_irq_restore(flags);
+                return;
+        }
+        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+        smp_mb__before_atomic_inc(); /* See above. */
+        atomic_inc(&rdtp->dynticks);
+        smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
         local_irq_restore(flags);
+
+        /* If the interrupt queued a callback, get out of dyntick mode. */
+        if (in_irq() &&
+            (__get_cpu_var(rcu_sched_data).nxtlist ||
+             __get_cpu_var(rcu_bh_data).nxtlist ||
+             rcu_preempt_needs_cpu(smp_processor_id())))
+                set_need_resched();
 }
 
 /*
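Note the idiom above, mirrored in rcu_exit_nohz() below: the counter flip is bracketed by smp_mb__before_atomic_inc()/smp_mb__after_atomic_inc(), so the increment is fully ordered against accesses on both sides. As a stand-alone illustration (this helper is hypothetical, not part of the patch):

        /* Hypothetical helper showing the flip-with-full-ordering idiom. */
        static void rcu_dynticks_flip(atomic_t *dynticks)
        {
                smp_mb__before_atomic_inc();    /* Order prior accesses before inc. */
                atomic_inc(dynticks);           /* Toggle even (idle) <-> odd. */
                smp_mb__after_atomic_inc();     /* Order inc before later accesses. */
        }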
@@ -344,11 +356,16 @@ void rcu_exit_nohz(void)
 
         local_irq_save(flags);
         rdtp = &__get_cpu_var(rcu_dynticks);
-        rdtp->dynticks++;
-        rdtp->dynticks_nesting++;
-        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+        if (rdtp->dynticks_nesting++) {
+                local_irq_restore(flags);
+                return;
+        }
+        smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+        atomic_inc(&rdtp->dynticks);
+        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+        smp_mb__after_atomic_inc(); /* See above. */
+        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
         local_irq_restore(flags);
-        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
 /**
@@ -362,11 +379,15 @@ void rcu_nmi_enter(void)
 {
         struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-        if (rdtp->dynticks & 0x1)
+        if (rdtp->dynticks_nmi_nesting == 0 &&
+            (atomic_read(&rdtp->dynticks) & 0x1))
                 return;
-        rdtp->dynticks_nmi++;
-        WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
-        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+        rdtp->dynticks_nmi_nesting++;
+        smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+        atomic_inc(&rdtp->dynticks);
+        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+        smp_mb__after_atomic_inc(); /* See above. */
+        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
 /**
@@ -380,11 +401,14 @@ void rcu_nmi_exit(void)
 {
         struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-        if (rdtp->dynticks & 0x1)
+        if (rdtp->dynticks_nmi_nesting == 0 ||
+            --rdtp->dynticks_nmi_nesting != 0)
                 return;
-        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-        rdtp->dynticks_nmi++;
-        WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
+        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+        smp_mb__before_atomic_inc(); /* See above. */
+        atomic_inc(&rdtp->dynticks);
+        smp_mb__after_atomic_inc(); /* Force delay to next write. */
+        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
 /**
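Taken together, the two NMI-side hunks fold the separate ->dynticks_nmi counter into ->dynticks: the counter is flipped only for NMIs that interrupt a dyntick-idle sojourn, with ->dynticks_nmi_nesting tracking how many such entries are outstanding. A short trace of the two interesting cases, derived from the code above:

        /* NMI arrives while the CPU is non-idle (dynticks odd, nesting 0):
         *      rcu_nmi_enter() -> early return; RCU is already watching
         *      rcu_nmi_exit()  -> nesting still 0, early return
         *
         * NMI arrives while the CPU is dyntick-idle (dynticks even):
         *      rcu_nmi_enter() -> nesting = 1, counter flips odd (RCU watches)
         *      rcu_nmi_exit()  -> --nesting hits 0, counter flips back even
         */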
@@ -395,13 +419,7 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_enter(void)
 {
-        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-        if (rdtp->dynticks_nesting++)
-                return;
-        rdtp->dynticks++;
-        WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
-        smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+        rcu_exit_nohz();
 }
 
 /**
@@ -413,18 +431,7 @@ void rcu_irq_enter(void)
  */
 void rcu_irq_exit(void)
 {
-        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-        if (--rdtp->dynticks_nesting)
-                return;
-        smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-        rdtp->dynticks++;
-        WARN_ON_ONCE(rdtp->dynticks & 0x1);
-
-        /* If the interrupt queued a callback, get out of dyntick mode. */
-        if (__this_cpu_read(rcu_sched_data.nxtlist) ||
-            __this_cpu_read(rcu_bh_data.nxtlist))
-                set_need_resched();
+        rcu_enter_nohz();
 }
 
 #ifdef CONFIG_SMP
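With the nesting check folded into rcu_enter_nohz()/rcu_exit_nohz() themselves, the irq hooks become trivial wrappers, and irq entry/exit shares the single ->dynticks_nesting counter with the nohz path. An illustrative sequence, following the logic of the new code:

        /* Only the 0 <-> 1 nesting transitions flip the dynticks counter:
         *
         *      rcu_exit_nohz();        nesting 0 -> 1: flip to odd, RCU watches
         *      rcu_irq_enter();        nesting 1 -> 2: counter untouched
         *      rcu_irq_exit();         nesting 2 -> 1: counter untouched
         *      rcu_enter_nohz();       nesting 1 -> 0: flip to even, idle again
         */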
@@ -436,19 +443,8 @@ void rcu_irq_exit(void)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-        int ret;
-        int snap;
-        int snap_nmi;
-
-        snap = rdp->dynticks->dynticks;
-        snap_nmi = rdp->dynticks->dynticks_nmi;
-        smp_mb(); /* Order sampling of snap with end of grace period. */
-        rdp->dynticks_snap = snap;
-        rdp->dynticks_nmi_snap = snap_nmi;
-        ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
-        if (ret)
-                rdp->dynticks_fqs++;
-        return ret;
+        rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+        return 0;
 }
 
 /*
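The atomic_add_return(0, ...) call is doing double duty here: it reads the counter, and because atomic_add_return() implies a full memory barrier before and after the operation, it replaces both the plain loads and the explicit smp_mb() of the old code. A hypothetical helper naming the idiom:

        /* Hypothetical helper: a fully ordered read of an atomic_t.
         * Adding 0 leaves the value unchanged but keeps the implied
         * full barriers on both sides of the access. */
        static int atomic_read_mb(atomic_t *v)
        {
                return atomic_add_return(0, v);
        }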
@@ -459,16 +455,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
-        long curr;
-        long curr_nmi;
-        long snap;
-        long snap_nmi;
+        unsigned long curr;
+        unsigned long snap;
 
-        curr = rdp->dynticks->dynticks;
-        snap = rdp->dynticks_snap;
-        curr_nmi = rdp->dynticks->dynticks_nmi;
-        snap_nmi = rdp->dynticks_nmi_snap;
-        smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+        curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
+        snap = (unsigned long)rdp->dynticks_snap;
 
         /*
          * If the CPU passed through or entered a dynticks idle phase with
@@ -478,8 +469,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
          * read-side critical section that started before the beginning
          * of the current RCU grace period.
          */
-        if ((curr != snap || (curr & 0x1) == 0) &&
-            (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+        if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
                 rdp->dynticks_fqs++;
                 return 1;
         }
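ULONG_CMP_GE() is RCU's wrap-safe unsigned counter comparison; if memory serves, it is defined in include/linux/rcupdate.h roughly as below. The quiescent-state test then reads: either the CPU is idle right now (curr is even), or the counter advanced by at least two since the snapshot, meaning the CPU passed through idle in the interim.

        /* Wrap-safe "a >= b" for free-running counters (from rcupdate.h,
         * quoted from memory): */
        #define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))

        /* Worked example with snap == 5 (odd: CPU non-idle at snapshot):
         *      curr == 5       -> neither test fires: no QS observed yet
         *      curr == 6       -> even: CPU is idle right now, QS
         *      curr == 7       -> snap + 2: CPU entered and left idle, QS
         */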
@@ -908,6 +898,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
         unsigned long gp_duration;
 
         WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+
+        /*
+         * Ensure that all grace-period and pre-grace-period activity
+         * is seen before the assignment to rsp->completed.
+         */
+        smp_mb(); /* See above block comment. */
         gp_duration = jiffies - rsp->gp_start;
         if (gp_duration > rsp->gp_max)
                 rsp->gp_max = gp_duration;
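The new smp_mb() is a standard publish barrier: every grace-period bookkeeping write must be globally visible before other CPUs can observe the updated ->completed value, which they may read locklessly. Schematically (the helper name is illustrative, not the literal body of rcu_report_qs_rsp()):

        /* Schematic publish pattern, names illustrative only. */
        static void example_end_grace_period(struct rcu_state *rsp)
        {
                /* ... grace-period and pre-grace-period writes ... */
                smp_mb();                       /* Order those writes ... */
                rsp->completed = rsp->gpnum;    /* ... before the publish. */
        }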
@@ -1455,25 +1451,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(void)
 {
-        /*
-         * Memory references from any prior RCU read-side critical sections
-         * executed by the interrupted code must be seen before any RCU
-         * grace-period manipulations below.
-         */
-        smp_mb(); /* See above block comment. */
-
         __rcu_process_callbacks(&rcu_sched_state,
                                 &__get_cpu_var(rcu_sched_data));
         __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
         rcu_preempt_process_callbacks();
 
-        /*
-         * Memory references from any later RCU read-side critical sections
-         * executed by the interrupted code must be seen after any RCU
-         * grace-period manipulations above.
-         */
-        smp_mb(); /* See above block comment. */
-
         /* If we are last CPU on way to dyntick-idle mode, accelerate it. */
         rcu_needs_cpu_flush();
 }