Diffstat (limited to 'kernel/rcutree.c')
 kernel/rcutree.c | 111
 1 file changed, 50 insertions(+), 61 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 99c6038ad04d..5616b17e4a22 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = 1,
-	.dynticks = 1,
+	.dynticks = ATOMIC_INIT(1),
 };
 #endif /* #ifdef CONFIG_NO_HZ */
 
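The initializer changes because this commit converts the per-CPU ->dynticks field from a plain counter to an atomic_t. For orientation, the structure declared in kernel/rcutree.h ends up looking roughly like this after the patch (a sketch; field comments are paraphrased):

struct rcu_dynticks {
	int dynticks_nesting;		/* Track irq/process nesting level. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for dynticks-idle, else odd. */
};

The counter starts at the odd value 1 because a booting CPU is, from RCU's point of view, not idle.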
@@ -321,13 +321,25 @@ void rcu_enter_nohz(void)
 	unsigned long flags;
 	struct rcu_dynticks *rdtp;
 
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (--rdtp->dynticks_nesting == 0)
-		rdtp->dynticks++;
-	WARN_ON_ONCE(rdtp->dynticks & 0x1);
+	if (--rdtp->dynticks_nesting) {
+		local_irq_restore(flags);
+		return;
+	}
+	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+	smp_mb__before_atomic_inc();  /* See above. */
+	atomic_inc(&rdtp->dynticks);
+	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 	local_irq_restore(flags);
+
+	/* If the interrupt queued a callback, get out of dyntick mode. */
+	if (in_irq() &&
+	    (__get_cpu_var(rcu_sched_data).nxtlist ||
+	     __get_cpu_var(rcu_bh_data).nxtlist ||
+	     rcu_preempt_needs_cpu(smp_processor_id())))
+		set_need_resched();
 }
 
 /*
@@ -343,11 +355,16 @@ void rcu_exit_nohz(void)
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	rdtp->dynticks++;
-	rdtp->dynticks_nesting++;
-	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+	if (rdtp->dynticks_nesting++) {
+		local_irq_restore(flags);
+		return;
+	}
+	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+	atomic_inc(&rdtp->dynticks);
+	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+	smp_mb__after_atomic_inc();  /* See above. */
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	local_irq_restore(flags);
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
 /**
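Both the entry and exit paths now maintain a single invariant: ->dynticks is even exactly while the CPU is in dyntick-idle mode and odd otherwise, which is what the WARN_ON_ONCE() checks above assert. Expressed as a hypothetical helper (illustrative only; no such function exists in this file):

/* Hypothetical predicate: does this CPU's counter say "dyntick-idle"? */
static inline int rcu_dynticks_idle_p(struct rcu_dynticks *rdtp)
{
	return (atomic_read(&rdtp->dynticks) & 0x1) == 0;	/* even => idle */
}

Only the outermost nesting transition touches the counter, so the memory barriers are paid once per idle sojourn rather than once per call.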
@@ -361,11 +378,15 @@ void rcu_nmi_enter(void)
 {
 	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-	if (rdtp->dynticks & 0x1)
+	if (rdtp->dynticks_nmi_nesting == 0 &&
+	    (atomic_read(&rdtp->dynticks) & 0x1))
 		return;
-	rdtp->dynticks_nmi++;
-	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	rdtp->dynticks_nmi_nesting++;
+	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+	atomic_inc(&rdtp->dynticks);
+	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+	smp_mb__after_atomic_inc();  /* See above. */
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
 /**
@@ -379,11 +400,14 @@ void rcu_nmi_exit(void)
 {
 	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-	if (rdtp->dynticks & 0x1)
+	if (rdtp->dynticks_nmi_nesting == 0 ||
+	    --rdtp->dynticks_nmi_nesting != 0)
 		return;
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	rdtp->dynticks_nmi++;
-	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
+	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+	smp_mb__before_atomic_inc();  /* See above. */
+	atomic_inc(&rdtp->dynticks);
+	smp_mb__after_atomic_inc();  /* Force delay to next write. */
+	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
 /**
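The separate ->dynticks_nmi counter is gone: NMIs now share the single ->dynticks counter and track their own nesting in ->dynticks_nmi_nesting. Reading the two hunks above together, the bookkeeping works out as follows:

/*
 * NMI taken while the CPU is non-idle (->dynticks already odd):
 *   rcu_nmi_enter() sees nesting == 0 and an odd counter and returns,
 *   rcu_nmi_exit()  sees nesting == 0 and returns,
 *   so the counter and its barriers are never touched.
 *
 * NMI taken while the CPU is dyntick-idle (->dynticks even):
 *   rcu_nmi_enter() bumps nesting to 1 and makes the counter odd,
 *   rcu_nmi_exit()  drops nesting to 0 and makes the counter even again,
 *   marking the idle period as interrupted for the grace-period machinery.
 */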
@@ -394,13 +418,7 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_enter(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-	if (rdtp->dynticks_nesting++)
-		return;
-	rdtp->dynticks++;
-	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	rcu_exit_nohz();
 }
 
 /**
@@ -412,19 +430,7 @@ void rcu_irq_enter(void)
  */
 void rcu_irq_exit(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-	if (--rdtp->dynticks_nesting)
-		return;
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	rdtp->dynticks++;
-	WARN_ON_ONCE(rdtp->dynticks & 0x1);
-
-	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (in_irq() &&
-	    (__this_cpu_read(rcu_sched_data.nxtlist) ||
-	     __this_cpu_read(rcu_bh_data.nxtlist)))
-		set_need_resched();
+	rcu_enter_nohz();
 }
 
 #ifdef CONFIG_SMP
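rcu_irq_enter() and rcu_irq_exit() collapse into wrappers, so interrupts taken from idle reuse the same nesting counter and atomic counter as process-level entry to and exit from nohz mode. This consolidation is also why rcu_enter_nohz() grew the in_irq()/set_need_resched() tail earlier in the patch: the "interrupt queued a callback" check that used to live here must still run on the irq-exit path. Tracing the two cases through the wrappers:

/*
 * Interrupt taken while the CPU is dyntick-idle (->dynticks_nesting == 0):
 *   rcu_irq_enter() -> rcu_exit_nohz():  nesting 0 -> 1, counter becomes odd,
 *   rcu_irq_exit()  -> rcu_enter_nohz(): nesting 1 -> 0, counter becomes even.
 *
 * Interrupt taken while the CPU is already non-idle (->dynticks_nesting >= 1):
 *   both wrappers only adjust ->dynticks_nesting; the atomic counter and its
 *   memory barriers stay off this fast path.
 */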
@@ -436,19 +442,8 @@ void rcu_irq_exit(void)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-	int ret;
-	int snap;
-	int snap_nmi;
-
-	snap = rdp->dynticks->dynticks;
-	snap_nmi = rdp->dynticks->dynticks_nmi;
-	smp_mb();  /* Order sampling of snap with end of grace period. */
-	rdp->dynticks_snap = snap;
-	rdp->dynticks_nmi_snap = snap_nmi;
-	ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
-	if (ret)
-		rdp->dynticks_fqs++;
-	return ret;
+	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+	return 0;
 }
 
 /*
@@ -459,16 +454,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
-	long curr;
-	long curr_nmi;
-	long snap;
-	long snap_nmi;
+	unsigned long curr;
+	unsigned long snap;
 
-	curr = rdp->dynticks->dynticks;
-	snap = rdp->dynticks_snap;
-	curr_nmi = rdp->dynticks->dynticks_nmi;
-	snap_nmi = rdp->dynticks_nmi_snap;
-	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+	curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
+	snap = (unsigned long)rdp->dynticks_snap;
 
 	/*
 	 * If the CPU passed through or entered a dynticks idle phase with
@@ -478,8 +468,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * read-side critical section that started before the beginning
 	 * of the current RCU grace period.
 	 */
-	if ((curr != snap || (curr & 0x1) == 0) &&
-	    (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+	if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
 		rdp->dynticks_fqs++;
 		return 1;
 	}
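With a single counter, the quiescent-state test simplifies: the CPU has passed through a quiescent state if its counter is currently even (it is idle right now) or has advanced by at least two since the snapshot (it entered and left idle at least once in between, each transition being one increment). ULONG_CMP_GE() keeps that comparison correct across counter wraparound; its definition elsewhere in the tree is approximately:

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

Because (curr - snap) is computed modulo the word size, an advance of two is still detected even if the counter wraps past ULONG_MAX between the snapshot and the recheck.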