 Documentation/RCU/trace.txt |  17
 kernel/rcutree.c            | 130
 kernel/rcutree.h            |   9
 kernel/rcutree_plugin.h     |   7
 kernel/rcutree_trace.c      |  12
 lib/locking-selftest.c      |   2
 6 files changed, 74 insertions, 103 deletions
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index c078ad48f7a1..8173cec473aa 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -99,18 +99,11 @@ o	"qp" indicates that RCU still expects a quiescent state from
 
 o	"dt" is the current value of the dyntick counter that is incremented
 	when entering or leaving dynticks idle state, either by the
-	scheduler or by irq.  The number after the "/" is the interrupt
-	nesting depth when in dyntick-idle state, or one greater than
-	the interrupt-nesting depth otherwise.
-
-	This field is displayed only for CONFIG_NO_HZ kernels.
-
-o	"dn" is the current value of the dyntick counter that is incremented
-	when entering or leaving dynticks idle state via NMI.  If both
-	the "dt" and "dn" values are even, then this CPU is in dynticks
-	idle mode and may be ignored by RCU.  If either of these two
-	counters is odd, then RCU must be alert to the possibility of
-	an RCU read-side critical section running on this CPU.
+	scheduler or by irq.  This number is even if the CPU is in
+	dyntick idle mode and odd otherwise.  The number after the first
+	"/" is the interrupt nesting depth when in dyntick-idle state,
+	or one greater than the interrupt-nesting depth otherwise.
+	The number after the second "/" is the NMI nesting depth.
 
 	This field is displayed only for CONFIG_NO_HZ kernels.
 
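For illustration, under the new format the combined field reads dt=<counter>/<irq nesting>/<NMI nesting>; a line showing "dt=6/0/0" (values invented for this example) would indicate an even counter, hence a CPU in dyntick-idle mode that RCU may ignore, with no irq or NMI nesting.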
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f07d2f03181a..8154a4a3491c 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = 1,
-	.dynticks = 1,
+	.dynticks = ATOMIC_INIT(1),
 };
 #endif /* #ifdef CONFIG_NO_HZ */
 
@@ -322,13 +322,25 @@ void rcu_enter_nohz(void)
 	unsigned long flags;
 	struct rcu_dynticks *rdtp;
 
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	rdtp->dynticks++;
-	rdtp->dynticks_nesting--;
-	WARN_ON_ONCE(rdtp->dynticks & 0x1);
+	if (--rdtp->dynticks_nesting) {
+		local_irq_restore(flags);
+		return;
+	}
+	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+	smp_mb__before_atomic_inc();  /* See above. */
+	atomic_inc(&rdtp->dynticks);
+	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 	local_irq_restore(flags);
+
+	/* If the interrupt queued a callback, get out of dyntick mode. */
+	if (in_irq() &&
+	    (__get_cpu_var(rcu_sched_data).nxtlist ||
+	     __get_cpu_var(rcu_bh_data).nxtlist ||
+	     rcu_preempt_needs_cpu(smp_processor_id())))
+		set_need_resched();
 }
 
 /*
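With this restructuring, rcu_enter_nohz() flips the single dynticks counter to an even value only when the outermost nesting level is exited, so evenness alone now encodes dyntick-idle. A minimal sketch of the invariant as another CPU would test it (the helper name is invented for illustration):

	/* Hypothetical helper: true if rdtp's CPU is in dyntick-idle mode. */
	static inline bool rcu_dynticks_idle_p(struct rcu_dynticks *rdtp)
	{
		return (atomic_read(&rdtp->dynticks) & 0x1) == 0;
	}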
@@ -344,11 +356,16 @@ void rcu_exit_nohz(void)
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	rdtp->dynticks++;
-	rdtp->dynticks_nesting++;
-	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+	if (rdtp->dynticks_nesting++) {
+		local_irq_restore(flags);
+		return;
+	}
+	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+	atomic_inc(&rdtp->dynticks);
+	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+	smp_mb__after_atomic_inc();  /* See above. */
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	local_irq_restore(flags);
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
 /**
@@ -362,11 +379,15 @@ void rcu_nmi_enter(void)
 {
 	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-	if (rdtp->dynticks & 0x1)
+	if (rdtp->dynticks_nmi_nesting == 0 &&
+	    (atomic_read(&rdtp->dynticks) & 0x1))
 		return;
-	rdtp->dynticks_nmi++;
-	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	rdtp->dynticks_nmi_nesting++;
+	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+	atomic_inc(&rdtp->dynticks);
+	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+	smp_mb__after_atomic_inc();  /* See above. */
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
 /**
@@ -380,11 +401,14 @@ void rcu_nmi_exit(void)
 {
 	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-	if (rdtp->dynticks & 0x1)
+	if (rdtp->dynticks_nmi_nesting == 0 ||
+	    --rdtp->dynticks_nmi_nesting != 0)
 		return;
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	rdtp->dynticks_nmi++;
-	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
+	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+	smp_mb__before_atomic_inc();  /* See above. */
+	atomic_inc(&rdtp->dynticks);
+	smp_mb__after_atomic_inc();  /* Force delay to next write. */
+	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
 /**
@@ -395,13 +419,7 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_enter(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-	if (rdtp->dynticks_nesting++)
-		return;
-	rdtp->dynticks++;
-	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	rcu_exit_nohz();
 }
 
 /**
@@ -413,18 +431,7 @@ void rcu_irq_enter(void)
  */
 void rcu_irq_exit(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-	if (--rdtp->dynticks_nesting)
-		return;
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	rdtp->dynticks++;
-	WARN_ON_ONCE(rdtp->dynticks & 0x1);
-
-	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__this_cpu_read(rcu_sched_data.nxtlist) ||
-	    __this_cpu_read(rcu_bh_data.nxtlist))
-		set_need_resched();
+	rcu_enter_nohz();
 }
 
 #ifdef CONFIG_SMP
@@ -436,19 +443,8 @@ void rcu_irq_exit(void)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-	int ret;
-	int snap;
-	int snap_nmi;
-
-	snap = rdp->dynticks->dynticks;
-	snap_nmi = rdp->dynticks->dynticks_nmi;
-	smp_mb();  /* Order sampling of snap with end of grace period. */
-	rdp->dynticks_snap = snap;
-	rdp->dynticks_nmi_snap = snap_nmi;
-	ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
-	if (ret)
-		rdp->dynticks_fqs++;
-	return ret;
+	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+	return 0;
 }
 
 /*
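Here atomic_add_return(0, ...) serves as a barrier-providing read: unlike plain atomic_read(), an atomic operation that returns a value implies a full memory barrier before and after it, which is what allows the explicit smp_mb() calls around the snapshot to be dropped. A sketch of the idiom, outside any particular caller:

	atomic_t v;
	int snap;

	snap = atomic_add_return(0, &v);  /* reads v, with full ordering on
					   * both sides, per the kernel's
					   * atomic-operation guarantees */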
@@ -459,16 +455,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
-	long curr;
-	long curr_nmi;
-	long snap;
-	long snap_nmi;
+	unsigned long curr;
+	unsigned long snap;
 
-	curr = rdp->dynticks->dynticks;
-	snap = rdp->dynticks_snap;
-	curr_nmi = rdp->dynticks->dynticks_nmi;
-	snap_nmi = rdp->dynticks_nmi_snap;
-	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+	curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
+	snap = (unsigned long)rdp->dynticks_snap;
 
 	/*
 	 * If the CPU passed through or entered a dynticks idle phase with
@@ -478,8 +469,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * read-side critical section that started before the beginning
 	 * of the current RCU grace period.
 	 */
-	if ((curr != snap || (curr & 0x1) == 0) &&
-	    (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+	if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
 		rdp->dynticks_fqs++;
 		return 1;
 	}
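Worked example of the new test, with invented values: if snap == 9 (odd, so the CPU was non-idle when the snapshot was taken) and curr == 9, neither clause holds and no quiescent state is reported. If curr == 10, the CPU is idle now ((curr & 0x1) == 0); if curr == 11, it has passed through idle since the snapshot (curr >= snap + 2). Either way a quiescent state is reported, and ULONG_CMP_GE() keeps the comparison correct if the counter wraps.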
@@ -908,6 +898,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	unsigned long gp_duration;
 
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+
+	/*
+	 * Ensure that all grace-period and pre-grace-period activity
+	 * is seen before the assignment to rsp->completed.
+	 */
+	smp_mb(); /* See above block comment. */
 	gp_duration = jiffies - rsp->gp_start;
 	if (gp_duration > rsp->gp_max)
 		rsp->gp_max = gp_duration;
@@ -1455,25 +1451,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(void)
 {
-	/*
-	 * Memory references from any prior RCU read-side critical sections
-	 * executed by the interrupted code must be seen before any RCU
-	 * grace-period manipulations below.
-	 */
-	smp_mb(); /* See above block comment. */
-
 	__rcu_process_callbacks(&rcu_sched_state,
 				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
 	rcu_preempt_process_callbacks();
 
-	/*
-	 * Memory references from any later RCU read-side critical sections
-	 * executed by the interrupted code must be seen after any RCU
-	 * grace-period manipulations above.
-	 */
-	smp_mb(); /* See above block comment. */
-
 	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
 	rcu_needs_cpu_flush();
 }
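Taken together, the last two hunks shift the ordering burden: the unconditional smp_mb() pair that rcu_process_callbacks() executed on every softirq pass is replaced by a single smp_mb() in rcu_report_qs_rsp(), executed only when a grace period actually ends.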
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 257664815d5d..93d4a1c2e88b 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,11 +84,9 @@
  * Dynticks per-CPU state.
  */
 struct rcu_dynticks {
-	int dynticks_nesting;	/* Track nesting level, sort of. */
-	int dynticks;		/* Even value for dynticks-idle, else odd. */
-	int dynticks_nmi;	/* Even value for either dynticks-idle or */
-				/*  not in nmi handler, else odd.  So this */
-				/*  remains even for nmi from irq handler. */
+	int dynticks_nesting;	/* Track irq/process nesting level. */
+	int dynticks_nmi_nesting; /* Track NMI nesting level. */
+	atomic_t dynticks;	/* Even value for dynticks-idle, else odd. */
 };
 
 /* RCU's kthread states for tracing. */
@@ -284,7 +282,6 @@ struct rcu_data {
 	/* 3) dynticks interface. */
 	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
 	int dynticks_snap;		/* Per-GP tracking for dynticks. */
-	int dynticks_nmi_snap;		/* Per-GP tracking for dynticks_nmi. */
 #endif /* #ifdef CONFIG_NO_HZ */
 
 	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3f6559a5f5cd..ed339702481d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1520,7 +1520,6 @@ int rcu_needs_cpu(int cpu)
 {
 	int c = 0;
 	int snap;
-	int snap_nmi;
 	int thatcpu;
 
 	/* Check for being in the holdoff period. */
@@ -1531,10 +1530,10 @@ int rcu_needs_cpu(int cpu)
 	for_each_online_cpu(thatcpu) {
 		if (thatcpu == cpu)
 			continue;
-		snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
-		snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
+		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
+						     thatcpu).dynticks);
 		smp_mb(); /* Order sampling of snap with end of grace period. */
-		if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
+		if ((snap & 0x1) != 0) {
 			per_cpu(rcu_dyntick_drain, cpu) = 0;
 			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 			return rcu_needs_cpu_quick_check(cpu);
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index aa0fd72b4bc7..9678cc3650f5 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -69,10 +69,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
 		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
-	seq_printf(m, " dt=%d/%d dn=%d df=%lu",
-		   rdp->dynticks->dynticks,
+	seq_printf(m, " dt=%d/%d/%d df=%lu",
+		   atomic_read(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,
-		   rdp->dynticks->dynticks_nmi,
+		   rdp->dynticks->dynticks_nmi_nesting,
 		   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -141,9 +141,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, ",%d,%d,%d,%lu",
-		   rdp->dynticks->dynticks,
+		   atomic_read(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,
-		   rdp->dynticks->dynticks_nmi,
+		   rdp->dynticks->dynticks_nmi_nesting,
 		   rdp->dynticks_fqs);
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
@@ -167,7 +167,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
 	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
 #ifdef CONFIG_NO_HZ
-	seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
+	seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
 	seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n");
 #ifdef CONFIG_TREE_PREEMPT_RCU
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 619313ed6c46..507a22fab738 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -144,7 +144,7 @@ static void init_shared_classes(void)
 
 #define HARDIRQ_ENTER()				\
 	local_irq_disable();			\
-	irq_enter();				\
+	__irq_enter();				\
 	WARN_ON(!in_irq());
 
 #define HARDIRQ_EXIT()				\
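The switch from irq_enter() to __irq_enter() is needed because irq_enter() also invokes rcu_irq_enter(), which after this patch takes the full rcu_exit_nohz() path; the locking selftest only wants to fake hardirq context via the preempt count and (the assumption here) should not perturb RCU's dyntick-idle bookkeeping, which the raw __irq_enter() leaves untouched.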