Diffstat (limited to 'kernel/rcutree.c')
 kernel/rcutree.c | 130 ++++++++++++++++++++++-----------------------------
 1 file changed, 56 insertions(+), 74 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 18f7a593d4c7..90104a19c564 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -128,7 +128,7 @@ void rcu_note_context_switch(int cpu)
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = 1,
-	.dynticks = 1,
+	.dynticks = ATOMIC_INIT(1),
 };
 #endif /* #ifdef CONFIG_NO_HZ */
 
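The core change in this commit: the old ->dynticks/->dynticks_nmi counter pair collapses into a single atomic_t whose bottom bit encodes the dyntick-idle state. An even value means the CPU is in dyntick-idle mode and RCU may ignore it; an odd value means RCU must wait on it, so ATOMIC_INIT(1) starts each CPU out as non-idle. A minimal userspace model of the even/odd convention, using C11 atomics in place of the kernel's atomic_t (all names below are illustrative, not kernel API):

  #include <stdatomic.h>
  #include <stdio.h>

  /* Odd => CPU is non-idle (RCU must wait on it); even => dyntick-idle. */
  static atomic_int dynticks = 1;             /* CPUs boot non-idle (odd) */

  static int cpu_is_idle(void)
  {
          return (atomic_load(&dynticks) & 0x1) == 0;
  }

  int main(void)
  {
          printf("idle? %d\n", cpu_is_idle());  /* 0: counter is odd */
          atomic_fetch_add(&dynticks, 1);       /* odd -> even: enter idle */
          printf("idle? %d\n", cpu_is_idle());  /* 1: counter is even */
          atomic_fetch_add(&dynticks, 1);       /* even -> odd: exit idle */
          printf("idle? %d\n", cpu_is_idle());  /* 0 again */
          return 0;
  }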
@@ -262,13 +262,25 @@ void rcu_enter_nohz(void)
 	unsigned long flags;
 	struct rcu_dynticks *rdtp;
 
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	rdtp->dynticks++;
-	rdtp->dynticks_nesting--;
-	WARN_ON_ONCE(rdtp->dynticks & 0x1);
+	if (--rdtp->dynticks_nesting) {
+		local_irq_restore(flags);
+		return;
+	}
+	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+	smp_mb__before_atomic_inc();  /* See above. */
+	atomic_inc(&rdtp->dynticks);
+	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 	local_irq_restore(flags);
+
+	/* If the interrupt queued a callback, get out of dyntick mode. */
+	if (in_irq() &&
+	    (__get_cpu_var(rcu_sched_data).nxtlist ||
+	     __get_cpu_var(rcu_bh_data).nxtlist ||
+	     rcu_preempt_needs_cpu(smp_processor_id())))
+		set_need_resched();
 }
 
 /*
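Note that only the outermost rcu_enter_nohz() now touches the shared counter; nested calls just decrement the nesting count and return, and the smp_mb__{before,after}_atomic_inc() pair fences the single atomic_inc() on both sides. Extending the model above with the nesting discipline (hypothetical userspace sketch; the seq_cst fetch_add supplies the full ordering the kernel assembles from the barrier pair, and interrupts are assumed disabled around these, as local_irq_save() guarantees in the real code):

  static int dynticks_nesting = 1;   /* per-CPU nesting of non-idle state */

  static void enter_nohz(void)
  {
          if (--dynticks_nesting)
                  return;                  /* still nested: no state change */
          atomic_fetch_add(&dynticks, 1);  /* odd -> even: CPU now idle */
  }

  static void exit_nohz(void)
  {
          if (dynticks_nesting++)
                  return;                  /* was already non-idle */
          atomic_fetch_add(&dynticks, 1);  /* even -> odd: CPU non-idle */
  }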
@@ -284,11 +296,16 @@ void rcu_exit_nohz(void)
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	rdtp->dynticks++;
-	rdtp->dynticks_nesting++;
-	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+	if (rdtp->dynticks_nesting++) {
+		local_irq_restore(flags);
+		return;
+	}
+	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+	atomic_inc(&rdtp->dynticks);
+	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+	smp_mb__after_atomic_inc();  /* See above. */
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	local_irq_restore(flags);
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 }
 
 /**
@@ -302,11 +319,15 @@ void rcu_nmi_enter(void)
 {
 	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-	if (rdtp->dynticks & 0x1)
+	if (rdtp->dynticks_nmi_nesting == 0 &&
+	    (atomic_read(&rdtp->dynticks) & 0x1))
 		return;
-	rdtp->dynticks_nmi++;
-	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	rdtp->dynticks_nmi_nesting++;
+	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+	atomic_inc(&rdtp->dynticks);
+	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
+	smp_mb__after_atomic_inc();  /* See above. */
+	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }
 
 /**
@@ -320,11 +341,14 @@ void rcu_nmi_exit(void)
 {
 	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 
-	if (rdtp->dynticks & 0x1)
+	if (rdtp->dynticks_nmi_nesting == 0 ||
+	    --rdtp->dynticks_nmi_nesting != 0)
 		return;
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	rdtp->dynticks_nmi++;
-	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
+	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
+	smp_mb__before_atomic_inc();  /* See above. */
+	atomic_inc(&rdtp->dynticks);
+	smp_mb__after_atomic_inc();  /* Force delay to next write. */
+	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
 /**
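The NMI pair reuses the same counter: an NMI that interrupts a non-idle CPU (counter already odd) is a no-op as far as RCU is concerned, while one that interrupts a dyntick-idle CPU flips the counter to odd and records its own nesting depth so that only the matching outermost rcu_nmi_exit() flips it back. Continuing the hypothetical model:

  static int dynticks_nmi_nesting;   /* per-CPU */

  static void nmi_enter(void)
  {
          if (dynticks_nmi_nesting == 0 &&
              (atomic_load(&dynticks) & 0x1))
                  return;                  /* CPU already non-idle: no-op */
          dynticks_nmi_nesting++;
          atomic_fetch_add(&dynticks, 1);  /* even -> odd */
  }

  static void nmi_exit(void)
  {
          if (dynticks_nmi_nesting == 0 ||   /* matching enter was a no-op */
              --dynticks_nmi_nesting != 0)   /* still nested */
                  return;
          atomic_fetch_add(&dynticks, 1);    /* odd -> even */
  }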
@@ -335,13 +359,7 @@ void rcu_nmi_exit(void)
  */
 void rcu_irq_enter(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-	if (rdtp->dynticks_nesting++)
-		return;
-	rdtp->dynticks++;
-	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	rcu_exit_nohz();
 }
 
 /**
@@ -353,18 +371,7 @@ void rcu_irq_enter(void)
  */
 void rcu_irq_exit(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-	if (--rdtp->dynticks_nesting)
-		return;
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	rdtp->dynticks++;
-	WARN_ON_ONCE(rdtp->dynticks & 0x1);
-
-	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__this_cpu_read(rcu_sched_data.nxtlist) ||
-	    __this_cpu_read(rcu_bh_data.nxtlist))
-		set_need_resched();
+	rcu_enter_nohz();
 }
 
 #ifdef CONFIG_SMP
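With a single nesting counter covering both process-level and irq-level transitions, rcu_irq_enter() and rcu_irq_exit() reduce to the nohz exit/entry paths, which is exactly what the two hunks above do. In terms of the model:

  /* Interrupt entry/exit is just another non-idle enter/exit. */
  static void irq_enter(void) { exit_nohz(); }   /* leave idle if outermost */
  static void irq_exit(void)  { enter_nohz(); }  /* maybe re-enter idle */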
@@ -376,19 +383,8 @@ void rcu_irq_exit(void)
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
-	int ret;
-	int snap;
-	int snap_nmi;
-
-	snap = rdp->dynticks->dynticks;
-	snap_nmi = rdp->dynticks->dynticks_nmi;
-	smp_mb();  /* Order sampling of snap with end of grace period. */
-	rdp->dynticks_snap = snap;
-	rdp->dynticks_nmi_snap = snap_nmi;
-	ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
-	if (ret)
-		rdp->dynticks_fqs++;
-	return ret;
+	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+	return 0;
 }
 
 /*
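The atomic_add_return(0, ...) above is a read that is also a full-barrier read-modify-write, so the sampled snapshot is ordered against the grace-period machinery on both sides without a separate smp_mb(); returning 0 unconditionally defers the quiescent-state decision to the later recheck in rcu_implicit_dynticks_qs(). A C11 analogue of the idiom (hypothetical helper name; fetch_add returns the old value, but adding zero makes old and new identical):

  #include <stdatomic.h>

  static int snapshot_counter(atomic_int *v)
  {
          return atomic_fetch_add(v, 0);   /* seq_cst RMW; value unchanged */
  }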
@@ -399,16 +395,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
-	long curr;
-	long curr_nmi;
-	long snap;
-	long snap_nmi;
+	unsigned long curr;
+	unsigned long snap;
 
-	curr = rdp->dynticks->dynticks;
-	snap = rdp->dynticks_snap;
-	curr_nmi = rdp->dynticks->dynticks_nmi;
-	snap_nmi = rdp->dynticks_nmi_snap;
-	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+	curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
+	snap = (unsigned long)rdp->dynticks_snap;
 
 	/*
 	 * If the CPU passed through or entered a dynticks idle phase with
@@ -418,8 +409,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * read-side critical section that started before the beginning
 	 * of the current RCU grace period.
 	 */
-	if ((curr != snap || (curr & 0x1) == 0) &&
-	    (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+	if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
 		rdp->dynticks_fqs++;
 		return 1;
 	}
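The rewritten test credits a CPU with a quiescent state if its counter is currently even (dyntick-idle right now) or has advanced by at least two since the snapshot, meaning at least one complete odd->even->odd sojourn through idle. ULONG_CMP_GE is the kernel's wrap-safe comparison; a sketch with the macro written out as rcupdate.h defines it:

  #include <limits.h>

  #define ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (a) - (b))

  static int passed_quiescent_state(unsigned long curr, unsigned long snap)
  {
          return (curr & 0x1) == 0 ||            /* even: idle right now */
                 ULONG_CMP_GE(curr, snap + 2);   /* idled at least once since */
  }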
@@ -841,6 +831,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+
+	/*
+	 * Ensure that all grace-period and pre-grace-period activity
+	 * is seen before the assignment to rsp->completed.
+	 */
+	smp_mb(); /* See above block comment. */
 	rsp->completed = rsp->gpnum;
 	rsp->signaled = RCU_GP_IDLE;
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
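This one barrier stands in for the pair being removed from rcu_process_callbacks() below: everything the grace period did must be globally visible before ->completed advertises the grace period as finished. In C11 terms the pattern resembles a release-style publish (a model only; smp_mb() followed by a plain store is in fact stronger than a release store):

  #include <stdatomic.h>

  static atomic_ulong completed;

  static void report_gp_end(unsigned long gpnum)
  {
          /* ...all grace-period bookkeeping happens before this point... */
          atomic_store_explicit(&completed, gpnum, memory_order_release);
  }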
@@ -1367,25 +1363,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
-	/*
-	 * Memory references from any prior RCU read-side critical sections
-	 * executed by the interrupted code must be seen before any RCU
-	 * grace-period manipulations below.
-	 */
-	smp_mb(); /* See above block comment. */
-
 	__rcu_process_callbacks(&rcu_sched_state,
 				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
 	rcu_preempt_process_callbacks();
 
-	/*
-	 * Memory references from any later RCU read-side critical sections
-	 * executed by the interrupted code must be seen after any RCU
-	 * grace-period manipulations above.
-	 */
-	smp_mb(); /* See above block comment. */
-
 	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
 	rcu_needs_cpu_flush();
 }