path: root/kernel/rcutree.c
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	135
1 file changed, 101 insertions, 34 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7387e46009d9..af0dc3472a4b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -322,16 +322,17 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 }
 
 /*
- * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
+ * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter now is zero,
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
+				 bool user)
 {
 	trace_rcu_dyntick("Start", oldval, 0);
-	if (!is_idle_task(current)) {
+	if (!is_idle_task(current) && !user) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
@@ -348,7 +349,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
 	/*
-	 * The idle task is not permitted to enter the idle loop while
+	 * It is illegal to enter an extended quiescent state while
 	 * in an RCU read-side critical section.
 	 */
 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
@@ -359,19 +360,11 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
359 "Illegal idle entry in RCU-sched read-side critical section."); 360 "Illegal idle entry in RCU-sched read-side critical section.");
360} 361}
361 362
362/** 363/*
363 * rcu_idle_enter - inform RCU that current CPU is entering idle 364 * Enter an RCU extended quiescent state, which can be either the
364 * 365 * idle loop or adaptive-tickless usermode execution.
365 * Enter idle mode, in other words, -leave- the mode in which RCU
366 * read-side critical sections can occur. (Though RCU read-side
367 * critical sections can occur in irq handlers in idle, a possibility
368 * handled by irq_enter() and irq_exit().)
369 *
370 * We crowbar the ->dynticks_nesting field to zero to allow for
371 * the possibility of usermode upcalls having messed up our count
372 * of interrupt nesting level during the prior busy period.
373 */ 366 */
374void rcu_idle_enter(void) 367static void rcu_eqs_enter(bool user)
375{ 368{
376 unsigned long flags; 369 unsigned long flags;
377 long long oldval; 370 long long oldval;
@@ -385,12 +378,54 @@ void rcu_idle_enter(void)
 		rdtp->dynticks_nesting = 0;
 	else
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	rcu_idle_enter_common(rdtp, oldval);
+	rcu_eqs_enter_common(rdtp, oldval, user);
 	local_irq_restore(flags);
 }
+
+/**
+ * rcu_idle_enter - inform RCU that current CPU is entering idle
+ *
+ * Enter idle mode, in other words, -leave- the mode in which RCU
+ * read-side critical sections can occur. (Though RCU read-side
+ * critical sections can occur in irq handlers in idle, a possibility
+ * handled by irq_enter() and irq_exit().)
+ *
+ * We crowbar the ->dynticks_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
+ */
+void rcu_idle_enter(void)
+{
+	rcu_eqs_enter(0);
+}
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
 /**
+ * rcu_user_enter - inform RCU that we are resuming userspace.
+ *
+ * Enter RCU idle mode right before resuming userspace. No use of RCU
+ * is permitted between this call and rcu_user_exit(). This way the
+ * CPU doesn't need to maintain the tick for RCU maintenance purposes
+ * when the CPU runs in userspace.
+ */
+void rcu_user_enter(void)
+{
+	/*
+	 * Some contexts may involve an exception occurring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_enter() rcu_irq_exit()
+	 * This would mess up the ->dynticks_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	rcu_eqs_enter(1);
+}
+
+
+/**
  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
  *
  * Exit from an interrupt handler, which might possibly result in entering
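The in_interrupt() bailout in rcu_user_enter() above exists for exactly the nesting its comment describes. A hypothetical trace of that case, for illustration only (the exception hooks shown are not wired up by this patch):

	/*
	 * CPU runs in userspace extended quiescent state; an irq arrives,
	 * and an exception is then taken from inside the irq handler:
	 *
	 *   rcu_irq_enter()      irq entry: CPU leaves the extended QS
	 *     rcu_user_exit()    exception entry hook: bails via in_interrupt()
	 *     rcu_user_enter()   exception return hook: bails via in_interrupt()
	 *   rcu_irq_exit()       irq exit: CPU re-enters the extended QS
	 *
	 * Without the early return, the inner pair would corrupt
	 * ->dynticks_nesting underneath rcu_irq_enter()/rcu_irq_exit();
	 * the rcu_irq_*() hooks already protect any RCU use inside the
	 * exception handler.
	 */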
@@ -420,18 +455,19 @@ void rcu_irq_exit(void)
 	if (rdtp->dynticks_nesting)
 		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_idle_enter_common(rdtp, oldval);
+		rcu_eqs_enter_common(rdtp, oldval, 1);
 	local_irq_restore(flags);
 }
 
 /*
- * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
+ * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter was previously zero,
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
+				int user)
 {
 	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
@@ -440,7 +476,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
-	if (!is_idle_task(current)) {
+	if (!is_idle_task(current) && !user) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on exit: not idle task",
@@ -452,18 +488,11 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	}
 }
 
-/**
- * rcu_idle_exit - inform RCU that current CPU is leaving idle
- *
- * Exit idle mode, in other words, -enter- the mode in which RCU
- * read-side critical sections can occur.
- *
- * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
- * allow for the possibility of usermode upcalls messing up our count
- * of interrupt nesting level during the busy period that is just
- * now starting.
+/*
+ * Exit an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
  */
-void rcu_idle_exit(void)
+static void rcu_eqs_exit(bool user)
 {
 	unsigned long flags;
 	struct rcu_dynticks *rdtp;
@@ -477,12 +506,50 @@ void rcu_idle_exit(void)
 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 	else
 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_idle_exit_common(rdtp, oldval);
+	rcu_eqs_exit_common(rdtp, oldval, user);
 	local_irq_restore(flags);
 }
+
+/**
+ * rcu_idle_exit - inform RCU that current CPU is leaving idle
+ *
+ * Exit idle mode, in other words, -enter- the mode in which RCU
+ * read-side critical sections can occur.
+ *
+ * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
+ * allow for the possibility of usermode upcalls messing up our count
+ * of interrupt nesting level during the busy period that is just
+ * now starting.
+ */
+void rcu_idle_exit(void)
+{
+	rcu_eqs_exit(0);
+}
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
 /**
+ * rcu_user_exit - inform RCU that we are exiting userspace.
+ *
+ * Exit RCU idle mode while entering the kernel because it can
+ * run an RCU read-side critical section anytime.
+ */
+void rcu_user_exit(void)
+{
+	/*
+	 * Some contexts may involve an exception occurring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_enter() rcu_irq_exit()
+	 * This would mess up the ->dynticks_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	rcu_eqs_exit(1);
+}
+
+/**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
  *
  * Enter an interrupt handler, which might possibly result in exiting
@@ -515,7 +582,7 @@ void rcu_irq_enter(void)
 	if (oldval)
 		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_idle_exit_common(rdtp, oldval);
+		rcu_eqs_exit_common(rdtp, oldval, 1);
 	local_irq_restore(flags);
 }
 
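For context, a minimal sketch of how an architecture's kernel entry/exit path might bracket userspace execution with the new pair. The hook names user_exit_hook()/user_enter_hook() and their placement are placeholders for illustration only; this patch adds rcu_user_enter()/rcu_user_exit() themselves, assuming their declarations live in <linux/rcupdate.h>:

#include <linux/rcupdate.h>

/* Hypothetical arch glue called on entry to the kernel from userspace. */
void user_exit_hook(void)
{
	rcu_user_exit();	/* back in the kernel: RCU read-side sections are legal again */
}

/* Hypothetical arch glue called just before resuming userspace. */
void user_enter_hook(void)
{
	rcu_user_enter();	/* resuming userspace: enter the RCU extended quiescent state */
}

Between the two hooks the CPU sits in an extended quiescent state, so RCU no longer needs the scheduling-clock tick on that CPU while it runs in userspace, which is the point of the adaptive-tickless work this prepares for.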