author     Ingo Molnar <mingo@kernel.org>    2012-09-27 02:09:38 -0400
committer  Ingo Molnar <mingo@kernel.org>    2012-09-27 02:09:38 -0400
commit     fa34da708cbe1e2d9a2ee7fc68ea8fccbf095d12 (patch)
tree       b20795decb064af75aae8f9f6af77806b1a4769a /kernel
parent     a9b86fab4b0a36fc4cd2712a07259c2c0e769742 (diff)
parent     cb349ca95407cbc11424d5e9fc7c8e700709041b (diff)
Merge branch 'rcu/idle' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull the RCU adaptive-idle feature from Paul E. McKenney:
"This series adds RCU APIs that allow non-idle tasks to enter RCU idle
mode and provides x86 code to make use of them, allowing RCU to treat
user-mode execution as an extended quiescent state when the new
RCU_USER_QS kernel configuration parameter is specified. Work is in
progress to port this to a few other architectures, but that work is
not part of this series."
Signed-off-by: Ingo Molnar <mingo@kernel.org>
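For readers new to the API, here is a minimal sketch of how an architecture might bracket user-mode execution with the new calls. The hook names below (enter_from_user_mode()/exit_to_user_mode()) are hypothetical placeholders, not functions added by this series; only rcu_user_enter() and rcu_user_exit() come from the merged code, and the real wiring lives in the x86 patches that accompany this pull.

#include <linux/rcupdate.h>

/* Called on every entry into the kernel from userspace (syscall, exception). */
static inline void enter_from_user_mode(void)		/* hypothetical hook name */
{
	rcu_user_exit();	/* kernel code may use RCU read-side sections again */
}

/* Called right before returning to userspace. */
static inline void exit_to_user_mode(void)		/* hypothetical hook name */
{
	rcu_user_enter();	/* user-mode execution becomes an RCU extended QS */
}

Between rcu_user_enter() and rcu_user_exit() no use of RCU is permitted, so the CPU need not keep the tick running for RCU's benefit while it executes in userspace; interrupts taken in that window remain covered by the existing rcu_irq_enter()/rcu_irq_exit() pair.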
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c         210
-rw-r--r--  kernel/rcutree.h           4
-rw-r--r--  kernel/rcutree_plugin.h   20
-rw-r--r--  kernel/sched/core.c       17
4 files changed, 224 insertions, 27 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7387e46009d9..4fb2376ddf06 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -206,6 +206,9 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
+#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
+	.ignore_user_qs = true,
+#endif
 };
 
 static int blimit = 10;		/* Maximum callbacks per rcu_do_batch. */
@@ -322,16 +325,17 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 }
 
 /*
- * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
+ * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter now is zero,
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
+				 bool user)
 {
 	trace_rcu_dyntick("Start", oldval, 0);
-	if (!is_idle_task(current)) {
+	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
@@ -348,7 +352,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
 	/*
-	 * The idle task is not permitted to enter the idle loop while
+	 * It is illegal to enter an extended quiescent state while
 	 * in an RCU read-side critical section.
 	 */
 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
@@ -359,6 +363,25 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 			   "Illegal idle entry in RCU-sched read-side critical section.");
 }
 
+/*
+ * Enter an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
+ */
+static void rcu_eqs_enter(bool user)
+{
+	long long oldval;
+	struct rcu_dynticks *rdtp;
+
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
+	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		rdtp->dynticks_nesting = 0;
+	else
+		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
+	rcu_eqs_enter_common(rdtp, oldval, user);
+}
+
 /**
  * rcu_idle_enter - inform RCU that current CPU is entering idle
  *
@@ -374,21 +397,70 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 void rcu_idle_enter(void)
 {
 	unsigned long flags;
-	long long oldval;
+
+	local_irq_save(flags);
+	rcu_eqs_enter(false);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+#ifdef CONFIG_RCU_USER_QS
+/**
+ * rcu_user_enter - inform RCU that we are resuming userspace.
+ *
+ * Enter RCU idle mode right before resuming userspace. No use of RCU
+ * is permitted between this call and rcu_user_exit(). This way the
+ * CPU doesn't need to maintain the tick for RCU maintenance purposes
+ * when the CPU runs in userspace.
+ */
+void rcu_user_enter(void)
+{
+	unsigned long flags;
 	struct rcu_dynticks *rdtp;
 
+	/*
+	 * Some contexts may involve an exception occuring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	WARN_ON_ONCE(!current->mm);
+
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
-		rdtp->dynticks_nesting = 0;
-	else
-		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	rcu_idle_enter_common(rdtp, oldval);
+	if (!rdtp->ignore_user_qs && !rdtp->in_user) {
+		rdtp->in_user = true;
+		rcu_eqs_enter(true);
+	}
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+/**
+ * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace
+ * after the current irq returns.
+ *
+ * This is similar to rcu_user_enter() but in the context of a non-nesting
+ * irq. After this call, RCU enters into idle mode when the interrupt
+ * returns.
+ */
+void rcu_user_enter_after_irq(void)
+{
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	/* Ensure this irq is interrupting a non-idle RCU state. */
+	WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK));
+	rdtp->dynticks_nesting = 1;
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_RCU_USER_QS */
 
 /**
  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
@@ -420,18 +492,19 @@ void rcu_irq_exit(void)
 	if (rdtp->dynticks_nesting)
 		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_idle_enter_common(rdtp, oldval);
+		rcu_eqs_enter_common(rdtp, oldval, true);
 	local_irq_restore(flags);
 }
 
 /*
- * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
+ * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter was previously zero,
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
+				int user)
 {
 	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
@@ -440,7 +513,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
-	if (!is_idle_task(current)) {
+	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on exit: not idle task",
@@ -452,6 +525,25 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	}
 }
 
+/*
+ * Exit an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
+ */
+static void rcu_eqs_exit(bool user)
+{
+	struct rcu_dynticks *rdtp;
+	long long oldval;
+
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	WARN_ON_ONCE(oldval < 0);
+	if (oldval & DYNTICK_TASK_NEST_MASK)
+		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+	else
+		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+	rcu_eqs_exit_common(rdtp, oldval, user);
+}
+
 /**
  * rcu_idle_exit - inform RCU that current CPU is leaving idle
  *
@@ -466,21 +558,67 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 void rcu_idle_exit(void)
 {
 	unsigned long flags;
+
+	local_irq_save(flags);
+	rcu_eqs_exit(false);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(rcu_idle_exit);
+
+#ifdef CONFIG_RCU_USER_QS
+/**
+ * rcu_user_exit - inform RCU that we are exiting userspace.
+ *
+ * Exit RCU idle mode while entering the kernel because it can
+ * run a RCU read side critical section anytime.
+ */
+void rcu_user_exit(void)
+{
+	unsigned long flags;
 	struct rcu_dynticks *rdtp;
-	long long oldval;
+
+	/*
+	 * Some contexts may involve an exception occuring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	WARN_ON_ONCE(oldval < 0);
-	if (oldval & DYNTICK_TASK_NEST_MASK)
-		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-	else
-		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_idle_exit_common(rdtp, oldval);
+	if (rdtp->in_user) {
+		rdtp->in_user = false;
+		rcu_eqs_exit(true);
+	}
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_exit);
+
+/**
+ * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace
+ * idle mode after the current non-nesting irq returns.
+ *
+ * This is similar to rcu_user_exit() but in the context of an irq.
+ * This is called when the irq has interrupted a userspace RCU idle mode
+ * context. When the current non-nesting interrupt returns after this call,
+ * the CPU won't restore the RCU idle mode.
+ */
+void rcu_user_exit_after_irq(void)
+{
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	/* Ensure we are interrupting an RCU idle mode. */
+	WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK);
+	rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE;
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_RCU_USER_QS */
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -515,7 +653,7 @@ void rcu_irq_enter(void)
 	if (oldval)
 		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_idle_exit_common(rdtp, oldval);
+		rcu_eqs_exit_common(rdtp, oldval, true);
 	local_irq_restore(flags);
 }
 
@@ -579,6 +717,21 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
+#ifdef CONFIG_RCU_USER_QS
+void rcu_user_hooks_switch(struct task_struct *prev,
+			   struct task_struct *next)
+{
+	struct rcu_dynticks *rdtp;
+
+	/* Interrupts are disabled in context switch */
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	if (!rdtp->ignore_user_qs) {
+		clear_tsk_thread_flag(prev, TIF_NOHZ);
+		set_tsk_thread_flag(next, TIF_NOHZ);
+	}
+}
+#endif /* #ifdef CONFIG_RCU_USER_QS */
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
@@ -2473,6 +2626,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+#ifdef CONFIG_RCU_USER_QS
+	WARN_ON_ONCE(rdp->dynticks->in_user);
+#endif
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7576fd4d8ce6..5faf05d68326 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -102,6 +102,10 @@ struct rcu_dynticks {
 				    /* idle-period nonlazy_posted snapshot. */
 	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+#ifdef CONFIG_RCU_USER_QS
+	bool ignore_user_qs;	    /* Treat userspace as extended QS or not */
+	bool in_user;		    /* Is the CPU in userland from RCU POV? */
+#endif
 };
 
 /* RCU's kthread states for tracing. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 9c71c1b18e03..f92115488187 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1757,6 +1757,26 @@ static void rcu_prepare_for_idle(int cpu)
 	if (!tne)
 		return;
 
+	/* Adaptive-tick mode, where usermode execution is idle to RCU. */
+	if (!is_idle_task(current)) {
+		rdtp->dyntick_holdoff = jiffies - 1;
+		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+			trace_rcu_prep_idle("User dyntick with callbacks");
+			rdtp->idle_gp_timer_expires =
+				round_up(jiffies + RCU_IDLE_GP_DELAY,
+					 RCU_IDLE_GP_DELAY);
+		} else if (rcu_cpu_has_callbacks(cpu)) {
+			rdtp->idle_gp_timer_expires =
+				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
+			trace_rcu_prep_idle("User dyntick with lazy callbacks");
+		} else {
+			return;
+		}
+		tp = &rdtp->idle_gp_timer;
+		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+		return;
+	}
+
 	/*
 	 * If this is an idle re-entry, for example, due to use of
 	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1a48cdbc8631..3c4dec0594d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2081,6 +2081,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
 	/* Here we just switch the register state and the stack. */
+	rcu_switch(prev, next);
 	switch_to(prev, next, prev);
 
 	barrier();
@@ -3468,6 +3469,21 @@ asmlinkage void __sched schedule(void)
 }
 EXPORT_SYMBOL(schedule);
 
+#ifdef CONFIG_RCU_USER_QS
+asmlinkage void __sched schedule_user(void)
+{
+	/*
+	 * If we come here after a random call to set_need_resched(),
+	 * or we have been woken up remotely but the IPI has not yet arrived,
+	 * we haven't yet exited the RCU idle mode. Do it here manually until
+	 * we find a better solution.
+	 */
+	rcu_user_exit();
+	schedule();
+	rcu_user_enter();
+}
+#endif
+
 /**
  * schedule_preempt_disabled - called with preemption disabled
  *
@@ -3569,6 +3585,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
+	rcu_user_exit();
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
