Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	64
1 file changed, 2 insertions, 62 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7733eb56e156..e441b77b614e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -207,9 +207,6 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
-#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
-	.ignore_user_qs = true,
-#endif
 };
 
 static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
@@ -420,29 +417,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_user_enter(void)
 {
-	unsigned long flags;
-	struct rcu_dynticks *rdtp;
-
-	/*
-	 * Some contexts may involve an exception occuring in an irq,
-	 * leading to that nesting:
-	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-	 * helpers are enough to protect RCU uses inside the exception. So
-	 * just return immediately if we detect we are in an IRQ.
-	 */
-	if (in_interrupt())
-		return;
-
-	WARN_ON_ONCE(!current->mm);
-
-	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (!rdtp->ignore_user_qs && !rdtp->in_user) {
-		rdtp->in_user = true;
-		rcu_eqs_enter(true);
-	}
-	local_irq_restore(flags);
+	rcu_eqs_enter(1);
 }
 
 /**
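
The comment removed above (and again in the next hunk) records why the old rcu_user_enter()/rcu_user_exit() bailed out from interrupt context: an exception taken inside an IRQ yields the nesting rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit(), whose enter/exit pairs do not balance. A toy user-space sketch (hypothetical names, not kernel code) of how such unpaired calls skew a naive nesting counter:

	/* Toy model of the dyntick_nesting hazard described in the removed
	 * comment; eqs_enter()/eqs_exit() stand in for the real RCU
	 * extended-quiescent-state hooks. */
	#include <stdio.h>

	static int dynticks_nesting = 1;	/* 1 == in kernel, 0 == extended QS */

	static void eqs_enter(void) { dynticks_nesting--; }	/* enter user/idle */
	static void eqs_exit(void)  { dynticks_nesting++; }	/* back to kernel */

	int main(void)
	{
		/* Balanced pair: counter returns to its initial value. */
		eqs_enter();
		eqs_exit();
		printf("balanced: %d\n", dynticks_nesting);	/* prints 1 */

		/* The comment's scenario: two rcu_user_exit() calls nested
		 * inside an IRQ, with no matching enters between them. */
		eqs_exit();
		eqs_exit();
		printf("skewed:   %d\n", dynticks_nesting);	/* prints 3, not 1 */
		return 0;
	}

Hence the old code simply returned when in_interrupt() was true and let the rcu_irq_*() helpers protect RCU uses inside the exception; after this patch the one-line wrappers no longer perform those checks themselves.
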
@@ -579,27 +554,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
  */
 void rcu_user_exit(void)
 {
-	unsigned long flags;
-	struct rcu_dynticks *rdtp;
-
-	/*
-	 * Some contexts may involve an exception occuring in an irq,
-	 * leading to that nesting:
-	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-	 * helpers are enough to protect RCU uses inside the exception. So
-	 * just return immediately if we detect we are in an IRQ.
-	 */
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (rdtp->in_user) {
-		rdtp->in_user = false;
-		rcu_eqs_exit(true);
-	}
-	local_irq_restore(flags);
+	rcu_eqs_exit(1);
 }
 
 /**
@@ -722,21 +677,6 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
-#ifdef CONFIG_RCU_USER_QS
-void rcu_user_hooks_switch(struct task_struct *prev,
-			   struct task_struct *next)
-{
-	struct rcu_dynticks *rdtp;
-
-	/* Interrupts are disabled in context switch */
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (!rdtp->ignore_user_qs) {
-		clear_tsk_thread_flag(prev, TIF_NOHZ);
-		set_tsk_thread_flag(next, TIF_NOHZ);
-	}
-}
-#endif /* #ifdef CONFIG_RCU_USER_QS */
-
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
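
For reference, the rcu_user_hooks_switch() deleted in the last hunk handed TIF_NOHZ from the outgoing task to the incoming one on each context switch, so exactly the task running on the CPU carried the flag. A minimal user-space sketch of that hand-off pattern (struct task and its flag field are illustrative stand-ins for task_struct and its thread flags):

	#include <stdbool.h>
	#include <stdio.h>

	struct task { const char *name; bool tif_nohz; };

	/* Mirrors the removed hook: clear the flag on prev, set it on next,
	 * so the flag follows whichever task occupies the CPU. */
	static void hooks_switch(struct task *prev, struct task *next)
	{
		prev->tif_nohz = false;	/* clear_tsk_thread_flag(prev, TIF_NOHZ) */
		next->tif_nohz = true;	/* set_tsk_thread_flag(next, TIF_NOHZ) */
	}

	int main(void)
	{
		struct task a = { "a", true }, b = { "b", false };
		hooks_switch(&a, &b);	/* context switch: a -> b */
		printf("a=%d b=%d\n", a.tif_nohz, b.tif_nohz);	/* a=0 b=1 */
		return 0;
	}
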