author		Ingo Molnar <mingo@kernel.org>	2014-11-20 02:57:58 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-11-20 02:57:58 -0500
commit		d360b78f99e5d1724279644c8eb51d5cf0de4027 (patch)
tree		011c67bd0654b141e8f7f9fe1d8e1338b05663ba /kernel
parent		fc14f9c1272f62c3e8d01300f52467c0d9af50f9 (diff)
parent		9ea6c5885681e3d9ce9844ba9dc57371a5cfc6d2 (diff)
Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:
- Streamline RCU's use of per-CPU variables, shifting from "cpu" arguments to functions to "this_"-style per-CPU variable accessors (a minimal sketch of this conversion follows the sign-off below).
- Signal-handling RCU updates.
- Real-time updates.
- Torture-test updates.
- Miscellaneous fixes.
- Documentation updates.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
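
The bulk of the diff below is the per-CPU accessor conversion named in the first bullet. As a concrete before/after pair, taken from the kernel/rcu/tree.c hunk in this very series (shown here only for orientation; both forms appear in the diff itself):

/* Before: callers pass a CPU number down and the function indexes
 * that CPU's data explicitly. */
static int rcu_pending(int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
			return 1;
	return 0;
}

/* After: the function already runs on the CPU of interest with
 * preemption disabled, so it can use a this_cpu accessor; the cpu
 * argument and the smp_processor_id() calls in callers go away. */
static int rcu_pending(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
			return 1;
	return 0;
}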
Diffstat (limited to 'kernel')
 kernel/cpu.c             |  19
 kernel/fork.c            |   5
 kernel/rcu/Makefile      |   2
 kernel/rcu/rcu.h         |   2
 kernel/rcu/rcutorture.c  |   1
 kernel/rcu/tiny.c        |   6
 kernel/rcu/tree.c        |  97
 kernel/rcu/tree.h        |  22
 kernel/rcu/tree_plugin.h | 111
 kernel/rcu/update.c      |  89
 kernel/sched/core.c      |   2
 kernel/signal.c          |  42
 kernel/softirq.c         |   2
 kernel/time/tick-sched.c |   2
 kernel/time/timer.c      |   3
 15 files changed, 268 insertions(+), 137 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 90a3d017b90c..5d220234b3ca 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -86,6 +86,16 @@ static struct {
 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 
+static void apply_puts_pending(int max)
+{
+	int delta;
+
+	if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
+		delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
+		cpu_hotplug.refcount -= delta;
+	}
+}
+
 void get_online_cpus(void)
 {
 	might_sleep();
@@ -93,6 +103,7 @@ void get_online_cpus(void)
 		return;
 	cpuhp_lock_acquire_read();
 	mutex_lock(&cpu_hotplug.lock);
+	apply_puts_pending(65536);
 	cpu_hotplug.refcount++;
 	mutex_unlock(&cpu_hotplug.lock);
 }
@@ -105,6 +116,7 @@ bool try_get_online_cpus(void)
 	if (!mutex_trylock(&cpu_hotplug.lock))
 		return false;
 	cpuhp_lock_acquire_tryread();
+	apply_puts_pending(65536);
 	cpu_hotplug.refcount++;
 	mutex_unlock(&cpu_hotplug.lock);
 	return true;
@@ -161,12 +173,7 @@ void cpu_hotplug_begin(void)
 	cpuhp_lock_acquire();
 	for (;;) {
 		mutex_lock(&cpu_hotplug.lock);
-		if (atomic_read(&cpu_hotplug.puts_pending)) {
-			int delta;
-
-			delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
-			cpu_hotplug.refcount -= delta;
-		}
+		apply_puts_pending(1);
 		if (likely(!cpu_hotplug.refcount))
 			break;
 		__set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/kernel/fork.c b/kernel/fork.c
index 9b7d746d6d62..9ca84189cfc2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1022,11 +1022,14 @@ void __cleanup_sighand(struct sighand_struct *sighand)
 {
 	if (atomic_dec_and_test(&sighand->count)) {
 		signalfd_cleanup(sighand);
+		/*
+		 * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
+		 * without an RCU grace period, see __lock_task_sighand().
+		 */
 		kmem_cache_free(sighand_cachep, sighand);
 	}
 }
 
-
 /*
  * Initialize POSIX timer handling for a thread group.
  */
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index 807ccfbf69b3..e6fae503d1bc 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,6 +1,6 @@
 obj-y += update.o srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += tree.o
-obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o
+obj-$(CONFIG_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
 obj-$(CONFIG_TINY_RCU) += tiny.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index ff1a6de62f17..07bb02eda844 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -135,4 +135,6 @@ int rcu_jiffies_till_stall_check(void);
  */
 #define TPS(x)  tracepoint_string(x)
 
+void rcu_early_boot_tests(void);
+
 #endif /* __LINUX_RCU_H */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 240fa9094f83..4d559baf06e0 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -812,6 +812,7 @@ rcu_torture_cbflood(void *arg)
 		cur_ops->cb_barrier();
 		stutter_wait("rcu_torture_cbflood");
 	} while (!torture_must_stop());
+	vfree(rhp);
 	torture_kthread_stopping("rcu_torture_cbflood");
 	return 0;
 }
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index c0623fc47125..0db5649f8817 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -247,7 +247,7 @@ void rcu_bh_qs(void)
  * be called from hardirq context.  It is normally called from the
  * scheduling-clock interrupt.
  */
-void rcu_check_callbacks(int cpu, int user)
+void rcu_check_callbacks(int user)
 {
 	RCU_TRACE(check_cpu_stalls());
 	if (user || rcu_is_cpu_rrupt_from_idle())
@@ -380,7 +380,9 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
-void rcu_init(void)
+void __init rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+
+	rcu_early_boot_tests();
 }
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9815447d22e0..7680fc275036 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -105,7 +105,7 @@ struct rcu_state sname##_state = { \
 	.name = RCU_STATE_NAME(sname), \
 	.abbr = sabbr, \
 }; \
-DEFINE_PER_CPU(struct rcu_data, sname##_data)
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
@@ -152,19 +152,6 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
-#ifdef CONFIG_RCU_BOOST
-
-/*
- * Control variables for per-CPU and per-rcu_node kthreads.  These
- * handle all flavors of RCU.
- */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -286,11 +273,11 @@ static void rcu_momentary_dyntick_idle(void)
  * and requires special handling for preemptible RCU.
  * The caller must have disabled preemption.
  */
-void rcu_note_context_switch(int cpu)
+void rcu_note_context_switch(void)
 {
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs();
-	rcu_preempt_note_context_switch(cpu);
+	rcu_preempt_note_context_switch();
 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
 	trace_rcu_utilization(TPS("End context switch"));
@@ -325,7 +312,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 				  unsigned long *maxj),
 			 bool *isidle, unsigned long *maxj);
 static void force_quiescent_state(struct rcu_state *rsp);
-static int rcu_pending(int cpu);
+static int rcu_pending(void);
 
 /*
  * Return the number of RCU-sched batches processed thus far for debug & stats.
@@ -510,11 +497,11 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
-				 bool user)
+static void rcu_eqs_enter_common(long long oldval, bool user)
 {
 	struct rcu_state *rsp;
 	struct rcu_data *rdp;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
@@ -531,7 +518,7 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 		rdp = this_cpu_ptr(rsp->rda);
 		do_nocb_deferred_wakeup(rdp);
 	}
-	rcu_prepare_for_idle(smp_processor_id());
+	rcu_prepare_for_idle();
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
@@ -565,7 +552,7 @@ static void rcu_eqs_enter(bool user)
 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
 		rdtp->dynticks_nesting = 0;
-		rcu_eqs_enter_common(rdtp, oldval, user);
+		rcu_eqs_enter_common(oldval, user);
 	} else {
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
 	}
@@ -589,7 +576,7 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
-	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
+	rcu_sysidle_enter(0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -639,8 +626,8 @@ void rcu_irq_exit(void)
 	if (rdtp->dynticks_nesting)
 		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
 	else
-		rcu_eqs_enter_common(rdtp, oldval, true);
-	rcu_sysidle_enter(rdtp, 1);
+		rcu_eqs_enter_common(oldval, true);
+	rcu_sysidle_enter(1);
 	local_irq_restore(flags);
 }
 
@@ -651,16 +638,17 @@ void rcu_irq_exit(void)
  * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
-static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
-				int user)
+static void rcu_eqs_exit_common(long long oldval, int user)
 {
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
 	rcu_dynticks_task_exit();
 	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 	smp_mb__after_atomic();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
-	rcu_cleanup_after_idle(smp_processor_id());
+	rcu_cleanup_after_idle();
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
@@ -691,7 +679,7 @@ static void rcu_eqs_exit(bool user)
 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 	} else {
 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-		rcu_eqs_exit_common(rdtp, oldval, user);
+		rcu_eqs_exit_common(oldval, user);
 	}
 }
 
@@ -712,7 +700,7 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
-	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
+	rcu_sysidle_exit(0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -763,8 +751,8 @@ void rcu_irq_enter(void)
 	if (oldval)
 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 	else
-		rcu_eqs_exit_common(rdtp, oldval, true);
-	rcu_sysidle_exit(rdtp, 1);
+		rcu_eqs_exit_common(oldval, true);
+	rcu_sysidle_exit(1);
 	local_irq_restore(flags);
 }
 
@@ -2387,7 +2375,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 * invoked from the scheduling-clock interrupt.  If rcu_pending returns
 * false, there is no point in invoking rcu_check_callbacks().
 */
-void rcu_check_callbacks(int cpu, int user)
+void rcu_check_callbacks(int user)
 {
 	trace_rcu_utilization(TPS("Start scheduler-tick"));
 	increment_cpu_stall_ticks();
@@ -2419,8 +2407,8 @@ void rcu_check_callbacks(int cpu, int user)
 
 		rcu_bh_qs();
 	}
-	rcu_preempt_check_callbacks(cpu);
-	if (rcu_pending(cpu))
+	rcu_preempt_check_callbacks();
+	if (rcu_pending())
 		invoke_rcu_core();
 	if (user)
 		rcu_note_voluntary_context_switch(current);
@@ -2963,6 +2951,9 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
 */
 void synchronize_sched_expedited(void)
 {
+	cpumask_var_t cm;
+	bool cma = false;
+	int cpu;
 	long firstsnap, s, snap;
 	int trycount = 0;
 	struct rcu_state *rsp = &rcu_sched_state;
@@ -2997,11 +2988,26 @@ void synchronize_sched_expedited(void)
 	}
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
+	/* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
+	cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
+	if (cma) {
+		cpumask_copy(cm, cpu_online_mask);
+		cpumask_clear_cpu(raw_smp_processor_id(), cm);
+		for_each_cpu(cpu, cm) {
+			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+			if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+				cpumask_clear_cpu(cpu, cm);
+		}
+		if (cpumask_weight(cm) == 0)
+			goto all_cpus_idle;
+	}
+
 	/*
 	 * Each pass through the following loop attempts to force a
 	 * context switch on each CPU.
 	 */
-	while (try_stop_cpus(cpu_online_mask,
+	while (try_stop_cpus(cma ? cm : cpu_online_mask,
 			     synchronize_sched_expedited_cpu_stop,
 			     NULL) == -EAGAIN) {
 		put_online_cpus();
@@ -3013,6 +3019,7 @@ void synchronize_sched_expedited(void)
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone1);
+			free_cpumask_var(cm);
 			return;
 		}
 
@@ -3022,6 +3029,7 @@ void synchronize_sched_expedited(void)
 		} else {
 			wait_rcu_gp(call_rcu_sched);
 			atomic_long_inc(&rsp->expedited_normal);
+			free_cpumask_var(cm);
 			return;
 		}
 
@@ -3031,6 +3039,7 @@ void synchronize_sched_expedited(void)
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone2);
+			free_cpumask_var(cm);
 			return;
 		}
 
@@ -3045,6 +3054,7 @@ void synchronize_sched_expedited(void)
 			/* CPU hotplug operation in flight, use normal GP. */
 			wait_rcu_gp(call_rcu_sched);
 			atomic_long_inc(&rsp->expedited_normal);
+			free_cpumask_var(cm);
 			return;
 		}
 		snap = atomic_long_read(&rsp->expedited_start);
@@ -3052,6 +3062,9 @@ void synchronize_sched_expedited(void)
 	}
 	atomic_long_inc(&rsp->expedited_stoppedcpus);
 
+all_cpus_idle:
+	free_cpumask_var(cm);
+
 	/*
 	 * Everyone up to our most recent fetch is covered by our grace
 	 * period.  Update the counter, but only if our work is still
@@ -3143,12 +3156,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
-static int rcu_pending(int cpu)
+static int rcu_pending(void)
 {
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
-		if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
+		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
 			return 1;
 	return 0;
 }
@@ -3158,7 +3171,7 @@ static int rcu_pending(int cpu)
 * non-NULL, store an indication of whether all callbacks are lazy.
 * (If there are no callbacks, all of them are deemed to be lazy.)
 */
-static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
+static int __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
 {
 	bool al = true;
 	bool hc = false;
@@ -3166,7 +3179,7 @@ static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
+		rdp = this_cpu_ptr(rsp->rda);
 		if (!rdp->nxtlist)
 			continue;
 		hc = true;
@@ -3485,8 +3498,10 @@ static int rcu_cpu_notify(struct notifier_block *self,
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		for_each_rcu_flavor(rsp)
+		for_each_rcu_flavor(rsp) {
 			rcu_cleanup_dead_cpu(cpu, rsp);
+			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+		}
 		break;
 	default:
 		break;
@@ -3766,6 +3781,8 @@ void __init rcu_init(void)
 	pm_notifier(rcu_pm_notify, 0);
 	for_each_online_cpu(cpu)
 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+
+	rcu_early_boot_tests();
 }
 
 #include "tree_plugin.h"
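
One detail of the synchronize_sched_expedited() change above deserves a note: the new cpumask pass skips CPUs whose rcu_dynticks counter is even, because an even value means the CPU is in an extended quiescent state (idle or nohz) and therefore already counts as quiescent for the expedited grace period. A hypothetical helper, not part of this patch, that captures just that test:

/* Illustrative sketch only: true if @cpu is currently in an extended
 * quiescent state.  atomic_add_return(0, ...) is a fully ordered read
 * of ->dynticks, matching the expression used in the hunk above. */
static bool cpu_in_eqs_sketch(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	return !(atomic_add_return(0, &rdtp->dynticks) & 0x1);
}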
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bbdc45d8d74f..8e7b1843896e 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -139,7 +139,7 @@ struct rcu_node {
 	unsigned long expmask;	/* Groups that have ->blkd_tasks */
 				/*  elements that need to drain to allow the */
 				/*  current expedited grace period to */
-				/*  complete (only for TREE_PREEMPT_RCU). */
+				/*  complete (only for PREEMPT_RCU). */
 	unsigned long qsmaskinit;
 				/* Per-GP initial value for qsmask & expmask. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
@@ -530,10 +530,10 @@ DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);
 extern struct rcu_state rcu_bh_state;
 DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 extern struct rcu_state rcu_preempt_state;
 DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -547,7 +547,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
-static void rcu_preempt_note_context_switch(int cpu);
+static void rcu_preempt_note_context_switch(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
@@ -561,12 +561,12 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 				     struct rcu_node *rnp,
 				     struct rcu_data *rdp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_preempt_check_callbacks(int cpu);
+static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			       bool wake);
-#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -579,8 +579,8 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 #endif /* #ifdef CONFIG_RCU_BOOST */
 static void __init rcu_spawn_boost_kthreads(void);
 static void rcu_prepare_kthreads(int cpu);
-static void rcu_cleanup_after_idle(int cpu);
-static void rcu_prepare_for_idle(int cpu);
+static void rcu_cleanup_after_idle(void);
+static void rcu_prepare_for_idle(void);
 static void rcu_idle_count_callbacks_posted(void);
 static void print_cpu_stall_info_begin(void);
 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
@@ -606,8 +606,8 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
-static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
-static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
+static void rcu_sysidle_enter(int irq);
+static void rcu_sysidle_exit(int irq);
 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 				  unsigned long *maxj);
 static bool is_sysidle_rcu_state(struct rcu_state *rsp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index c1d7f27bd38f..3ec85cb5d544 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -30,14 +30,24 @@
 #include <linux/smpboot.h>
 #include "../time/tick-internal.h"
 
-#define RCU_KTHREAD_PRIO 1
-
 #ifdef CONFIG_RCU_BOOST
+
 #include "../locking/rtmutex_common.h"
-#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
-#else
-#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
-#endif
+
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
+/*
+ * Control variables for per-CPU and per-rcu_node kthreads.  These
+ * handle all flavors of RCU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+DEFINE_PER_CPU(char, rcu_cpu_has_work);
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
@@ -72,9 +82,6 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
 	pr_info("\tRCU torture testing starts during boot.\n");
 #endif
-#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
-	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
-#endif
 #if defined(CONFIG_RCU_CPU_STALL_INFO)
 	pr_info("\tAdditional per-CPU info printed with stalls.\n");
 #endif
@@ -85,9 +92,12 @@ static void __init rcu_bootup_announce_oddness(void)
 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
+#ifdef CONFIG_RCU_BOOST
+	pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
+#endif
 }
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 
 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 static struct rcu_state *rcu_state_p = &rcu_preempt_state;
@@ -156,7 +166,7 @@ static void rcu_preempt_qs(void)
 *
 * Caller must disable preemption.
 */
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
 {
 	struct task_struct *t = current;
 	unsigned long flags;
@@ -167,7 +177,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	    !t->rcu_read_unlock_special.b.blocked) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
-		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+		rdp = this_cpu_ptr(rcu_preempt_state.rda);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		smp_mb__after_unlock_lock();
@@ -415,8 +425,6 @@ void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
-
 /*
  * Dump detailed information for all tasks blocking the current RCU
  * grace period on the specified rcu_node structure.
@@ -451,14 +459,6 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 		rcu_print_detail_task_stall_rnp(rnp);
 }
 
-#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
-
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
-
 #ifdef CONFIG_RCU_CPU_STALL_INFO
 
 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
@@ -621,7 +621,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 *
 * Caller must disable hard irqs.
 */
-static void rcu_preempt_check_callbacks(int cpu)
+static void rcu_preempt_check_callbacks(void)
 {
 	struct task_struct *t = current;
 
@@ -630,8 +630,8 @@ static void rcu_preempt_check_callbacks(int cpu)
 		return;
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
-	    per_cpu(rcu_preempt_data, cpu).qs_pending &&
-	    !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
+	    __this_cpu_read(rcu_preempt_data.qs_pending) &&
+	    !__this_cpu_read(rcu_preempt_data.passed_quiesce))
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
@@ -919,7 +919,7 @@ void exit_rcu(void)
 	__rcu_read_unlock();
 }
 
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state_p = &rcu_sched_state;
 
@@ -945,7 +945,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
 {
 }
 
@@ -1017,7 +1017,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
-static void rcu_preempt_check_callbacks(int cpu)
+static void rcu_preempt_check_callbacks(void)
 {
 }
 
@@ -1070,7 +1070,7 @@ void exit_rcu(void)
 {
 }
 
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
 
@@ -1326,7 +1326,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	smp_mb__after_unlock_lock();
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	sp.sched_priority = RCU_BOOST_PRIO;
+	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
 	return 0;
@@ -1343,7 +1343,7 @@ static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
 	struct sched_param sp;
 
-	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
@@ -1512,10 +1512,10 @@ static void rcu_prepare_kthreads(int cpu)
 * any flavor of RCU.
 */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+int rcu_needs_cpu(unsigned long *delta_jiffies)
 {
 	*delta_jiffies = ULONG_MAX;
-	return rcu_cpu_has_callbacks(cpu, NULL);
+	return rcu_cpu_has_callbacks(NULL);
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
@@ -1523,7 +1523,7 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
-static void rcu_cleanup_after_idle(int cpu)
+static void rcu_cleanup_after_idle(void)
 {
 }
 
@@ -1531,7 +1531,7 @@ static void rcu_cleanup_after_idle(int cpu)
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
-static void rcu_prepare_for_idle(int cpu)
+static void rcu_prepare_for_idle(void)
 {
 }
 
@@ -1624,15 +1624,15 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 * The caller must have disabled interrupts.
 */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(int cpu, unsigned long *dj)
+int rcu_needs_cpu(unsigned long *dj)
 {
-	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Snapshot to detect later posting of non-lazy callback. */
 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 
 	/* If no callbacks, RCU doesn't need the CPU. */
-	if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
+	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
 		*dj = ULONG_MAX;
 		return 0;
 	}
@@ -1666,12 +1666,12 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
 *
 * The caller must have disabled interrupts.
 */
-static void rcu_prepare_for_idle(int cpu)
+static void rcu_prepare_for_idle(void)
 {
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
 	bool needwake;
 	struct rcu_data *rdp;
-	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 	int tne;
@@ -1679,7 +1679,7 @@ static void rcu_prepare_for_idle(int cpu)
 	/* Handle nohz enablement switches conservatively. */
 	tne = ACCESS_ONCE(tick_nohz_active);
 	if (tne != rdtp->tick_nohz_enabled_snap) {
-		if (rcu_cpu_has_callbacks(cpu, NULL))
+		if (rcu_cpu_has_callbacks(NULL))
 			invoke_rcu_core(); /* force nohz to see update. */
 		rdtp->tick_nohz_enabled_snap = tne;
 		return;
@@ -1688,7 +1688,7 @@ static void rcu_prepare_for_idle(int cpu)
 		return;
 
 	/* If this is a no-CBs CPU, no callbacks, just return. */
-	if (rcu_is_nocb_cpu(cpu))
+	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 
 	/*
@@ -1712,7 +1712,7 @@ static void rcu_prepare_for_idle(int cpu)
 		return;
 	rdtp->last_accelerate = jiffies;
 	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
+		rdp = this_cpu_ptr(rsp->rda);
 		if (!*rdp->nxttail[RCU_DONE_TAIL])
 			continue;
 		rnp = rdp->mynode;
@@ -1731,10 +1731,10 @@ static void rcu_prepare_for_idle(int cpu)
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
-static void rcu_cleanup_after_idle(int cpu)
+static void rcu_cleanup_after_idle(void)
 {
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-	if (rcu_is_nocb_cpu(cpu))
+	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 	if (rcu_try_advance_all_cbs())
 		invoke_rcu_core();
@@ -2573,9 +2573,13 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 			rdp->nocb_leader = rdp_spawn;
 			if (rdp_last && rdp != rdp_spawn)
 				rdp_last->nocb_next_follower = rdp;
-			rdp_last = rdp;
-			rdp = rdp->nocb_next_follower;
-			rdp_last->nocb_next_follower = NULL;
+			if (rdp == rdp_spawn) {
+				rdp = rdp->nocb_next_follower;
+			} else {
+				rdp_last = rdp;
+				rdp = rdp->nocb_next_follower;
+				rdp_last->nocb_next_follower = NULL;
+			}
 		} while (rdp);
 		rdp_spawn->nocb_next_follower = rdp_old_leader;
 	}
@@ -2761,9 +2765,10 @@ static int full_sysidle_state;		/* Current system-idle state. */
 * to detect full-system idle states, not RCU quiescent states and grace
 * periods.  The caller must have disabled interrupts.
 */
-static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_enter(int irq)
 {
 	unsigned long j;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* If there are no nohz_full= CPUs, no need to track this. */
 	if (!tick_nohz_full_enabled())
@@ -2832,8 +2837,10 @@ void rcu_sysidle_force_exit(void)
 * usermode execution does -not- count as idle here!  The caller must
 * have disabled interrupts.
 */
-static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_exit(int irq)
 {
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
 	/* If there are no nohz_full= CPUs, no need to track this. */
 	if (!tick_nohz_full_enabled())
 		return;
@@ -3127,11 +3134,11 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
 
 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 
-static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_enter(int irq)
 {
 }
 
-static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_exit(int irq)
 {
 }
 
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 3ef8ba58694e..e0d31a345ee6 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -306,7 +306,7 @@ struct debug_obj_descr rcuhead_debug_descr = {
 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
 			       unsigned long secs,
 			       unsigned long c_old, unsigned long c)
@@ -531,7 +531,8 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		struct rcu_head *next;
 		LIST_HEAD(rcu_tasks_holdouts);
 
-		/* FIXME: Add housekeeping affinity. */
+		/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
+		housekeeping_affine(current);
 
 		/*
 		 * Each pass through the following loop makes one check for
@@ -690,3 +691,87 @@ static void rcu_spawn_tasks_kthread(void)
 }
 
 #endif /* #ifdef CONFIG_TASKS_RCU */
+
+#ifdef CONFIG_PROVE_RCU
+
+/*
+ * Early boot self test parameters, one for each flavor
+ */
+static bool rcu_self_test;
+static bool rcu_self_test_bh;
+static bool rcu_self_test_sched;
+
+module_param(rcu_self_test, bool, 0444);
+module_param(rcu_self_test_bh, bool, 0444);
+module_param(rcu_self_test_sched, bool, 0444);
+
+static int rcu_self_test_counter;
+
+static void test_callback(struct rcu_head *r)
+{
+	rcu_self_test_counter++;
+	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
+}
+
+static void early_boot_test_call_rcu(void)
+{
+	static struct rcu_head head;
+
+	call_rcu(&head, test_callback);
+}
+
+static void early_boot_test_call_rcu_bh(void)
+{
+	static struct rcu_head head;
+
+	call_rcu_bh(&head, test_callback);
+}
+
+static void early_boot_test_call_rcu_sched(void)
+{
+	static struct rcu_head head;
+
+	call_rcu_sched(&head, test_callback);
+}
+
+void rcu_early_boot_tests(void)
+{
+	pr_info("Running RCU self tests\n");
+
+	if (rcu_self_test)
+		early_boot_test_call_rcu();
+	if (rcu_self_test_bh)
+		early_boot_test_call_rcu_bh();
+	if (rcu_self_test_sched)
+		early_boot_test_call_rcu_sched();
+}
+
+static int rcu_verify_early_boot_tests(void)
+{
+	int ret = 0;
+	int early_boot_test_counter = 0;
+
+	if (rcu_self_test) {
+		early_boot_test_counter++;
+		rcu_barrier();
+	}
+	if (rcu_self_test_bh) {
+		early_boot_test_counter++;
+		rcu_barrier_bh();
+	}
+	if (rcu_self_test_sched) {
+		early_boot_test_counter++;
+		rcu_barrier_sched();
+	}
+
+	if (rcu_self_test_counter != early_boot_test_counter) {
+		WARN_ON(1);
+		ret = -1;
+	}
+
+	return ret;
+}
+late_initcall(rcu_verify_early_boot_tests);
+#else
+void rcu_early_boot_tests(void) {}
+#endif /* CONFIG_PROVE_RCU */
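
Usage note on the early boot self tests added above: they are built only under CONFIG_PROVE_RCU and default to off. Assuming the "rcupdate." module-parameter prefix that kernel/rcu/update.c uses for its other parameters (check MODULE_PARAM_PREFIX in your tree), they would be enabled from the kernel command line roughly as:

	rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1 rcupdate.rcu_self_test_sched=1

Each enabled flavor queues one callback from rcu_early_boot_tests(), which rcu_init() now invokes very early in boot; rcu_verify_early_boot_tests() then runs at late_initcall() time, flushes the callbacks with the rcu_barrier*() calls, and WARNs if the number of executed callbacks does not match the number of flavors enabled.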
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 240157c13ddc..05e5c1ebc73b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2802,7 +2802,7 @@ need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_note_context_switch(cpu);
+	rcu_note_context_switch();
 	prev = rq->curr;
 
 	schedule_debug(prev);
diff --git a/kernel/signal.c b/kernel/signal.c
index 8f0876f9f6dd..19e35135fc60 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1275,7 +1275,17 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 			local_irq_restore(*flags);
 			break;
 		}
-
+		/*
+		 * This sighand can be already freed and even reused, but
+		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
+		 * initializes ->siglock: this slab can't go away, it has
+		 * the same object type, ->siglock can't be reinitialized.
+		 *
+		 * We need to ensure that tsk->sighand is still the same
+		 * after we take the lock, we can race with de_thread() or
+		 * __exit_signal(). In the latter case the next iteration
+		 * must see ->sighand == NULL.
+		 */
 		spin_lock(&sighand->siglock);
 		if (likely(sighand == tsk->sighand)) {
 			rcu_read_unlock();
@@ -1331,23 +1341,21 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 	int error = -ESRCH;
 	struct task_struct *p;
 
-	rcu_read_lock();
-retry:
-	p = pid_task(pid, PIDTYPE_PID);
-	if (p) {
-		error = group_send_sig_info(sig, info, p);
-		if (unlikely(error == -ESRCH))
-			/*
-			 * The task was unhashed in between, try again.
-			 * If it is dead, pid_task() will return NULL,
-			 * if we race with de_thread() it will find the
-			 * new leader.
-			 */
-			goto retry;
-	}
-	rcu_read_unlock();
+	for (;;) {
+		rcu_read_lock();
+		p = pid_task(pid, PIDTYPE_PID);
+		if (p)
+			error = group_send_sig_info(sig, info, p);
+		rcu_read_unlock();
+		if (likely(!p || error != -ESRCH))
+			return error;
 
-	return error;
+		/*
+		 * The task was unhashed in between, try again.  If it
+		 * is dead, pid_task() will return NULL, if we race with
+		 * de_thread() it will find the new leader.
+		 */
+	}
 }
 
 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0699add19164..501baa9ac1be 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -656,7 +656,7 @@ static void run_ksoftirqd(unsigned int cpu)
 		 * in the task stack here.
 		 */
 		__do_softirq();
-		rcu_note_context_switch(cpu);
+		rcu_note_context_switch();
 		local_irq_enable();
 		cond_resched();
 		return;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7b5741fc4110..1f4356037a7d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -585,7 +585,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		last_jiffies = jiffies;
 	} while (read_seqretry(&jiffies_lock, seq));
 
-	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
+	if (rcu_needs_cpu(&rcu_delta_jiffies) ||
 	    arch_needs_cpu() || irq_work_needs_cpu()) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 3260ffdb368f..2d3f5c504939 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1377,12 +1377,11 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 void update_process_times(int user_tick)
 {
 	struct task_struct *p = current;
-	int cpu = smp_processor_id();
 
 	/* Note: this timer irq context must be accounted for as well. */
 	account_process_tick(p, user_tick);
 	run_local_timers();
-	rcu_check_callbacks(cpu, user_tick);
+	rcu_check_callbacks(user_tick);
 #ifdef CONFIG_IRQ_WORK
 	if (in_irq())
 		irq_work_tick();