Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/Makefile      |  2
-rw-r--r--  kernel/rcu/rcu.h         |  2
-rw-r--r--  kernel/rcu/rcutorture.c  |  1
-rw-r--r--  kernel/rcu/tiny.c        |  4
-rw-r--r--  kernel/rcu/tree.c        | 46
-rw-r--r--  kernel/rcu/tree.h        | 10
-rw-r--r--  kernel/rcu/tree_plugin.h | 58
-rw-r--r--  kernel/rcu/update.c      | 89
8 files changed, 161 insertions(+), 51 deletions(-)
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index 807ccfbf69b3..e6fae503d1bc 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,6 +1,6 @@
 obj-y += update.o srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += tree.o
-obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o
+obj-$(CONFIG_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
 obj-$(CONFIG_TINY_RCU) += tiny.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index ff1a6de62f17..07bb02eda844 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -135,4 +135,6 @@ int rcu_jiffies_till_stall_check(void);
  */
 #define TPS(x) tracepoint_string(x)
 
+void rcu_early_boot_tests(void);
+
 #endif /* __LINUX_RCU_H */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 240fa9094f83..4d559baf06e0 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -812,6 +812,7 @@ rcu_torture_cbflood(void *arg)
 		cur_ops->cb_barrier();
 		stutter_wait("rcu_torture_cbflood");
 	} while (!torture_must_stop());
+	vfree(rhp);
 	torture_kthread_stopping("rcu_torture_cbflood");
 	return 0;
 }
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 01570c68d237..0db5649f8817 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -380,7 +380,9 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
-void rcu_init(void)
+void __init rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+
+	rcu_early_boot_tests();
 }
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index fa76fd3c219c..7680fc275036 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -152,19 +152,6 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
-#ifdef CONFIG_RCU_BOOST
-
-/*
- * Control variables for per-CPU and per-rcu_node kthreads.  These
- * handle all flavors of RCU.
- */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -2964,6 +2951,9 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
+	cpumask_var_t cm;
+	bool cma = false;
+	int cpu;
 	long firstsnap, s, snap;
 	int trycount = 0;
 	struct rcu_state *rsp = &rcu_sched_state;
@@ -2998,11 +2988,26 @@ void synchronize_sched_expedited(void)
 	}
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
+	/* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
+	cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
+	if (cma) {
+		cpumask_copy(cm, cpu_online_mask);
+		cpumask_clear_cpu(raw_smp_processor_id(), cm);
+		for_each_cpu(cpu, cm) {
+			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+			if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+				cpumask_clear_cpu(cpu, cm);
+		}
+		if (cpumask_weight(cm) == 0)
+			goto all_cpus_idle;
+	}
+
 	/*
 	 * Each pass through the following loop attempts to force a
 	 * context switch on each CPU.
 	 */
-	while (try_stop_cpus(cpu_online_mask,
+	while (try_stop_cpus(cma ? cm : cpu_online_mask,
 			     synchronize_sched_expedited_cpu_stop,
 			     NULL) == -EAGAIN) {
 		put_online_cpus();
@@ -3014,6 +3019,7 @@ void synchronize_sched_expedited(void)
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone1);
+			free_cpumask_var(cm);
 			return;
 		}
 
@@ -3023,6 +3029,7 @@ void synchronize_sched_expedited(void)
 		} else {
 			wait_rcu_gp(call_rcu_sched);
 			atomic_long_inc(&rsp->expedited_normal);
+			free_cpumask_var(cm);
 			return;
 		}
 
@@ -3032,6 +3039,7 @@ void synchronize_sched_expedited(void)
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone2);
+			free_cpumask_var(cm);
 			return;
 		}
 
@@ -3046,6 +3054,7 @@ void synchronize_sched_expedited(void)
 			/* CPU hotplug operation in flight, use normal GP. */
 			wait_rcu_gp(call_rcu_sched);
 			atomic_long_inc(&rsp->expedited_normal);
+			free_cpumask_var(cm);
 			return;
 		}
 		snap = atomic_long_read(&rsp->expedited_start);
@@ -3053,6 +3062,9 @@ void synchronize_sched_expedited(void)
 	}
 	atomic_long_inc(&rsp->expedited_stoppedcpus);
 
+all_cpus_idle:
+	free_cpumask_var(cm);
+
 	/*
 	 * Everyone up to our most recent fetch is covered by our grace
 	 * period.  Update the counter, but only if our work is still
@@ -3486,8 +3498,10 @@ static int rcu_cpu_notify(struct notifier_block *self,
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		for_each_rcu_flavor(rsp)
+		for_each_rcu_flavor(rsp) {
 			rcu_cleanup_dead_cpu(cpu, rsp);
+			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+		}
 		break;
 	default:
 		break;
@@ -3767,6 +3781,8 @@ void __init rcu_init(void)
 	pm_notifier(rcu_pm_notify, 0);
 	for_each_online_cpu(cpu)
 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+
+	rcu_early_boot_tests();
 }
 
 #include "tree_plugin.h"
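Note on the synchronize_sched_expedited() hunks above: the new code builds a cpumask of CPUs that actually need to be stopped by skipping any CPU whose dynticks counter is even. The counter is incremented once on every idle entry and once on every idle exit, so an even value means the CPU is idle and therefore already quiescent. Below is a minimal userspace sketch of that even/odd parity convention; the names (fake_dynticks, fake_cpu_is_idle) are illustrative stand-ins, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-CPU ->dynticks counter: incremented once on every
 * idle entry and once on every idle exit, so an odd value means the CPU is
 * currently non-idle and an even value means it is idle. */
struct fake_dynticks {
	atomic_int dynticks;
};

static void fake_idle_enter(struct fake_dynticks *d)
{
	atomic_fetch_add(&d->dynticks, 1);	/* odd -> even: now idle */
}

static void fake_idle_exit(struct fake_dynticks *d)
{
	atomic_fetch_add(&d->dynticks, 1);	/* even -> odd: now busy */
}

/* Mirrors the parity test in the hunk above: an even counter means the CPU
 * is idle, hence already quiescent, hence need not be stopped. */
static bool fake_cpu_is_idle(struct fake_dynticks *d)
{
	return !(atomic_load(&d->dynticks) & 0x1);
}

int main(void)
{
	struct fake_dynticks cpu = { 1 };	/* odd: starts out non-idle */

	printf("running: idle=%d\n", fake_cpu_is_idle(&cpu));	/* 0 */
	fake_idle_enter(&cpu);
	printf("idle:    idle=%d\n", fake_cpu_is_idle(&cpu));	/* 1 */
	fake_idle_exit(&cpu);
	printf("running: idle=%d\n", fake_cpu_is_idle(&cpu));	/* 0 */
	return 0;
}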
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 36c30390e4e9..8e7b1843896e 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -139,7 +139,7 @@ struct rcu_node {
 	unsigned long expmask;	/* Groups that have ->blkd_tasks */
 				/*  elements that need to drain to allow the */
 				/*  current expedited grace period to */
-				/*  complete (only for TREE_PREEMPT_RCU). */
+				/*  complete (only for PREEMPT_RCU). */
 	unsigned long qsmaskinit;
 				/* Per-GP initial value for qsmask & expmask. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
@@ -530,10 +530,10 @@ DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);
 extern struct rcu_state rcu_bh_state;
 DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 extern struct rcu_state rcu_preempt_state;
 DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -563,10 +563,10 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			       bool wake);
-#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 2443282737ba..3ec85cb5d544 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -30,14 +30,24 @@
 #include <linux/smpboot.h>
 #include "../time/tick-internal.h"
 
-#define RCU_KTHREAD_PRIO 1
-
 #ifdef CONFIG_RCU_BOOST
+
 #include "../locking/rtmutex_common.h"
-#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
-#else
-#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
-#endif
+
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
+/*
+ * Control variables for per-CPU and per-rcu_node kthreads.  These
+ * handle all flavors of RCU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+DEFINE_PER_CPU(char, rcu_cpu_has_work);
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
@@ -72,9 +82,6 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
 	pr_info("\tRCU torture testing starts during boot.\n");
 #endif
-#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
-	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
-#endif
 #if defined(CONFIG_RCU_CPU_STALL_INFO)
 	pr_info("\tAdditional per-CPU info printed with stalls.\n");
 #endif
@@ -85,9 +92,12 @@ static void __init rcu_bootup_announce_oddness(void)
 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
+#ifdef CONFIG_RCU_BOOST
+	pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
+#endif
 }
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 
 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 static struct rcu_state *rcu_state_p = &rcu_preempt_state;
@@ -415,8 +425,6 @@ void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
-
 /*
  * Dump detailed information for all tasks blocking the current RCU
  * grace period on the specified rcu_node structure.
@@ -451,14 +459,6 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 		rcu_print_detail_task_stall_rnp(rnp);
 }
 
-#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
-
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
-
 #ifdef CONFIG_RCU_CPU_STALL_INFO
 
 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
@@ -919,7 +919,7 @@ void exit_rcu(void)
 	__rcu_read_unlock();
 }
 
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state_p = &rcu_sched_state;
 
@@ -1070,7 +1070,7 @@ void exit_rcu(void)
 {
 }
 
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
 
@@ -1326,7 +1326,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	smp_mb__after_unlock_lock();
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	sp.sched_priority = RCU_BOOST_PRIO;
+	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
 	return 0;
@@ -1343,7 +1343,7 @@ static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
 	struct sched_param sp;
 
-	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
@@ -2573,9 +2573,13 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 		rdp->nocb_leader = rdp_spawn;
 		if (rdp_last && rdp != rdp_spawn)
 			rdp_last->nocb_next_follower = rdp;
-		rdp_last = rdp;
-		rdp = rdp->nocb_next_follower;
-		rdp_last->nocb_next_follower = NULL;
+		if (rdp == rdp_spawn) {
+			rdp = rdp->nocb_next_follower;
+		} else {
+			rdp_last = rdp;
+			rdp = rdp->nocb_next_follower;
+			rdp_last->nocb_next_follower = NULL;
+		}
 	} while (rdp);
 	rdp_spawn->nocb_next_follower = rdp_old_leader;
 	}
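Note on the rcu_spawn_one_nocb_kthread() hunk above: the node being promoted to leader (rdp_spawn) must not be spliced into the middle of the rebuilt follower chain, because the final rdp_spawn->nocb_next_follower = rdp_old_leader assignment would then overwrite its link and orphan every follower after it. Below is a simplified, self-contained userspace analog of that splice; struct node and promote_leader() are hypothetical names, a sketch of the pattern rather than the kernel code.

#include <stdio.h>

/* Hypothetical stand-in for struct rcu_data and its ->nocb_next_follower link. */
struct node {
	int id;
	struct node *next;
};

/*
 * Walk the old follower list, chain every node except the designated new
 * leader ("spawn") together, then point the new leader at the head of the
 * old list.  If spawn were treated like any other node (the pre-fix
 * behavior), the final assignment would clobber its link and the nodes that
 * followed it in the old list would become unreachable.
 */
static void promote_leader(struct node *old_leader, struct node *spawn)
{
	struct node *last = NULL;
	struct node *n = old_leader;

	do {
		if (last && n != spawn)
			last->next = n;
		if (n == spawn) {
			n = n->next;		/* leave spawn's link alone */
		} else {
			last = n;
			n = n->next;
			last->next = NULL;	/* clip; re-linked next pass */
		}
	} while (n);
	spawn->next = old_leader;		/* spawn heads the new chain */
}

int main(void)
{
	struct node c = { 4, NULL };
	struct node s = { 3, &c };	/* node being promoted to leader */
	struct node b = { 2, &s };
	struct node a = { 1, &b };

	promote_leader(&a, &s);
	for (struct node *n = &s; n; n = n->next)
		printf("%d ", n->id);	/* prints: 3 1 2 4 */
	printf("\n");
	return 0;
}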
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 3ef8ba58694e..e0d31a345ee6 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -306,7 +306,7 @@ struct debug_obj_descr rcuhead_debug_descr = {
 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
 			       unsigned long secs,
 			       unsigned long c_old, unsigned long c)
@@ -531,7 +531,8 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		struct rcu_head *next;
 		LIST_HEAD(rcu_tasks_holdouts);
 
-		/* FIXME: Add housekeeping affinity. */
+		/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
+		housekeeping_affine(current);
 
 		/*
 		 * Each pass through the following loop makes one check for
@@ -690,3 +691,87 @@ static void rcu_spawn_tasks_kthread(void)
 }
 
 #endif /* #ifdef CONFIG_TASKS_RCU */
+
+#ifdef CONFIG_PROVE_RCU
+
+/*
+ * Early boot self test parameters, one for each flavor
+ */
+static bool rcu_self_test;
+static bool rcu_self_test_bh;
+static bool rcu_self_test_sched;
+
+module_param(rcu_self_test, bool, 0444);
+module_param(rcu_self_test_bh, bool, 0444);
+module_param(rcu_self_test_sched, bool, 0444);
+
+static int rcu_self_test_counter;
+
+static void test_callback(struct rcu_head *r)
+{
+	rcu_self_test_counter++;
+	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
+}
+
+static void early_boot_test_call_rcu(void)
+{
+	static struct rcu_head head;
+
+	call_rcu(&head, test_callback);
+}
+
+static void early_boot_test_call_rcu_bh(void)
+{
+	static struct rcu_head head;
+
+	call_rcu_bh(&head, test_callback);
+}
+
+static void early_boot_test_call_rcu_sched(void)
+{
+	static struct rcu_head head;
+
+	call_rcu_sched(&head, test_callback);
+}
+
+void rcu_early_boot_tests(void)
+{
+	pr_info("Running RCU self tests\n");
+
+	if (rcu_self_test)
+		early_boot_test_call_rcu();
+	if (rcu_self_test_bh)
+		early_boot_test_call_rcu_bh();
+	if (rcu_self_test_sched)
+		early_boot_test_call_rcu_sched();
+}
+
+static int rcu_verify_early_boot_tests(void)
+{
+	int ret = 0;
+	int early_boot_test_counter = 0;
+
+	if (rcu_self_test) {
+		early_boot_test_counter++;
+		rcu_barrier();
+	}
+	if (rcu_self_test_bh) {
+		early_boot_test_counter++;
+		rcu_barrier_bh();
+	}
+	if (rcu_self_test_sched) {
+		early_boot_test_counter++;
+		rcu_barrier_sched();
+	}
+
+	if (rcu_self_test_counter != early_boot_test_counter) {
+		WARN_ON(1);
+		ret = -1;
+	}
+
+	return ret;
+}
+late_initcall(rcu_verify_early_boot_tests);
+#else
+void rcu_early_boot_tests(void) {}
+#endif /* CONFIG_PROVE_RCU */
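Note on the update.c additions above: they implement a fire-and-verify boot self test. Each enabled flavor queues one callback from rcu_early_boot_tests(), the callback bumps rcu_self_test_counter, and a late_initcall waits for the matching rcu_barrier*() calls and checks that the counter equals the number of tests requested. The parameters are read-only module parameters, so they are normally set on the boot command line with this file's module-parameter prefix (likely rcupdate.rcu_self_test=1, though the exact prefix depends on the build). Below is a self-contained userspace analog of the queue/count/verify pattern; all names here are hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PENDING 8

typedef void (*cb_t)(void);

static cb_t pending[MAX_PENDING];
static int npending;
static int executed;			/* analog of rcu_self_test_counter */

static void queue_callback(cb_t cb)	/* analog of call_rcu() and friends */
{
	if (npending < MAX_PENDING)
		pending[npending++] = cb;
}

static void drain_callbacks(void)	/* analog of rcu_barrier() and friends */
{
	for (int i = 0; i < npending; i++)
		pending[i]();
	npending = 0;
}

static void test_cb(void)
{
	executed++;
	printf("test callback executed %d\n", executed);
}

int main(void)
{
	/* Analogs of the rcu_self_test* parameters. */
	bool self_test = true, self_test_bh = true, self_test_sched = false;
	int expected = 0;

	/* "Early boot": queue one callback per enabled flavor. */
	if (self_test)
		queue_callback(test_cb);
	if (self_test_bh)
		queue_callback(test_cb);
	if (self_test_sched)
		queue_callback(test_cb);

	/* "late_initcall": wait for the callbacks, then verify the count. */
	drain_callbacks();
	if (self_test)
		expected++;
	if (self_test_bh)
		expected++;
	if (self_test_sched)
		expected++;

	printf("%s: executed=%d expected=%d\n",
	       executed == expected ? "PASS" : "FAIL", executed, expected);
	return executed == expected ? 0 : 1;
}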