path: root/kernel/rcu/tree_plugin.h
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	404
1 file changed, 273 insertions(+), 131 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index a7997e272564..387dd4599344 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -85,33 +85,6 @@ static void __init rcu_bootup_announce_oddness(void)
 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
-#ifdef CONFIG_RCU_NOCB_CPU
-#ifndef CONFIG_RCU_NOCB_CPU_NONE
-	if (!have_rcu_nocb_mask) {
-		zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
-		have_rcu_nocb_mask = true;
-	}
-#ifdef CONFIG_RCU_NOCB_CPU_ZERO
-	pr_info("\tOffload RCU callbacks from CPU 0\n");
-	cpumask_set_cpu(0, rcu_nocb_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
-#ifdef CONFIG_RCU_NOCB_CPU_ALL
-	pr_info("\tOffload RCU callbacks from all CPUs\n");
-	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
-	if (have_rcu_nocb_mask) {
-		if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
-			pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
-			cpumask_and(rcu_nocb_mask, cpu_possible_mask,
-				    rcu_nocb_mask);
-		}
-		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
-		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
-		if (rcu_nocb_poll)
-			pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
-	}
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 }
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
@@ -134,7 +107,7 @@ static void __init rcu_bootup_announce(void)
  * Return the number of RCU-preempt batches processed thus far
  * for debug and statistics.
  */
-long rcu_batches_completed_preempt(void)
+static long rcu_batches_completed_preempt(void)
 {
 	return rcu_preempt_state.completed;
 }
@@ -155,18 +128,19 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * not in a quiescent state. There might be any number of tasks blocked
  * while in an RCU read-side critical section.
  *
- * Unlike the other rcu_*_qs() functions, callers to this function
- * must disable irqs in order to protect the assignment to
- * ->rcu_read_unlock_special.
- */
-static void rcu_preempt_qs(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
-	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+ * As with the other rcu_*_qs() functions, callers to this function
+ * must disable preemption.
+ */
+static void rcu_preempt_qs(void)
+{
+	if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_preempt"),
+				       __this_cpu_read(rcu_preempt_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+		current->rcu_read_unlock_special.b.need_qs = false;
+	}
 }
 
 /*
@@ -190,14 +164,14 @@ static void rcu_preempt_note_context_switch(int cpu)
 	struct rcu_node *rnp;
 
 	if (t->rcu_read_lock_nesting > 0 &&
-	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+	    !t->rcu_read_unlock_special.b.blocked) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
 		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		smp_mb__after_unlock_lock();
-		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+		t->rcu_read_unlock_special.b.blocked = true;
 		t->rcu_blocked_node = rnp;
 
 		/*
@@ -239,7 +213,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 				   : rnp->gpnum + 1);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else if (t->rcu_read_lock_nesting < 0 &&
-		   t->rcu_read_unlock_special) {
+		   t->rcu_read_unlock_special.s) {
 
 		/*
 		 * Complete exit from RCU read-side critical section on
@@ -257,9 +231,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * grace period, then the fact that the task has been enqueued
 	 * means that we continue to block the current grace period.
 	 */
-	local_irq_save(flags);
-	rcu_preempt_qs(cpu);
-	local_irq_restore(flags);
+	rcu_preempt_qs();
 }
 
 /*
@@ -340,7 +312,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 	bool drop_boost_mutex = false;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 	struct rcu_node *rnp;
-	int special;
+	union rcu_special special;
 
 	/* NMI handlers cannot block and cannot safely manipulate state. */
 	if (in_nmi())
@@ -350,12 +322,13 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 	/*
 	 * If RCU core is waiting for this CPU to exit critical section,
-	 * let it know that we have done so.
+	 * let it know that we have done so. Because irqs are disabled,
+	 * t->rcu_read_unlock_special cannot change.
 	 */
 	special = t->rcu_read_unlock_special;
-	if (special & RCU_READ_UNLOCK_NEED_QS) {
-		rcu_preempt_qs(smp_processor_id());
-		if (!t->rcu_read_unlock_special) {
+	if (special.b.need_qs) {
+		rcu_preempt_qs();
+		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;
 		}
@@ -368,8 +341,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Clean up if blocked during RCU read-side critical section. */
-	if (special & RCU_READ_UNLOCK_BLOCKED) {
-		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+	if (special.b.blocked) {
+		t->rcu_read_unlock_special.b.blocked = false;
 
 		/*
 		 * Remove this task from the list it blocked on. The
@@ -653,12 +626,13 @@ static void rcu_preempt_check_callbacks(int cpu)
 	struct task_struct *t = current;
 
 	if (t->rcu_read_lock_nesting == 0) {
-		rcu_preempt_qs(cpu);
+		rcu_preempt_qs();
 		return;
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
-	    per_cpu(rcu_preempt_data, cpu).qs_pending)
-		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+	    per_cpu(rcu_preempt_data, cpu).qs_pending &&
+	    !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
+		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
 #ifdef CONFIG_RCU_BOOST
@@ -819,11 +793,6 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
  * In fact, if you are using synchronize_rcu_expedited() in a loop,
  * please restructure your code to batch your updates, and then Use a
  * single synchronize_rcu() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier. Failing to observe
- * these restriction will result in deadlock.
  */
 void synchronize_rcu_expedited(void)
 {
@@ -845,7 +814,11 @@ void synchronize_rcu_expedited(void)
 	 * being boosted. This simplifies the process of moving tasks
 	 * from leaf to root rcu_node structures.
 	 */
-	get_online_cpus();
+	if (!try_get_online_cpus()) {
+		/* CPU-hotplug operation in flight, fall back to normal GP. */
+		wait_rcu_gp(call_rcu);
+		return;
+	}
 
 	/*
 	 * Acquire lock, falling back to synchronize_rcu() if too many
@@ -897,7 +870,8 @@ void synchronize_rcu_expedited(void)
 
 	/* Clean up and exit. */
 	smp_mb(); /* ensure expedited GP seen before counter increment. */
-	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
+	ACCESS_ONCE(sync_rcu_preempt_exp_count) =
+		sync_rcu_preempt_exp_count + 1;
 unlock_mb_ret:
 	mutex_unlock(&sync_rcu_preempt_exp_mutex);
 mb_ret:
@@ -941,7 +915,7 @@ void exit_rcu(void)
 		return;
 	t->rcu_read_lock_nesting = 1;
 	barrier();
-	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
+	t->rcu_read_unlock_special.b.blocked = true;
 	__rcu_read_unlock();
 }
 
@@ -1462,14 +1436,13 @@ static struct smp_hotplug_thread rcu_cpu_thread_spec = {
 };
 
 /*
- * Spawn all kthreads -- called as soon as the scheduler is running.
+ * Spawn boost kthreads -- called as soon as the scheduler is running.
  */
-static int __init rcu_spawn_kthreads(void)
+static void __init rcu_spawn_boost_kthreads(void)
 {
 	struct rcu_node *rnp;
 	int cpu;
 
-	rcu_scheduler_fully_active = 1;
 	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
@@ -1479,9 +1452,7 @@ static int __init rcu_spawn_kthreads(void)
 		rcu_for_each_leaf_node(rcu_state_p, rnp)
 			(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 	}
-	return 0;
 }
-early_initcall(rcu_spawn_kthreads);
 
 static void rcu_prepare_kthreads(int cpu)
 {
@@ -1519,12 +1490,9 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static int __init rcu_scheduler_really_started(void)
+static void __init rcu_spawn_boost_kthreads(void)
 {
-	rcu_scheduler_fully_active = 1;
-	return 0;
 }
-early_initcall(rcu_scheduler_really_started);
 
 static void rcu_prepare_kthreads(int cpu)
 {
@@ -1625,7 +1593,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 
 	/* Exit early if we advanced recently. */
 	if (jiffies == rdtp->last_advance_all)
-		return 0;
+		return false;
 	rdtp->last_advance_all = jiffies;
 
 	for_each_rcu_flavor(rsp) {
@@ -1848,7 +1816,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
-		cond_resched();
+		cond_resched_rcu_qs();
 	}
 	put_online_cpus();
 
@@ -2075,7 +2043,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 	if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
 		return;
 	if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
-		/* Prior xchg orders against prior callback enqueue. */
+		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
 		ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
 		wake_up(&rdp_leader->nocb_wq);
 	}
@@ -2104,6 +2072,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	ACCESS_ONCE(*old_rhpp) = rhp;
 	atomic_long_add(rhcount, &rdp->nocb_q_count);
 	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
+	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
 	/* If we are not being polled and there is a kthread, awaken it ... */
 	t = ACCESS_ONCE(rdp->nocb_kthread);
@@ -2120,16 +2089,23 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("WakeEmpty"));
 		} else {
-			rdp->nocb_defer_wakeup = true;
+			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("WakeEmptyIsDeferred"));
 		}
 		rdp->qlen_last_fqs_check = 0;
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
 		/* ... or if many callbacks queued. */
-		wake_nocb_leader(rdp, true);
+		if (!irqs_disabled_flags(flags)) {
+			wake_nocb_leader(rdp, true);
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WakeOvf"));
+		} else {
+			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WakeOvfIsDeferred"));
+		}
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
 	} else {
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
 	}
@@ -2150,7 +2126,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 {
 
 	if (!rcu_is_nocb_cpu(rdp->cpu))
-		return 0;
+		return false;
 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
@@ -2161,7 +2137,18 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 		trace_rcu_callback(rdp->rsp->name, rhp,
 				   -atomic_long_read(&rdp->nocb_q_count_lazy),
 				   -atomic_long_read(&rdp->nocb_q_count));
-	return 1;
+
+	/*
+	 * If called from an extended quiescent state with interrupts
+	 * disabled, invoke the RCU core in order to allow the idle-entry
+	 * deferred-wakeup check to function.
+	 */
+	if (irqs_disabled_flags(flags) &&
+	    !rcu_is_watching() &&
+	    cpu_online(smp_processor_id()))
+		invoke_rcu_core();
+
+	return true;
 }
 
 /*
@@ -2177,7 +2164,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 
 	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
 	if (!rcu_is_nocb_cpu(smp_processor_id()))
-		return 0;
+		return false;
 	rsp->qlen = 0;
 	rsp->qlen_lazy = 0;
 
@@ -2196,7 +2183,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 		rsp->orphan_nxtlist = NULL;
 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
 	}
-	return 1;
+	return true;
 }
 
 /*
@@ -2229,7 +2216,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
 		if (likely(d))
 			break;
-		flush_signals(current);
+		WARN_ON(signal_pending(current));
 		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
 	}
 	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
@@ -2288,7 +2275,7 @@ wait_again:
 		if (!rcu_nocb_poll)
 			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
 					    "WokeEmpty");
-		flush_signals(current);
+		WARN_ON(signal_pending(current));
 		schedule_timeout_interruptible(1);
 
 		/* Rescan in case we were a victim of memory ordering. */
@@ -2327,6 +2314,7 @@ wait_again:
 		atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
 		atomic_long_add(rdp->nocb_gp_count_lazy,
 				&rdp->nocb_follower_count_lazy);
+		smp_mb__after_atomic(); /* Store *tail before wakeup. */
 		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
 			/*
 			 * List was empty, wake up the follower.
@@ -2367,7 +2355,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 		if (!rcu_nocb_poll)
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    "WokeEmpty");
-		flush_signals(current);
+		WARN_ON(signal_pending(current));
 		schedule_timeout_interruptible(1);
 	}
 }
@@ -2428,15 +2416,16 @@ static int rcu_nocb_kthread(void *arg)
 			list = next;
 		}
 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
-		ACCESS_ONCE(rdp->nocb_p_count) -= c;
-		ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
+		ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
+		ACCESS_ONCE(rdp->nocb_p_count_lazy) =
+			rdp->nocb_p_count_lazy - cl;
 		rdp->n_nocbs_invoked += c;
 	}
 	return 0;
 }
 
 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
-static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
 {
 	return ACCESS_ONCE(rdp->nocb_defer_wakeup);
 }
@@ -2444,11 +2433,79 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
 /* Do a deferred wakeup of rcu_nocb_kthread(). */
 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 {
+	int ndw;
+
 	if (!rcu_nocb_need_deferred_wakeup(rdp))
 		return;
-	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
-	wake_nocb_leader(rdp, false);
-	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
+	ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
+	ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
+	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
+	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
+}
+
+void __init rcu_init_nohz(void)
+{
+	int cpu;
+	bool need_rcu_nocb_mask = true;
+	struct rcu_state *rsp;
+
+#ifdef CONFIG_RCU_NOCB_CPU_NONE
+	need_rcu_nocb_mask = false;
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
+
+#if defined(CONFIG_NO_HZ_FULL)
+	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+		need_rcu_nocb_mask = true;
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
+		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+			return;
+		}
+		have_rcu_nocb_mask = true;
+	}
+	if (!have_rcu_nocb_mask)
+		return;
+
+#ifdef CONFIG_RCU_NOCB_CPU_ZERO
+	pr_info("\tOffload RCU callbacks from CPU 0\n");
+	cpumask_set_cpu(0, rcu_nocb_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
+#ifdef CONFIG_RCU_NOCB_CPU_ALL
+	pr_info("\tOffload RCU callbacks from all CPUs\n");
+	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
+#if defined(CONFIG_NO_HZ_FULL)
+	if (tick_nohz_full_running)
+		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+			    rcu_nocb_mask);
+	}
+	cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
+	pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
+	if (rcu_nocb_poll)
+		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
+
+	for_each_rcu_flavor(rsp) {
+		for_each_cpu(cpu, rcu_nocb_mask) {
+			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+
+			/*
+			 * If there are early callbacks, they will need
+			 * to be moved to the nocb lists.
+			 */
+			WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
+				     &rdp->nxtlist &&
+				     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
+			init_nocb_callback_list(rdp);
+		}
+		rcu_organize_nocb_kthreads(rsp);
+	}
 }
 
 /* Initialize per-rcu_data variables for no-CBs CPUs. */
@@ -2459,15 +2516,85 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
 }
 
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthread for the specified RCU flavor, spawn it. If the CPUs are
+ * brought online out of order, this can require re-organizing the
+ * leader-follower relationships.
+ */
+static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+{
+	struct rcu_data *rdp;
+	struct rcu_data *rdp_last;
+	struct rcu_data *rdp_old_leader;
+	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
+	struct task_struct *t;
+
+	/*
+	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
+	 * then nothing to do.
+	 */
+	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
+		return;
+
+	/* If we didn't spawn the leader first, reorganize! */
+	rdp_old_leader = rdp_spawn->nocb_leader;
+	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
+		rdp_last = NULL;
+		rdp = rdp_old_leader;
+		do {
+			rdp->nocb_leader = rdp_spawn;
+			if (rdp_last && rdp != rdp_spawn)
+				rdp_last->nocb_next_follower = rdp;
+			rdp_last = rdp;
+			rdp = rdp->nocb_next_follower;
+			rdp_last->nocb_next_follower = NULL;
+		} while (rdp);
+		rdp_spawn->nocb_next_follower = rdp_old_leader;
+	}
+
+	/* Spawn the kthread for this CPU and RCU flavor. */
+	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
+			"rcuo%c/%d", rsp->abbr, cpu);
+	BUG_ON(IS_ERR(t));
+	ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthreads, spawn them.
+ */
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+	struct rcu_state *rsp;
+
+	if (rcu_scheduler_fully_active)
+		for_each_rcu_flavor(rsp)
+			rcu_spawn_one_nocb_kthread(rsp, cpu);
+}
+
+/*
+ * Once the scheduler is running, spawn rcuo kthreads for all online
+ * no-CBs CPUs. This assumes that the early_initcall()s happen before
+ * non-boot CPUs come online -- if this changes, we will need to add
+ * some mutual exclusion.
+ */
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		rcu_spawn_all_nocb_kthreads(cpu);
+}
+
 /* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */
 static int rcu_nocb_leader_stride = -1;
 module_param(rcu_nocb_leader_stride, int, 0444);
 
 /*
- * Create a kthread for each RCU flavor for each no-CBs CPU.
- * Also initialize leader-follower relationships.
+ * Initialize leader-follower relationships for all no-CBs CPU.
  */
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
 {
 	int cpu;
 	int ls = rcu_nocb_leader_stride;
@@ -2475,14 +2602,9 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	struct rcu_data *rdp_leader = NULL; /* Suppress misguided gcc warn. */
 	struct rcu_data *rdp_prev = NULL;
-	struct task_struct *t;
 
-	if (rcu_nocb_mask == NULL)
+	if (!have_rcu_nocb_mask)
 		return;
-#if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL)
-	if (tick_nohz_full_running)
-		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
-#endif /* #if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) */
 	if (ls == -1) {
 		ls = int_sqrt(nr_cpu_ids);
 		rcu_nocb_leader_stride = ls;
@@ -2505,21 +2627,15 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 			rdp_prev->nocb_next_follower = rdp;
 		}
 		rdp_prev = rdp;
-
-		/* Spawn the kthread for this CPU. */
-		t = kthread_run(rcu_nocb_kthread, rdp,
-				"rcuo%c/%d", rsp->abbr, cpu);
-		BUG_ON(IS_ERR(t));
-		ACCESS_ONCE(rdp->nocb_kthread) = t;
 	}
 }
 
 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
 static bool init_nocb_callback_list(struct rcu_data *rdp)
 {
-	if (rcu_nocb_mask == NULL ||
-	    !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
+	if (!rcu_is_nocb_cpu(rdp->cpu))
 		return false;
+
 	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
 	return true;
 }
@@ -2541,21 +2657,21 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 			    bool lazy, unsigned long flags)
 {
-	return 0;
+	return false;
 }
 
 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 						     struct rcu_data *rdp,
 						     unsigned long flags)
 {
-	return 0;
+	return false;
 }
 
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
 }
 
-static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
 {
 	return false;
 }
@@ -2564,7 +2680,11 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 {
 }
 
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(void)
 {
 }
 
@@ -2595,16 +2715,6 @@ static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
 
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 
-/*
- * Define RCU flavor that holds sysidle state. This needs to be the
- * most active flavor of RCU.
- */
-#ifdef CONFIG_PREEMPT_RCU
-static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
 static int full_sysidle_state; /* Current system-idle state. */
 #define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */
 #define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */
@@ -2622,6 +2732,10 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
 {
 	unsigned long j;
 
+	/* If there are no nohz_full= CPUs, no need to track this. */
+	if (!tick_nohz_full_enabled())
+		return;
+
 	/* Adjust nesting, check for fully idle. */
 	if (irq) {
 		rdtp->dynticks_idle_nesting--;
@@ -2687,6 +2801,10 @@ void rcu_sysidle_force_exit(void)
  */
 static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
 {
+	/* If there are no nohz_full= CPUs, no need to track this. */
+	if (!tick_nohz_full_enabled())
+		return;
+
 	/* Adjust nesting, check for already non-idle. */
 	if (irq) {
 		rdtp->dynticks_idle_nesting++;
@@ -2741,12 +2859,16 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
 	unsigned long j;
 	struct rcu_dynticks *rdtp = rdp->dynticks;
 
+	/* If there are no nohz_full= CPUs, don't check system-wide idleness. */
+	if (!tick_nohz_full_enabled())
+		return;
+
 	/*
 	 * If some other CPU has already reported non-idle, if this is
 	 * not the flavor of RCU that tracks sysidle state, or if this
 	 * is an offline or the timekeeping CPU, nothing to do.
 	 */
-	if (!*isidle || rdp->rsp != rcu_sysidle_state ||
+	if (!*isidle || rdp->rsp != rcu_state_p ||
 	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
 		return;
 	if (rcu_gp_in_progress(rdp->rsp))
@@ -2772,7 +2894,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
  */
 static bool is_sysidle_rcu_state(struct rcu_state *rsp)
 {
-	return rsp == rcu_sysidle_state;
+	return rsp == rcu_state_p;
 }
 
 /*
@@ -2850,7 +2972,7 @@ static void rcu_sysidle_cancel(void)
 static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
 			       unsigned long maxj, bool gpkt)
 {
-	if (rsp != rcu_sysidle_state)
+	if (rsp != rcu_state_p)
 		return; /* Wrong flavor, ignore. */
 	if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
 		return; /* Running state machine from timekeeping CPU. */
@@ -2867,6 +2989,10 @@ static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
 				  unsigned long maxj)
 {
+	/* If there are no nohz_full= CPUs, no need to track this. */
+	if (!tick_nohz_full_enabled())
+		return;
+
 	rcu_sysidle_report(rsp, isidle, maxj, true);
 }
 
@@ -2893,7 +3019,8 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
 
 /*
  * Check to see if the system is fully idle, other than the timekeeping CPU.
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts. This is not intended to be
+ * called unless tick_nohz_full_enabled().
  */
 bool rcu_sys_is_idle(void)
 {
@@ -2919,13 +3046,12 @@ bool rcu_sys_is_idle(void)
 
 		/* Scan all the CPUs looking for nonidle CPUs. */
 		for_each_possible_cpu(cpu) {
-			rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu);
+			rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
 			rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
 			if (!isidle)
 				break;
 		}
-		rcu_sysidle_report(rcu_sysidle_state,
-				   isidle, maxj, false);
+		rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
 		oldrss = rss;
 		rss = ACCESS_ONCE(full_sysidle_state);
 	}
@@ -2952,7 +3078,7 @@ bool rcu_sys_is_idle(void)
 	 * provided by the memory allocator.
 	 */
 	if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
-	    !rcu_gp_in_progress(rcu_sysidle_state) &&
+	    !rcu_gp_in_progress(rcu_state_p) &&
 	    !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
 		call_rcu(&rsh.rh, rcu_sysidle_cb);
 	return false;
@@ -3036,3 +3162,19 @@ static void rcu_bind_gp_kthread(void)
 	housekeeping_affine(current);
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
+
+/* Record the current task on dyntick-idle entry. */
+static void rcu_dynticks_task_enter(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}
+
+/* Record no current task on dyntick-idle exit. */
+static void rcu_dynticks_task_exit(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}