Diffstat (limited to 'kernel/rcu/tree.c')
 kernel/rcu/tree.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index abef9c358d47..264f0284c0bd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -369,6 +369,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
				  bool user)
 {
+	struct rcu_state *rsp;
+	struct rcu_data *rdp;
+
 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
@@ -380,6 +383,10 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
 	}
+	for_each_rcu_flavor(rsp) {
+		rdp = this_cpu_ptr(rsp->rda);
+		do_nocb_deferred_wakeup(rdp);
+	}
 	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc(); /* See above. */
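
The do_nocb_deferred_wakeup() helper invoked from the new for_each_rcu_flavor()
loop is defined outside this file, so its body does not appear in this diff. A
minimal sketch of what it plausibly does, assuming the deferred request lives in
an rdp->nocb_defer_wakeup flag and the rcuo kthread sleeps on rdp->nocb_wq (both
field names are assumptions, not taken from this patch):

	/* Sketch only: perform a wakeup of the rcuo kthread that was
	 * previously deferred because it was unsafe at the time. */
	static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
	{
		/* Nothing to do unless a wakeup was deferred earlier. */
		if (!rcu_nocb_need_deferred_wakeup(rdp))
			return;
		/* Clear the request, then wake this CPU's rcuo kthread. */
		ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
		wake_up(&rdp->nocb_wq);
	}

Flushing any deferred wakeup here, just before the CPU enters an extended
quiescent state, presumably ensures that callbacks already handed to the rcuo
kthread are not left waiting while the CPU idles.
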
@@ -1928,13 +1935,13 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  * Adopt the RCU callbacks from the specified rcu_state structure's
  * orphanage. The caller must hold the ->orphan_lock.
  */
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 {
 	int i;
 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
 	/* No-CBs CPUs are handled specially. */
-	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp))
+	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
 		return;
 
 	/* Do the accounting first. */
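
rcu_nocb_adopt_orphan_cbs() is defined in tree_plugin.h; the new flags argument
is the interrupt state the caller saved with local_irq_save(). A hedged sketch
of the decision this enables on the no-CBs enqueue side (the helper name, its
simplified signature, and the nocb_defer_wakeup/nocb_wq fields are illustrative
assumptions):

	/* Sketch only: queue a callback for the rcuo kthread, waking it
	 * immediately only when the saved interrupt state says it is safe. */
	static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
					    struct rcu_head *rhp,
					    unsigned long flags)
	{
		/* ... enqueue rhp on rdp's no-CBs callback list ... */
		if (irqs_disabled_flags(flags)) {
			/* The caller had irqs disabled and might hold
			 * scheduler locks, where wake_up() can deadlock:
			 * record the request and defer the wakeup. */
			ACCESS_ONCE(rdp->nocb_defer_wakeup) = true;
		} else {
			/* Safe to wake the rcuo kthread right away. */
			wake_up(&rdp->nocb_wq);
		}
	}

Note that irqs_disabled_flags() tests the saved flags word rather than the
current CPU state, so the choice reflects the context of the original call
site. This is also why flags must be threaded through rcu_adopt_orphan_cbs()
and __call_rcu_nocb() in the hunks above and below.
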
@@ -2013,7 +2020,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
-	rcu_adopt_orphan_cbs(rsp);
+	rcu_adopt_orphan_cbs(rsp, flags);
 
 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
@@ -2330,6 +2337,9 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	/* If there are callbacks ready, invoke them. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
 		invoke_rcu_callbacks(rsp, rdp);
+
+	/* Do any needed deferred wakeups of rcuo kthreads. */
+	do_nocb_deferred_wakeup(rdp);
 }
 
 /*
@@ -2464,7 +2474,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 
 	if (cpu != -1)
 		rdp = per_cpu_ptr(rsp->rda, cpu);
-	offline = !__call_rcu_nocb(rdp, head, lazy);
+	offline = !__call_rcu_nocb(rdp, head, lazy, flags);
 	WARN_ON_ONCE(offline);
 	/* __call_rcu() is illegal on offline CPU; leak the callback. */
 	local_irq_restore(flags);
@@ -2817,6 +2827,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 		return 1;
 	}
 
+	/* Does this CPU need a deferred NOCB wakeup? */
+	if (rcu_nocb_need_deferred_wakeup(rdp)) {
+		rdp->n_rp_nocb_defer_wakeup++;
+		return 1;
+	}
+
 	/* nothing to do */
 	rdp->n_rp_need_nothing++;
 	return 0;
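
The rcu_nocb_need_deferred_wakeup() predicate consulted above is likewise
defined outside this file. Under the same assumption that the request is a
simple flag in rcu_data, it would be little more than:

	/* Sketch only: report whether a deferred rcuo wakeup is pending. */
	static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
	{
		return ACCESS_ONCE(rdp->nocb_defer_wakeup);
	}

Returning 1 from __rcu_pending() when a wakeup is pending causes
rcu_check_callbacks() to raise RCU_SOFTIRQ, which in turn runs
__rcu_process_callbacks() and its new do_nocb_deferred_wakeup() call, so a
deferred wakeup is carried out promptly even if the CPU never goes idle.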