Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree.c        | 24
-rw-r--r--  kernel/rcu/tree.h        |  9
-rw-r--r--  kernel/rcu/tree_plugin.h | 55
-rw-r--r--  kernel/rcu/tree_trace.c  |  3
4 files changed, 74 insertions, 17 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index abef9c358d47..264f0284c0bd 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -369,6 +369,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 				 bool user)
 {
+	struct rcu_state *rsp;
+	struct rcu_data *rdp;
+
 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
@@ -380,6 +383,10 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
 	}
+	for_each_rcu_flavor(rsp) {
+		rdp = this_cpu_ptr(rsp->rda);
+		do_nocb_deferred_wakeup(rdp);
+	}
 	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc(); /* See above. */
@@ -1928,13 +1935,13 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  * Adopt the RCU callbacks from the specified rcu_state structure's
  * orphanage. The caller must hold the ->orphan_lock.
  */
-static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 {
 	int i;
 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
 
 	/* No-CBs CPUs are handled specially. */
-	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp))
+	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
 		return;
 
 	/* Do the accounting first. */
@@ -2013,7 +2020,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
-	rcu_adopt_orphan_cbs(rsp);
+	rcu_adopt_orphan_cbs(rsp, flags);
 
 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
@@ -2330,6 +2337,9 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	/* If there are callbacks ready, invoke them. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
 		invoke_rcu_callbacks(rsp, rdp);
+
+	/* Do any needed deferred wakeups of rcuo kthreads. */
+	do_nocb_deferred_wakeup(rdp);
 }
 
 /*
@@ -2464,7 +2474,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 
 		if (cpu != -1)
 			rdp = per_cpu_ptr(rsp->rda, cpu);
-		offline = !__call_rcu_nocb(rdp, head, lazy);
+		offline = !__call_rcu_nocb(rdp, head, lazy, flags);
 		WARN_ON_ONCE(offline);
 		/* _call_rcu() is illegal on offline CPU; leak the callback. */
 		local_irq_restore(flags);
@@ -2817,6 +2827,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 		return 1;
 	}
 
+	/* Does this CPU need a deferred NOCB wakeup? */
+	if (rcu_nocb_need_deferred_wakeup(rdp)) {
+		rdp->n_rp_nocb_defer_wakeup++;
+		return 1;
+	}
+
 	/* nothing to do */
 	rdp->n_rp_need_nothing++;
 	return 0;
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8e34d8674a4e..a87adfc2916b 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -317,6 +317,7 @@ struct rcu_data {
 	unsigned long n_rp_cpu_needs_gp;
 	unsigned long n_rp_gp_completed;
 	unsigned long n_rp_gp_started;
+	unsigned long n_rp_nocb_defer_wakeup;
 	unsigned long n_rp_need_nothing;
 
 	/* 6) _rcu_barrier() and OOM callbacks. */
@@ -335,6 +336,7 @@ struct rcu_data {
 	int nocb_p_count_lazy;		/*  (approximate). */
 	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
 	struct task_struct *nocb_kthread;
+	bool nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 	/* 8) RCU CPU stall data. */
@@ -550,9 +552,12 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy);
+			    bool lazy, unsigned long flags);
 static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
-				      struct rcu_data *rdp);
+				      struct rcu_data *rdp,
+				      unsigned long flags);
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
 static void rcu_kick_nohz_cpu(int cpu);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b023e5407111..752ffaa0d681 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2104,7 +2104,8 @@ bool rcu_is_nocb_cpu(int cpu)
 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 				    struct rcu_head *rhp,
 				    struct rcu_head **rhtp,
-				    int rhcount, int rhcount_lazy)
+				    int rhcount, int rhcount_lazy,
+				    unsigned long flags)
 {
 	int len;
 	struct rcu_head **old_rhpp;
@@ -2125,9 +2126,16 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	}
 	len = atomic_long_read(&rdp->nocb_q_count);
 	if (old_rhpp == &rdp->nocb_head) {
-		wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
+		if (!irqs_disabled_flags(flags)) {
+			wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WakeEmpty"));
+		} else {
+			rdp->nocb_defer_wakeup = true;
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WakeEmptyIsDeferred"));
+		}
 		rdp->qlen_last_fqs_check = 0;
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty"));
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
 		wake_up_process(t); /* ... or if many callbacks queued. */
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
@@ -2148,12 +2156,12 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
  * "rcuo" kthread can find it.
  */
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy)
+			    bool lazy, unsigned long flags)
 {
 
 	if (!rcu_is_nocb_cpu(rdp->cpu))
 		return 0;
-	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
+	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
 					 (unsigned long)rhp->func,
@@ -2171,7 +2179,8 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
  * not a no-CBs CPU.
  */
 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
-						     struct rcu_data *rdp)
+						     struct rcu_data *rdp,
+						     unsigned long flags)
 {
 	long ql = rsp->qlen;
 	long qll = rsp->qlen_lazy;
@@ -2185,14 +2194,14 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 	/* First, enqueue the donelist, if any. This preserves CB ordering. */
 	if (rsp->orphan_donelist != NULL) {
 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
-					rsp->orphan_donetail, ql, qll);
+					rsp->orphan_donetail, ql, qll, flags);
 		ql = qll = 0;
 		rsp->orphan_donelist = NULL;
 		rsp->orphan_donetail = &rsp->orphan_donelist;
 	}
 	if (rsp->orphan_nxtlist != NULL) {
 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
-					rsp->orphan_nxttail, ql, qll);
+					rsp->orphan_nxttail, ql, qll, flags);
 		ql = qll = 0;
 		rsp->orphan_nxtlist = NULL;
 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
@@ -2314,6 +2323,22 @@ static int rcu_nocb_kthread(void *arg)
 	return 0;
 }
 
+/* Is a deferred wakeup of rcu_nocb_kthread() required? */
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+{
+	return ACCESS_ONCE(rdp->nocb_defer_wakeup);
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread(). */
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+	if (!rcu_nocb_need_deferred_wakeup(rdp))
+		return;
+	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
+	wake_up(&rdp->nocb_wq);
+	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
+}
+
 /* Initialize per-rcu_data variables for no-CBs CPUs. */
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
@@ -2369,13 +2394,14 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 }
 
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy)
+			    bool lazy, unsigned long flags)
 {
 	return 0;
 }
 
 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
-						     struct rcu_data *rdp)
+						     struct rcu_data *rdp,
+						     unsigned long flags)
 {
 	return 0;
 }
@@ -2384,6 +2410,15 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
 }
 
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+{
+	return false;
+}
+
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+}
+
 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 {
 }
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 3596797b7e46..4def475336d4 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -364,9 +364,10 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->n_rp_report_qs,
 		   rdp->n_rp_cb_ready,
 		   rdp->n_rp_cpu_needs_gp);
-	seq_printf(m, "gpc=%ld gps=%ld nn=%ld\n",
+	seq_printf(m, "gpc=%ld gps=%ld nn=%ld ndw%ld\n",
 		   rdp->n_rp_gp_completed,
 		   rdp->n_rp_gp_started,
+		   rdp->n_rp_nocb_defer_wakeup,
 		   rdp->n_rp_need_nothing);
 }
 
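
The pattern this patch adds to the no-CBs path is: if callbacks are enqueued while interrupts are disabled, __call_rcu_nocb_enqueue() does not call wake_up() on the rcuo kthread but instead sets rdp->nocb_defer_wakeup, and the wakeup is carried out later by do_nocb_deferred_wakeup() from __rcu_process_callbacks() or on idle entry. The following userspace sketch is illustrative only, not kernel code; the names worker, enqueue(), and do_deferred_wakeup() are hypothetical stand-ins for the rcuo kthread, __call_rcu_nocb_enqueue(), and do_nocb_deferred_wakeup(), and the wake_unsafe flag stands in for irqs_disabled_flags(flags).

/* Build with: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static int  queued;		/* stand-in for nocb_q_count */
static bool defer_wakeup;	/* stand-in for rdp->nocb_defer_wakeup */

static void *worker(void *arg)	/* stand-in for the rcuo kthread */
{
	pthread_mutex_lock(&lock);
	while (queued == 0)
		pthread_cond_wait(&wq, &lock);
	printf("worker: invoking %d callback(s)\n", queued);
	queued = 0;
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void enqueue(bool wake_unsafe)	/* stand-in for __call_rcu_nocb_enqueue() */
{
	pthread_mutex_lock(&lock);
	queued++;
	if (!wake_unsafe)
		pthread_cond_signal(&wq);	/* "WakeEmpty": wake immediately */
	else
		defer_wakeup = true;		/* "WakeEmptyIsDeferred": wake later */
	pthread_mutex_unlock(&lock);
}

static void do_deferred_wakeup(void)	/* stand-in for do_nocb_deferred_wakeup() */
{
	pthread_mutex_lock(&lock);
	if (defer_wakeup) {
		defer_wakeup = false;
		pthread_cond_signal(&wq);	/* "DeferredWakeEmpty" */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	enqueue(true);		/* caller cannot wake here: defer the wakeup */
	do_deferred_wakeup();	/* later, from a safe context */
	pthread_join(t, NULL);
	return 0;
}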