author     Ingo Molnar <mingo@kernel.org>	2013-12-16 05:43:41 -0500
committer  Ingo Molnar <mingo@kernel.org>	2013-12-16 05:43:41 -0500
commit     73a7ac2808fa52bdab1781646568b6f90c3d7034 (patch)
tree       b3a79f3ce811167c37e9c0e65aeb8a7c70bed4c8 /kernel/rcu/tree_plugin.h
parent     319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
parent     0d3c55bc9fd58393bd3bd9974991ec1f815e1326 (diff)
Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull v3.14 RCU updates from Paul E. McKenney.
The main changes:
* Update RCU documentation.
* Miscellaneous fixes.
* Add RCU torture scripts.
* Static-analysis improvements.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	89
1 file changed, 76 insertions(+), 13 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 08a765232432..3ca32736e264 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -361,10 +361,14 @@ void rcu_read_unlock_special(struct task_struct *t)
 	special = t->rcu_read_unlock_special;
 	if (special & RCU_READ_UNLOCK_NEED_QS) {
 		rcu_preempt_qs(smp_processor_id());
+		if (!t->rcu_read_unlock_special) {
+			local_irq_restore(flags);
+			return;
+		}
 	}
 
-	/* Hardware IRQ handlers cannot block. */
-	if (in_irq() || in_serving_softirq()) {
+	/* Hardware IRQ handlers cannot block, complain if they get here. */
+	if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
 		local_irq_restore(flags);
 		return;
 	}
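Editor's note: the switch from a bare check to WARN_ON_ONCE() works because the kernel's WARN_ON_ONCE() macro evaluates to the truth value of its condition, so the one-time warning and the early bail-out share a single test. A minimal illustration of the idiom; the helper below is hypothetical, not part of this patch:

	#include <linux/bug.h>		/* WARN_ON_ONCE() */
	#include <linux/hardirq.h>	/* in_irq() */

	/* Hypothetical helper: warn (once) and refuse to run from IRQ context. */
	static int try_do_work(void)
	{
		if (WARN_ON_ONCE(in_irq()))	/* macro returns the tested condition */
			return -EINVAL;		/* bail out after warning once */
		/* ... normal processing ... */
		return 0;
	}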
@@ -779,8 +783,10 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 		}
 		if (rnp->parent == NULL) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-			if (wake)
+			if (wake) {
+				smp_mb(); /* EGP done before wake_up(). */
 				wake_up(&sync_rcu_preempt_exp_wq);
+			}
 			break;
 		}
 		mask = rnp->grpmask;
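Editor's note: the added smp_mb() is the usual publish-then-wake pattern: the stores recording completion of the expedited grace period must be globally visible before wake_up() can let a waiter resume. A generic sketch of the pattern under assumed names (done_flag and done_wq are illustrative only, not from this patch):

	#include <linux/smp.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(done_wq);	/* hypothetical */
	static int done_flag;				/* hypothetical */

	static void signal_done(void)
	{
		done_flag = 1;
		smp_mb();		/* publish done_flag before waking waiters */
		wake_up(&done_wq);
	}

	static void wait_for_done(void)
	{
		wait_event(done_wq, done_flag);	/* sleeps until done_flag is seen */
	}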
@@ -1852,6 +1858,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 
 	/* Wait for callbacks from earlier instance to complete. */
 	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
 
 	/*
 	 * Prevent premature wakeup: ensure that all increments happen
@@ -2101,7 +2108,8 @@ bool rcu_is_nocb_cpu(int cpu)
 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 				    struct rcu_head *rhp,
 				    struct rcu_head **rhtp,
-				    int rhcount, int rhcount_lazy)
+				    int rhcount, int rhcount_lazy,
+				    unsigned long flags)
 {
 	int len;
 	struct rcu_head **old_rhpp;
@@ -2122,9 +2130,16 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	}
 	len = atomic_long_read(&rdp->nocb_q_count);
 	if (old_rhpp == &rdp->nocb_head) {
-		wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
+		if (!irqs_disabled_flags(flags)) {
+			wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WakeEmpty"));
+		} else {
+			rdp->nocb_defer_wakeup = true;
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WakeEmptyIsDeferred"));
+		}
 		rdp->qlen_last_fqs_check = 0;
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty"));
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
 		wake_up_process(t); /* ... or if many callbacks queued. */
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
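Editor's note: this hunk carries the deadlock fix the trace strings hint at. wake_up() takes scheduler locks, and those locks are acquired with interrupts disabled, so if call_rcu() is invoked while interrupts are off the caller may already hold them; waking immediately could then self-deadlock. Testing irqs_disabled_flags(flags) and merely recording that a wakeup is owed avoids this. A stripped-down sketch of the pattern, with hypothetical names (worker_wq, defer_wake):

	#include <linux/irqflags.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(worker_wq);	/* hypothetical */
	static bool defer_wake;				/* hypothetical flag */

	static void enqueue_and_wake(unsigned long flags)
	{
		if (!irqs_disabled_flags(flags))
			wake_up(&worker_wq);	/* irqs were on, so no irq-disabled
						   scheduler locks can be held */
		else
			defer_wake = true;	/* unsafe now: defer the wakeup */
	}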
@@ -2145,12 +2160,12 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
  * "rcuo" kthread can find it.
  */
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy)
+			    bool lazy, unsigned long flags)
 {
 
 	if (!rcu_is_nocb_cpu(rdp->cpu))
 		return 0;
-	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
+	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
 					 (unsigned long)rhp->func,
@@ -2168,7 +2183,8 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
  * not a no-CBs CPU.
  */
 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
-						     struct rcu_data *rdp)
+						     struct rcu_data *rdp,
+						     unsigned long flags)
 {
 	long ql = rsp->qlen;
 	long qll = rsp->qlen_lazy;
@@ -2182,14 +2198,14 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 	/* First, enqueue the donelist, if any. This preserves CB ordering. */
 	if (rsp->orphan_donelist != NULL) {
 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
-					rsp->orphan_donetail, ql, qll);
+					rsp->orphan_donetail, ql, qll, flags);
 		ql = qll = 0;
 		rsp->orphan_donelist = NULL;
 		rsp->orphan_donetail = &rsp->orphan_donelist;
 	}
 	if (rsp->orphan_nxtlist != NULL) {
 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
-					rsp->orphan_nxttail, ql, qll);
+					rsp->orphan_nxttail, ql, qll, flags);
 		ql = qll = 0;
 		rsp->orphan_nxtlist = NULL;
 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
@@ -2250,6 +2266,7 @@ static int rcu_nocb_kthread(void *arg)
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("Sleep"));
 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+			/* Memory barrier provide by xchg() below. */
 		} else if (firsttime) {
 			firsttime = 0;
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
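Editor's note: the new comment leans on a documented kernel rule: value-returning atomic operations such as xchg() imply a full memory barrier before and after the operation, so no separate smp_mb() is needed where the kthread later swaps out the callback list. A sketch of the idiom with a hypothetical list head (cb_head, cb_node are not from this patch):

	#include <linux/atomic.h>

	struct cb_node {			/* hypothetical node type */
		struct cb_node *next;
	};

	static struct cb_node *cb_head;		/* hypothetical shared list head */

	/*
	 * Detach the entire list. xchg() returns the old head and acts as a
	 * full barrier, so later reads of the detached nodes cannot be
	 * reordered before the swap.
	 */
	static struct cb_node *detach_all(void)
	{
		return xchg(&cb_head, NULL);
	}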
@@ -2310,6 +2327,22 @@ static int rcu_nocb_kthread(void *arg)
 	return 0;
 }
 
+/* Is a deferred wakeup of rcu_nocb_kthread() required? */
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+{
+	return ACCESS_ONCE(rdp->nocb_defer_wakeup);
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread(). */
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+	if (!rcu_nocb_need_deferred_wakeup(rdp))
+		return;
+	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
+	wake_up(&rdp->nocb_wq);
+	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
+}
+
 /* Initialize per-rcu_data variables for no-CBs CPUs. */
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
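Editor's note: the consumer of nocb_defer_wakeup lives outside this file (the diffstat above is limited to kernel/rcu/tree_plugin.h), so the call site does not appear in this diff. The consuming side would plausibly look like the sketch below, run from a later context where wakeups are safe; the hook name is an assumption, not shown by this patch:

	/* Hypothetical wakeup-safe hook, e.g. during RCU core processing. */
	static void rcu_do_deferred_work(struct rcu_data *rdp)
	{
		do_nocb_deferred_wakeup(rdp);	/* no-op unless a wakeup was deferred */
	}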
@@ -2365,13 +2398,14 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 }
 
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy)
+			    bool lazy, unsigned long flags)
 {
 	return 0;
 }
 
 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
-						     struct rcu_data *rdp)
+						     struct rcu_data *rdp,
+						     unsigned long flags)
 {
 	return 0;
 }
@@ -2380,6 +2414,15 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
 }
 
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+{
+	return false;
+}
+
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+}
+
 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 {
 }
@@ -2829,3 +2872,23 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
 }
 
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+/*
+ * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
+ * grace-period kthread will do force_quiescent_state() processing?
+ * The idea is to avoid waking up RCU core processing on such a
+ * CPU unless the grace period has extended for too long.
+ *
+ * This code relies on the fact that all NO_HZ_FULL CPUs are also
+ * CONFIG_RCU_NOCB_CPUs.
+ */
+static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_cpu(smp_processor_id()) &&
+	    (!rcu_gp_in_progress(rsp) ||
+	     ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
+		return 1;
+#endif /* #ifdef CONFIG_NO_HZ_FULL */
+	return 0;
+}
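Editor's note: ULONG_CMP_LT() is RCU's wraparound-safe time comparison; a naive jiffies < rsp->gp_start + HZ would misjudge ordering whenever the jiffies counter wraps. The macro is defined in include/linux/rcupdate.h in terms of unsigned modular subtraction:

	#include <linux/kernel.h>	/* ULONG_MAX */

	/*
	 * Wraparound-safe "time a is before time b": the modular difference
	 * (a) - (b) lands in the upper half of the unsigned range exactly
	 * when a is behind b.
	 */
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

	/*
	 * Worked example: with a = 3 (jiffies just wrapped) and
	 * b = ULONG_MAX - 2 (a deadline set just before the wrap),
	 * (a) - (b) == 6, so ULONG_CMP_LT(a, b) is false and a is correctly
	 * treated as *after* b, whereas the naive test a < b would wrongly
	 * report it as before.
	 */

So the rcu_nohz_full_cpu() test above reads: keep ignoring RCU on this tick-free CPU if no grace period is in progress, or if the current one began less than HZ ticks (one second) ago.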