author    Linus Torvalds <torvalds@linux-foundation.org>  2014-01-20 13:25:12 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-20 13:25:12 -0500
commit    a693c46e14c9fdadbcd68ddfa94a4f72495531a9 (patch)
tree      ae8cd363c78959159b3b897b13c2d78c6923d355 /kernel/rcu/tree_plugin.h
parent    6ffbe7d1fabddc768724656f159759cae7818cd9 (diff)
parent    73a7ac2808fa52bdab1781646568b6f90c3d7034 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:

 - add RCU torture scripts/tooling
 - static analysis improvements
 - update RCU documentation
 - miscellaneous fixes

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  rcu: Remove "extern" from function declarations in kernel/rcu/rcu.h
  rcu: Remove "extern" from function declarations in include/linux/*rcu*.h
  rcu/torture: Dynamically allocate SRCU output buffer to avoid overflow
  rcu: Don't activate RCU core on NO_HZ_FULL CPUs
  rcu: Warn on allegedly impossible rcu_read_unlock_special() from irq
  rcu: Add an RCU_INITIALIZER for global RCU-protected pointers
  rcu: Make rcu_assign_pointer's assignment volatile and type-safe
  bonding: Use RCU_INIT_POINTER() for better overhead and for sparse
  rcu: Add comment on evaluate-once properties of rcu_assign_pointer().
  rcu: Provide better diagnostics for blocking in RCU callback functions
  rcu: Improve SRCU's grace-period comments
  rcu: Fix CONFIG_RCU_FANOUT_EXACT for odd fanout/leaf values
  rcu: Fix coccinelle warnings
  rcutorture: Stop tracking FSF's postal address
  rcutorture: Move checkarg to functions.sh
  rcutorture: Flag errors and warnings with color coding
  rcutorture: Record results from repeated runs of the same test scenario
  rcutorture: Test summary at end of run with less chattiness
  rcutorture: Update comment in kvm.sh listing typical RCU trace events
  rcutorture: Add tracing-enabled version of TREE08
  ...
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h  89
1 file changed, 76 insertions(+), 13 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 506a7a97a2e2..6e2ef4b2b920 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -363,10 +363,14 @@ void rcu_read_unlock_special(struct task_struct *t)
 	special = t->rcu_read_unlock_special;
 	if (special & RCU_READ_UNLOCK_NEED_QS) {
 		rcu_preempt_qs(smp_processor_id());
+		if (!t->rcu_read_unlock_special) {
+			local_irq_restore(flags);
+			return;
+		}
 	}
 
-	/* Hardware IRQ handlers cannot block. */
-	if (in_irq() || in_serving_softirq()) {
+	/* Hardware IRQ handlers cannot block, complain if they get here. */
+	if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
 		local_irq_restore(flags);
 		return;
 	}
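
The rewritten check exploits the fact that WARN_ON_ONCE() evaluates to its condition: the first offending call prints one stack trace, and every call still takes the early-return path where blocking would be illegal. A minimal sketch of the idiom (function name hypothetical):

	#include <linux/bug.h>
	#include <linux/hardirq.h>

	static void hypothetical_unlock_slowpath(void)
	{
		/*
		 * WARN_ON_ONCE() returns the tested condition, so this
		 * both complains (once) and bails out on every call made
		 * from hardirq or softirq context.
		 */
		if (WARN_ON_ONCE(in_irq() || in_serving_softirq()))
			return;	/* cannot block here */

		/* ... slow path that may block ... */
	}
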
@@ -785,8 +789,10 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 	}
 	if (rnp->parent == NULL) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		if (wake)
+		if (wake) {
+			smp_mb(); /* EGP done before wake_up(). */
 			wake_up(&sync_rcu_preempt_exp_wq);
+		}
 		break;
 	}
 	mask = rnp->grpmask;
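
The added smp_mb() pairs with the ordering in the waiter's wait_event() path: the expedited grace period's completion must be globally visible before any task is awakened to act on it. A hedged sketch of the waker/sleeper pairing, with hypothetical names:

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(exp_wq);
	static bool exp_done;

	static void waker(void)
	{
		exp_done = true;	/* publish grace-period completion */
		smp_mb();		/* order the store before the wakeup */
		wake_up(&exp_wq);
	}

	static void sleeper(void)
	{
		/* wait_event() re-checks the condition around each sleep,
		 * so it returns only after observing exp_done == true. */
		wait_event(exp_wq, exp_done);
	}
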
@@ -1864,6 +1870,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 
 	/* Wait for callbacks from earlier instance to complete. */
 	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
 
 	/*
 	 * Prevent premature wakeup: ensure that all increments happen
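
This barrier is the mirror image of the waker-side smp_mb() above: it follows the wait, ordering the observed completion of the old callbacks before the rcu_head structures are reused. A hedged sketch with hypothetical names:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(done_wq);
	static atomic_t cbs_outstanding;

	static void reuse_callback_batch(void)
	{
		/* Sleep until the previous batch has been invoked... */
		wait_event(done_wq, atomic_read(&cbs_outstanding) == 0);
		/* ...and order that observation before reinitializing
		 * the rcu_head structures for the next batch. */
		smp_mb();

		/* ... safe to reuse the callback structures here ... */
	}
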
@@ -2113,7 +2120,8 @@ bool rcu_is_nocb_cpu(int cpu)
 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 				    struct rcu_head *rhp,
 				    struct rcu_head **rhtp,
-				    int rhcount, int rhcount_lazy)
+				    int rhcount, int rhcount_lazy,
+				    unsigned long flags)
 {
 	int len;
 	struct rcu_head **old_rhpp;
@@ -2134,9 +2142,16 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	}
 	len = atomic_long_read(&rdp->nocb_q_count);
 	if (old_rhpp == &rdp->nocb_head) {
-		wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
+		if (!irqs_disabled_flags(flags)) {
+			wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WakeEmpty"));
+		} else {
+			rdp->nocb_defer_wakeup = true;
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("WakeEmptyIsDeferred"));
+		}
 		rdp->qlen_last_fqs_check = 0;
-		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty"));
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
 		wake_up_process(t); /* ... or if many callbacks queued. */
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
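
With interrupts disabled, the call_rcu() caller might hold scheduler locks that wake_up() itself acquires, so waking the rcuo kthread directly could deadlock; the hunk therefore records the wakeup in ->nocb_defer_wakeup and lets a later, irq-enabled context pay it off. A minimal sketch of the defer-then-poll pattern (names hypothetical):

	#include <linux/irqflags.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(cb_wq);
	static bool defer_wake;

	/* Enqueue path: may run with irqs disabled, scheduler locks held. */
	static void enqueue_side(unsigned long flags)
	{
		if (!irqs_disabled_flags(flags))
			wake_up(&cb_wq);	/* safe to wake directly */
		else
			defer_wake = true;	/* record the debt instead */
	}

	/* Polled later from a context where waking is known to be safe. */
	static void pay_off_deferred_wake(void)
	{
		if (defer_wake) {
			defer_wake = false;
			wake_up(&cb_wq);
		}
	}
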
@@ -2157,12 +2172,12 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
  * "rcuo" kthread can find it.
  */
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy)
+			    bool lazy, unsigned long flags)
 {
 
 	if (!rcu_is_nocb_cpu(rdp->cpu))
 		return 0;
-	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
+	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
 					 (unsigned long)rhp->func,
@@ -2180,7 +2195,8 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
  * not a no-CBs CPU.
  */
 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
-						     struct rcu_data *rdp)
+						     struct rcu_data *rdp,
+						     unsigned long flags)
 {
 	long ql = rsp->qlen;
 	long qll = rsp->qlen_lazy;
@@ -2194,14 +2210,14 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 	/* First, enqueue the donelist, if any. This preserves CB ordering. */
 	if (rsp->orphan_donelist != NULL) {
 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
-					rsp->orphan_donetail, ql, qll);
+					rsp->orphan_donetail, ql, qll, flags);
 		ql = qll = 0;
 		rsp->orphan_donelist = NULL;
 		rsp->orphan_donetail = &rsp->orphan_donelist;
 	}
 	if (rsp->orphan_nxtlist != NULL) {
 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
-					rsp->orphan_nxttail, ql, qll);
+					rsp->orphan_nxttail, ql, qll, flags);
 		ql = qll = 0;
 		rsp->orphan_nxtlist = NULL;
 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
@@ -2263,6 +2279,7 @@ static int rcu_nocb_kthread(void *arg)
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("Sleep"));
 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+			/* Memory barrier provided by xchg() below. */
 		} else if (firsttime) {
 			firsttime = 0;
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
@@ -2323,6 +2340,22 @@ static int rcu_nocb_kthread(void *arg)
 	return 0;
 }
 
+/* Is a deferred wakeup of rcu_nocb_kthread() required? */
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+{
+	return ACCESS_ONCE(rdp->nocb_defer_wakeup);
+}
+
+/* Do a deferred wakeup of rcu_nocb_kthread(). */
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+	if (!rcu_nocb_need_deferred_wakeup(rdp))
+		return;
+	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
+	wake_up(&rdp->nocb_wq);
+	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
+}
+
 /* Initialize per-rcu_data variables for no-CBs CPUs. */
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
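
These helpers are the consumer side of the deferred-wakeup flag set in __call_rcu_nocb_enqueue() above; elsewhere in this series they are polled from RCU core processing, which runs with interrupts enabled. An illustrative (not quoted) shape of such a call site:

	/* Hypothetical polling site, modeled on kernel/rcu/tree.c. */
	static void rcu_core_pass(struct rcu_data *rdp)
	{
		/* ... normal per-CPU callback processing ... */

		/* Do any needed deferred wakeups of rcuo kthreads. */
		do_nocb_deferred_wakeup(rdp);
	}
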
@@ -2378,13 +2411,14 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 }
 
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy)
+			    bool lazy, unsigned long flags)
 {
 	return 0;
 }
 
 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
-						     struct rcu_data *rdp)
+						     struct rcu_data *rdp,
+						     unsigned long flags)
 {
 	return 0;
 }
@@ -2393,6 +2427,15 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
 }
 
+static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+{
+	return false;
+}
+
+static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+{
+}
+
 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 {
 }
@@ -2842,3 +2885,23 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
 }
 
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+/*
+ * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
+ * grace-period kthread will do force_quiescent_state() processing?
+ * The idea is to avoid waking up RCU core processing on such a
+ * CPU unless the grace period has extended for too long.
+ *
+ * This code relies on the fact that all NO_HZ_FULL CPUs are also
+ * CONFIG_RCU_NOCB_CPUs.
+ */
+static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_cpu(smp_processor_id()) &&
+	    (!rcu_gp_in_progress(rsp) ||
+	     ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
+		return 1;
+#endif /* #ifdef CONFIG_NO_HZ_FULL */
+	return 0;
+}
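
The helper only makes sense at its call site: the per-CPU check for pending RCU work can bail out early on a NO_HZ_FULL CPU, leaving the grace-period kthread to drive force_quiescent_state() once the grace period has run past the HZ-jiffy window tested above. An illustrative (not quoted) caller:

	/* Hypothetical shape of the pending-work check in kernel/rcu/tree.c. */
	static int hypothetical_rcu_pending(struct rcu_state *rsp,
					    struct rcu_data *rdp)
	{
		/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
		if (rcu_nohz_full_cpu(rsp))
			return 0;

		/* ... the usual checks: queued callbacks, needed QS, ... */
		return 1;
	}
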