| author | Paul E. McKenney <paul.mckenney@linaro.org> | 2013-02-10 23:48:58 -0500 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2013-03-26 11:04:44 -0400 |
| commit | dae6e64d2bcfd4b06304ab864c7e3a4f6b5fedf4 (patch) | |
| tree | 79172d32aab5e0cecf8fc7ab4cf1fabf14328d81 /kernel/rcutree_plugin.h | |
| parent | 911af505ef407c2511106c224dd640f882f0f590 (diff) | |
rcu: Introduce proper blocking to no-CBs kthreads GP waits
Currently, the no-CBs kthreads do repeated timed waits for grace periods
to elapse. This is crude and energy-inefficient, so this commit instead
allows no-CBs kthreads to specify exactly which grace period they are
waiting for and to block until that grace period completes.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  129

1 file changed, 112 insertions(+), 17 deletions(-)
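The patch replaces a poll-every-few-jiffies loop with a wait on a per-grace-period wait queue: a no-CBs kthread records which grace-period number it needs and sleeps until exactly that grace period has completed, instead of waking up every two jiffies to re-check. As a rough userspace analogue of the before/after behavior (a sketch only: a pthread mutex/condvar stands in for the kernel's wait queues, and `gp_completed`, `end_grace_period()` and friends are invented names, not kernel APIs):

```c
#include <pthread.h>
#include <time.h>

static pthread_mutex_t gp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gp_cv   = PTHREAD_COND_INITIALIZER;
static unsigned long   gp_completed;	/* analogue of rnp->completed */

/* Old behavior: wake up every couple of ticks and re-check, done or not. */
static void wait_for_gp_polling(unsigned long c)
{
	for (;;) {
		struct timespec two_ticks = { 0, 2000000 };

		nanosleep(&two_ticks, NULL);
		pthread_mutex_lock(&gp_lock);
		if (gp_completed >= c) {
			pthread_mutex_unlock(&gp_lock);
			return;
		}
		pthread_mutex_unlock(&gp_lock);
	}
}

/* New behavior: block until the specific grace period 'c' has completed. */
static void wait_for_gp_blocking(unsigned long c)
{
	pthread_mutex_lock(&gp_lock);
	/* Plain compare; the kernel uses wraparound-safe ULONG_CMP_GE(). */
	while (gp_completed < c)
		pthread_cond_wait(&gp_cv, &gp_lock);
	pthread_mutex_unlock(&gp_lock);
}

/* Whoever ends a grace period advances the counter and wakes all waiters. */
static void end_grace_period(void)
{
	pthread_mutex_lock(&gp_lock);
	gp_completed++;
	pthread_cond_broadcast(&gp_cv);
	pthread_mutex_unlock(&gp_lock);
}
```

In the patch itself, the role of `end_grace_period()` is played by the new `rcu_nocb_gp_cleanup()` below, which does a `wake_up_all()` on the wait queue for the grace period that just ended.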
```diff
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3e33aefce0ea..90a191452550 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2176,11 +2176,51 @@ static int __init parse_rcu_nocb_poll(char *arg)
 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
 
 /*
- * Does this CPU needs a grace period due to offloaded callbacks?
+ * Do any no-CBs CPUs need another grace period?
+ *
+ * Interrupts must be disabled. If the caller does not hold the root
+ * rnp_node structure's ->lock, the results are advisory only.
+ */
+static int rcu_nocb_needs_gp(struct rcu_state *rsp)
+{
+	struct rcu_node *rnp = rcu_get_root(rsp);
+
+	return rnp->n_nocb_gp_requests[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
+}
+
+/*
+ * Clean up this rcu_node structure's no-CBs state at the end of
+ * a grace period, and also return whether any no-CBs CPU associated
+ * with this rcu_node structure needs another grace period.
+ */
+static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+{
+	int c = rnp->completed;
+	int needmore;
+
+	wake_up_all(&rnp->nocb_gp_wq[c & 0x1]);
+	rnp->n_nocb_gp_requests[c & 0x1] = 0;
+	needmore = rnp->n_nocb_gp_requests[(c + 1) & 0x1];
+	return needmore;
+}
+
+/*
+ * Set the root rcu_node structure's ->n_nocb_gp_requests field
+ * based on the sum of those of all rcu_node structures. This does
+ * double-count the root rcu_node structure's requests, but this
+ * is necessary to handle the possibility of a rcu_nocb_kthread()
+ * having awakened during the time that the rcu_node structures
+ * were being updated for the end of the previous grace period.
  */
-static int rcu_nocb_needs_gp(struct rcu_data *rdp)
+static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
+{
+	rnp->n_nocb_gp_requests[(rnp->completed + 1) & 0x1] += nrq;
+}
+
+static void rcu_init_one_nocb(struct rcu_node *rnp)
 {
-	return rdp->nocb_needs_gp;
+	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
+	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
 }
 
 /* Is the specified CPU a no-CPUs CPU? */
```
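The hunk above adds the bookkeeping that makes the blocking wait possible: each rcu_node gains a two-entry array `n_nocb_gp_requests[]` of future-grace-period request counts and a matching pair of wait queues `nocb_gp_wq[]`, both indexed by the low-order bit of the grace-period number being waited for. A kthread needing grace period `c` bumps the count in bucket `c & 0x1`; when grace period `c` completes, `rcu_nocb_gp_cleanup()` wakes that bucket's wait queue, zeroes its count, and reports whether the other bucket (requests for grace period `c + 1`) still needs a grace period. Two buckets suffice because outstanding requests can only target the next grace period or the one after it. A minimal standalone demo of the same arithmetic (plain counters and invented names, with no locking or wait queues):

```c
#include <stdio.h>

static unsigned long completed;		/* last completed grace period */
static int gp_requests[2];		/* request counts, indexed by GP number & 0x1 */

/* A waiter records that it needs grace period 'c' to complete. */
static void request_gp(unsigned long c)
{
	gp_requests[c & 0x1]++;
}

/* Grace period 'completed + 1' just ended: wake and recycle its bucket. */
static int gp_cleanup(void)
{
	unsigned long c = ++completed;

	printf("GP %lu done, waking %d waiter(s)\n", c, gp_requests[c & 0x1]);
	gp_requests[c & 0x1] = 0;		/* bucket is now free for GP c + 2 */
	return gp_requests[(c + 1) & 0x1];	/* more work queued for GP c + 1? */
}

int main(void)
{
	request_gp(completed + 2);	/* GP already in progress: need the one after it */
	request_gp(completed + 1);	/* idle case: need just the next GP */
	while (gp_cleanup())
		;
	return 0;
}
```

Running it shows bucket `c & 0x1` being recycled as soon as grace period `c` ends, which is exactly what lets the same two buckets serve an unbounded sequence of grace periods.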
```diff
@@ -2289,31 +2329,73 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 {
 	unsigned long c;
+	bool d;
 	unsigned long flags;
-	unsigned long j;
+	unsigned long flags1;
 	struct rcu_node *rnp = rdp->mynode;
+	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	c = rnp->completed + 2;
-	rdp->nocb_needs_gp = true;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+
+	/* Count our request for a grace period. */
+	rnp->n_nocb_gp_requests[c & 0x1]++;
+
+	if (rnp->gpnum != rnp->completed) {
+
+		/*
+		 * This rcu_node structure believes that a grace period
+		 * is in progress, so we are done. When this grace
+		 * period ends, our request will be acted upon.
+		 */
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+
+	} else {
+
+		/*
+		 * Might not be a grace period, check root rcu_node
+		 * structure to see if we must start one.
+		 */
+		if (rnp != rnp_root)
+			raw_spin_lock(&rnp_root->lock); /* irqs disabled. */
+		if (rnp_root->gpnum != rnp_root->completed) {
+			raw_spin_unlock(&rnp_root->lock); /* irqs disabled. */
+		} else {
+
+			/*
+			 * No grace period, so we need to start one.
+			 * The good news is that we can wait for exactly
+			 * one grace period instead of part of the current
+			 * grace period and all of the next grace period.
+			 * Adjust counters accordingly and start the
+			 * needed grace period.
+			 */
+			rnp->n_nocb_gp_requests[c & 0x1]--;
+			c = rnp_root->completed + 1;
+			rnp->n_nocb_gp_requests[c & 0x1]++;
+			rnp_root->n_nocb_gp_requests[c & 0x1]++;
+			local_save_flags(flags1);
+			rcu_start_gp(rdp->rsp, flags1); /* Rlses ->lock. */
+		}
+
+		/* Clean up locking and irq state. */
+		if (rnp != rnp_root)
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		else
+			local_irq_restore(flags);
+	}
 
 	/*
 	 * Wait for the grace period. Do so interruptibly to avoid messing
 	 * up the load average.
 	 */
	for (;;) {
-		j = jiffies;
-		schedule_timeout_interruptible(2);
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		if (ULONG_CMP_GE(rnp->completed, c)) {
-			rdp->nocb_needs_gp = false;
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		wait_event_interruptible(
+			rnp->nocb_gp_wq[c & 0x1],
+			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
+		if (likely(d))
 			break;
-		}
-		if (j == jiffies)
-			flush_signals(current);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		flush_signals(current);
 	}
 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }
```
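The rewritten rcu_nocb_wait_gp() above has two details worth noting. First, wait_event_interruptible() can return early when a signal is pending, so the wakeup condition's value is captured in `d`: if `d` is false, the kthread flushes signals and waits again rather than assuming the grace period has ended. Second, grace-period numbers are free-running unsigned longs, so the condition compares them with ULONG_CMP_GE() rather than a plain `>=`, which stays correct even after the counter wraps. A tiny standalone check of that comparison (the macro body below mirrors the kernel's definition and is reproduced here as an assumption, not taken from this patch):

```c
#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Wraparound-safe "a >= b", mirroring the kernel's ULONG_CMP_GE(). */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long c = ULONG_MAX;	/* grace period requested just before wrap */
	unsigned long done = 1UL;	/* ->completed has since wrapped past zero */

	assert(!(done >= c));		/* naive comparison: looks "not yet done" */
	assert(ULONG_CMP_GE(done, c));	/* modular comparison: correctly "done" */
	printf("ULONG_CMP_GE handles counter wraparound\n");
	return 0;
}
```

The final hunk below simply supplies the corresponding no-op stubs for kernels built without CONFIG_RCU_NOCB_CPU.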
```diff
@@ -2416,11 +2498,24 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static int rcu_nocb_needs_gp(struct rcu_data *rdp)
+static int rcu_nocb_needs_gp(struct rcu_state *rsp)
+{
+	return 0;
+}
+
+static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
 	return 0;
 }
 
+static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
+{
+}
+
+static void rcu_init_one_nocb(struct rcu_node *rnp)
+{
+}
+
 static bool is_nocb_cpu(int cpu)
 {
 	return false;
```