author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-07-11 14:30:24 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-09-16 13:08:02 -0400
commit     35ce7f29a44a888c45c0a9f202f69e10613c5306
tree       71d72916c0582574a4317aaabd4f24291547326b /kernel/rcu/tree_plugin.h
parent     9386c0b75dda05f535a10ea1abf1817fe292c81c
rcu: Create rcuo kthreads only for onlined CPUs
RCU currently uses for_each_possible_cpu() to spawn rcuo kthreads,
which can result in more rcuo kthreads than one would expect; for
example, derRichard reported 64 CPUs' worth of rcuo kthreads on an
8-CPU image. This commit therefore creates rcuo kthreads only for
those CPUs that actually come online.
This was reported by derRichard on the OFTC IRC network.
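In outline, the fix replaces eager boot-time spawning across every
possible CPU with spawning keyed to which CPUs actually come online.
A schematic fragment of the two iteration patterns follows;
spawn_rcuo_for() is an invented stand-in for the real per-CPU,
per-flavor spawn path, and CPUs onlined after boot are handled by the
CPU-hotplug path rather than by this loop:

        /* Before: rcuo kthreads for every *possible* CPU, online or not. */
        for_each_possible_cpu(cpu)
                spawn_rcuo_for(cpu);    /* hypothetical helper */

        /* After: rcuo kthreads only for CPUs that are actually online. */
        for_each_online_cpu(cpu)
                spawn_rcuo_for(cpu);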
Reported-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Tested-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
 -rw-r--r--  kernel/rcu/tree_plugin.h  90
 1 file changed, 79 insertions(+), 11 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 410c74424d96..31c7afb611fd 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2479,6 +2479,7 @@ void __init rcu_init_nohz(void)
                               rdp->nxttail[RCU_NEXT_TAIL] != NULL);
                        init_nocb_callback_list(rdp);
                }
+               rcu_organize_nocb_kthreads(rsp);
        }
 }
 
@@ -2490,15 +2491,85 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
        rdp->nocb_follower_tail = &rdp->nocb_follower_head;
 }
 
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
+ * brought online out of order, this can require re-organizing the
+ * leader-follower relationships.
+ */
+static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+{
+       struct rcu_data *rdp;
+       struct rcu_data *rdp_last;
+       struct rcu_data *rdp_old_leader;
+       struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
+       struct task_struct *t;
+
+       /*
+        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
+        * then nothing to do.
+        */
+       if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
+               return;
+
+       /* If we didn't spawn the leader first, reorganize! */
+       rdp_old_leader = rdp_spawn->nocb_leader;
+       if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
+               rdp_last = NULL;
+               rdp = rdp_old_leader;
+               do {
+                       rdp->nocb_leader = rdp_spawn;
+                       if (rdp_last && rdp != rdp_spawn)
+                               rdp_last->nocb_next_follower = rdp;
+                       rdp_last = rdp;
+                       rdp = rdp->nocb_next_follower;
+                       rdp_last->nocb_next_follower = NULL;
+               } while (rdp);
+               rdp_spawn->nocb_next_follower = rdp_old_leader;
+       }
+
+       /* Spawn the kthread for this CPU and RCU flavor. */
+       t = kthread_run(rcu_nocb_kthread, rdp_spawn,
+                       "rcuo%c/%d", rsp->abbr, cpu);
+       BUG_ON(IS_ERR(t));
+       ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthreads, spawn them.
+ */
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+       struct rcu_state *rsp;
+
+       if (rcu_scheduler_fully_active)
+               for_each_rcu_flavor(rsp)
+                       rcu_spawn_one_nocb_kthread(rsp, cpu);
+}
+
+/*
+ * Once the scheduler is running, spawn rcuo kthreads for all online
+ * no-CBs CPUs.  This assumes that the early_initcall()s happen before
+ * non-boot CPUs come online -- if this changes, we will need to add
+ * some mutual exclusion.
+ */
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               rcu_spawn_all_nocb_kthreads(cpu);
+}
+
 /* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
 static int rcu_nocb_leader_stride = -1;
 module_param(rcu_nocb_leader_stride, int, 0444);
 
 /*
- * Create a kthread for each RCU flavor for each no-CBs CPU.
- * Also initialize leader-follower relationships.
+ * Initialize leader-follower relationships for all no-CBs CPU.
  */
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
 {
        int cpu;
        int ls = rcu_nocb_leader_stride;
@@ -2506,7 +2577,6 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_data *rdp_leader = NULL;     /* Suppress misguided gcc warn. */
        struct rcu_data *rdp_prev = NULL;
-       struct task_struct *t;
 
        if (rcu_nocb_mask == NULL)
                return;
@@ -2532,12 +2602,6 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
                        rdp_prev->nocb_next_follower = rdp;
                }
                rdp_prev = rdp;
-
-               /* Spawn the kthread for this CPU. */
-               t = kthread_run(rcu_nocb_kthread, rdp,
-                               "rcuo%c/%d", rsp->abbr, cpu);
-               BUG_ON(IS_ERR(t));
-               ACCESS_ONCE(rdp->nocb_kthread) = t;
        }
 }
 
@@ -2591,7 +2655,11 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 {
 }
 
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(void)
 {
 }
 
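To make the out-of-order online case concrete, what follows is a small
userspace model of the leader take-over idea in
rcu_spawn_one_nocb_kthread(). It is a loose sketch, not a line-for-line
transcription of the kernel loop: struct nocb_node, organize(), and
spawn() are invented names, and the list splice is written in a
simplified two-pass form.

#include <stdio.h>

#define NR_CPUS 8
#define STRIDE  4                       /* plays the role of rcu_nocb_leader_stride */

struct nocb_node {
        int cpu;
        int spawned;                    /* models rdp->nocb_kthread != NULL */
        struct nocb_node *leader;       /* models rdp->nocb_leader */
        struct nocb_node *next;         /* models rdp->nocb_next_follower */
};

static struct nocb_node nodes[NR_CPUS];

/* Boot-time layout: every STRIDE-th CPU leads the CPUs that follow it. */
static void organize(void)
{
        struct nocb_node *leader = NULL;
        struct nocb_node *prev = NULL;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                struct nocb_node *n = &nodes[cpu];

                n->cpu = cpu;
                if (cpu % STRIDE == 0) {        /* new leader */
                        n->leader = n;
                        leader = n;
                } else {                        /* follower of current leader */
                        n->leader = leader;
                        prev->next = n;
                }
                prev = n;
        }
}

/* "Spawn" a CPU; if its leader has not been spawned yet, take over. */
static void spawn(int cpu)
{
        struct nocb_node *s = &nodes[cpu];
        struct nocb_node *old = s->leader;
        struct nocb_node *n;

        if (s->spawned)
                return;
        if (old != s && !old->spawned) {
                /* Re-point the whole group at the new leader. */
                for (n = old; n; n = n->next)
                        n->leader = s;
                /* Unlink s from the old chain... */
                for (n = old; n; n = n->next)
                        if (n->next == s)
                                n->next = s->next;
                /* ...and put the old leader at the head of s's followers. */
                s->next = old;
        }
        s->spawned = 1;                 /* stands in for kthread_run() */
}

int main(void)
{
        int cpu;

        organize();
        spawn(2);       /* follower CPU 2 comes up before its leader, CPU 0 */
        spawn(0);       /* CPU 0 now joins as an ordinary follower */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %d: leader %d%s\n", cpu, nodes[cpu].leader->cpu,
                       nodes[cpu].spawned ? " (spawned)" : "");
        return 0;
}

Running the model shows CPU 2 taking over leadership of the first group
when it is spawned before CPU 0, after which CPU 0 joins as a follower;
this mirrors what the kernel code does by re-pointing ->nocb_leader and
splicing the old leader onto the new leader's follower list.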