diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2012-06-12 14:01:13 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2012-07-02 15:33:24 -0400 |
commit | 6ce75a2326e6f8b3bdfb60e1de7934b89858e87b (patch) | |
tree | c3ecd512422ad9f445f69c78037142a99bc8db2c /kernel/rcutree.c | |
parent | 1bca8cf1a2c3c6683b12ad28a3e826ca7a834978 (diff) |
rcu: Introduce for_each_rcu_flavor() and use it
The arrival of TREE_PREEMPT_RCU some years back included some ugly
code involving either #ifdef or #ifdef'ed wrapper functions to iterate
over all non-SRCU flavors of RCU. This commit therefore introduces
a for_each_rcu_flavor() iterator over the rcu_state structures for each
flavor of RCU to clean up a bit of the ugliness.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r-- | kernel/rcutree.c | 53 |
1 file changed, 32 insertions, 21 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 5376a156be8a..b61c3ffc80e9 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -84,6 +84,7 @@ struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh); | |||
84 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 84 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
85 | 85 | ||
86 | static struct rcu_state *rcu_state; | 86 | static struct rcu_state *rcu_state; |
87 | LIST_HEAD(rcu_struct_flavors); | ||
87 | 88 | ||
88 | /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ | 89 | /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ |
89 | static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; | 90 | static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; |
@@ -860,9 +861,10 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) | |||
860 | */ | 861 | */ |
861 | void rcu_cpu_stall_reset(void) | 862 | void rcu_cpu_stall_reset(void) |
862 | { | 863 | { |
863 | rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2; | 864 | struct rcu_state *rsp; |
864 | rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2; | 865 | |
865 | rcu_preempt_stall_reset(); | 866 | for_each_rcu_flavor(rsp) |
867 | rsp->jiffies_stall = jiffies + ULONG_MAX / 2; | ||
866 | } | 868 | } |
867 | 869 | ||
868 | static struct notifier_block rcu_panic_block = { | 870 | static struct notifier_block rcu_panic_block = { |
@@ -1827,10 +1829,11 @@ __rcu_process_callbacks(struct rcu_state *rsp) | |||
1827 | */ | 1829 | */ |
1828 | static void rcu_process_callbacks(struct softirq_action *unused) | 1830 | static void rcu_process_callbacks(struct softirq_action *unused) |
1829 | { | 1831 | { |
1832 | struct rcu_state *rsp; | ||
1833 | |||
1830 | trace_rcu_utilization("Start RCU core"); | 1834 | trace_rcu_utilization("Start RCU core"); |
1831 | __rcu_process_callbacks(&rcu_sched_state); | 1835 | for_each_rcu_flavor(rsp) |
1832 | __rcu_process_callbacks(&rcu_bh_state); | 1836 | __rcu_process_callbacks(rsp); |
1833 | rcu_preempt_process_callbacks(); | ||
1834 | trace_rcu_utilization("End RCU core"); | 1837 | trace_rcu_utilization("End RCU core"); |
1835 | } | 1838 | } |
1836 | 1839 | ||
@@ -2241,9 +2244,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
2241 | */ | 2244 | */ |
2242 | static int rcu_pending(int cpu) | 2245 | static int rcu_pending(int cpu) |
2243 | { | 2246 | { |
2244 | return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || | 2247 | struct rcu_state *rsp; |
2245 | __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) || | 2248 | |
2246 | rcu_preempt_pending(cpu); | 2249 | for_each_rcu_flavor(rsp) |
2250 | if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu))) | ||
2251 | return 1; | ||
2252 | return 0; | ||
2247 | } | 2253 | } |
2248 | 2254 | ||
2249 | /* | 2255 | /* |
@@ -2253,10 +2259,13 @@ static int rcu_pending(int cpu) | |||
2253 | */ | 2259 | */ |
2254 | static int rcu_cpu_has_callbacks(int cpu) | 2260 | static int rcu_cpu_has_callbacks(int cpu) |
2255 | { | 2261 | { |
2262 | struct rcu_state *rsp; | ||
2263 | |||
2256 | /* RCU callbacks either ready or pending? */ | 2264 | /* RCU callbacks either ready or pending? */ |
2257 | return per_cpu(rcu_sched_data, cpu).nxtlist || | 2265 | for_each_rcu_flavor(rsp) |
2258 | per_cpu(rcu_bh_data, cpu).nxtlist || | 2266 | if (per_cpu_ptr(rsp->rda, cpu)->nxtlist) |
2259 | rcu_preempt_cpu_has_callbacks(cpu); | 2267 | return 1; |
2268 | return 0; | ||
2260 | } | 2269 | } |
2261 | 2270 | ||
2262 | /* | 2271 | /* |
@@ -2551,9 +2560,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) | |||
2551 | 2560 | ||
2552 | static void __cpuinit rcu_prepare_cpu(int cpu) | 2561 | static void __cpuinit rcu_prepare_cpu(int cpu) |
2553 | { | 2562 | { |
2554 | rcu_init_percpu_data(cpu, &rcu_sched_state, 0); | 2563 | struct rcu_state *rsp; |
2555 | rcu_init_percpu_data(cpu, &rcu_bh_state, 0); | 2564 | |
2556 | rcu_preempt_init_percpu_data(cpu); | 2565 | for_each_rcu_flavor(rsp) |
2566 | rcu_init_percpu_data(cpu, rsp, | ||
2567 | strcmp(rsp->name, "rcu_preempt") == 0); | ||
2557 | } | 2568 | } |
2558 | 2569 | ||
2559 | /* | 2570 | /* |
@@ -2565,6 +2576,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
2565 | long cpu = (long)hcpu; | 2576 | long cpu = (long)hcpu; |
2566 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | 2577 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); |
2567 | struct rcu_node *rnp = rdp->mynode; | 2578 | struct rcu_node *rnp = rdp->mynode; |
2579 | struct rcu_state *rsp; | ||
2568 | 2580 | ||
2569 | trace_rcu_utilization("Start CPU hotplug"); | 2581 | trace_rcu_utilization("Start CPU hotplug"); |
2570 | switch (action) { | 2582 | switch (action) { |
@@ -2589,18 +2601,16 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
2589 | * touch any data without introducing corruption. We send the | 2601 | * touch any data without introducing corruption. We send the |
2590 | * dying CPU's callbacks to an arbitrarily chosen online CPU. | 2602 | * dying CPU's callbacks to an arbitrarily chosen online CPU. |
2591 | */ | 2603 | */ |
2592 | rcu_cleanup_dying_cpu(&rcu_bh_state); | 2604 | for_each_rcu_flavor(rsp) |
2593 | rcu_cleanup_dying_cpu(&rcu_sched_state); | 2605 | rcu_cleanup_dying_cpu(rsp); |
2594 | rcu_preempt_cleanup_dying_cpu(); | ||
2595 | rcu_cleanup_after_idle(cpu); | 2606 | rcu_cleanup_after_idle(cpu); |
2596 | break; | 2607 | break; |
2597 | case CPU_DEAD: | 2608 | case CPU_DEAD: |
2598 | case CPU_DEAD_FROZEN: | 2609 | case CPU_DEAD_FROZEN: |
2599 | case CPU_UP_CANCELED: | 2610 | case CPU_UP_CANCELED: |
2600 | case CPU_UP_CANCELED_FROZEN: | 2611 | case CPU_UP_CANCELED_FROZEN: |
2601 | rcu_cleanup_dead_cpu(cpu, &rcu_bh_state); | 2612 | for_each_rcu_flavor(rsp) |
2602 | rcu_cleanup_dead_cpu(cpu, &rcu_sched_state); | 2613 | rcu_cleanup_dead_cpu(cpu, rsp); |
2603 | rcu_preempt_cleanup_dead_cpu(cpu); | ||
2604 | break; | 2614 | break; |
2605 | default: | 2615 | default: |
2606 | break; | 2616 | break; |
@@ -2717,6 +2727,7 @@ static void __init rcu_init_one(struct rcu_state *rsp, | |||
2717 | per_cpu_ptr(rsp->rda, i)->mynode = rnp; | 2727 | per_cpu_ptr(rsp->rda, i)->mynode = rnp; |
2718 | rcu_boot_init_percpu_data(i, rsp); | 2728 | rcu_boot_init_percpu_data(i, rsp); |
2719 | } | 2729 | } |
2730 | list_add(&rsp->flavors, &rcu_struct_flavors); | ||
2720 | } | 2731 | } |
2721 | 2732 | ||
2722 | /* | 2733 | /* |