diff options
author | Uma Sharma <uma.sharma523@gmail.com> | 2014-03-24 01:32:09 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-05-14 14:41:04 -0400 |
commit | e534165bbf6a04d001748c573c7d6a7bae3713a5 (patch) | |
tree | 0644c4fc01f6c29912740acddc34cf8684141095 /kernel/rcu | |
parent | f5d2a0450ddfda337ffee04abf4f7b40f132c509 (diff) |
rcu: Variable name changed in tree_plugin.h and used in tree.c
Because the variable and the struct share the name "rcu_state",
sparse gets confused in some situations, so this commit renames the
variable to "rcu_state_p" in order to avoid that confusion. This
also makes things easier for human readers.
Signed-off-by: Uma Sharma <uma.sharma523@gmail.com>
[ paulmck: Changed the declaration and several additional uses. ]
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r-- | kernel/rcu/tree.c | 16 | ||||
-rw-r--r-- | kernel/rcu/tree_plugin.h | 16 |
2 files changed, 16 insertions, 16 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3bbe48939e47..3e3f13e8b429 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -101,7 +101,7 @@ DEFINE_PER_CPU(struct rcu_data, sname##_data) | |||
101 | RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); | 101 | RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); |
102 | RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); | 102 | RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); |
103 | 103 | ||
104 | static struct rcu_state *rcu_state; | 104 | static struct rcu_state *rcu_state_p; |
105 | LIST_HEAD(rcu_struct_flavors); | 105 | LIST_HEAD(rcu_struct_flavors); |
106 | 106 | ||
107 | /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ | 107 | /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ |
@@ -275,7 +275,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); | |||
275 | */ | 275 | */ |
276 | void rcu_force_quiescent_state(void) | 276 | void rcu_force_quiescent_state(void) |
277 | { | 277 | { |
278 | force_quiescent_state(rcu_state); | 278 | force_quiescent_state(rcu_state_p); |
279 | } | 279 | } |
280 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | 280 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); |
281 | 281 | ||
@@ -327,7 +327,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, | |||
327 | 327 | ||
328 | switch (test_type) { | 328 | switch (test_type) { |
329 | case RCU_FLAVOR: | 329 | case RCU_FLAVOR: |
330 | rsp = rcu_state; | 330 | rsp = rcu_state_p; |
331 | break; | 331 | break; |
332 | case RCU_BH_FLAVOR: | 332 | case RCU_BH_FLAVOR: |
333 | rsp = &rcu_bh_state; | 333 | rsp = &rcu_bh_state; |
@@ -910,7 +910,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, | |||
910 | * we will beat on the first one until it gets unstuck, then move | 910 | * we will beat on the first one until it gets unstuck, then move |
911 | * to the next. Only do this for the primary flavor of RCU. | 911 | * to the next. Only do this for the primary flavor of RCU. |
912 | */ | 912 | */ |
913 | if (rdp->rsp == rcu_state && | 913 | if (rdp->rsp == rcu_state_p && |
914 | ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { | 914 | ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { |
915 | rdp->rsp->jiffies_resched += 5; | 915 | rdp->rsp->jiffies_resched += 5; |
916 | resched_cpu(rdp->cpu); | 916 | resched_cpu(rdp->cpu); |
@@ -2660,7 +2660,7 @@ EXPORT_SYMBOL_GPL(call_rcu_bh); | |||
2660 | void kfree_call_rcu(struct rcu_head *head, | 2660 | void kfree_call_rcu(struct rcu_head *head, |
2661 | void (*func)(struct rcu_head *rcu)) | 2661 | void (*func)(struct rcu_head *rcu)) |
2662 | { | 2662 | { |
2663 | __call_rcu(head, func, rcu_state, -1, 1); | 2663 | __call_rcu(head, func, rcu_state_p, -1, 1); |
2664 | } | 2664 | } |
2665 | EXPORT_SYMBOL_GPL(kfree_call_rcu); | 2665 | EXPORT_SYMBOL_GPL(kfree_call_rcu); |
2666 | 2666 | ||
@@ -2787,7 +2787,7 @@ unsigned long get_state_synchronize_rcu(void) | |||
2787 | * time-consuming work between get_state_synchronize_rcu() | 2787 | * time-consuming work between get_state_synchronize_rcu() |
2788 | * and cond_synchronize_rcu(). | 2788 | * and cond_synchronize_rcu(). |
2789 | */ | 2789 | */ |
2790 | return smp_load_acquire(&rcu_state->gpnum); | 2790 | return smp_load_acquire(&rcu_state_p->gpnum); |
2791 | } | 2791 | } |
2792 | EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); | 2792 | EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); |
2793 | 2793 | ||
@@ -2813,7 +2813,7 @@ void cond_synchronize_rcu(unsigned long oldstate) | |||
2813 | * Ensure that this load happens before any RCU-destructive | 2813 | * Ensure that this load happens before any RCU-destructive |
2814 | * actions the caller might carry out after we return. | 2814 | * actions the caller might carry out after we return. |
2815 | */ | 2815 | */ |
2816 | newstate = smp_load_acquire(&rcu_state->completed); | 2816 | newstate = smp_load_acquire(&rcu_state_p->completed); |
2817 | if (ULONG_CMP_GE(oldstate, newstate)) | 2817 | if (ULONG_CMP_GE(oldstate, newstate)) |
2818 | synchronize_rcu(); | 2818 | synchronize_rcu(); |
2819 | } | 2819 | } |
@@ -3354,7 +3354,7 @@ static int rcu_cpu_notify(struct notifier_block *self, | |||
3354 | unsigned long action, void *hcpu) | 3354 | unsigned long action, void *hcpu) |
3355 | { | 3355 | { |
3356 | long cpu = (long)hcpu; | 3356 | long cpu = (long)hcpu; |
3357 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | 3357 | struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); |
3358 | struct rcu_node *rnp = rdp->mynode; | 3358 | struct rcu_node *rnp = rdp->mynode; |
3359 | struct rcu_state *rsp; | 3359 | struct rcu_state *rsp; |
3360 | 3360 | ||
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 2e579c38bd91..29977ae84e7e 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -116,7 +116,7 @@ static void __init rcu_bootup_announce_oddness(void) | |||
116 | #ifdef CONFIG_TREE_PREEMPT_RCU | 116 | #ifdef CONFIG_TREE_PREEMPT_RCU |
117 | 117 | ||
118 | RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); | 118 | RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); |
119 | static struct rcu_state *rcu_state = &rcu_preempt_state; | 119 | static struct rcu_state *rcu_state_p = &rcu_preempt_state; |
120 | 120 | ||
121 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); | 121 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); |
122 | 122 | ||
@@ -947,7 +947,7 @@ void exit_rcu(void) | |||
947 | 947 | ||
948 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 948 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
949 | 949 | ||
950 | static struct rcu_state *rcu_state = &rcu_sched_state; | 950 | static struct rcu_state *rcu_state_p = &rcu_sched_state; |
951 | 951 | ||
952 | /* | 952 | /* |
953 | * Tell them what RCU they are running. | 953 | * Tell them what RCU they are running. |
@@ -1468,11 +1468,11 @@ static int __init rcu_spawn_kthreads(void) | |||
1468 | for_each_possible_cpu(cpu) | 1468 | for_each_possible_cpu(cpu) |
1469 | per_cpu(rcu_cpu_has_work, cpu) = 0; | 1469 | per_cpu(rcu_cpu_has_work, cpu) = 0; |
1470 | BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); | 1470 | BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); |
1471 | rnp = rcu_get_root(rcu_state); | 1471 | rnp = rcu_get_root(rcu_state_p); |
1472 | (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); | 1472 | (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); |
1473 | if (NUM_RCU_NODES > 1) { | 1473 | if (NUM_RCU_NODES > 1) { |
1474 | rcu_for_each_leaf_node(rcu_state, rnp) | 1474 | rcu_for_each_leaf_node(rcu_state_p, rnp) |
1475 | (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); | 1475 | (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); |
1476 | } | 1476 | } |
1477 | return 0; | 1477 | return 0; |
1478 | } | 1478 | } |
@@ -1480,12 +1480,12 @@ early_initcall(rcu_spawn_kthreads); | |||
1480 | 1480 | ||
1481 | static void rcu_prepare_kthreads(int cpu) | 1481 | static void rcu_prepare_kthreads(int cpu) |
1482 | { | 1482 | { |
1483 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | 1483 | struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); |
1484 | struct rcu_node *rnp = rdp->mynode; | 1484 | struct rcu_node *rnp = rdp->mynode; |
1485 | 1485 | ||
1486 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | 1486 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ |
1487 | if (rcu_scheduler_fully_active) | 1487 | if (rcu_scheduler_fully_active) |
1488 | (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); | 1488 | (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); |
1489 | } | 1489 | } |
1490 | 1490 | ||
1491 | #else /* #ifdef CONFIG_RCU_BOOST */ | 1491 | #else /* #ifdef CONFIG_RCU_BOOST */ |