author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-05-29 02:26:01 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 15:33:21 -0400
commit     037b64ed0bf2405a1a01542164d3418564b44fff (patch)
tree       59aa7e486e1673564f2a5687992130dffa71ca1b
parent     6c90cc7bf077f28144013e949ee0c122012d194a (diff)
rcu: Place pointer to call_rcu() in rcu_data structure
This is a preparatory commit for increasing rcu_barrier()'s concurrency.
It adds a pointer in the rcu_data structure to the corresponding call_rcu()
function. This allows a pointer to the rcu_data structure to imply the
function pointer, which allows _rcu_barrier() state to be placed in the
rcu_state structure.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
-rw-r--r--  kernel/rcutree.c        | 27
-rw-r--r--  kernel/rcutree.h        |  2
-rw-r--r--  kernel/rcutree_plugin.h |  5
3 files changed, 17 insertions, 17 deletions
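To see the shape of the change outside the kernel, here is a minimal user-space sketch of the same pattern. The names flavor_state, call_flavor_a/b, barrier_func, and do_barrier are illustrative only (not kernel symbols), and the "flavors" simply run the callback immediately rather than deferring it. What it demonstrates: once the state structure carries the flavor's call_rcu()-style function pointer, the barrier path needs only the state pointer, and the handler can recover everything from a single void * argument instead of having a function pointer smuggled through a cast.

#include <stdio.h>

/* Stand-in for struct rcu_head; illustrative only. */
struct cb_head {
        void (*func)(struct cb_head *head);
};

/* Analogue of struct rcu_state after this patch: the flavor's
 * callback-queueing function lives in the structure itself. */
struct flavor_state {
        const char *name;
        void (*call)(struct cb_head *head,
                     void (*func)(struct cb_head *head));
};

/* Two "flavors" that, unlike call_rcu(), just run the callback at once. */
static void call_flavor_a(struct cb_head *head,
                          void (*func)(struct cb_head *head))
{
        printf("flavor A queues a callback\n");
        head->func = func;
        head->func(head);
}

static void call_flavor_b(struct cb_head *head,
                          void (*func)(struct cb_head *head))
{
        printf("flavor B queues a callback\n");
        head->func = func;
        head->func(head);
}

/* Analogue of RCU_STATE_INITIALIZER(sname, cr): the initializer now
 * takes the flavor's call function as its second argument. */
#define FLAVOR_STATE_INITIALIZER(sname, cr) { .name = #sname, .call = cr }

static struct flavor_state flavor_a =
        FLAVOR_STATE_INITIALIZER(flavor_a, call_flavor_a);
static struct flavor_state flavor_b =
        FLAVOR_STATE_INITIALIZER(flavor_b, call_flavor_b);

static void barrier_callback(struct cb_head *head)
{
        printf("barrier callback for %p ran\n", (void *)head);
}

/* Analogue of rcu_barrier_func(): the handler receives the state
 * structure through the opaque argument and finds the call function
 * there, rather than being handed a bare function pointer. */
static void barrier_func(void *type)
{
        struct flavor_state *rsp = type;
        struct cb_head head;            /* per-CPU storage in the kernel */

        rsp->call(&head, barrier_callback);
}

/* Analogue of _rcu_barrier() after the patch: only the state pointer is
 * needed, because it implies the flavor's call function. */
static void do_barrier(struct flavor_state *rsp)
{
        struct cb_head head;

        printf("barrier for %s\n", rsp->name);
        barrier_func(rsp);                      /* stands in for the per-CPU call */
        rsp->call(&head, barrier_callback);     /* the barrier's own callback */
}

int main(void)
{
        /* Before the patch, these calls would also have had to pass
         * call_flavor_a / call_flavor_b explicitly. */
        do_barrier(&flavor_a);
        do_barrier(&flavor_b);
        return 0;
}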
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index dd7fd96c90c5..00c518fa34bb 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -62,8 +62,9 @@
 
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 
-#define RCU_STATE_INITIALIZER(sname) { \
+#define RCU_STATE_INITIALIZER(sname, cr) { \
 	.level = { &sname##_state.node[0] }, \
+	.call = cr, \
 	.fqs_state = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
@@ -76,10 +77,11 @@ static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 	.name = #sname, \
 }
 
-struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
+struct rcu_state rcu_sched_state =
+	RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched);
 DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
@@ -2282,21 +2284,17 @@ static void rcu_barrier_func(void *type)
 {
 	int cpu = smp_processor_id();
 	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-	void (*call_rcu_func)(struct rcu_head *head,
-			      void (*func)(struct rcu_head *head));
+	struct rcu_state *rsp = type;
 
 	atomic_inc(&rcu_barrier_cpu_count);
-	call_rcu_func = type;
-	call_rcu_func(head, rcu_barrier_callback);
+	rsp->call(head, rcu_barrier_callback);
 }
 
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
  * RCU callbacks of the specified type to complete.
  */
-static void _rcu_barrier(struct rcu_state *rsp,
-			 void (*call_rcu_func)(struct rcu_head *head,
-					       void (*func)(struct rcu_head *head)))
+static void _rcu_barrier(struct rcu_state *rsp)
 {
 	int cpu;
 	unsigned long flags;
@@ -2348,8 +2346,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
 			while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
 				schedule_timeout_interruptible(1);
 		} else if (ACCESS_ONCE(rdp->qlen)) {
-			smp_call_function_single(cpu, rcu_barrier_func,
-						 (void *)call_rcu_func, 1);
+			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
 			preempt_enable();
 		} else {
 			preempt_enable();
@@ -2370,7 +2367,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
 	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
 	atomic_inc(&rcu_barrier_cpu_count);
 	smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
-	call_rcu_func(&rh, rcu_barrier_callback);
+	rsp->call(&rh, rcu_barrier_callback);
 
 	/*
 	 * Now that we have an rcu_barrier_callback() callback on each
@@ -2393,7 +2390,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
  */
 void rcu_barrier_bh(void)
 {
-	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
+	_rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
 
@@ -2402,7 +2399,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  */
 void rcu_barrier_sched(void)
 {
-	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
+	_rcu_barrier(&rcu_sched_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 780a0195d35a..049896a835d9 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -364,6 +364,8 @@ struct rcu_state {
 	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
 	u8 levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
 	struct rcu_data __percpu *rda;		/* pointer of percu rcu_data. */
+	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
+		     void (*func)(struct rcu_head *head));
 
 	/* The following fields are guarded by the root rcu_node's lock. */
 
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ef2b5231afa4..9cb3a68819fa 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -78,7 +78,8 @@ static void __init rcu_bootup_announce_oddness(void)
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
+struct rcu_state rcu_preempt_state =
+	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
@@ -944,7 +945,7 @@ static int rcu_preempt_cpu_has_callbacks(int cpu)
  */
 void rcu_barrier(void)
 {
-	_rcu_barrier(&rcu_preempt_state, call_rcu);
+	_rcu_barrier(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 