author		Oleg Nesterov <oleg@redhat.com>	2015-08-21 13:42:52 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-10-06 14:25:21 -0400
commit		07899a6e5f56136028c44a57ad0451e797365ac3 (patch)
tree		48fc600387bae30a64284ae0d7acfb767277cfde
parent		3a518b76af7bb411efe6dd090fbf098e29accb2e (diff)
rcu_sync: Introduce rcu_sync_dtor()

This commit allows rcu_sync structures to be safely deallocated. The
trick is to add a new ->wait field to the gp_ops array. This field is
a pointer to the rcu_barrier() function corresponding to the flavor of
RCU in question. This allows a new rcu_sync_dtor() to wait for any
outstanding callbacks before freeing the rcu_sync structure.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
 include/linux/rcu_sync.h |  1 +
 kernel/rcu/sync.c        | 26 ++++++++++++++++++++++++++
 2 files changed, 27 insertions(+), 0 deletions(-)
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index 1f2d4fc30b04..8069d6468bc4 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -62,6 +62,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
+extern void rcu_sync_dtor(struct rcu_sync *);
 
 #define __RCU_SYNC_INITIALIZER(name, type) {				\
 		.gp_state = 0,						\
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 01c9807a7f73..1e353f0a2b66 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -32,6 +32,7 @@
 static const struct {
 	void (*sync)(void);
 	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+	void (*wait)(void);
 #ifdef CONFIG_PROVE_RCU
 	int (*held)(void);
 #endif
@@ -39,16 +40,19 @@ static const struct {
 	[RCU_SYNC] = {
 		.sync = synchronize_rcu,
 		.call = call_rcu,
+		.wait = rcu_barrier,
 		__INIT_HELD(rcu_read_lock_held)
 	},
 	[RCU_SCHED_SYNC] = {
 		.sync = synchronize_sched,
 		.call = call_rcu_sched,
+		.wait = rcu_barrier_sched,
 		__INIT_HELD(rcu_read_lock_sched_held)
 	},
 	[RCU_BH_SYNC] = {
 		.sync = synchronize_rcu_bh,
 		.call = call_rcu_bh,
+		.wait = rcu_barrier_bh,
 		__INIT_HELD(rcu_read_lock_bh_held)
 	},
 };
@@ -195,3 +199,25 @@ void rcu_sync_exit(struct rcu_sync *rsp)
 	}
 	spin_unlock_irq(&rsp->rss_lock);
 }
+
+/**
+ * rcu_sync_dtor() - Clean up an rcu_sync structure
+ * @rsp: Pointer to rcu_sync structure to be cleaned up
+ */
+void rcu_sync_dtor(struct rcu_sync *rsp)
+{
+	int cb_state;
+
+	BUG_ON(rsp->gp_count);
+
+	spin_lock_irq(&rsp->rss_lock);
+	if (rsp->cb_state == CB_REPLAY)
+		rsp->cb_state = CB_PENDING;
+	cb_state = rsp->cb_state;
+	spin_unlock_irq(&rsp->rss_lock);
+
+	if (cb_state != CB_IDLE) {
+		gp_ops[rsp->gp_type].wait();
+		BUG_ON(rsp->cb_state != CB_IDLE);
+	}
+}
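
For context (not part of the patch): a minimal sketch of how a caller might pair rcu_sync_init() with the new rcu_sync_dtor() around a dynamically allocated rcu_sync structure. The struct my_obj, my_obj_alloc() and my_obj_free() names are hypothetical and exist only for illustration; only the rcu_sync_*() calls and RCU_SCHED_SYNC come from the code above.

#include <linux/rcu_sync.h>
#include <linux/slab.h>

/* Hypothetical object embedding an rcu_sync structure. */
struct my_obj {
	struct rcu_sync rss;
	/* ... other fields ... */
};

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		rcu_sync_init(&obj->rss, RCU_SCHED_SYNC);
	return obj;
}

static void my_obj_free(struct my_obj *obj)
{
	/*
	 * A previous rcu_sync_exit() may have queued a callback that
	 * still references obj->rss.  rcu_sync_dtor() waits for it via
	 * the flavor's rcu_barrier() (here rcu_barrier_sched), so the
	 * memory can then be freed safely.
	 */
	rcu_sync_dtor(&obj->rss);
	kfree(obj);
}

The ->wait field is what makes this teardown possible: without it, freeing the structure while a CB_PENDING or CB_REPLAY callback was still outstanding would be a use-after-free.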