aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2019-07-14 19:17:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-07-14 19:17:18 -0400
commita1240cf74e8228f7c80d44af17914c0ffc5633fb (patch)
treeab8c5841940f9d20e6ffb6c91301a4146b63fdb8 /lib
parent1d039859330b874d48080885eb31f4f129c246f1 (diff)
parent7d9ab9b6adffd9c474c1274acb5f6208f9a09cf3 (diff)
Merge branch 'for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu
Pull percpu updates from Dennis Zhou: "This includes changes to let percpu_ref release the backing percpu memory earlier after it has been switched to atomic in cases where the percpu ref is not revived. This will help recycle percpu memory earlier in cases where the refcounts are pinned for prolonged periods of time" * 'for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu: percpu_ref: release percpu memory early without PERCPU_REF_ALLOW_REINIT md: initialize percpu refcounters using PERCU_REF_ALLOW_REINIT io_uring: initialize percpu refcounters using PERCU_REF_ALLOW_REINIT percpu_ref: introduce PERCPU_REF_ALLOW_REINIT flag
Diffstat (limited to 'lib')
-rw-r--r--lib/percpu-refcount.c13
1 file changed, 11 insertions, 2 deletions
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 071a76c7bac0..4f6c6ebbbbde 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -70,11 +70,14 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
70 return -ENOMEM; 70 return -ENOMEM;
71 71
72 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC; 72 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
73 ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
73 74
74 if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) 75 if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
75 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; 76 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
76 else 77 ref->allow_reinit = true;
78 } else {
77 start_count += PERCPU_COUNT_BIAS; 79 start_count += PERCPU_COUNT_BIAS;
80 }
78 81
79 if (flags & PERCPU_REF_INIT_DEAD) 82 if (flags & PERCPU_REF_INIT_DEAD)
80 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; 83 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
@@ -120,6 +123,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
120 ref->confirm_switch = NULL; 123 ref->confirm_switch = NULL;
121 wake_up_all(&percpu_ref_switch_waitq); 124 wake_up_all(&percpu_ref_switch_waitq);
122 125
126 if (!ref->allow_reinit)
127 percpu_ref_exit(ref);
128
123 /* drop ref from percpu_ref_switch_to_atomic() */ 129 /* drop ref from percpu_ref_switch_to_atomic() */
124 percpu_ref_put(ref); 130 percpu_ref_put(ref);
125} 131}
@@ -195,6 +201,9 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
195 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) 201 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
196 return; 202 return;
197 203
204 if (WARN_ON_ONCE(!ref->allow_reinit))
205 return;
206
198 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count); 207 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
199 208
200 /* 209 /*