author:    Johannes Weiner <hannes@cmpxchg.org>    2014-12-10 18:42:42 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>    2014-12-10 20:41:05 -0500
commit:    e8ea14cc6eadfe2ea63e9989e16e62625a2619f8 (patch)
tree:      8109a731e199928c2fd87d3633a56a3251d85245 /include/linux/percpu-refcount.h
parent:    5ac8fb31ad2ebd6492d1c5e8f31846b532f03945 (diff)
mm: memcontrol: take a css reference for each charged page
Charges currently pin the css indirectly by playing tricks during
css_offline(): user pages stall the offlining process until all of them
have been reparented, whereas kmemcg acquires a keep-alive reference if
outstanding kernel pages are detected at that point.

In preparation for removing all this complexity, make the pinning
explicit and acquire a css reference for every charged page.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
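Concretely, "explicit pinning" means the charge path takes css references in one batched operation instead of looping once per page. Below is a hedged sketch of what a caller looks like after this series; the memcontrol.c call sites are not part of this file's diff, so the function names and the css_get_many()/css_put_many() wrappers are illustrative assumptions, not the literal patch:

/*
 * Illustrative sketch only, not the literal memcontrol.c change.
 * Assumes css_get_many()/css_put_many() are thin wrappers around the
 * percpu_ref_get_many()/percpu_ref_put_many() primitives added below.
 */
static int charge_memcg(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	/* ... reserve nr_pages against the memcg's limit ... */

	/* Pin the css explicitly, once per charged page, in one batch. */
	css_get_many(&memcg->css, nr_pages);
	return 0;
}

static void uncharge_memcg(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	/* ... release the page charges ... */

	/* Drop the per-page pins taken at charge time. */
	css_put_many(&memcg->css, nr_pages);
}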
Diffstat (limited to 'include/linux/percpu-refcount.h')
 include/linux/percpu-refcount.h | 47 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 38 insertions(+), 9 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 51ce60c35f4c..530b249f7ea4 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -147,28 +147,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 }
 
 /**
- * percpu_ref_get - increment a percpu refcount
+ * percpu_ref_get_many - increment a percpu refcount
  * @ref: percpu_ref to get
+ * @nr: number of references to get
  *
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_add().
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
-static inline void percpu_ref_get(struct percpu_ref *ref)
+static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
 	if (__ref_is_percpu(ref, &percpu_count))
-		this_cpu_inc(*percpu_count);
+		this_cpu_add(*percpu_count, nr);
 	else
-		atomic_long_inc(&ref->count);
+		atomic_long_add(nr, &ref->count);
 
 	rcu_read_unlock_sched();
 }
 
 /**
+ * percpu_ref_get - increment a percpu refcount
+ * @ref: percpu_ref to get
+ *
+ * Analagous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_get(struct percpu_ref *ref)
+{
+	percpu_ref_get_many(ref, 1);
+}
+
+/**
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
@@ -231,29 +245,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 }
 
 /**
- * percpu_ref_put - decrement a percpu refcount
+ * percpu_ref_put_many - decrement a percpu refcount
  * @ref: percpu_ref to put
+ * @nr: number of references to put
  *
  * Decrement the refcount, and if 0, call the release function (which was passed
  * to percpu_ref_init())
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
-static inline void percpu_ref_put(struct percpu_ref *ref)
+static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 
 	rcu_read_lock_sched();
 
 	if (__ref_is_percpu(ref, &percpu_count))
-		this_cpu_dec(*percpu_count);
-	else if (unlikely(atomic_long_dec_and_test(&ref->count)))
+		this_cpu_sub(*percpu_count, nr);
+	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
 		ref->release(ref);
 
 	rcu_read_unlock_sched();
 }
 
 /**
+ * percpu_ref_put - decrement a percpu refcount
+ * @ref: percpu_ref to put
+ *
+ * Decrement the refcount, and if 0, call the release function (which was passed
+ * to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_put(struct percpu_ref *ref)
+{
+	percpu_ref_put_many(ref, 1);
+}
+
+/**
  * percpu_ref_is_zero - test whether a percpu refcount reached zero
  * @ref: percpu_ref to test
  *
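The batched semantics can also be modeled outside the kernel. Below is a minimal, self-contained userspace sketch; note the assumption that only the atomic slow path is reproduced, since the kernel version above additionally has the RCU-protected percpu fast path. Build with e.g. cc -std=c11 -o demo demo.c:

#include <stdatomic.h>
#include <stdio.h>

/* Userspace model of the slow path of percpu_ref_get_many()/
 * percpu_ref_put_many(); illustrative only, not kernel code. */
struct ref {
	atomic_long count;
	void (*release)(struct ref *ref);
};

static void ref_get_many(struct ref *ref, unsigned long nr)
{
	/* Analogous to atomic_long_add(): take nr references at once. */
	atomic_fetch_add(&ref->count, nr);
}

static void ref_put_many(struct ref *ref, unsigned long nr)
{
	/* Analogous to atomic_long_sub_and_test(): drop nr references
	 * and call release() when the count reaches zero. */
	if (atomic_fetch_sub(&ref->count, nr) == (long)nr)
		ref->release(ref);
}

static void ref_release(struct ref *ref)
{
	printf("refcount hit zero, released\n");
}

int main(void)
{
	struct ref r = { .count = 1, .release = ref_release };	/* init holds one ref */

	ref_get_many(&r, 64);	/* e.g. pin once per charged page */
	ref_put_many(&r, 64);	/* batched uncharge drops them all */
	ref_put_many(&r, 1);	/* final put triggers release() */
	return 0;
}

The design point is the same one the diff makes: a caller charging N pages pays one atomic (or one percpu) operation instead of N separate increments.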