diff options
author | Johannes Weiner <hannes@cmpxchg.org> | 2014-12-10 18:42:42 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-10 20:41:05 -0500 |
commit | e8ea14cc6eadfe2ea63e9989e16e62625a2619f8 (patch) | |
tree | 8109a731e199928c2fd87d3633a56a3251d85245 /include | |
parent | 5ac8fb31ad2ebd6492d1c5e8f31846b532f03945 (diff) |
mm: memcontrol: take a css reference for each charged page
Charges currently pin the css indirectly by playing tricks during
css_offline(): user pages stall the offlining process until all of them
have been reparented, whereas kmemcg acquires a keep-alive reference if
outstanding kernel pages are detected at that point.
In preparation for removing all this complexity, make the pinning explicit
and acquire a css reference for every charged page.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/cgroup.h | 26 | ||||
-rw-r--r-- | include/linux/percpu-refcount.h | 47 |
2 files changed, 64 insertions, 9 deletions
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1d5196889048..9f96b25965c2 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css) | |||
113 | } | 113 | } |
114 | 114 | ||
115 | /** | 115 | /** |
116 | * css_get_many - obtain references on the specified css | ||
117 | * @css: target css | ||
118 | * @n: number of references to get | ||
119 | * | ||
120 | * The caller must already have a reference. | ||
121 | */ | ||
122 | static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) | ||
123 | { | ||
124 | if (!(css->flags & CSS_NO_REF)) | ||
125 | percpu_ref_get_many(&css->refcnt, n); | ||
126 | } | ||
127 | |||
128 | /** | ||
116 | * css_tryget - try to obtain a reference on the specified css | 129 | * css_tryget - try to obtain a reference on the specified css |
117 | * @css: target css | 130 | * @css: target css |
118 | * | 131 | * |
@@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css) | |||
159 | percpu_ref_put(&css->refcnt); | 172 | percpu_ref_put(&css->refcnt); |
160 | } | 173 | } |
161 | 174 | ||
175 | /** | ||
176 | * css_put_many - put css references | ||
177 | * @css: target css | ||
178 | * @n: number of references to put | ||
179 | * | ||
180 | * Put references obtained via css_get() and css_tryget_online(). | ||
181 | */ | ||
182 | static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) | ||
183 | { | ||
184 | if (!(css->flags & CSS_NO_REF)) | ||
185 | percpu_ref_put_many(&css->refcnt, n); | ||
186 | } | ||
187 | |||
162 | /* bits in struct cgroup flags field */ | 188 | /* bits in struct cgroup flags field */ |
163 | enum { | 189 | enum { |
164 | /* Control Group requires release notifications to userspace */ | 190 | /* Control Group requires release notifications to userspace */ |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 51ce60c35f4c..530b249f7ea4 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
@@ -147,28 +147,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, | |||
147 | } | 147 | } |
148 | 148 | ||
149 | /** | 149 | /** |
150 | * percpu_ref_get - increment a percpu refcount | 150 | * percpu_ref_get_many - increment a percpu refcount |
151 | * @ref: percpu_ref to get | 151 | * @ref: percpu_ref to get |
152 | * @nr: number of references to get | ||
152 | * | 153 | * |
153 | * Analagous to atomic_long_inc(). | 154 | * Analogous to atomic_long_add(). |
154 | * | 155 | * |
155 | * This function is safe to call as long as @ref is between init and exit. | 156 | * This function is safe to call as long as @ref is between init and exit. |
156 | */ | 157 | */ |
157 | static inline void percpu_ref_get(struct percpu_ref *ref) | 158 | static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) |
158 | { | 159 | { |
159 | unsigned long __percpu *percpu_count; | 160 | unsigned long __percpu *percpu_count; |
160 | 161 | ||
161 | rcu_read_lock_sched(); | 162 | rcu_read_lock_sched(); |
162 | 163 | ||
163 | if (__ref_is_percpu(ref, &percpu_count)) | 164 | if (__ref_is_percpu(ref, &percpu_count)) |
164 | this_cpu_inc(*percpu_count); | 165 | this_cpu_add(*percpu_count, nr); |
165 | else | 166 | else |
166 | atomic_long_inc(&ref->count); | 167 | atomic_long_add(nr, &ref->count); |
167 | 168 | ||
168 | rcu_read_unlock_sched(); | 169 | rcu_read_unlock_sched(); |
169 | } | 170 | } |
170 | 171 | ||
171 | /** | 172 | /** |
173 | * percpu_ref_get - increment a percpu refcount | ||
174 | * @ref: percpu_ref to get | ||
175 | * | ||
176 | * Analagous to atomic_long_inc(). | ||
177 | * | ||
178 | * This function is safe to call as long as @ref is between init and exit. | ||
179 | */ | ||
180 | static inline void percpu_ref_get(struct percpu_ref *ref) | ||
181 | { | ||
182 | percpu_ref_get_many(ref, 1); | ||
183 | } | ||
184 | |||
185 | /** | ||
172 | * percpu_ref_tryget - try to increment a percpu refcount | 186 | * percpu_ref_tryget - try to increment a percpu refcount |
173 | * @ref: percpu_ref to try-get | 187 | * @ref: percpu_ref to try-get |
174 | * | 188 | * |
@@ -231,29 +245,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) | |||
231 | } | 245 | } |
232 | 246 | ||
233 | /** | 247 | /** |
234 | * percpu_ref_put - decrement a percpu refcount | 248 | * percpu_ref_put_many - decrement a percpu refcount |
235 | * @ref: percpu_ref to put | 249 | * @ref: percpu_ref to put |
250 | * @nr: number of references to put | ||
236 | * | 251 | * |
237 | * Decrement the refcount, and if 0, call the release function (which was passed | 252 | * Decrement the refcount, and if 0, call the release function (which was passed |
238 | * to percpu_ref_init()) | 253 | * to percpu_ref_init()) |
239 | * | 254 | * |
240 | * This function is safe to call as long as @ref is between init and exit. | 255 | * This function is safe to call as long as @ref is between init and exit. |
241 | */ | 256 | */ |
242 | static inline void percpu_ref_put(struct percpu_ref *ref) | 257 | static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) |
243 | { | 258 | { |
244 | unsigned long __percpu *percpu_count; | 259 | unsigned long __percpu *percpu_count; |
245 | 260 | ||
246 | rcu_read_lock_sched(); | 261 | rcu_read_lock_sched(); |
247 | 262 | ||
248 | if (__ref_is_percpu(ref, &percpu_count)) | 263 | if (__ref_is_percpu(ref, &percpu_count)) |
249 | this_cpu_dec(*percpu_count); | 264 | this_cpu_sub(*percpu_count, nr); |
250 | else if (unlikely(atomic_long_dec_and_test(&ref->count))) | 265 | else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) |
251 | ref->release(ref); | 266 | ref->release(ref); |
252 | 267 | ||
253 | rcu_read_unlock_sched(); | 268 | rcu_read_unlock_sched(); |
254 | } | 269 | } |
255 | 270 | ||
256 | /** | 271 | /** |
272 | * percpu_ref_put - decrement a percpu refcount | ||
273 | * @ref: percpu_ref to put | ||
274 | * | ||
275 | * Decrement the refcount, and if 0, call the release function (which was passed | ||
276 | * to percpu_ref_init()) | ||
277 | * | ||
278 | * This function is safe to call as long as @ref is between init and exit. | ||
279 | */ | ||
280 | static inline void percpu_ref_put(struct percpu_ref *ref) | ||
281 | { | ||
282 | percpu_ref_put_many(ref, 1); | ||
283 | } | ||
284 | |||
285 | /** | ||
257 | * percpu_ref_is_zero - test whether a percpu refcount reached zero | 286 | * percpu_ref_is_zero - test whether a percpu refcount reached zero |
258 | * @ref: percpu_ref to test | 287 | * @ref: percpu_ref to test |
259 | * | 288 | * |