path: root/mm/slab.h
author	Vladimir Davydov <vdavydov@parallels.com>	2014-06-04 19:07:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 19:54:01 -0400
commit	c67a8a685a6e9abbaf0235e084168f15a721ae39 (patch)
tree	946c9890d1638aa5540b596beefc305ad5dc26d0 /mm/slab.h
parent	1e32e77f95d60b121b6072e3e3a650a7f93068f9 (diff)
memcg, slab: merge memcg_{bind,release}_pages to memcg_{un}charge_slab
Currently we have two pairs of kmemcg-related functions that are called on slab alloc/free. The first is memcg_{bind,release}_pages that count the total number of pages allocated on a kmem cache. The second is memcg_{un}charge_slab that {un}charge slab pages to kmemcg resource counter. Let's just merge them to keep the code clean.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
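[The merged helpers themselves are outside this diff, which is limited to mm/slab.h. A minimal sketch, assuming __memcg_charge_slab()/__memcg_uncharge_slab() live in mm/memcontrol.c and simply fold the nr_pages accounting formerly done by memcg_{bind,release}_pages into the existing kmem charge/uncharge:]

/* Sketch only, not taken from this diff (limited to mm/slab.h). */
int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
{
	int res;

	/* Charge the slab pages to the cache's memcg, as before. */
	res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
				PAGE_SIZE << order);
	if (res)
		return res;

	/* Page accounting previously done by memcg_bind_pages(). */
	atomic_add(1 << order, &cachep->memcg_params->nr_pages);
	return 0;
}

void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
{
	memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
	/* Page accounting previously done by memcg_release_pages(). */
	atomic_sub(1 << order, &cachep->memcg_params->nr_pages);
}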
Diffstat (limited to 'mm/slab.h')
-rw-r--r--	mm/slab.h	25
1 files changed, 2 insertions, 23 deletions
diff --git a/mm/slab.h b/mm/slab.h
index b59447ac4533..961a3fb1f5a2 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -121,18 +121,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
 	return !s->memcg_params || s->memcg_params->is_root_cache;
 }
 
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_add(1 << order, &s->memcg_params->nr_pages);
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-	if (!is_root_cache(s))
-		atomic_sub(1 << order, &s->memcg_params->nr_pages);
-}
-
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
@@ -198,8 +186,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return memcg_charge_kmem(s->memcg_params->memcg, gfp,
-				 PAGE_SIZE << order);
+	return __memcg_charge_slab(s, gfp, order);
 }
 
 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -208,7 +195,7 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 		return;
 	if (is_root_cache(s))
 		return;
-	memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);
+	__memcg_uncharge_slab(s, order);
 }
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
@@ -216,14 +203,6 @@ static inline bool is_root_cache(struct kmem_cache *s)
 	return true;
 }
 
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-}
-
 static inline bool slab_equal_or_root(struct kmem_cache *s,
 				      struct kmem_cache *p)
 {
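[For context, an illustrative caller-side view of the merge: after this patch a slab allocator's page path needs one call to charge and account, and one call to undo both, instead of the old charge+bind / release+uncharge pairs. The function below is hypothetical and not part of this diff; only memcg_charge_slab()/memcg_uncharge_slab() come from mm/slab.h.]

/* Illustrative only: names other than memcg_{un}charge_slab are made up. */
static struct page *slab_alloc_pages(struct kmem_cache *s, gfp_t gfp, int order)
{
	struct page *page;

	if (memcg_charge_slab(s, gfp, order))	/* charge + page accounting */
		return NULL;

	page = alloc_pages(gfp, order);
	if (!page)
		memcg_uncharge_slab(s, order);	/* undo both on failure */

	return page;
}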