author	Vladimir Davydov <vdavydov@virtuozzo.com>	2016-03-17 17:18:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 18:09:34 -0400
commit	58e698af4c6347c726090d5480b2e51d1d07edf9 (patch)
tree	e6d705907c23ebccb6b1439bde35e979ed650487 /lib
parent	b6ecd2dea4435a771a99c497a6ac5df6d3618c5a (diff)
radix-tree: account radix_tree_node to memory cgroup
Allocation of radix_tree_node objects can be easily triggered from
userspace, so we should account them to memory cgroup. Besides, we need
them accounted for making shadow node shrinker per memcg (see
mm/workingset.c).

A tricky thing about accounting radix_tree_node objects is that they
are mostly allocated through radix_tree_preload(), so we can't just set
SLAB_ACCOUNT for radix_tree_node_cachep - that would likely result in a
lot of unrelated cgroups using objects from each other's caches.

One way to overcome this would be making radix tree preloads per memcg,
but that would probably look cumbersome and overcomplicated.

Instead, we make radix_tree_node_alloc() first try to allocate from the
cache with __GFP_ACCOUNT, no matter if the caller has preloaded or not,
and only if it fails fall back on using per cpu preloads. This should
make most allocations accounted.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
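To make the ordering concrete, here is a minimal userspace C sketch of the
same try-accounted-first, fall-back-to-preload pattern. It is purely
illustrative: struct node, try_alloc_accounted(), preload_nodes, and
node_alloc() are hypothetical stand-ins for the kernel's radix_tree_node,
kmem_cache_alloc(..., gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN), per-cpu
preload array, and radix_tree_node_alloc(); none of them are real APIs.

    #include <stdlib.h>

    /* Hypothetical stand-in for struct radix_tree_node. */
    struct node { void *slots[64]; };

    /* Stand-in for the per-cpu pool filled by radix_tree_preload(). */
    #define PRELOAD_MAX 8
    static struct node *preload_nodes[PRELOAD_MAX];
    static unsigned int preload_nr;

    /* Stand-in for kmem_cache_alloc(radix_tree_node_cachep,
     * gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN): an allocation charged
     * to the caller's memory cgroup that may fail quietly. */
    static struct node *try_alloc_accounted(void)
    {
    	return calloc(1, sizeof(struct node));
    }

    /* The ordering this patch establishes: attempt the accounted
     * allocation first so the node is charged to the right cgroup;
     * only on failure consume a node preloaded earlier. */
    static struct node *node_alloc(void)
    {
    	struct node *ret = try_alloc_accounted();

    	if (ret)
    		return ret;		/* common case: accounted node */

    	if (preload_nr) {		/* fallback: unaccounted preload */
    		ret = preload_nodes[--preload_nr];
    		preload_nodes[preload_nr] = NULL;
    	}
    	return ret;			/* may be NULL if pool is empty */
    }

    int main(void)
    {
    	struct node *n = node_alloc();
    	free(n);
    	return 0;
    }

The fallback keeps the guarantee that a caller who preloaded still gets a
node when the accounted attempt fails in atomic context; the trade-off, as
the changelog says, is that nodes taken from the preload stay unaccounted,
which is why only "most" allocations become accounted.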
Diffstat (limited to 'lib')
-rw-r--r--	lib/radix-tree.c	16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6b79e9026e24..224b369f5a5e 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -192,6 +192,15 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		struct radix_tree_preload *rtp;
 
 		/*
+		 * Even if the caller has preloaded, try to allocate from the
+		 * cache first for the new node to get accounted.
+		 */
+		ret = kmem_cache_alloc(radix_tree_node_cachep,
+				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+		if (ret)
+			goto out;
+
+		/*
 		 * Provided the caller has preloaded here, we will always
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
@@ -208,10 +217,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		 * for debugging.
 		 */
 		kmemleak_update_trace(ret);
+		goto out;
 	}
-	if (ret == NULL)
-		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
-
+	ret = kmem_cache_alloc(radix_tree_node_cachep,
+			       gfp_mask | __GFP_ACCOUNT);
+out:
 	BUG_ON(radix_tree_is_indirect_ptr(ret));
 	return ret;
 }
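For context, radix_tree_node_alloc() is typically reached through the
long-standing preload pattern sketched below (based on the documented
radix_tree_preload() usage of that era; tree, tree_lock, index, and item
are placeholder names):

    error = radix_tree_preload(GFP_KERNEL);	/* refill per-cpu node pool */
    if (error)
    	return error;
    spin_lock(&tree_lock);			/* now in atomic context */
    error = radix_tree_insert(&tree, index, item);	/* may allocate nodes */
    spin_unlock(&tree_lock);
    radix_tree_preload_end();			/* re-enable preemption */

Under the spinlock the tree's gfp mask does not allow blocking, so before
this patch radix_tree_node_alloc() drew first from the preloaded
(unaccounted) nodes; after it, the accounted __GFP_NOWARN attempt comes
first and the preload serves only as a fallback.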