diff options
author | Vladimir Davydov <vdavydov@virtuozzo.com> | 2016-03-17 17:17:29 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-17 18:09:34 -0400 |
commit | fcff7d7eebe6d31e2ce20d994555c86a90197034 (patch) | |
tree | 776a49bee21274e04baf973a281fb764df4764b1 /mm | |
parent | 6a618957ad17d8f4f4c7eeede752685374b1b176 (diff) |
mm: memcontrol: do not bypass slab charge if memcg is offline
Slab pages are charged in two steps. First, an appropriate per memcg
cache is selected (see memcg_kmem_get_cache) based on the current
context, then the new slab page is charged to the memory cgroup which
the selected cache was created for (see memcg_charge_slab ->
__memcg_kmem_charge_memcg). It is OK to bypass kmemcg charge at step 1,
but if step 1 succeeded and we successfully allocated a new slab page,
step 2 must be performed, otherwise we would get a per memcg kmem cache
which contains a slab that does not hold a reference to the memory
cgroup owning the cache. Since per memcg kmem caches are destroyed on
memcg css free, this could result in freeing a cache while there are
still active objects in it.
However, currently we will bypass slab page charge if the memory cgroup
owning the cache is offline (see __memcg_kmem_charge_memcg). This is
very unlikely to occur in practice, because for this to happen a process
must be migrated to a different cgroup and the old cgroup must be
removed while the process is in kmalloc somewhere between steps 1 and 2
(e.g. trying to allocate a new page). Nevertheless, it's still better
to eliminate such a possibility.
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 8 |
1 files changed, 3 insertions, 5 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 42882c1e7fce..5c9d45e4c739 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2325,9 +2325,6 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2325 | struct page_counter *counter; | 2325 | struct page_counter *counter; |
2326 | int ret; | 2326 | int ret; |
2327 | 2327 | ||
2328 | if (!memcg_kmem_online(memcg)) | ||
2329 | return 0; | ||
2330 | |||
2331 | ret = try_charge(memcg, gfp, nr_pages); | 2328 | ret = try_charge(memcg, gfp, nr_pages); |
2332 | if (ret) | 2329 | if (ret) |
2333 | return ret; | 2330 | return ret; |
@@ -2346,10 +2343,11 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2346 | int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) | 2343 | int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) |
2347 | { | 2344 | { |
2348 | struct mem_cgroup *memcg; | 2345 | struct mem_cgroup *memcg; |
2349 | int ret; | 2346 | int ret = 0; |
2350 | 2347 | ||
2351 | memcg = get_mem_cgroup_from_mm(current->mm); | 2348 | memcg = get_mem_cgroup_from_mm(current->mm); |
2352 | ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); | 2349 | if (memcg_kmem_online(memcg)) |
2350 | ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); | ||
2353 | css_put(&memcg->css); | 2351 | css_put(&memcg->css); |
2354 | return ret; | 2352 | return ret; |
2355 | } | 2353 | } |