Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  97
1 file changed, 48 insertions(+), 49 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 796820925de0..f20a57b7faf2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,6 +59,7 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include <net/tcp_memcontrol.h>
+#include "slab.h"
 
 #include <asm/uaccess.h>
 
@@ -2968,7 +2969,7 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
 
	VM_BUG_ON(p->is_root_cache);
	cachep = p->root_cache;
-	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
 #ifdef CONFIG_SLABINFO
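
Note: this and several later hunks replace the open-coded cachep->memcg_params->memcg_caches[idx] lookup with cache_from_memcg_idx(). Judging from the code it replaces, the helper (expected to live in mm/slab.h, which the first hunk newly includes) is presumably a thin wrapper along these lines; a sketch, not the authoritative definition:

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	/* A cache without memcg_params has no per-memcg copies. */
	if (!s->memcg_params)
		return NULL;
	return s->memcg_params->memcg_caches[idx];
}
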
@@ -2997,21 +2998,14 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
	struct res_counter *fail_res;
	struct mem_cgroup *_memcg;
	int ret = 0;
-	bool may_oom;
 
	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
	if (ret)
		return ret;
 
-	/*
-	 * Conditions under which we can wait for the oom_killer. Those are
-	 * the same conditions tested by the core page allocator
-	 */
-	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
	_memcg = memcg;
	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-				      &_memcg, may_oom);
+				      &_memcg, oom_gfp_allowed(gfp));
 
	if (ret == -EINTR) {
		/*
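
Note: the deleted may_oom logic moves behind oom_gfp_allowed(). The removed comment says these are the same conditions tested by the core page allocator, so the helper presumably reduces to exactly the deleted expression; a minimal sketch:

static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
	/* Only fs-capable, retrying allocations may wait for the OOM killer. */
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}
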
@@ -3151,7 +3145,7 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
	struct memcg_cache_params *cur_params = s->memcg_params;
 
-	VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
+	VM_BUG_ON(!is_root_cache(s));
 
	if (num_groups > memcg_limited_groups_array_size) {
		int i;
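
Note: for the rewritten VM_BUG_ON() to stay equivalent to the old s->memcg_params && !s->memcg_params->is_root_cache test, is_root_cache() has to treat a cache without memcg_params as a root cache, roughly:

static inline bool is_root_cache(struct kmem_cache *s)
{
	/* Caches that never had per-memcg copies count as root caches. */
	return !s->memcg_params || s->memcg_params->is_root_cache;
}
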
@@ -3412,7 +3406,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
	idx = memcg_cache_id(memcg);
 
	mutex_lock(&memcg_cache_mutex);
-	new_cachep = cachep->memcg_params->memcg_caches[idx];
+	new_cachep = cache_from_memcg_idx(cachep, idx);
	if (new_cachep) {
		css_put(&memcg->css);
		goto out;
@@ -3458,8 +3452,8 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
	 * we'll take the set_limit_mutex to protect ourselves against this.
	 */
	mutex_lock(&set_limit_mutex);
-	for (i = 0; i < memcg_limited_groups_array_size; i++) {
-		c = s->memcg_params->memcg_caches[i];
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg_idx(s, i);
		if (!c)
			continue;
 
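
Note: for_each_memcg_cache_index() abstracts the index loop deleted above; given the removed bounds check, it is presumably just:

#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

Together with cache_from_memcg_idx(), callers no longer dereference memcg_params directly.
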
@@ -3592,8 +3586,8 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
	 * code updating memcg_caches will issue a write barrier to match this.
	 */
	read_barrier_depends();
-	if (likely(cachep->memcg_params->memcg_caches[idx])) {
-		cachep = cachep->memcg_params->memcg_caches[idx];
+	if (likely(cache_from_memcg_idx(cachep, idx))) {
+		cachep = cache_from_memcg_idx(cachep, idx);
		goto out;
	}
 
@@ -5389,45 +5383,50 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
 static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
				struct cftype *cft, struct seq_file *m)
 {
+	struct numa_stat {
+		const char *name;
+		unsigned int lru_mask;
+	};
+
+	static const struct numa_stat stats[] = {
+		{ "total", LRU_ALL },
+		{ "file", LRU_ALL_FILE },
+		{ "anon", LRU_ALL_ANON },
+		{ "unevictable", BIT(LRU_UNEVICTABLE) },
+	};
+	const struct numa_stat *stat;
	int nid;
-	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
-	unsigned long node_nr;
+	unsigned long nr;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
-	seq_printf(m, "total=%lu", total_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
-
-	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
-	seq_printf(m, "file=%lu", file_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-						       LRU_ALL_FILE);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
-
-	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
-	seq_printf(m, "anon=%lu", anon_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-						       LRU_ALL_ANON);
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
-
-	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
-	seq_printf(m, "unevictable=%lu", unevictable_nr);
-	for_each_node_state(nid, N_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
-						       BIT(LRU_UNEVICTABLE));
-		seq_printf(m, " N%d=%lu", nid, node_nr);
-	}
-	seq_putc(m, '\n');
+	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
+		seq_printf(m, "%s=%lu", stat->name, nr);
+		for_each_node_state(nid, N_MEMORY) {
+			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
+							  stat->lru_mask);
+			seq_printf(m, " N%d=%lu", nid, nr);
+		}
+		seq_putc(m, '\n');
+	}
+
+	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
+		struct mem_cgroup *iter;
+
+		nr = 0;
+		for_each_mem_cgroup_tree(iter, memcg)
+			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
+		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
+		for_each_node_state(nid, N_MEMORY) {
+			nr = 0;
+			for_each_mem_cgroup_tree(iter, memcg)
+				nr += mem_cgroup_node_nr_lru_pages(
+					iter, nid, stat->lru_mask);
+			seq_printf(m, " N%d=%lu", nid, nr);
+		}
+		seq_putc(m, '\n');
+	}
+
	return 0;
 }
 #endif /* CONFIG_NUMA */
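
Note: the rewrite prints the four per-memcg rows from one table-driven loop, then a second pass over the same table emits hierarchical totals accumulated across the subtree with for_each_mem_cgroup_tree(). Per the format strings, the resulting memory.numa_stat output should look like this (hypothetical page counts for a two-node machine):

$ cat /sys/fs/cgroup/memory/<group>/memory.numa_stat
total=12800 N0=6400 N1=6400
file=6144 N0=3072 N1=3072
anon=6400 N0=3200 N1=3200
unevictable=256 N0=128 N1=128
hierarchical_total=25600 N0=12800 N1=12800
hierarchical_file=12288 N0=6144 N1=6144
hierarchical_anon=12800 N0=6400 N1=6400
hierarchical_unevictable=512 N0=256 N1=256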