author		Jan Blunck <jblunck@suse.de>	2009-01-07 21:07:53 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 11:31:04 -0500
commit		c8dad2bb6307f5b00f804a686917105206a4d5c9
tree		5f1b7dd1fc4ae5789e35e5d0dade0d37be9fe52e /mm
parent		f817ed48535ac6510ebae7c4116f24a5f9268834
memcg: reduce size of mem_cgroup by using nr_cpu_ids
As Jan Blunck <jblunck@suse.de> pointed out, sizing memcg's per-cpu stat
array by NR_CPUS wastes memory when far fewer CPUs are actually possible.

This patch sizes mem_cgroup's cpustat allocation by nr_cpu_ids instead of
NR_CPUS.
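The idea is to turn the per-cpu stat storage into a trailing zero-length
array and compute the allocation size from nr_cpu_ids at runtime. Below is
a minimal userspace sketch of that sizing trick; the struct and variable
names (percpu_stat, group_stats, ncpus) are illustrative, not the kernel's,
and sysconf() stands in for nr_cpu_ids.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct percpu_stat {
	long count[8];			/* stand-in for the real counters */
};

struct group_stats {
	int id;
	/*
	 * Zero-length array, as in the patch (a GNU C extension; C99 code
	 * would write cpustat[]). Storage comes from the allocation below.
	 */
	struct percpu_stat cpustat[0];
};

int main(void)
{
	/* stand-in for nr_cpu_ids: number of configured CPUs on this box */
	long ncpus = sysconf(_SC_NPROCESSORS_CONF);
	size_t size = sizeof(struct group_stats) +
		      (size_t)ncpus * sizeof(struct percpu_stat);
	struct group_stats *g;

	g = malloc(size);
	if (!g)
		return 1;
	memset(g, 0, size);	/* mirrors the memset() in mem_cgroup_alloc() */

	printf("allocated %zu bytes for %ld CPUs\n", size, ncpus);
	free(g);
	return 0;
}

In the patch itself the same computation is done by mem_cgroup_size(), and
mem_cgroup_alloc() falls back to vmalloc() when the result exceeds
PAGE_SIZE.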
Reviewed-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	35
1 files changed, 18 insertions, 17 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 49234d93988a..e00f25e6545f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -60,7 +60,7 @@ struct mem_cgroup_stat_cpu {
 } ____cacheline_aligned_in_smp;
 
 struct mem_cgroup_stat {
-	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
+	struct mem_cgroup_stat_cpu cpustat[0];
 };
 
 /*
@@ -129,11 +129,10 @@ struct mem_cgroup {
 
 	int prev_priority;	/* for recording reclaim priority */
 	/*
-	 * statistics.
+	 * statistics. This must be placed at the end of memcg.
 	 */
 	struct mem_cgroup_stat stat;
 };
-static struct mem_cgroup init_mem_cgroup;
 
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -1293,23 +1292,30 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 	kfree(mem->info.nodeinfo[node]);
 }
 
+static int mem_cgroup_size(void)
+{
+	int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
+	return sizeof(struct mem_cgroup) + cpustat_size;
+}
+
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *mem;
+	int size = mem_cgroup_size();
 
-	if (sizeof(*mem) < PAGE_SIZE)
-		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+	if (size < PAGE_SIZE)
+		mem = kmalloc(size, GFP_KERNEL);
 	else
-		mem = vmalloc(sizeof(*mem));
+		mem = vmalloc(size);
 
 	if (mem)
-		memset(mem, 0, sizeof(*mem));
+		memset(mem, 0, size);
 	return mem;
 }
 
 static void mem_cgroup_free(struct mem_cgroup *mem)
 {
-	if (sizeof(*mem) < PAGE_SIZE)
+	if (mem_cgroup_size() < PAGE_SIZE)
 		kfree(mem);
 	else
 		vfree(mem);
@@ -1322,13 +1328,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	struct mem_cgroup *mem;
 	int node;
 
-	if (unlikely((cont->parent) == NULL)) {
-		mem = &init_mem_cgroup;
-	} else {
-		mem = mem_cgroup_alloc();
-		if (!mem)
-			return ERR_PTR(-ENOMEM);
-	}
+	mem = mem_cgroup_alloc();
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
 
 	res_counter_init(&mem->res);
 
@@ -1340,8 +1342,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 free_out:
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
-	if (cont->parent != NULL)
-		mem_cgroup_free(mem);
+	mem_cgroup_free(mem);
 	return ERR_PTR(-ENOMEM);
 }
 