author		Balbir Singh <balbir@linux.vnet.ibm.com>	2008-02-07 03:14:02 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 11:42:19 -0500
commit		e1a1cd590e3fcb0d2e230128daf2337ea55387dc (patch)
tree		eb660ab340c657a1eb595b2d4d8e8b62783bf6fb /mm/vmscan.c
parent		bed7161a519a2faef53e1bce1b47595e297c1d14 (diff)
Memory controller: make charging gfp mask aware
Nick Piggin pointed out that the swap cache and page cache addition routines
can be called from non-GFP_KERNEL contexts. This patch makes the charging
routine aware of the caller's gfp context. Charging may fail if the cgroup
is over its limit, in which case a suitable error is returned.

This patch was tested on a PowerPC box. I am still looking at a way to
exercise the paths through which allocations happen in non-GFP_KERNEL
contexts.
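For context, here is a minimal sketch of the caller side of this change. It
assumes the page-cache charge helper simply gains a gfp_t parameter, as the
description above implies; the wrapper function name and the error convention
are illustrative, not quoted from the patch.

/*
 * Illustrative sketch only: a page-cache addition path threads its own
 * allocation context through to the memory controller, so a charge
 * attempted from, say, a GFP_NOFS context does not trigger reclaim the
 * caller cannot tolerate.
 */
static int example_add_to_page_cache(struct page *page,
				     struct mm_struct *mm, gfp_t gfp_mask)
{
	/* Assumed signature: the charge may fail once the cgroup is over its limit. */
	int error = mem_cgroup_cache_charge(page, mm, gfp_mask);

	if (error)
		return error;	/* typically -ENOMEM */

	/* ... insert the page into the mapping as before ... */
	return 0;
}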
[kamezawa.hiroyu@jp.fujitsu.com: problem with ZONE_MOVABLE]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	| 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 215f6a726b2f..b7d868cbca09 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1337,16 +1337,11 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
 
 #ifdef CONFIG_CGROUP_MEM_CONT
 
-#ifdef CONFIG_HIGHMEM
-#define ZONE_USERPAGES ZONE_HIGHMEM
-#else
-#define ZONE_USERPAGES ZONE_NORMAL
-#endif
-
-unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont)
+unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+						gfp_t gfp_mask)
 {
 	struct scan_control sc = {
-		.gfp_mask = GFP_KERNEL,
+		.gfp_mask = gfp_mask,
 		.may_writepage = !laptop_mode,
 		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
@@ -1357,9 +1352,10 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont)
 	};
 	int node;
 	struct zone **zones;
+	int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
 
 	for_each_online_node(node) {
-		zones = NODE_DATA(node)->node_zonelists[ZONE_USERPAGES].zones;
+		zones = NODE_DATA(node)->node_zonelists[target_zone].zones;
 		if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
 			return 1;
 	}
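The KAMEZAWA fix folded into this commit is visible in the hunks above: the
hand-rolled ZONE_USERPAGES macro is replaced by gfp_zone(GFP_HIGHUSER_MOVABLE),
so the per-node reclaim walk also covers ZONE_MOVABLE where that zone is
configured. A rough sketch of the idea follows; the wrapper function is
illustrative, while gfp_zone(), GFP_HIGHUSER_MOVABLE and the zonelist access
come straight from the diff.

/*
 * Illustrative sketch: gfp_zone() maps a gfp mask to the highest zone
 * index an allocation may use. GFP_HIGHUSER_MOVABLE includes
 * __GFP_MOVABLE, so the resulting zonelist reaches ZONE_MOVABLE when it
 * is configured; the old ZONE_USERPAGES macro topped out at ZONE_HIGHMEM
 * (or ZONE_NORMAL without CONFIG_HIGHMEM) and missed it.
 */
static struct zone **example_user_zonelist(int node)
{
	int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);

	return NODE_DATA(node)->node_zonelists[target_zone].zones;
}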