author     Balbir Singh <balbir@linux.vnet.ibm.com>        2009-09-23 18:56:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-24 10:20:59 -0400
commit     75822b4495b62e8721e9b88e3cf9e653a0c85b73
tree       b8d4c10f03a4e289f0a66b982243fd8980f9df07
parent     f64c3f54940d6929a2b6dcffaab942bd62be2e66
memory controller: soft limit refactor reclaim flags
Refactor mem_cgroup_hierarchical_reclaim()

Refactor the arguments passed to mem_cgroup_hierarchical_reclaim() into
flags, so that new parameters don't have to be passed as we make the
reclaim routine more flexible.

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
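As an aside for readers new to this idiom, here is a minimal, self-contained sketch of the bit-flag encoding that replaces the two bool parameters. It is plain userspace C, not kernel code; the demo_reclaim() helper and the main() driver are hypothetical, and only the MEM_CGROUP_RECLAIM_* macros come from the patch itself.

#include <stdio.h>

/* Flag bits exactly as introduced by the patch. */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

/* Hypothetical stand-in for mem_cgroup_hierarchical_reclaim(): the callee
 * unpacks the flags word back into the booleans the old signature took. */
static void demo_reclaim(unsigned long reclaim_options)
{
	int noswap = !!(reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP);
	int shrink = !!(reclaim_options & MEM_CGROUP_RECLAIM_SHRINK);

	printf("noswap=%d shrink=%d\n", noswap, shrink);
}

int main(void)
{
	/* Callers OR together only the behaviours they need; adding a new
	 * flag later does not change the function signature. */
	demo_reclaim(0);
	demo_reclaim(MEM_CGROUP_RECLAIM_SHRINK);
	demo_reclaim(MEM_CGROUP_RECLAIM_NOSWAP | MEM_CGROUP_RECLAIM_SHRINK);
	return 0;
}

The hunks below apply the same pattern inside mm/memcontrol.c.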
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0ed325943cd1..90f0b13e1c3c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -252,6 +252,14 @@ enum charge_type {
 #define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)	((val) & 0xffff)
 
+/*
+ * Reclaim flags for mem_cgroup_hierarchical_reclaim
+ */
+#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
+#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
+#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
+#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
+
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
@@ -1029,11 +1037,14 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
  * If shrink==true, for avoiding to free too much, this returns immedieately.
  */
 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
-					gfp_t gfp_mask, bool noswap, bool shrink)
+					gfp_t gfp_mask,
+					unsigned long reclaim_options)
 {
 	struct mem_cgroup *victim;
 	int ret, total = 0;
 	int loop = 0;
+	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
+	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
 
 	/* If memsw_is_minimum==1, swap-out is of-no-use. */
 	if (root_mem->memsw_is_minimum)
@@ -1171,7 +1182,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 
 	while (1) {
 		int ret;
-		bool noswap = false;
+		unsigned long flags = 0;
 
 		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res,
 						&soft_fail_res);
@@ -1184,7 +1195,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 				break;
 			/* mem+swap counter fails */
 			res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
-			noswap = true;
+			flags |= MEM_CGROUP_RECLAIM_NOSWAP;
 			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
 									memsw);
 		} else
@@ -1196,7 +1207,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 			goto nomem;
 
 		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
-							noswap, false);
+							flags);
 		if (ret)
 			continue;
 
@@ -2008,7 +2019,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 			break;
 
 		progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
-						false, true);
+						MEM_CGROUP_RECLAIM_SHRINK);
 		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
 		/* Usage is reduced ? */
 		if (curusage >= oldusage)
@@ -2060,7 +2071,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 		if (!ret)
 			break;
 
-		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
+		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
+						MEM_CGROUP_RECLAIM_NOSWAP |
+						MEM_CGROUP_RECLAIM_SHRINK);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 		/* Usage is reduced ? */
 		if (curusage >= oldusage)