author    | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2009-10-01 18:44:12 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org>     | 2009-10-01 19:11:13 -0400
commit    | ef8745c1e7fc5413d760b3b958f3fd3a0beaad72
tree      | a1f1998dbcf06e84fe3539192e440e9d1bb876f2 /mm/memcontrol.c
parent    | 4e649152cbaa1aedd01821d200ab9d597fe469e4
memcg: reduce check for softlimit excess
In the charge/uncharge/reclaim paths, usage_in_excess is recalculated repeatedly,
and each recalculation takes the res_counter's spin_lock.
This patch removes the unnecessary calls to res_counter_soft_limit_excess(): the
excess is computed once per iteration and passed to __mem_cgroup_insert_exceeded(),
which now simply returns when it is zero.
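For context, res_counter_soft_limit_excess() looks roughly like the sketch below
(paraphrased from kernel/res_counter.c as of this series; shown only to illustrate
the cost, not part of this patch). Every call takes the counter's spinlock with
IRQs disabled, which is why reading the value once and reusing it is worthwhile:

/*
 * Sketch of the helper whose repeated calls this patch avoids.
 * (Assumes <linux/res_counter.h>; field names as in this kernel series.)
 */
unsigned long long res_counter_soft_limit_excess(struct res_counter *cnt)
{
        unsigned long long excess;
        unsigned long flags;

        spin_lock_irqsave(&cnt->lock, flags);   /* the lock every caller pays for */
        if (cnt->usage <= cnt->soft_limit)
                excess = 0;
        else
                excess = cnt->usage - cnt->soft_limit;
        spin_unlock_irqrestore(&cnt->lock, flags);
        return excess;
}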
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 31
1 files changed, 15 insertions, 16 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1ae8c439584a..f99f5991d6bb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page)
 static void
 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
                                 struct mem_cgroup_per_zone *mz,
-                                struct mem_cgroup_tree_per_zone *mctz)
+                                struct mem_cgroup_tree_per_zone *mctz,
+                                unsigned long long new_usage_in_excess)
 {
         struct rb_node **p = &mctz->rb_root.rb_node;
         struct rb_node *parent = NULL;
@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
         if (mz->on_tree)
                 return;
 
-        mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+        mz->usage_in_excess = new_usage_in_excess;
+        if (!mz->usage_in_excess)
+                return;
         while (*p) {
                 parent = *p;
                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
@@ -382,7 +385,7 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
 
 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
 {
-        unsigned long long new_usage_in_excess;
+        unsigned long long excess;
         struct mem_cgroup_per_zone *mz;
         struct mem_cgroup_tree_per_zone *mctz;
         int nid = page_to_nid(page);
@@ -395,25 +398,21 @@ static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
          */
         for (; mem; mem = parent_mem_cgroup(mem)) {
                 mz = mem_cgroup_zoneinfo(mem, nid, zid);
-                new_usage_in_excess =
-                        res_counter_soft_limit_excess(&mem->res);
+                excess = res_counter_soft_limit_excess(&mem->res);
                 /*
                  * We have to update the tree if mz is on RB-tree or
                  * mem is over its softlimit.
                  */
-                if (new_usage_in_excess || mz->on_tree) {
+                if (excess || mz->on_tree) {
                         spin_lock(&mctz->lock);
                         /* if on-tree, remove it */
                         if (mz->on_tree)
                                 __mem_cgroup_remove_exceeded(mem, mz, mctz);
                         /*
-                         * if over soft limit, insert again. mz->usage_in_excess
-                         * will be updated properly.
+                         * Insert again. mz->usage_in_excess will be updated.
+                         * If excess is 0, no tree ops.
                          */
-                        if (new_usage_in_excess)
-                                __mem_cgroup_insert_exceeded(mem, mz, mctz);
-                        else
-                                mz->usage_in_excess = 0;
+                        __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
                         spin_unlock(&mctz->lock);
                 }
         }
@@ -2221,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
         unsigned long reclaimed;
         int loop = 0;
         struct mem_cgroup_tree_per_zone *mctz;
+        unsigned long long excess;
 
         if (order > 0)
                 return 0;
@@ -2272,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                 break;
                         } while (1);
                 }
-                mz->usage_in_excess =
-                        res_counter_soft_limit_excess(&mz->mem->res);
                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+                excess = res_counter_soft_limit_excess(&mz->mem->res);
                 /*
                  * One school of thought says that we should not add
                  * back the node to the tree if reclaim returns 0.
@@ -2283,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                  * memory to reclaim from. Consider this as a longer
                  * term TODO.
                  */
-                if (mz->usage_in_excess)
-                        __mem_cgroup_insert_exceeded(mz->mem, mz, mctz);
+                /* If excess == 0, no tree ops */
+                __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
                 spin_unlock(&mctz->lock);
                 css_put(&mz->mem->css);
                 loop++;
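For reference, with this patch applied the front of __mem_cgroup_insert_exceeded()
reads as in the sketch below (reconstructed from the first two hunks above; the
rb-tree insertion walk that follows is unchanged and elided). The caller passes in
the excess it already read, and a zero value means the zone is simply left off the
soft-limit tree:

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz,
                                unsigned long long new_usage_in_excess)
{
        struct rb_node **p = &mctz->rb_root.rb_node;
        struct rb_node *parent = NULL;
        /* (a few unchanged declarations elided) */

        if (mz->on_tree)
                return;

        /* Reuse the value the caller read under the res_counter lock. */
        mz->usage_in_excess = new_usage_in_excess;
        /* Nothing in excess: leave this zone off the tree, no rb-tree ops. */
        if (!mz->usage_in_excess)
                return;

        /* ... rb-tree insertion walk, unchanged by this patch ... */
}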