author		Hugh Dickins <hugh@veritas.com>	2008-02-07 03:14:19 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 11:42:20 -0500
commit		3be91277e754c7db04eae145ba622b3a3e3ad96d (patch)
tree		347aa7cd186ed87ba7585d63e6584c94fffc2fd6
parent		ac44d354d5c9ced49b1165d6496f134501134219 (diff)
memcgroup: tidy up mem_cgroup_charge_common
Tidy up mem_cgroup_charge_common before extending it. Adjust some comments,
but mainly clean up its loop: I've an aversion to loops full of continues,
then a break or a goto at the bottom. And the is_atomic test should be on the
__GFP_WAIT bit, not GFP_ATOMIC bits.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/memcontrol.c	44
1 file changed, 15 insertions(+), 29 deletions(-)
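
A note on the flag test this patch corrects: GFP_ATOMIC is built from __GFP_HIGH (permission to dip into the emergency reserves), while the ability to block and reclaim is carried by __GFP_WAIT, so testing gfp_mask & GFP_ATOMIC never actually asked "may this caller sleep?". The sketch below is userspace illustration only, not kernel code; the flag values mirror the include/linux/gfp.h of this era, and the two helper names are invented here for contrast.

/*
 * Userspace sketch (not kernel code) of why the reclaim decision belongs
 * on __GFP_WAIT rather than on the GFP_ATOMIC bits.  Only the flag bits
 * needed for the comparison are defined.
 */
#include <stdio.h>

#define __GFP_WAIT	0x10u	/* caller may sleep, so reclaim is allowed */
#define __GFP_HIGH	0x20u	/* caller may use emergency reserves */
#define __GFP_IO	0x40u
#define __GFP_FS	0x80u

#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)

/* the test this patch removes: really just "is __GFP_HIGH set?" */
static int old_test_fails_charge(unsigned int gfp_mask)
{
	return (gfp_mask & GFP_ATOMIC) != 0;
}

/* the test this patch adds: fail the charge only when we cannot sleep */
static int new_test_fails_charge(unsigned int gfp_mask)
{
	return !(gfp_mask & __GFP_WAIT);
}

int main(void)
{
	struct { const char *name; unsigned int mask; } cases[] = {
		{ "GFP_ATOMIC",              GFP_ATOMIC },
		{ "GFP_KERNEL",              GFP_KERNEL },
		{ "no flags (cannot sleep)", 0 },
		{ "GFP_KERNEL|__GFP_HIGH",   GFP_KERNEL | __GFP_HIGH },
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("%-24s old fails charge: %d   new fails charge: %d\n",
		       cases[i].name,
		       old_test_fails_charge(cases[i].mask),
		       new_test_fails_charge(cases[i].mask));
	/*
	 * The last two rows disagree: a mask with neither __GFP_WAIT nor
	 * __GFP_HIGH would have gone on to reclaim (and possibly sleep)
	 * under the old test, while a sleepable mask that happens to carry
	 * __GFP_HIGH would have been failed without attempting reclaim.
	 */
	return 0;
}

Compiled and run, the first two rows agree under both tests; the last two are where they part ways, which is exactly the class of callers the __GFP_WAIT check gets right.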
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 160101a05b4d..dbf571547c03 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -345,23 +345,22 @@ retry:
 			goto done;
 		}
 	}
-
 	unlock_page_cgroup(page);
 
 	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
 	if (pc == NULL)
 		goto err;
 
-	rcu_read_lock();
 	/*
-	 * We always charge the cgroup the mm_struct belongs to
-	 * the mm_struct's mem_cgroup changes on task migration if the
+	 * We always charge the cgroup the mm_struct belongs to.
+	 * The mm_struct's mem_cgroup changes on task migration if the
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
 	if (!mm)
 		mm = &init_mm;
 
+	rcu_read_lock();
 	mem = rcu_dereference(mm->mem_cgroup);
 	/*
 	 * For every charge from the cgroup, increment reference
@@ -375,12 +374,8 @@ retry:
 	 * the cgroup limit.
 	 */
 	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
-		bool is_atomic = gfp_mask & GFP_ATOMIC;
-		/*
-		 * We cannot reclaim under GFP_ATOMIC, fail the charge
-		 */
-		if (is_atomic)
-			goto noreclaim;
+		if (!(gfp_mask & __GFP_WAIT))
+			goto out;
 
 		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
 			continue;
@@ -394,23 +389,12 @@ retry:
 		 */
 		if (res_counter_check_under_limit(&mem->res))
 			continue;
-		/*
-		 * Since we control both RSS and cache, we end up with a
-		 * very interesting scenario where we end up reclaiming
-		 * memory (essentially RSS), since the memory is pushed
-		 * to swap cache, we eventually end up adding those
-		 * pages back to our list. Hence we give ourselves a
-		 * few chances before we fail
-		 */
-		else if (nr_retries--) {
-			congestion_wait(WRITE, HZ/10);
-			continue;
+
+		if (!nr_retries--) {
+			mem_cgroup_out_of_memory(mem, gfp_mask);
+			goto out;
 		}
-noreclaim:
-		css_put(&mem->css);
-		if (!is_atomic)
-			mem_cgroup_out_of_memory(mem, GFP_KERNEL);
-		goto free_pc;
+		congestion_wait(WRITE, HZ/10);
 	}
 
 	atomic_set(&pc->ref_cnt, 1);
@@ -419,10 +403,11 @@ noreclaim:
 	pc->flags = 0;
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
 		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
+
 	if (page_cgroup_assign_new_page_cgroup(page, pc)) {
 		/*
-		 * an another charge is added to this page already.
-		 * we do take lock_page_cgroup(page) again and read
+		 * Another charge has been added to this page already.
+		 * We take lock_page_cgroup(page) again and read
 		 * page->cgroup, increment refcnt.... just retry is OK.
 		 */
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
@@ -437,7 +422,8 @@ noreclaim:
 
 done:
 	return 0;
-free_pc:
+out:
+	css_put(&mem->css);
 	kfree(pc);
 err:
 	return -ENOMEM;
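
Pieced together from the hunks above, and using only what they show (the stretch that falls between the second and third hunks is marked as elided rather than guessed at, and the inline comments are added here for orientation), the charge/retry loop after this patch reads in one piece roughly as follows:

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;	/* caller cannot sleep: fail the charge */

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/* ... lines between hunks elided ... */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

Every iteration now either continues, reaches the single congestion_wait() at the bottom, or jumps to the shared out: label that does the css_put() and kfree(), which is the shape the commit message is asking for.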