author	Hugh Dickins <hugh@veritas.com>	2008-03-04 17:29:08 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-04 19:35:15 -0500
commit	7e924aafa4b03ff71de34af8553d9a1ebc86c071 (patch)
tree	21735b7369ede27f34c41184b0a686020b80e2c6 /mm
parent	9442ec9df40d952b0de185ae5638a74970388e01 (diff)
memcg: mem_cgroup_charge never NULL
My memcgroup patch to fix hang with shmem/tmpfs added NULL page handling to
mem_cgroup_charge_common.  It seemed convenient at the time, but hard to
justify now: there's a perfectly appropriate swappage to charge and uncharge
instead, this is not on any hot path through shmem_getpage, and no performance
hit was observed from the slight extra overhead.

So revert that NULL page handling from mem_cgroup_charge_common; and make it
clearer by bringing page_cgroup_assign_new_page_cgroup into its body - that
was a helper I found more of a hindrance to understanding.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	61
-rw-r--r--	mm/shmem.c	9
2 files changed, 27 insertions(+), 43 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9e170d3c71e5..83ba13ad31e1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -301,25 +301,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
 }
 
 /*
- * Tie new page_cgroup to struct page under lock_page_cgroup()
- * This can fail if the page has been tied to a page_cgroup.
- * If success, returns 0.
- */
-static int page_cgroup_assign_new_page_cgroup(struct page *page,
-					struct page_cgroup *pc)
-{
-	int ret = 0;
-
-	lock_page_cgroup(page);
-	if (!page_get_page_cgroup(page))
-		page_assign_page_cgroup(page, pc);
-	else /* A page is tied to other pc. */
-		ret = 1;
-	unlock_page_cgroup(page);
-	return ret;
-}
-
-/*
  * Clear page->page_cgroup member under lock_page_cgroup().
  * If given "pc" value is different from one page->page_cgroup,
  * page->cgroup is not cleared.
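[Editor's note: the hunk header above mentions lock_page_cgroup()/unlock_page_cgroup(), the per-page lock everything in this patch leans on. In memcontrol.c of this vintage it is, plausibly, a bit-spinlock folded into the low bit of the page->page_cgroup pointer; the standalone C sketch below is written on that assumption, with invented names, and is not copied from the kernel. Aligned allocations leave the low pointer bits zero, so bit 0 can double as a lock.]

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static atomic_uintptr_t slot;		/* models page->page_cgroup */

	#define LOCK_BIT ((uintptr_t)1)

	static void slot_lock(void)
	{
		/* spin until we are the thread that set the lock bit */
		while (atomic_fetch_or(&slot, LOCK_BIT) & LOCK_BIT)
			;			/* cpu_relax() would go here */
	}

	static void slot_unlock(void)
	{
		atomic_fetch_and(&slot, ~LOCK_BIT);
	}

	int main(void)
	{
		static int pc;			/* stand-in for a page_cgroup */

		slot_lock();
		/* publish the pointer while still holding the bit lock */
		atomic_store(&slot, (uintptr_t)&pc | LOCK_BIT);
		slot_unlock();

		printf("stored %p\n", (void *)(atomic_load(&slot) & ~LOCK_BIT));
		return 0;
	}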
@@ -585,26 +566,24 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	 * with it
 	 */
 retry:
-	if (page) {
-		lock_page_cgroup(page);
-		pc = page_get_page_cgroup(page);
-		/*
-		 * The page_cgroup exists and
-		 * the page has already been accounted.
-		 */
-		if (pc) {
-			if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
-				/* this page is under being uncharged ? */
-				unlock_page_cgroup(page);
-				cpu_relax();
-				goto retry;
-			} else {
-				unlock_page_cgroup(page);
-				goto done;
-			}
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
+	/*
+	 * The page_cgroup exists and
+	 * the page has already been accounted.
+	 */
+	if (pc) {
+		if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
+			/* this page is under being uncharged ? */
+			unlock_page_cgroup(page);
+			cpu_relax();
+			goto retry;
+		} else {
+			unlock_page_cgroup(page);
+			goto done;
 		}
-		unlock_page_cgroup(page);
 	}
+	unlock_page_cgroup(page);
 
 	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
 	if (pc == NULL)
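[Editor's note: the retry loop above hinges on atomic_inc_not_zero(): a reference is taken only if the count has not already dropped to zero, i.e. only if the page_cgroup is not mid-teardown; otherwise the charger backs off with cpu_relax() and retries. A minimal userspace model of that idiom in C11, with illustrative names rather than the kernel's:]

	#include <stdatomic.h>
	#include <stdio.h>

	struct obj {
		atomic_int ref_cnt;
	};

	/* Equivalent of atomic_inc_not_zero(): nonzero on success. */
	static int obj_tryget(struct obj *o)
	{
		int old = atomic_load(&o->ref_cnt);

		while (old != 0) {
			/* a failed CAS refreshes 'old', so just loop */
			if (atomic_compare_exchange_weak(&o->ref_cnt,
							 &old, old + 1))
				return 1;	/* reference taken */
		}
		return 0;		/* count hit zero: object is dying */
	}

	int main(void)
	{
		struct obj o = { .ref_cnt = 1 };

		if (obj_tryget(&o))
			printf("got ref, ref_cnt now %d\n",
			       atomic_load(&o.ref_cnt));

		atomic_store(&o.ref_cnt, 0);	/* simulate teardown */
		if (!obj_tryget(&o))
			printf("tryget refused: caller would retry\n");
		return 0;
	}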
@@ -663,7 +642,9 @@ retry:
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
 		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 
-	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
+	lock_page_cgroup(page);
+	if (page_get_page_cgroup(page)) {
+		unlock_page_cgroup(page);
 		/*
 		 * Another charge has been added to this page already.
 		 * We take lock_page_cgroup(page) again and read
@@ -672,10 +653,10 @@ retry:
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
 		kfree(pc);
-		if (!page)
-			goto done;
 		goto retry;
 	}
+	page_assign_page_cgroup(page, pc);
+	unlock_page_cgroup(page);
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
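[Editor's note: with the helper gone, the publish step is spelled out at its only call site: take the per-page lock, check whether another charger has already tied a page_cgroup to the page, and either roll back (uncharge, css_put, kfree, retry) or install the new descriptor before unlocking. A small userspace model of that check-then-assign-under-lock shape, with hypothetical names:]

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct page_like {
		pthread_mutex_t lock;
		void *cgroup;		/* models page->page_cgroup */
	};

	/* Returns 0 if pc was installed, 1 if someone beat us to it
	 * (caller frees its allocation and retries). */
	static int try_install(struct page_like *p, void *pc)
	{
		int busy;

		pthread_mutex_lock(&p->lock);
		busy = (p->cgroup != NULL);
		if (!busy)
			p->cgroup = pc;	/* publish while holding the lock */
		pthread_mutex_unlock(&p->lock);
		return busy;
	}

	int main(void)
	{
		struct page_like p = { PTHREAD_MUTEX_INITIALIZER, NULL };
		void *pc = malloc(1);

		if (try_install(&p, pc))
			free(pc);	/* lost the race: undo and retry */
		else
			printf("descriptor installed\n");
		return 0;
	}

Open-coding the sequence makes the lock scope visible at a glance, which is the readability gain the commit message is after.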
diff --git a/mm/shmem.c b/mm/shmem.c
index 90b576cbc06e..3372bc579e89 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1370,14 +1370,17 @@ repeat:
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			unlock_page(swappage);
-			page_cache_release(swappage);
 			if (error == -ENOMEM) {
 				/* allow reclaim from this memory cgroup */
-				error = mem_cgroup_cache_charge(NULL,
+				error = mem_cgroup_cache_charge(swappage,
 					current->mm, gfp & ~__GFP_HIGHMEM);
-				if (error)
+				if (error) {
+					page_cache_release(swappage);
 					goto failed;
+				}
+				mem_cgroup_uncharge_page(swappage);
 			}
+			page_cache_release(swappage);
 			goto repeat;
 		}
 	} else if (sgp == SGP_READ && !filepage) {
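[Editor's note: the shmem side now charges the swappage itself on -ENOMEM instead of a NULL page: the charge attempt can reclaim within the memory cgroup, and when it succeeds it is immediately undone before the goto repeat. A toy model of that probe-charge-then-release flow; the budget counter and helper names are invented for illustration, standing in for mem_cgroup_cache_charge()/mem_cgroup_uncharge_page():]

	#include <errno.h>
	#include <stdio.h>

	static int budget = 1;		/* models the cgroup's headroom */

	static int probe_charge(void)	/* models mem_cgroup_cache_charge() */
	{
		if (budget <= 0)
			return -ENOMEM;	/* reclaim failed too: hard failure */
		budget--;
		return 0;
	}

	static void probe_uncharge(void) /* models mem_cgroup_uncharge_page() */
	{
		budget++;
	}

	int main(void)
	{
		int error = -ENOMEM;	/* pretend the cache add failed */

		if (error == -ENOMEM) {
			error = probe_charge();
			if (error) {
				fprintf(stderr, "no headroom, giving up\n");
				return 1;
			}
			probe_uncharge();	/* give it back before retry */
		}
		printf("charge probed OK, retrying the lookup\n");
		return 0;
	}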