author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2008-02-07 03:14:10 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 11:42:19 -0500
commit	ae41be374293e70e1ed441d986afcc6e744ef9d9 (patch)
tree	d8e2143820bbf3ed2f1f79ed99ee430284567b93 /mm
parent	9175e0311ec9e6d1bf1f6dfecf9268baf08765e6 (diff)
bugfix for memory cgroup controller: migration under memory controller fix
While using the memory control cgroup, page migration under it works as follows:
==
 1. uncharge all refs at try_to_unmap().
 2. charge refs again at remove_migration_ptes().
==
This is simple, but it has the following problems:
==
 The page is uncharged and charged back again only if it is *mapped*.
    - This means that the cgroup before migration can be different from
      the one after migration.
    - If the page is not mapped but is charged as page cache, the charge
      is simply ignored (because the page is not mapped, it is never
      uncharged before migration). This is a memory leak.
==
This patch keeps the memory cgroup stable across page migration by
holding one extra refcnt during it. Three functions are added:

 mem_cgroup_prepare_migration() --- increase refcnt of page->page_cgroup
 mem_cgroup_end_migration()     --- decrease refcnt of page->page_cgroup
 mem_cgroup_page_migration()    --- copy page->page_cgroup from the old
                                    page to the new page.

During migration:
  - the old page is under PG_locked.
  - the new page is under PG_locked, too.
  - neither the old page nor the new page is on the LRU.

These three facts guarantee that page_cgroup migration has no races.

Tested and worked well on an x86_64/fake-NUMA box.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
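
The refcount handoff described above can be modelled in a few lines of
userspace C. The sketch below is illustrative only, not kernel code:
the structs and helper names (inc_not_zero, prepare_migration, and so
on) are stand-ins for this sketch, the real functions also take
lock_page_cgroup(), and all LRU handling is omitted.

#include <stdatomic.h>
#include <stdio.h>

struct page_cgroup { atomic_int ref_cnt; };
struct page { struct page_cgroup *pc; };

/* Imitates the kernel's atomic_inc_not_zero(): take a reference
 * only if at least one reference already exists. */
static int inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;
	return 0;
}

/* Models mem_cgroup_prepare_migration(): pin the charge. */
static int prepare_migration(struct page *page)
{
	return page->pc && inc_not_zero(&page->pc->ref_cnt);
}

/* Models mem_cgroup_page_migration(): move the charge, never
 * uncharge-and-recharge it. */
static void page_migration(struct page *page, struct page *newpage)
{
	newpage->pc = page->pc;
	page->pc = NULL;
}

/* Models mem_cgroup_end_migration(): drop the pin taken above. */
static void end_migration(struct page *page)
{
	atomic_fetch_sub(&page->pc->ref_cnt, 1);
}

int main(void)
{
	struct page_cgroup pc = { 1 };		/* one ref: the charge */
	struct page oldp = { &pc }, newp = { NULL };

	if (prepare_migration(&oldp)) {		/* ref_cnt: 1 -> 2 */
		page_migration(&oldp, &newp);	/* pc now on newp  */
		end_migration(&newp);		/* ref_cnt: 2 -> 1 */
	}
	printf("ref_cnt after migration: %d\n",
	       atomic_load(&newp.pc->ref_cnt));	/* charge survives */
	return 0;
}

Note the asymmetry this models: the pin is taken on the old page but,
after a successful move, must be dropped through the new page, because
that is where the page_cgroup now lives.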
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	43
-rw-r--r--	mm/migrate.c	13
2 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3270ce7375db..128f45c16fa6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -492,6 +492,49 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
 		}
 	}
 }
+/*
+ * Returns non-zero if a page (under migration) has valid page_cgroup member.
+ * Refcnt of page_cgroup is incremented.
+ */
+
+int mem_cgroup_prepare_migration(struct page *page)
+{
+	struct page_cgroup *pc;
+	int ret = 0;
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
+	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
+		ret = 1;
+	unlock_page_cgroup(page);
+	return ret;
+}
+
+void mem_cgroup_end_migration(struct page *page)
+{
+	struct page_cgroup *pc = page_get_page_cgroup(page);
+	mem_cgroup_uncharge(pc);
+}
+/*
+ * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
+ * And no race with uncharge() routines because page_cgroup for *page*
+ * has extra one reference by mem_cgroup_prepare_migration.
+ */
+
+void mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+	struct page_cgroup *pc;
+retry:
+	pc = page_get_page_cgroup(page);
+	if (!pc)
+		return;
+	if (clear_page_cgroup(page, pc) != pc)
+		goto retry;
+	pc->page = newpage;
+	lock_page_cgroup(newpage);
+	page_assign_page_cgroup(newpage, pc);
+	unlock_page_cgroup(newpage);
+	return;
+}
 
 int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {
diff --git a/mm/migrate.c b/mm/migrate.c
index 763794144697..a73504ff5ab9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -593,9 +593,10 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	else
 		rc = fallback_migrate_page(mapping, newpage, page);
 
-	if (!rc)
+	if (!rc) {
+		mem_cgroup_page_migration(page, newpage);
 		remove_migration_ptes(page, newpage);
-	else
+	} else
 		newpage->mapping = NULL;
 
 	unlock_page(newpage);
@@ -614,6 +615,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
 	int rcu_locked = 0;
+	int charge = 0;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -673,14 +675,19 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		goto rcu_unlock;
 	}
 
+	charge = mem_cgroup_prepare_migration(page);
 	/* Establish migration ptes or remove ptes */
 	try_to_unmap(page, 1);
 
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page);
 
-	if (rc)
+	if (rc) {
 		remove_migration_ptes(page, page);
+		if (charge)
+			mem_cgroup_end_migration(page);
+	} else if (charge)
+		mem_cgroup_end_migration(newpage);
 rcu_unlock:
 	if (rcu_locked)
 		rcu_read_unlock();
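
Read together, the two unmap_and_move() hunks implement a single rule:
the reference pinned by mem_cgroup_prepare_migration() must be dropped
through whichever page the page_cgroup ends up attached to. The
following is a condensed, comment-annotated view of the patched control
flow, not standalone code; error paths other than rc are elided.

	charge = mem_cgroup_prepare_migration(page);	/* pin the charge */

	try_to_unmap(page, 1);
	if (!page_mapped(page))
		/* on success this path runs mem_cgroup_page_migration(),
		 * moving the page_cgroup from page to newpage */
		rc = move_to_new_page(newpage, page);

	if (rc) {
		/* Migration failed: the page_cgroup never moved, so the
		 * pinned reference is still held by the old page. */
		remove_migration_ptes(page, page);
		if (charge)
			mem_cgroup_end_migration(page);
	} else if (charge) {
		/* Migration succeeded: the page_cgroup now hangs off
		 * newpage, so the pin must be dropped through it. */
		mem_cgroup_end_migration(newpage);
	}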