path: root/mm/memcontrol.c
author    Hugh Dickins <hugh@veritas.com>    2008-03-04 17:29:16 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-03-04 19:35:15 -0500
commit    fb59e9f1e9786635ea12e12bf6adbb132e10f979 (patch)
tree      9104d67895e0e85e3014c60389146ab2318faa7c    /mm/memcontrol.c
parent    9b3c0a07e0fca35e36751680de3e4c76dbff5df3 (diff)
memcg: fix oops on NULL lru list
While testing force_empty, during an exit_mmap, __mem_cgroup_remove_list called from mem_cgroup_uncharge_page oopsed on a NULL pointer in the lru list. I couldn't see what racing tasks on other cpus were doing, but surmise that another must have been in mem_cgroup_charge_common on the same page, between its unlock_page_cgroup and spin_lock_irqsave near done (thanks to that kzalloc which I'd almost changed to a kmalloc).

Normally such a race cannot happen: the ref_cnt prevents it, so the final uncharge cannot race with the initial charge. But force_empty buggers the ref_cnt, that's what it's all about; and thereafter forced pages are vulnerable to races such as this (just think of a shared page also mapped into an mm of another mem_cgroup than that just emptied), and remain vulnerable until they're freed indefinitely later.

This patch just fixes the oops by moving the unlock_page_cgroups down below adding to and removing from the list (only possible given the previous patch); and while we're at it, we might as well make it an invariant that page->page_cgroup is always set while pc is on lru.

But this behaviour of force_empty seems highly unsatisfactory to me: why have a ref_cnt if we always have to cope with it being violated (as in the earlier page migration patch)? We may prefer force_empty to move pages to an orphan mem_cgroup (could be the root, but better not), from which other cgroups could recover them; we might need to reverse the locking again; but no time now for such concerns.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
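To make the reordering concrete, here is a minimal, hypothetical user-space C sketch of the invariant the patch establishes: page->page_cgroup stays set for as long as the page_cgroup is on an LRU list, because the page_cgroup lock is now held across the LRU manipulation and the pointer is cleared only after removal. The struct layouts, the uncharge() helper and the pthread mutexes standing in for lock_page_cgroup() and mz->lru_lock are illustrative assumptions, not the kernel code:

#include <pthread.h>
#include <stddef.h>

struct page_cgroup;

struct page {
        pthread_mutex_t cgroup_lock;            /* stand-in for lock_page_cgroup() */
        struct page_cgroup *page_cgroup;        /* cleared only after LRU removal */
};

struct lru {
        pthread_mutex_t lock;                   /* stand-in for mz->lru_lock */
};

struct page_cgroup {
        struct page *page;
        int ref_cnt;
};

/* Uncharge path with the corrected ordering: the page_cgroup lock is held
 * across the LRU removal, and page->page_cgroup is cleared only once the
 * page_cgroup is off the list, so anything that finds pc on an LRU can rely
 * on page->page_cgroup still being set. */
static void uncharge(struct lru *lru, struct page *page, struct page_cgroup *pc)
{
        pthread_mutex_lock(&page->cgroup_lock);
        if (--pc->ref_cnt == 0) {
                pthread_mutex_lock(&lru->lock);
                /* the equivalent of __mem_cgroup_remove_list(pc) runs here */
                pthread_mutex_unlock(&lru->lock);

                page->page_cgroup = NULL;       /* only after pc is off the LRU */
        }
        pthread_mutex_unlock(&page->cgroup_lock);
}

The actual patch applies the same reordering to the charge and page-migration paths as well, as the diff below shows.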
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--    mm/memcontrol.c    17
1 file changed, 9 insertions, 8 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f72067094e05..8b9f6cae938e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -623,13 +623,13 @@ retry:
                 goto retry;
         }
         page_assign_page_cgroup(page, pc);
-        unlock_page_cgroup(page);
 
         mz = page_cgroup_zoneinfo(pc);
         spin_lock_irqsave(&mz->lru_lock, flags);
         __mem_cgroup_add_list(pc);
         spin_unlock_irqrestore(&mz->lru_lock, flags);
 
+        unlock_page_cgroup(page);
 done:
         return 0;
 out:
@@ -677,14 +677,14 @@ void mem_cgroup_uncharge_page(struct page *page)
         VM_BUG_ON(pc->ref_cnt <= 0);
 
         if (--(pc->ref_cnt) == 0) {
-                page_assign_page_cgroup(page, NULL);
-                unlock_page_cgroup(page);
-
                 mz = page_cgroup_zoneinfo(pc);
                 spin_lock_irqsave(&mz->lru_lock, flags);
                 __mem_cgroup_remove_list(pc);
                 spin_unlock_irqrestore(&mz->lru_lock, flags);
 
+                page_assign_page_cgroup(page, NULL);
+                unlock_page_cgroup(page);
+
                 mem = pc->mem_cgroup;
                 res_counter_uncharge(&mem->res, PAGE_SIZE);
                 css_put(&mem->css);
@@ -736,23 +736,24 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
                 return;
         }
 
-        page_assign_page_cgroup(page, NULL);
-        unlock_page_cgroup(page);
-
         mz = page_cgroup_zoneinfo(pc);
         spin_lock_irqsave(&mz->lru_lock, flags);
         __mem_cgroup_remove_list(pc);
         spin_unlock_irqrestore(&mz->lru_lock, flags);
 
+        page_assign_page_cgroup(page, NULL);
+        unlock_page_cgroup(page);
+
         pc->page = newpage;
         lock_page_cgroup(newpage);
         page_assign_page_cgroup(newpage, pc);
-        unlock_page_cgroup(newpage);
 
         mz = page_cgroup_zoneinfo(pc);
         spin_lock_irqsave(&mz->lru_lock, flags);
         __mem_cgroup_add_list(pc);
         spin_unlock_irqrestore(&mz->lru_lock, flags);
+
+        unlock_page_cgroup(newpage);
 }
 
 /*