path: root/mm/page_alloc.c
author	Hugh Dickins <hugh@veritas.com>	2008-03-04 17:29:07 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-03-04 19:35:15 -0500
commit	9442ec9df40d952b0de185ae5638a74970388e01 (patch)
tree	14b06d71203be119d93736464ca49f37ce402c1c /mm/page_alloc.c
parent	98837c7f82ef78aa38f40462aa2fcac68fd3acbf (diff)
memcg: bad page if page_cgroup when free
Replace free_hot_cold_page's VM_BUG_ON(page_get_page_cgroup(page)) by a
"Bad page state" and clear: most users don't have CONFIG_DEBUG_VM on, and
if it were set here, it'd likely cause corruption when the page is reused.

Don't use page_assign_page_cgroup to clear it: that should be private to
memcontrol.c, and always called with the lock taken; and memmap_init_zone
doesn't need it either - like page->mapping and other pointers throughout
the kernel, Linux assumes pointers in zeroed structures are NULL pointers.

Instead use page_reset_bad_cgroup, added to memcontrol.h for this only.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
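For context, page_reset_bad_cgroup is introduced by this commit in memcontrol.h, outside the mm/page_alloc.c diffstat shown below. A minimal sketch of the shape such a helper would take follows, assuming the CONFIG_CGROUP_MEM_CONT split used by the memory controller at the time; the bodies here are illustrative, not the commit's exact code.

#ifdef CONFIG_CGROUP_MEM_CONT
/* Defined in mm/memcontrol.c: wipes a stale page->page_cgroup pointer.
 * bad_page() is the only intended caller, so no page_cgroup lock is taken;
 * the page state is already known to be bad and is being reset wholesale. */
extern void page_reset_bad_cgroup(struct page *page);
#else
/* With the memory controller compiled out there is no cgroup pointer to
 * clear, so the helper compiles away to nothing. */
static inline void page_reset_bad_cgroup(struct page *page)
{
}
#endif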
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e76cf94725c9..402a504f1228 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -222,13 +222,19 @@ static inline int bad_range(struct zone *zone, struct page *page)
 
 static void bad_page(struct page *page)
 {
-	printk(KERN_EMERG "Bad page state in process '%s'\n"
-		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
-		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
-		KERN_EMERG "Backtrace:\n",
+	void *pc = page_get_page_cgroup(page);
+
+	printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
+		"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
 		current->comm, page, (int)(2*sizeof(unsigned long)),
 		(unsigned long)page->flags, page->mapping,
 		page_mapcount(page), page_count(page));
+	if (pc) {
+		printk(KERN_EMERG "cgroup:%p\n", pc);
+		page_reset_bad_cgroup(page);
+	}
+	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
+		KERN_EMERG "Backtrace:\n");
 	dump_stack();
 	page->flags &= ~(1 << PG_lru	|
 			1 << PG_private |
@@ -454,6 +460,7 @@ static inline int free_pages_check(struct page *page)
 {
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL)  |
+		(page_get_page_cgroup(page) != NULL) |
 		(page_count(page) != 0)  |
 		(page->flags & (
 			1 << PG_lru	|
@@ -603,6 +610,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL)  |
+		(page_get_page_cgroup(page) != NULL) |
 		(page_count(page) != 0)  |
 		(page->flags & (
 			1 << PG_lru	|
@@ -989,7 +997,6 @@ static void free_hot_cold_page(struct page *page, int cold)
 
 	if (!PageHighMem(page))
 		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-	VM_BUG_ON(page_get_page_cgroup(page));
 	arch_free_page(page, 0);
 	kernel_map_pages(page, 1, 0);
 
@@ -2528,7 +2535,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		set_page_links(page, zone, nid, pfn);
 		init_page_count(page);
 		reset_page_mapcount(page);
-		page_assign_page_cgroup(page, NULL);
 		SetPageReserved(page);
 
 		/*