aboutsummaryrefslogtreecommitdiffstats
path: root/mm/debug.c
diff options
context:
space:
mode:
authorPavel Tatashin <pasha.tatashin@oracle.com>2018-07-03 20:02:53 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-07-03 20:32:19 -0400
commitfc36def997cfd6cbff3eda4f82853a5c311c5466 (patch)
tree30bcbc53d846d0af57df66256968458590cb7cd7 /mm/debug.c
parent5e4e290d3751607726a62f0b49e11261a0a9345e (diff)
mm: teach dump_page() to correctly output poisoned struct pages
If struct page is poisoned, and uninitialized access is detected via
PF_POISONED_CHECK(page), dump_page() is called to output the page. But
the dump_page() itself accesses struct page to determine how to print
it, and therefore gets into a recursive loop. For example:

dump_page()
 __dump_page()
  PageSlab(page)
   PF_POISONED_CHECK(page)
    VM_BUG_ON_PGFLAGS(PagePoisoned(page), page)
     dump_page() recursion loop.

Link: http://lkml.kernel.org/r/20180702180536.2552-1-pasha.tatashin@oracle.com
Fixes: f165b378bbdf ("mm: uninitialized struct page poisoning sanity checking")
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/debug.c')
-rw-r--r--mm/debug.c18
1 file changed, 16 insertions, 2 deletions
diff --git a/mm/debug.c b/mm/debug.c
index 56e2d9125ea5..38c926520c97 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = {
43 43
44void __dump_page(struct page *page, const char *reason) 44void __dump_page(struct page *page, const char *reason)
45{ 45{
46 bool page_poisoned = PagePoisoned(page);
47 int mapcount;
48
49 /*
50 * If struct page is poisoned don't access Page*() functions as that
51 * leads to recursive loop. Page*() check for poisoned pages, and calls
52 * dump_page() when detected.
53 */
54 if (page_poisoned) {
55 pr_emerg("page:%px is uninitialized and poisoned", page);
56 goto hex_only;
57 }
58
46 /* 59 /*
47 * Avoid VM_BUG_ON() in page_mapcount(). 60 * Avoid VM_BUG_ON() in page_mapcount().
48 * page->_mapcount space in struct page is used by sl[aou]b pages to 61 * page->_mapcount space in struct page is used by sl[aou]b pages to
49 * encode own info. 62 * encode own info.
50 */ 63 */
51 int mapcount = PageSlab(page) ? 0 : page_mapcount(page); 64 mapcount = PageSlab(page) ? 0 : page_mapcount(page);
52 65
53 pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", 66 pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
54 page, page_ref_count(page), mapcount, 67 page, page_ref_count(page), mapcount,
@@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason)
60 73
61 pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); 74 pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
62 75
76hex_only:
63 print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, 77 print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
64 sizeof(unsigned long), page, 78 sizeof(unsigned long), page,
65 sizeof(struct page), false); 79 sizeof(struct page), false);
@@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason)
68 pr_alert("page dumped because: %s\n", reason); 82 pr_alert("page dumped because: %s\n", reason);
69 83
70#ifdef CONFIG_MEMCG 84#ifdef CONFIG_MEMCG
71 if (page->mem_cgroup) 85 if (!page_poisoned && page->mem_cgroup)
72 pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); 86 pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
73#endif 87#endif
74} 88}