path: root/include/linux/mm.h
author		Stanislaw Gruszka <sgruszka@redhat.com>	2012-01-10 18:07:28 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-10 19:30:42 -0500
commit		c0a32fc5a2e470d0b02597b23ad79a317735253e (patch)
tree		2d164edae0062918ca2088772c00b0615781353b /include/linux/mm.h
parent		1399ff86f2a2bbacbbe68fa00c5f8c752b344723 (diff)
mm: more intensive memory corruption debugging
With CONFIG_DEBUG_PAGEALLOC configured, the CPU will generate an exception
on access (read, write) to an unallocated page, which permits us to catch
code which corrupts memory.  However the kernel is trying to maximise
memory usage, hence there are usually few free pages in the system and
buggy code usually corrupts some crucial data.

This patch changes the buddy allocator to keep more free/protected pages
and to interlace free/protected and allocated pages to increase the
probability of catching corruption.

When the kernel is compiled with CONFIG_DEBUG_PAGEALLOC,
debug_guardpage_minorder defines the minimum order used by the page
allocator to grant a request.  The requested size will be returned with
the remaining pages used as guard pages.

The default value of debug_guardpage_minorder is zero: no change from
current behaviour.

[akpm@linux-foundation.org: tweak documentation, s/flg/flag/]
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
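To make the mechanism concrete, below is a minimal user-space model of the
guard-page scheme described above.  It is not kernel code: the flat page
arrays, alloc_with_guards() and the hard-coded minorder value are all
illustrative; the full patch does the real work in mm/page_alloc.c and
exposes debug_guardpage_minorder as a boot parameter.

#include <stdbool.h>
#include <stdio.h>

#define ZONE_PAGES 16
static bool allocated[ZONE_PAGES];
static bool guard[ZONE_PAGES];

/* In the real patch this is a boot-time tunable; 0 means "off". */
static unsigned int debug_guardpage_minorder = 2;

/*
 * Serve a request for 2^order pages starting at `base`.  If the order is
 * below the configured minimum, carve it out of a 2^minorder block
 * instead: the caller still gets only 2^order pages, and the surplus
 * becomes guard pages that would fault on any access.
 */
static void alloc_with_guards(unsigned int base, unsigned int order)
{
	unsigned int want = 1u << order;
	unsigned int got = want;

	if (order < debug_guardpage_minorder)
		got = 1u << debug_guardpage_minorder;

	for (unsigned int i = 0; i < got; i++) {
		if (i < want)
			allocated[base + i] = true;	/* returned to caller */
		else
			guard[base + i] = true;		/* kept as a tripwire */
	}
}

int main(void)
{
	alloc_with_guards(0, 0);	/* 1 page used, 3 guards */
	alloc_with_guards(4, 1);	/* 2 pages used, 2 guards */
	alloc_with_guards(8, 2);	/* order >= minorder: no guards */

	/* A = allocated, G = guard, . = free; note the interlacing. */
	for (unsigned int i = 0; i < ZONE_PAGES; i++)
		putchar(allocated[i] ? 'A' : guard[i] ? 'G' : '.');
	putchar('\n');	/* prints "AGGGAAGGAAAA...." */
	return 0;
}

The interlacing in the printed map is the point: guard pages sit between
live allocations, so a stray write past an allocation's end is far more
likely to land on a protected page and fault immediately.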
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5d9b4c9813bd..5568553a41fd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1618,5 +1618,22 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern unsigned int _debug_guardpage_minorder;
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+	return _debug_guardpage_minorder;
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+}
+#else
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool page_is_guard(struct page *page) { return false; }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
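The #else stubs are what let callers test page_is_guard() unconditionally:
with CONFIG_DEBUG_PAGEALLOC off, the helper collapses to a compile-time
false and the guarded branch disappears.  A stand-alone sketch of that
pattern follows; the one-field struct page is a stand-in, and a plain
bitwise test replaces the kernel's test_bit().

#include <stdbool.h>
#include <stdio.h>

struct page { unsigned long debug_flags; };

#ifdef CONFIG_DEBUG_PAGEALLOC
#define PAGE_DEBUG_FLAG_GUARD 0
static inline bool page_is_guard(struct page *page)
{
	return page->debug_flags & (1UL << PAGE_DEBUG_FLAG_GUARD);
}
#else
/* Constant false: the compiler drops any branch guarded by this. */
static inline bool page_is_guard(struct page *page) { return false; }
#endif

int main(void)
{
	struct page p = { .debug_flags = 1 };

	/* Built with -DCONFIG_DEBUG_PAGEALLOC this prints "guard";
	 * without it, the true branch is dead code. */
	printf("%s\n", page_is_guard(&p) ? "guard" : "not a guard");
	return 0;
}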