author     Joonsoo Kim <iamjoonsoo.kim@lge.com>  2014-12-12 19:55:49 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 15:42:48 -0500
commit     e30825f1869a75b29a69dc8e0aaaaccc492092cf
tree       33adc902e098cf23d8a8b9d97ad01b1d06e28e93 /mm/page_alloc.c
parent     eefa864b701d78dc9753c70a3540a2e9ae192595
mm/debug-pagealloc: prepare boottime configurable on/off
Until now, debug-pagealloc has needed extra flags in struct page, so the whole kernel must be recompiled whenever we decide to use it. This is really painful, because recompiling takes time and a rebuild is sometimes impossible because a third-party module depends on the layout of struct page. So we can't use this good feature in many cases.

Now we have the page extension feature, which allows us to keep extra flags outside of struct page. This gets rid of the third-party module issue mentioned above, and it lets us decide at boot time whether the extra memory for page extensions is needed at all. With these properties, a kernel built with CONFIG_DEBUG_PAGEALLOC can keep debug-pagealloc disabled at boot time with low computational overhead. This will help our development process greatly.

This patch is the preparation step to achieve that goal: debug-pagealloc originally uses an extra field of struct page, but after this patch it uses a field of struct page_ext instead. Because, with CONFIG_SPARSEMEM, memory for page_ext is allocated later than the initialization of the page allocator, the debug-pagealloc feature must stay disabled until page_ext has been initialized. This patch implements that.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
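For context, this is the access pattern the patch moves to: instead of testing a bit in page->debug_flags, callers look up the page's struct page_ext and test a bit in page_ext->flags, with debug_guardpage_enabled() guarding against use before page_ext is initialized. A minimal sketch, assuming only the interfaces visible in this diff (lookup_page_ext(), PAGE_EXT_DEBUG_GUARD); the helper name page_is_guard_ext() is hypothetical and not part of the patch:

static inline bool page_is_guard_ext(struct page *page)
{
	struct page_ext *page_ext;

	/* page_ext may not be initialized yet; report "no guard" until then */
	if (!debug_guardpage_enabled())
		return false;

	page_ext = lookup_page_ext(page);
	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}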
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  38
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b64666cf5865..e0a39d328ca1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,7 +56,7 @@
 #include <linux/prefetch.h>
 #include <linux/mm_inline.h>
 #include <linux/migrate.h>
-#include <linux/page-debug-flags.h>
+#include <linux/page_ext.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
 
@@ -425,6 +425,22 @@ static inline void prep_zero_page(struct page *page, unsigned int order,
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
+bool _debug_guardpage_enabled __read_mostly;
+
+static bool need_debug_guardpage(void)
+{
+	return true;
+}
+
+static void init_debug_guardpage(void)
+{
+	_debug_guardpage_enabled = true;
+}
+
+struct page_ext_operations debug_guardpage_ops = {
+	.need = need_debug_guardpage,
+	.init = init_debug_guardpage,
+};
 
 static int __init debug_guardpage_minorder_setup(char *buf)
 {
@@ -443,7 +459,14 @@ __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 static inline void set_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype)
 {
-	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+	struct page_ext *page_ext;
+
+	if (!debug_guardpage_enabled())
+		return;
+
+	page_ext = lookup_page_ext(page);
+	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
 	INIT_LIST_HEAD(&page->lru);
 	set_page_private(page, order);
 	/* Guard pages are not available for any usage */
@@ -453,12 +476,20 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 static inline void clear_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype)
 {
-	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+	struct page_ext *page_ext;
+
+	if (!debug_guardpage_enabled())
+		return;
+
+	page_ext = lookup_page_ext(page);
+	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
 	set_page_private(page, 0);
 	if (!is_migrate_isolate(migratetype))
 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
 }
 #else
+struct page_ext_operations debug_guardpage_ops = { NULL, };
 static inline void set_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype) {}
 static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -869,6 +900,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
 		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+			debug_guardpage_enabled() &&
 			high < debug_guardpage_minorder()) {
 			/*
 			 * Mark as guard pages (or page), that will allow to
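For orientation, a simplified sketch of how the page extension core is expected to consume debug_guardpage_ops (modeled on mm/page_ext.c from the same series; abbreviated and not part of this diff): each registered struct page_ext_operations has its ->need() callback invoked at boot to decide whether page_ext memory should be allocated at all, and its ->init() callback invoked once that memory is ready.

/* Simplified sketch of the page_ext core side (assumed, not in this patch). */
static struct page_ext_operations *page_ext_ops[] = {
	&debug_guardpage_ops,
	/* other page_ext clients register here */
};

static bool invoke_need_callbacks(void)
{
	int i;

	/* Allocate page_ext storage only if at least one client wants it */
	for (i = 0; i < ARRAY_SIZE(page_ext_ops); i++)
		if (page_ext_ops[i]->need && page_ext_ops[i]->need())
			return true;
	return false;
}

static void invoke_init_callbacks(void)
{
	int i;

	/* Storage is ready; clients may now flip their enabled flags */
	for (i = 0; i < ARRAY_SIZE(page_ext_ops); i++)
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
}

This is why init_debug_guardpage() does nothing but set _debug_guardpage_enabled: until the init callback runs, set_page_guard() and clear_page_guard() bail out early, which is exactly the temporary disabling described in the commit message.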