author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2016-07-26 18:23:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 19:19:19 -0400
commit		46f24fd857b37bb86ddd5d0ac3d194e984dfdf1c (patch)
tree		7ebef7dc0e77ec8bf1c52b468d0de64351317fdc
parent		f2ca0b55710752588ccff5224a11e6aea43a996a (diff)
mm/page_alloc: introduce post allocation processing on page allocator
This patch is motivated by Hugh and Vlastimil's concern [1].

There are two ways to get a free page from the allocator. One is the normal
memory allocation API; the other is __isolate_free_page(), which is used
internally by compaction and pageblock isolation. The latter is rather
tricky, since it does not perform all of the post-allocation processing
done by the normal API.

One problematic case I already know of is that a poisoned page is not
checked when it is allocated via __isolate_free_page(). There may be more.
We could add further debug logic for allocated pages in the future, and
this separation would then cause more problems. I'd like to fix this
situation now.

The solution is simple: this patch commonizes the post-allocation logic for
newly allocated pages and uses it at all sites, which solves the problem.

[1] http://marc.info/?i=alpine.LSU.2.11.1604270029350.7066%40eggly.anvils%3E

[iamjoonsoo.kim@lge.com: mm-page_alloc-introduce-post-allocation-processing-on-page-allocator-v3]
  Link: http://lkml.kernel.org/r/1464230275-25791-7-git-send-email-iamjoonsoo.kim@lge.com
Link: http://lkml.kernel.org/r/1466150259-27727-9-git-send-email-iamjoonsoo.kim@lge.com
Link: http://lkml.kernel.org/r/1464230275-25791-7-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
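To make the consolidation concrete, here is a minimal sketch (not part of the patch) of a hypothetical mm-internal user of __isolate_free_page(). The helper name grab_free_page() is invented for illustration; __isolate_free_page() and post_alloc_hook() are the interfaces this patch actually touches, declared in mm/internal.h. With the hook in place, such a caller no longer open-codes (and possibly misses) individual post-allocation steps:

/*
 * Illustrative sketch only -- hypothetical caller, not from the patch.
 * Pulls a free page straight off the buddy lists and later hands it out.
 */
static struct page *grab_free_page(struct zone *zone, struct page *page,
				   unsigned int order)
{
	unsigned long flags;
	int isolated;

	/* __isolate_free_page() must be called with zone->lock held. */
	spin_lock_irqsave(&zone->lock, flags);
	isolated = __isolate_free_page(page, order);
	spin_unlock_irqrestore(&zone->lock, flags);

	if (!isolated)
		return NULL;

	/*
	 * One call now covers the refcount, arch hook, kernel_map_pages,
	 * page poisoning, KASAN and page_owner handling that callers
	 * previously had to duplicate by hand.
	 */
	post_alloc_hook(page, order, __GFP_MOVABLE);
	return page;
}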
-rw-r--r--	mm/compaction.c		8
-rw-r--r--	mm/internal.h		2
-rw-r--r--	mm/page_alloc.c		23
-rw-r--r--	mm/page_isolation.c	4
4 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 4ae1294068a8..64df5fe052db 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -74,14 +74,8 @@ static void map_pages(struct list_head *list)
 
 		order = page_private(page);
 		nr_pages = 1 << order;
-		set_page_private(page, 0);
-		set_page_refcounted(page);
 
-		arch_alloc_page(page, order);
-		kernel_map_pages(page, nr_pages, 1);
-		kasan_alloc_pages(page, order);
-
-		set_page_owner(page, order, __GFP_MOVABLE);
+		post_alloc_hook(page, order, __GFP_MOVABLE);
 		if (order)
 			split_page(page, order);
 
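For context, after this change the body of map_pages() in mm/compaction.c reads approximately as follows. This is reconstructed from the hunk above and the surrounding code of that kernel version, so treat it as a sketch rather than the literal tree contents:

static void map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		/* Single entry point for all post-allocation processing. */
		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		/* Queue each 0-order piece for use as a migration target. */
		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}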
diff --git a/mm/internal.h b/mm/internal.h
index 2524ec880e24..fbfba0cc2c35 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -150,6 +150,8 @@ extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
 					unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned int order);
+extern void post_alloc_hook(struct page *page, unsigned int order,
+					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a82b303c19b1..13cf4c665321 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1724,6 +1724,19 @@ static bool check_new_pages(struct page *page, unsigned int order)
 	return false;
 }
 
+inline void post_alloc_hook(struct page *page, unsigned int order,
+				gfp_t gfp_flags)
+{
+	set_page_private(page, 0);
+	set_page_refcounted(page);
+
+	arch_alloc_page(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	kernel_poison_pages(page, 1 << order, 1);
+	kasan_alloc_pages(page, order);
+	set_page_owner(page, order, gfp_flags);
+}
+
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 							unsigned int alloc_flags)
 {
@@ -1736,13 +1749,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 		poisoned &= page_is_poisoned(p);
 	}
 
-	set_page_private(page, 0);
-	set_page_refcounted(page);
-
-	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
-	kernel_poison_pages(page, 1 << order, 1);
-	kasan_alloc_pages(page, order);
+	post_alloc_hook(page, order, gfp_flags);
 
 	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
@@ -1751,8 +1758,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
 
-	set_page_owner(page, order, gfp_flags);
-
 	/*
 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 	 * allocate the page. The expectation is that the caller is taking
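Putting the three page_alloc.c hunks together, prep_new_page() after this patch reads roughly as below. This is a reconstruction for readability, with the pfmemalloc tail paraphrased from the context lines above, not the exact tree contents:

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	int i;
	bool poisoned = true;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (poisoned)
			poisoned &= page_is_poisoned(p);
	}

	/* Common post-allocation processing, shared with __isolate_free_page() users. */
	post_alloc_hook(page, order, gfp_flags);

	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page, so the caller (expected to be freeing memory)
	 * can keep it away from !PFMEMALLOC users.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}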
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 927f5ee24c87..4639163b78f9 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -128,9 +128,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (isolated_page) {
-		kernel_map_pages(page, (1 << order), 1);
-		set_page_refcounted(page);
-		set_page_owner(page, order, __GFP_MOVABLE);
+		post_alloc_hook(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
 	}
 }