author	Vlastimil Babka <vbabka@suse.cz>	2019-10-14 17:11:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-10-14 18:04:00 -0400
commit	5556cfe8d994d5e7b4d50fd91597b8dc0b3a82fd (patch)
tree	a3e55b8ba5ff41d419619f44e6d12d52be35e78e
parent	2abd839aa7e615f2bbc50c8ba7deb9e40d186768 (diff)
mm, page_owner: fix off-by-one error in __set_page_owner_handle()
Patch series "followups to debug_pagealloc improvements through page_owner", v3.

These are followups to [1] which made it to Linus meanwhile. Patches 1 and 3 are based on Kirill's review, patch 2 on KASAN request [2]. It would be nice if all of this made it to 5.4 with [1] already there (or at least patch 1).

This patch (of 3):

As noted by Kirill, commit 7e2f2a0cd17c ("mm, page_owner: record page owner for each subpage") has introduced an off-by-one error in __set_page_owner_handle() when looking up page_ext for subpages. As a result, the head page page_owner info is set twice, while for the last tail page, it's not set at all.

Fix this and also make the code more efficient by advancing the page_ext pointer we already have, instead of calling lookup_page_ext() for each subpage. Since the full size of struct page_ext is not known at compile time, we can't use a simple page_ext++ statement, so introduce a page_ext_next() inline function for that.

Link: http://lkml.kernel.org/r/20190930122916.14969-2-vbabka@suse.cz
Fixes: 7e2f2a0cd17c ("mm, page_owner: record page owner for each subpage")
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reported-by: Kirill A. Shutemov <kirill@shutemov.name>
Reported-by: Miles Chen <miles.chen@mediatek.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Walter Wu <walter-zh.wu@mediatek.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
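Editor's note: the sketch below is a minimal userspace illustration of the pointer-advance idea in this patch, not the kernel code itself; the 32-byte "extra" size and the main() harness are made up for the demo. It shows why a plain page_ext++ cannot work when the record size is only known at runtime (the pointer would step by sizeof(struct page_ext) only and land inside the extra per-user data), and how stepping by a runtime stride, as page_ext_next() does, visits each subpage record exactly once.

/* Minimal userspace sketch (not the kernel implementation). */
#include <stdio.h>
#include <stdlib.h>

struct page_ext {
	unsigned long flags;
	/* runtime-sized extra data (e.g. page_owner) follows in memory */
};

/* stride fixed at "boot"; the extra 32 bytes are hypothetical for the demo */
static unsigned long page_ext_size = sizeof(struct page_ext) + 32;

static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
	/* the kernel version uses GNU C void-pointer arithmetic;
	 * char * is the strictly portable equivalent */
	return (struct page_ext *)((char *)curr + page_ext_size);
}

int main(void)
{
	unsigned int order = 2;				/* 4 subpages */
	struct page_ext *base = calloc(1u << order, page_ext_size);
	struct page_ext *cur = base;

	/* walk every subpage record exactly once, as the fixed loops do */
	for (unsigned int i = 0; i < (1u << order); i++) {
		cur->flags |= 1;	/* stands in for __set_bit(PAGE_EXT_OWNER, ...) */
		cur = page_ext_next(cur);
	}

	printf("stride = %lu bytes, walked %u records\n",
	       page_ext_size, 1u << order);
	free(base);
	return 0;
}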
-rw-r--r--	include/linux/page_ext.h	8
-rw-r--r--	mm/page_ext.c	23
-rw-r--r--	mm/page_owner.c	15
3 files changed, 24 insertions, 22 deletions
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 682fd465df06..5e856512bafb 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -36,6 +36,7 @@ struct page_ext {
 	unsigned long flags;
 };
 
+extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
 #ifdef CONFIG_SPARSEMEM
@@ -52,6 +53,13 @@ static inline void page_ext_init(void)
 
 struct page_ext *lookup_page_ext(const struct page *page);
 
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+	void *next = curr;
+	next += page_ext_size;
+	return next;
+}
+
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
 
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 5f5769c7db3b..4ade843ff588 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -67,8 +67,9 @@ static struct page_ext_operations *page_ext_ops[] = {
 #endif
 };
 
+unsigned long page_ext_size = sizeof(struct page_ext);
+
 static unsigned long total_usage;
-static unsigned long extra_mem;
 
 static bool __init invoke_need_callbacks(void)
 {
@@ -78,9 +79,8 @@ static bool __init invoke_need_callbacks(void)
 
 	for (i = 0; i < entries; i++) {
 		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
-			page_ext_ops[i]->offset = sizeof(struct page_ext) +
-						extra_mem;
-			extra_mem += page_ext_ops[i]->size;
+			page_ext_ops[i]->offset = page_ext_size;
+			page_ext_size += page_ext_ops[i]->size;
 			need = true;
 		}
 	}
@@ -99,14 +99,9 @@ static void __init invoke_init_callbacks(void)
 	}
 }
 
-static unsigned long get_entry_size(void)
-{
-	return sizeof(struct page_ext) + extra_mem;
-}
-
 static inline struct page_ext *get_entry(void *base, unsigned long index)
 {
-	return base + get_entry_size() * index;
+	return base + page_ext_size * index;
 }
 
 #if !defined(CONFIG_SPARSEMEM)
@@ -156,7 +151,7 @@ static int __init alloc_node_page_ext(int nid)
 		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
 		nr_pages += MAX_ORDER_NR_PAGES;
 
-	table_size = get_entry_size() * nr_pages;
+	table_size = page_ext_size * nr_pages;
 
 	base = memblock_alloc_try_nid(
 			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -234,7 +229,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	if (section->page_ext)
 		return 0;
 
-	table_size = get_entry_size() * PAGES_PER_SECTION;
+	table_size = page_ext_size * PAGES_PER_SECTION;
 	base = alloc_page_ext(table_size, nid);
 
 	/*
@@ -254,7 +249,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	 * we need to apply a mask.
 	 */
 	pfn &= PAGE_SECTION_MASK;
-	section->page_ext = (void *)base - get_entry_size() * pfn;
+	section->page_ext = (void *)base - page_ext_size * pfn;
 	total_usage += table_size;
 	return 0;
 }
@@ -267,7 +262,7 @@ static void free_page_ext(void *addr)
 	struct page *page = virt_to_page(addr);
 	size_t table_size;
 
-	table_size = get_entry_size() * PAGES_PER_SECTION;
+	table_size = page_ext_size * PAGES_PER_SECTION;
 
 	BUG_ON(PageReserved(page));
 	kmemleak_free(addr);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index dee931184788..d3cf5d336ccf 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -156,10 +156,10 @@ void __reset_page_owner(struct page *page, unsigned int order)
 		handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 #endif
 
+	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 	for (i = 0; i < (1 << order); i++) {
-		page_ext = lookup_page_ext(page + i);
-		if (unlikely(!page_ext))
-			continue;
 		__clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if (debug_pagealloc_enabled()) {
@@ -167,6 +167,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 			page_owner->free_handle = handle;
 		}
 #endif
+		page_ext = page_ext_next(page_ext);
 	}
 }
 
@@ -186,7 +187,7 @@ static inline void __set_page_owner_handle(struct page *page,
 		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 		__set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
 
-		page_ext = lookup_page_ext(page + i);
+		page_ext = page_ext_next(page_ext);
 	}
 }
 
@@ -224,12 +225,10 @@ void __split_page_owner(struct page *page, unsigned int order)
 	if (unlikely(!page_ext))
 		return;
 
-	page_owner = get_page_owner(page_ext);
-	page_owner->order = 0;
-	for (i = 1; i < (1 << order); i++) {
-		page_ext = lookup_page_ext(page + i);
+	for (i = 0; i < (1 << order); i++) {
 		page_owner = get_page_owner(page_ext);
 		page_owner->order = 0;
+		page_ext = page_ext_next(page_ext);
 	}
 }
 