author    Thomas Gleixner <tglx@linutronix.de>    2019-04-25 05:45:03 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2019-04-29 06:37:50 -0400
commit    af52bf6b92f7d8783c1e712cad6ef7d37cd773b2 (patch)
tree      a4aa9d0c6bd6e429bdb969a3aada6914a5ed9c75
parent    880e049c9ce9020384ce305c71375aa1cb54addb (diff)
mm/page_owner: Simplify stack trace handling
Replace the indirection through struct stack_trace by using the storage
array based interfaces.

The original code in all printing functions is really wrong. It allocates
a storage array on stack which is unused because depot_fetch_stack() does
not store anything in it. It overwrites the entries pointer in the
stack_trace struct so it points to the depot storage.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: linux-mm@kvack.org
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094802.067210525@linutronix.de
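For orientation, here is a minimal, illustrative sketch of the save-side pattern the series converts to. It is not part of the patch: the wrapper name record_stack() and the depth of 16 are assumptions for the example, while stack_trace_save() and stack_depot_save() are the interfaces actually used in the diff below.

/*
 * Illustrative sketch only -- not part of the patch. Shows the
 * storage array based save pattern from the diff below.
 */
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

static depot_stack_handle_t record_stack(gfp_t flags)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	/* Fill the on-stack array; skip 0 leading frames. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* Deduplicate and persist the trace; returns 0 on failure. */
	return stack_depot_save(entries, nr_entries, flags);
}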
-rw-r--r--    mm/page_owner.c    79
1 file changed, 28 insertions(+), 51 deletions(-)
diff --git a/mm/page_owner.c b/mm/page_owner.c
index df277e6bc3c6..addcbb2ae4e4 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -58,15 +58,10 @@ static bool need_page_owner(void)
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
 {
 	unsigned long entries[4];
-	struct stack_trace dummy;
+	unsigned int nr_entries;
 
-	dummy.nr_entries = 0;
-	dummy.max_entries = ARRAY_SIZE(entries);
-	dummy.entries = &entries[0];
-	dummy.skip = 0;
-
-	save_stack_trace(&dummy);
-	return depot_save_stack(&dummy, GFP_KERNEL);
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
 }
 
 static noinline void register_dummy_stack(void)
@@ -120,46 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order)
 	}
 }
 
-static inline bool check_recursive_alloc(struct stack_trace *trace,
-					unsigned long ip)
+static inline bool check_recursive_alloc(unsigned long *entries,
+					 unsigned int nr_entries,
+					 unsigned long ip)
 {
-	int i;
-
-	if (!trace->nr_entries)
-		return false;
+	unsigned int i;
 
-	for (i = 0; i < trace->nr_entries; i++) {
-		if (trace->entries[i] == ip)
+	for (i = 0; i < nr_entries; i++) {
+		if (entries[i] == ip)
 			return true;
 	}
-
 	return false;
 }
 
 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
 	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = PAGE_OWNER_STACK_DEPTH,
-		.skip = 2
-	};
 	depot_stack_handle_t handle;
+	unsigned int nr_entries;
 
-	save_stack_trace(&trace);
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
 
 	/*
-	 * We need to check recursion here because our request to stackdepot
-	 * could trigger memory allocation to save new entry. New memory
-	 * allocation would reach here and call depot_save_stack() again
-	 * if we don't catch it. There is still not enough memory in stackdepot
-	 * so it would try to allocate memory again and loop forever.
+	 * We need to check recursion here because our request to
+	 * stackdepot could trigger memory allocation to save new
+	 * entry. New memory allocation would reach here and call
+	 * stack_depot_save_entries() again if we don't catch it. There is
+	 * still not enough memory in stackdepot so it would try to
+	 * allocate memory again and loop forever.
 	 */
-	if (check_recursive_alloc(&trace, _RET_IP_))
+	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
 		return dummy_handle;
 
-	handle = depot_save_stack(&trace, flags);
+	handle = stack_depot_save(entries, nr_entries, flags);
 	if (!handle)
 		handle = failure_handle;
 
@@ -337,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 		struct page *page, struct page_owner *page_owner,
 		depot_stack_handle_t handle)
 {
-	int ret;
-	int pageblock_mt, page_mt;
+	int ret, pageblock_mt, page_mt;
+	unsigned long *entries;
+	unsigned int nr_entries;
 	char *kbuf;
-	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = PAGE_OWNER_STACK_DEPTH,
-		.skip = 0
-	};
 
 	count = min_t(size_t, count, PAGE_SIZE);
 	kbuf = kmalloc(count, GFP_KERNEL);
@@ -375,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 	if (ret >= count)
 		goto err;
 
-	depot_fetch_stack(handle, &trace);
-	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
+	nr_entries = stack_depot_fetch(handle, &entries);
+	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
 	if (ret >= count)
 		goto err;
 
@@ -407,14 +389,9 @@ void __dump_page_owner(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
 	struct page_owner *page_owner;
-	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = PAGE_OWNER_STACK_DEPTH,
-		.skip = 0
-	};
 	depot_stack_handle_t handle;
+	unsigned long *entries;
+	unsigned int nr_entries;
 	gfp_t gfp_mask;
 	int mt;
 
@@ -438,10 +415,10 @@ void __dump_page_owner(struct page *page)
 		return;
 	}
 
-	depot_fetch_stack(handle, &trace);
+	nr_entries = stack_depot_fetch(handle, &entries);
 	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
 		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-	print_stack_trace(&trace, 0);
+	stack_trace_print(entries, nr_entries, 0);
 
 	if (page_owner->last_migrate_reason != -1)
 		pr_alert("page has been migrated, last migrate reason: %s\n",
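A hedged sketch of the fetch side, again not part of the patch: stack_depot_fetch() hands back the entry count and a pointer into the depot's own storage, which is why the printing functions above no longer need an on-stack entries array (the flaw the commit message calls out in the old code). The wrapper name show_stack_handle() is illustrative only.

/*
 * Illustrative sketch only -- not part of the patch. Shows the
 * fetch/print pattern the converted functions use.
 */
static void show_stack_handle(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	/* entries ends up pointing at depot storage; nothing is copied. */
	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}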