aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_owner.c
diff options
context:
space:
mode:
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>2014-12-12 19:56:01 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-13 15:42:48 -0500
commit48c96a3685795e52903e60c7ee115e5e22e7d640 (patch)
tree49940b1971c9b487a52b2c91b2423eee9278ced5 /mm/page_owner.c
parent9a92a6ce6f842713ccd0025c5228fe8bea61234c (diff)
mm/page_owner: keep track of page owners
This is the page owner tracking code, which was introduced quite a while ago. It has been resident in Andrew's tree, but nobody tried to upstream it, so it has remained as is. Our company uses this feature actively to debug memory leaks and to find memory hoggers, so I decided to upstream this feature. This functionality helps us know who allocated a page. When allocating a page, we store some information about the allocation in extra memory. Later, if we need to know the status of all pages, we can retrieve and analyze it from this stored information. In a previous version of this feature, the extra memory was statically defined in struct page, but in this version the extra memory is allocated outside of struct page. This enables us to turn the feature on/off at boot time without considerable memory waste. Although we already have tracepoints for tracing page allocation/free, using them to analyze page ownership is rather complex. We would need to enlarge the trace buffer to prevent it overflowing before the userspace program is launched, and the launched program would have to continually dump out the trace buffer for later analysis; this would change system behaviour more than just keeping the information in memory, so it is bad for debugging. Moreover, we can use the page_owner feature further for various purposes. For example, we can use it for the fragmentation statistics implemented in this patch, and I also plan to implement some CMA failure debugging features using this interface. I'd like to give credit to all the developers who contributed to this feature, but it's not easy because I don't know the exact history. Sorry about that. Below are the people who have "Signed-off-by" in the patches in Andrew's tree. 
Contributor: Alexander Nyberg <alexn@dsv.su.se> Mel Gorman <mgorman@suse.de> Dave Hansen <dave@linux.vnet.ibm.com> Minchan Kim <minchan@kernel.org> Michal Nazarewicz <mina86@mina86.com> Andrew Morton <akpm@linux-foundation.org> Jungsoo Son <jungsoo.son@lge.com> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Dave Hansen <dave@sr71.net> Cc: Michal Nazarewicz <mina86@mina86.com> Cc: Jungsoo Son <jungsoo.son@lge.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_owner.c')
-rw-r--r--mm/page_owner.c222
1 files changed, 222 insertions, 0 deletions
diff --git a/mm/page_owner.c b/mm/page_owner.c
new file mode 100644
index 000000000000..85eec7ea6735
--- /dev/null
+++ b/mm/page_owner.c
@@ -0,0 +1,222 @@
1#include <linux/debugfs.h>
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/uaccess.h>
5#include <linux/bootmem.h>
6#include <linux/stacktrace.h>
7#include <linux/page_owner.h>
8#include "internal.h"
9
/* Cleared by the "page_owner=on" boot parameter; tracking is off by default. */
static bool page_owner_disabled = true;
/* Set once at boot when the feature is enabled and page_ext storage is ready. */
bool page_owner_inited __read_mostly;
12
13static int early_page_owner_param(char *buf)
14{
15 if (!buf)
16 return -EINVAL;
17
18 if (strcmp(buf, "on") == 0)
19 page_owner_disabled = false;
20
21 return 0;
22}
23early_param("page_owner", early_page_owner_param);
24
25static bool need_page_owner(void)
26{
27 if (page_owner_disabled)
28 return false;
29
30 return true;
31}
32
33static void init_page_owner(void)
34{
35 if (page_owner_disabled)
36 return;
37
38 page_owner_inited = true;
39}
40
/*
 * Hooks registered with the page_ext framework: ->need decides at boot
 * whether to reserve the extra per-page storage, ->init runs once that
 * storage is available.
 */
struct page_ext_operations page_owner_ops = {
	.need = need_page_owner,
	.init = init_page_owner,
};
45
46void __reset_page_owner(struct page *page, unsigned int order)
47{
48 int i;
49 struct page_ext *page_ext;
50
51 for (i = 0; i < (1 << order); i++) {
52 page_ext = lookup_page_ext(page + i);
53 __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
54 }
55}
56
57void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
58{
59 struct page_ext *page_ext;
60 struct stack_trace *trace;
61
62 page_ext = lookup_page_ext(page);
63
64 trace = &page_ext->trace;
65 trace->nr_entries = 0;
66 trace->max_entries = ARRAY_SIZE(page_ext->trace_entries);
67 trace->entries = &page_ext->trace_entries[0];
68 trace->skip = 3;
69 save_stack_trace(&page_ext->trace);
70
71 page_ext->order = order;
72 page_ext->gfp_mask = gfp_mask;
73
74 __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
75}
76
/*
 * Format one page's owner record into a kernel scratch buffer and copy
 * it to userspace.
 *
 * @buf:      userspace destination buffer
 * @count:    size of @buf; also used to size the kernel scratch buffer
 * @pfn:      page frame number of @page (for the report text)
 * @page:     the tracked page
 * @page_ext: its page_ext record (caller verified PAGE_EXT_OWNER is set)
 *
 * Returns the number of bytes copied, -ENOMEM if the scratch buffer
 * could not be allocated or the record did not fit in @count bytes,
 * or -EFAULT if the copy to userspace failed.
 */
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask 0x%x\n",
			page_ext->order, page_ext->gfp_mask);

	/*
	 * snprintf() returns the would-be length, so ret >= count means the
	 * output was truncated.  NOTE(review): ret is int vs. size_t count,
	 * so the comparison promotes to unsigned — fine while ret >= 0.
	 */
	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pfnblock_migratetype(page, pfn);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
			pfn,
			pfn >> pageblock_order,
			pageblock_mt,
			pageblock_mt != page_mt ? "Fallback" : "        ",
			PageLocked(page)	? "K" : " ",
			PageError(page)		? "E" : " ",
			PageReferenced(page)	? "R" : " ",
			PageUptodate(page)	? "U" : " ",
			PageDirty(page)		? "D" : " ",
			PageLRU(page)		? "L" : " ",
			PageActive(page)	? "A" : " ",
			PageSlab(page)		? "S" : " ",
			PageWriteback(page)	? "W" : " ",
			PageCompound(page)	? "C" : " ",
			PageSwapCache(page)	? "B" : " ",
			PageMappedToDisk(page)	? "M" : " ");

	if (ret >= count)
		goto err;

	/* Append the allocation stack trace captured in __set_page_owner(). */
	ret += snprint_stack_trace(kbuf + ret, count - ret,
			&page_ext->trace, 0);
	if (ret >= count)
		goto err;

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	/*
	 * NOTE(review): truncation is reported as -ENOMEM; a larger user
	 * read buffer lets the caller retry and get the full record.
	 */
	kfree(kbuf);
	return -ENOMEM;
}
140
/*
 * debugfs read handler: scan forward from the PFN encoded in *ppos to
 * the next allocated page with owner info, print its record, and store
 * the following PFN back into *ppos so successive reads walk all of
 * memory.  Returns bytes written, 0 at end of memory, or -EINVAL if the
 * feature was never initialized.
 */
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!page_owner_inited)
		return -EINVAL;

	page = NULL;
	/* The file offset is a PFN cursor relative to the first valid PFN. */
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Flush per-cpu free lists so freed pages show up as buddy pages. */
	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * Racy read of the free block's order; only used to
			 * skip ahead, so a stale value is tolerable as long
			 * as it stays below MAX_ORDER.
			 */
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Pages allocated before initialization of page_owner are
		 * non-buddy and have no page_owner info.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	/* Walked past max_pfn: signal EOF to the reader. */
	return 0;
}
201
/* debugfs file ops: read-only; each read() returns one page's record. */
static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};
205
206static int __init pageowner_init(void)
207{
208 struct dentry *dentry;
209
210 if (!page_owner_inited) {
211 pr_info("page_owner is disabled\n");
212 return 0;
213 }
214
215 dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
216 NULL, &proc_page_owner_operations);
217 if (IS_ERR(dentry))
218 return PTR_ERR(dentry);
219
220 return 0;
221}
222module_init(pageowner_init)