summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--include/linux/page_ext.h10
-rw-r--r--include/linux/page_owner.h38
-rw-r--r--lib/Kconfig.debug16
-rw-r--r--mm/Makefile1
-rw-r--r--mm/page_alloc.c11
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/page_owner.c222
-rw-r--r--mm/vmstat.c101
-rw-r--r--tools/vm/Makefile4
-rw-r--r--tools/vm/page_owner_sort.c144
11 files changed, 554 insertions, 3 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6f067954675b..68153642c44e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2513,6 +2513,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2513 OSS [HW,OSS] 2513 OSS [HW,OSS]
2514 See Documentation/sound/oss/oss-parameters.txt 2514 See Documentation/sound/oss/oss-parameters.txt
2515 2515
2516 page_owner= [KNL] Boot-time page_owner enabling option.
2517 Storage of the information about who allocated
2518 each page is disabled by default. With this switch,
2519 we can turn it on.
2520 on: enable the feature
2521
2516 panic= [KNL] Kernel behaviour on panic: delay <timeout> 2522 panic= [KNL] Kernel behaviour on panic: delay <timeout>
2517 timeout > 0: seconds before rebooting 2523 timeout > 0: seconds before rebooting
2518 timeout = 0: wait forever 2524 timeout = 0: wait forever
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 61c0f05f9069..d2a2c84c72d0 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -1,6 +1,9 @@
1#ifndef __LINUX_PAGE_EXT_H 1#ifndef __LINUX_PAGE_EXT_H
2#define __LINUX_PAGE_EXT_H 2#define __LINUX_PAGE_EXT_H
3 3
4#include <linux/types.h>
5#include <linux/stacktrace.h>
6
4struct pglist_data; 7struct pglist_data;
5struct page_ext_operations { 8struct page_ext_operations {
6 bool (*need)(void); 9 bool (*need)(void);
@@ -22,6 +25,7 @@ struct page_ext_operations {
22enum page_ext_flags { 25enum page_ext_flags {
23 PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ 26 PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
24 PAGE_EXT_DEBUG_GUARD, 27 PAGE_EXT_DEBUG_GUARD,
28 PAGE_EXT_OWNER,
25}; 29};
26 30
27/* 31/*
@@ -33,6 +37,12 @@ enum page_ext_flags {
33 */ 37 */
34struct page_ext { 38struct page_ext {
35 unsigned long flags; 39 unsigned long flags;
40#ifdef CONFIG_PAGE_OWNER
41 unsigned int order;
42 gfp_t gfp_mask;
43 struct stack_trace trace;
44 unsigned long trace_entries[8];
45#endif
36}; 46};
37 47
38extern void pgdat_page_ext_init(struct pglist_data *pgdat); 48extern void pgdat_page_ext_init(struct pglist_data *pgdat);
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
new file mode 100644
index 000000000000..b48c3471c254
--- /dev/null
+++ b/include/linux/page_owner.h
@@ -0,0 +1,38 @@
1#ifndef __LINUX_PAGE_OWNER_H
2#define __LINUX_PAGE_OWNER_H
3
4#ifdef CONFIG_PAGE_OWNER
5extern bool page_owner_inited;
6extern struct page_ext_operations page_owner_ops;
7
8extern void __reset_page_owner(struct page *page, unsigned int order);
9extern void __set_page_owner(struct page *page,
10 unsigned int order, gfp_t gfp_mask);
11
12static inline void reset_page_owner(struct page *page, unsigned int order)
13{
14 if (likely(!page_owner_inited))
15 return;
16
17 __reset_page_owner(page, order);
18}
19
20static inline void set_page_owner(struct page *page,
21 unsigned int order, gfp_t gfp_mask)
22{
23 if (likely(!page_owner_inited))
24 return;
25
26 __set_page_owner(page, order, gfp_mask);
27}
28#else
29static inline void reset_page_owner(struct page *page, unsigned int order)
30{
31}
32static inline void set_page_owner(struct page *page,
33 unsigned int order, gfp_t gfp_mask)
34{
35}
36
37#endif /* CONFIG_PAGE_OWNER */
38#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d780351835e9..5f2ce616c046 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -227,6 +227,22 @@ config UNUSED_SYMBOLS
227 you really need it, and what the merge plan to the mainline kernel for 227 you really need it, and what the merge plan to the mainline kernel for
228 your module is. 228 your module is.
229 229
230config PAGE_OWNER
231 bool "Track page owner"
232 depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
233 select DEBUG_FS
234 select STACKTRACE
235 select PAGE_EXTENSION
236 help
237 This keeps track of what call chain is the owner of a page, may
238 help to find bare alloc_page(s) leaks. Even if you include this
239 feature on your build, it is disabled by default. You should pass
240 "page_owner=on" as a boot parameter in order to enable it. Eats
241 a fair amount of memory if enabled. See tools/vm/page_owner_sort.c
242 for user-space helper.
243
244 If unsure, say N.
245
230config DEBUG_FS 246config DEBUG_FS
231 bool "Debug Filesystem" 247 bool "Debug Filesystem"
232 help 248 help
diff --git a/mm/Makefile b/mm/Makefile
index 580cd3f392af..4bf586e66378 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
63obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o 63obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
64obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o 64obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
65obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o 65obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
66obj-$(CONFIG_PAGE_OWNER) += page_owner.o
66obj-$(CONFIG_CLEANCACHE) += cleancache.o 67obj-$(CONFIG_CLEANCACHE) += cleancache.o
67obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o 68obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
68obj-$(CONFIG_ZPOOL) += zpool.o 69obj-$(CONFIG_ZPOOL) += zpool.o
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 303d38516807..c13b6b29add2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -59,6 +59,7 @@
59#include <linux/page_ext.h> 59#include <linux/page_ext.h>
60#include <linux/hugetlb.h> 60#include <linux/hugetlb.h>
61#include <linux/sched/rt.h> 61#include <linux/sched/rt.h>
62#include <linux/page_owner.h>
62 63
63#include <asm/sections.h> 64#include <asm/sections.h>
64#include <asm/tlbflush.h> 65#include <asm/tlbflush.h>
@@ -813,6 +814,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
813 if (bad) 814 if (bad)
814 return false; 815 return false;
815 816
817 reset_page_owner(page, order);
818
816 if (!PageHighMem(page)) { 819 if (!PageHighMem(page)) {
817 debug_check_no_locks_freed(page_address(page), 820 debug_check_no_locks_freed(page_address(page),
818 PAGE_SIZE << order); 821 PAGE_SIZE << order);
@@ -988,6 +991,8 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
988 if (order && (gfp_flags & __GFP_COMP)) 991 if (order && (gfp_flags & __GFP_COMP))
989 prep_compound_page(page, order); 992 prep_compound_page(page, order);
990 993
994 set_page_owner(page, order, gfp_flags);
995
991 return 0; 996 return 0;
992} 997}
993 998
@@ -1560,8 +1565,11 @@ void split_page(struct page *page, unsigned int order)
1560 split_page(virt_to_page(page[0].shadow), order); 1565 split_page(virt_to_page(page[0].shadow), order);
1561#endif 1566#endif
1562 1567
1563 for (i = 1; i < (1 << order); i++) 1568 set_page_owner(page, 0, 0);
1569 for (i = 1; i < (1 << order); i++) {
1564 set_page_refcounted(page + i); 1570 set_page_refcounted(page + i);
1571 set_page_owner(page + i, 0, 0);
1572 }
1565} 1573}
1566EXPORT_SYMBOL_GPL(split_page); 1574EXPORT_SYMBOL_GPL(split_page);
1567 1575
@@ -1601,6 +1609,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
1601 } 1609 }
1602 } 1610 }
1603 1611
1612 set_page_owner(page, order, 0);
1604 return 1UL << order; 1613 return 1UL << order;
1605} 1614}
1606 1615
diff --git a/mm/page_ext.c b/mm/page_ext.c
index c2cd7b15f0de..d86fd2f5353f 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -5,6 +5,7 @@
5#include <linux/memory.h> 5#include <linux/memory.h>
6#include <linux/vmalloc.h> 6#include <linux/vmalloc.h>
7#include <linux/kmemleak.h> 7#include <linux/kmemleak.h>
8#include <linux/page_owner.h>
8 9
9/* 10/*
10 * struct page extension 11 * struct page extension
@@ -55,6 +56,9 @@ static struct page_ext_operations *page_ext_ops[] = {
55#ifdef CONFIG_PAGE_POISONING 56#ifdef CONFIG_PAGE_POISONING
56 &page_poisoning_ops, 57 &page_poisoning_ops,
57#endif 58#endif
59#ifdef CONFIG_PAGE_OWNER
60 &page_owner_ops,
61#endif
58}; 62};
59 63
60static unsigned long total_usage; 64static unsigned long total_usage;
diff --git a/mm/page_owner.c b/mm/page_owner.c
new file mode 100644
index 000000000000..85eec7ea6735
--- /dev/null
+++ b/mm/page_owner.c
@@ -0,0 +1,222 @@
1#include <linux/debugfs.h>
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/uaccess.h>
5#include <linux/bootmem.h>
6#include <linux/stacktrace.h>
7#include <linux/page_owner.h>
8#include "internal.h"
9
10static bool page_owner_disabled = true;
11bool page_owner_inited __read_mostly;
12
13static int early_page_owner_param(char *buf)
14{
15 if (!buf)
16 return -EINVAL;
17
18 if (strcmp(buf, "on") == 0)
19 page_owner_disabled = false;
20
21 return 0;
22}
23early_param("page_owner", early_page_owner_param);
24
25static bool need_page_owner(void)
26{
27 if (page_owner_disabled)
28 return false;
29
30 return true;
31}
32
33static void init_page_owner(void)
34{
35 if (page_owner_disabled)
36 return;
37
38 page_owner_inited = true;
39}
40
41struct page_ext_operations page_owner_ops = {
42 .need = need_page_owner,
43 .init = init_page_owner,
44};
45
46void __reset_page_owner(struct page *page, unsigned int order)
47{
48 int i;
49 struct page_ext *page_ext;
50
51 for (i = 0; i < (1 << order); i++) {
52 page_ext = lookup_page_ext(page + i);
53 __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
54 }
55}
56
57void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
58{
59 struct page_ext *page_ext;
60 struct stack_trace *trace;
61
62 page_ext = lookup_page_ext(page);
63
64 trace = &page_ext->trace;
65 trace->nr_entries = 0;
66 trace->max_entries = ARRAY_SIZE(page_ext->trace_entries);
67 trace->entries = &page_ext->trace_entries[0];
68 trace->skip = 3;
69 save_stack_trace(&page_ext->trace);
70
71 page_ext->order = order;
72 page_ext->gfp_mask = gfp_mask;
73
74 __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
75}
76
77static ssize_t
78print_page_owner(char __user *buf, size_t count, unsigned long pfn,
79 struct page *page, struct page_ext *page_ext)
80{
81 int ret;
82 int pageblock_mt, page_mt;
83 char *kbuf;
84
85 kbuf = kmalloc(count, GFP_KERNEL);
86 if (!kbuf)
87 return -ENOMEM;
88
89 ret = snprintf(kbuf, count,
90 "Page allocated via order %u, mask 0x%x\n",
91 page_ext->order, page_ext->gfp_mask);
92
93 if (ret >= count)
94 goto err;
95
96 /* Print information relevant to grouping pages by mobility */
97 pageblock_mt = get_pfnblock_migratetype(page, pfn);
98 page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
99 ret += snprintf(kbuf + ret, count - ret,
100 "PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
101 pfn,
102 pfn >> pageblock_order,
103 pageblock_mt,
104 pageblock_mt != page_mt ? "Fallback" : " ",
105 PageLocked(page) ? "K" : " ",
106 PageError(page) ? "E" : " ",
107 PageReferenced(page) ? "R" : " ",
108 PageUptodate(page) ? "U" : " ",
109 PageDirty(page) ? "D" : " ",
110 PageLRU(page) ? "L" : " ",
111 PageActive(page) ? "A" : " ",
112 PageSlab(page) ? "S" : " ",
113 PageWriteback(page) ? "W" : " ",
114 PageCompound(page) ? "C" : " ",
115 PageSwapCache(page) ? "B" : " ",
116 PageMappedToDisk(page) ? "M" : " ");
117
118 if (ret >= count)
119 goto err;
120
121 ret += snprint_stack_trace(kbuf + ret, count - ret,
122 &page_ext->trace, 0);
123 if (ret >= count)
124 goto err;
125
126 ret += snprintf(kbuf + ret, count - ret, "\n");
127 if (ret >= count)
128 goto err;
129
130 if (copy_to_user(buf, kbuf, ret))
131 ret = -EFAULT;
132
133 kfree(kbuf);
134 return ret;
135
136err:
137 kfree(kbuf);
138 return -ENOMEM;
139}
140
141static ssize_t
142read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
143{
144 unsigned long pfn;
145 struct page *page;
146 struct page_ext *page_ext;
147
148 if (!page_owner_inited)
149 return -EINVAL;
150
151 page = NULL;
152 pfn = min_low_pfn + *ppos;
153
154 /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
155 while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
156 pfn++;
157
158 drain_all_pages(NULL);
159
160 /* Find an allocated page */
161 for (; pfn < max_pfn; pfn++) {
162 /*
163 * If the new page is in a new MAX_ORDER_NR_PAGES area,
164 * validate the area as existing, skip it if not
165 */
166 if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
167 pfn += MAX_ORDER_NR_PAGES - 1;
168 continue;
169 }
170
171 /* Check for holes within a MAX_ORDER area */
172 if (!pfn_valid_within(pfn))
173 continue;
174
175 page = pfn_to_page(pfn);
176 if (PageBuddy(page)) {
177 unsigned long freepage_order = page_order_unsafe(page);
178
179 if (freepage_order < MAX_ORDER)
180 pfn += (1UL << freepage_order) - 1;
181 continue;
182 }
183
184 page_ext = lookup_page_ext(page);
185
186 /*
187 * Pages allocated before initialization of page_owner are
188 * non-buddy and have no page_owner info.
189 */
190 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
191 continue;
192
193 /* Record the next PFN to read in the file offset */
194 *ppos = (pfn - min_low_pfn) + 1;
195
196 return print_page_owner(buf, count, pfn, page, page_ext);
197 }
198
199 return 0;
200}
201
202static const struct file_operations proc_page_owner_operations = {
203 .read = read_page_owner,
204};
205
206static int __init pageowner_init(void)
207{
208 struct dentry *dentry;
209
210 if (!page_owner_inited) {
211 pr_info("page_owner is disabled\n");
212 return 0;
213 }
214
215 dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
216 NULL, &proc_page_owner_operations);
217 if (IS_ERR(dentry))
218 return PTR_ERR(dentry);
219
220 return 0;
221}
222module_init(pageowner_init)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1b12d390dc68..b090e9e3d626 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -22,6 +22,8 @@
22#include <linux/writeback.h> 22#include <linux/writeback.h>
23#include <linux/compaction.h> 23#include <linux/compaction.h>
24#include <linux/mm_inline.h> 24#include <linux/mm_inline.h>
25#include <linux/page_ext.h>
26#include <linux/page_owner.h>
25 27
26#include "internal.h" 28#include "internal.h"
27 29
@@ -1017,6 +1019,104 @@ static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1017 return 0; 1019 return 0;
1018} 1020}
1019 1021
#ifdef CONFIG_PAGE_OWNER
/*
 * Count, per pageblock migratetype, the pageblocks that contain at
 * least one page whose allocation migratetype (from page_owner's
 * recorded gfp mask) differs from the block's — i.e. "mixed" blocks —
 * and print one line of counts for @zone.
 */
static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
					      pg_data_t *pgdat,
					      struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn;
	unsigned long counts[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int mt;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;
	end_pfn = pfn + zone->spanned_pages;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			/* Hole: jump to the next MAX_ORDER-aligned area. */
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);
		pageblock_mt = get_pfnblock_migratetype(page, pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (PageBuddy(page)) {
				/* Free block: skip it wholesale. */
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			/* No owner record, nothing to compare against. */
			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					counts[MIGRATE_MOVABLE]++;
				else
					counts[pageblock_mt]++;

				/* One mismatch marks the whole block mixed. */
				pfn = block_end_pfn;
				break;
			}
			/* Skip the rest of this allocation. */
			pfn += (1UL << page_ext->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mt = 0; mt < MIGRATE_TYPES; mt++)
		seq_printf(m, "%12lu ", counts[mt]);
	seq_putc(m, '\n');
}
#endif /* CONFIG_PAGE_OWNER */
1094
1095/*
1096 * Print out the number of pageblocks for each migratetype that contain pages
1097 * of other types. This gives an indication of how well fallbacks are being
1098 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1099 * to determine what is going on
1100 */
1101static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1102{
1103#ifdef CONFIG_PAGE_OWNER
1104 int mtype;
1105
1106 if (!page_owner_inited)
1107 return;
1108
1109 drain_all_pages(NULL);
1110
1111 seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1112 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1113 seq_printf(m, "%12s ", migratetype_names[mtype]);
1114 seq_putc(m, '\n');
1115
1116 walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1117#endif /* CONFIG_PAGE_OWNER */
1118}
1119
1020/* 1120/*
1021 * This prints out statistics in relation to grouping pages by mobility. 1121 * This prints out statistics in relation to grouping pages by mobility.
1022 * It is expensive to collect so do not constantly read the file. 1122 * It is expensive to collect so do not constantly read the file.
@@ -1034,6 +1134,7 @@ static int pagetypeinfo_show(struct seq_file *m, void *arg)
1034 seq_putc(m, '\n'); 1134 seq_putc(m, '\n');
1035 pagetypeinfo_showfree(m, pgdat); 1135 pagetypeinfo_showfree(m, pgdat);
1036 pagetypeinfo_showblockcount(m, pgdat); 1136 pagetypeinfo_showblockcount(m, pgdat);
1137 pagetypeinfo_showmixedcount(m, pgdat);
1037 1138
1038 return 0; 1139 return 0;
1039} 1140}
diff --git a/tools/vm/Makefile b/tools/vm/Makefile
index 3d907dacf2ac..ac884b65a072 100644
--- a/tools/vm/Makefile
+++ b/tools/vm/Makefile
@@ -1,6 +1,6 @@
1# Makefile for vm tools 1# Makefile for vm tools
2# 2#
3TARGETS=page-types slabinfo 3TARGETS=page-types slabinfo page_owner_sort
4 4
5LIB_DIR = ../lib/api 5LIB_DIR = ../lib/api
6LIBS = $(LIB_DIR)/libapikfs.a 6LIBS = $(LIB_DIR)/libapikfs.a
@@ -18,5 +18,5 @@ $(LIBS):
18 $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS) 18 $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
19 19
20clean: 20clean:
21 $(RM) page-types slabinfo 21 $(RM) page-types slabinfo page_owner_sort
22 make -C $(LIB_DIR) clean 22 make -C $(LIB_DIR) clean
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
new file mode 100644
index 000000000000..77147b42d598
--- /dev/null
+++ b/tools/vm/page_owner_sort.c
@@ -0,0 +1,144 @@
1/*
2 * User-space helper to sort the output of /sys/kernel/debug/page_owner
3 *
4 * Example use:
5 * cat /sys/kernel/debug/page_owner > page_owner_full.txt
6 * grep -v ^PFN page_owner_full.txt > page_owner.txt
7 * ./page_owner_sort page_owner.txt sorted_page_owner.txt
8*/
9
10#include <stdio.h>
11#include <stdlib.h>
12#include <sys/types.h>
13#include <sys/stat.h>
14#include <fcntl.h>
15#include <unistd.h>
16#include <string.h>
17
/* One unique page_owner record and how many times it was seen. */
struct block_list {
	char *txt;	/* NUL-terminated record text */
	int len;	/* length of txt, excluding the NUL */
	int num;	/* occurrence count */
};


static struct block_list *list;	/* records in input order, deduped vs. predecessor */
static int list_size;		/* entries used in list */
static int max_size;		/* capacity of list */

/* NOTE(review): appears unused in this file — candidate for removal. */
struct block_list *block_head;

/*
 * Read one blank-line-terminated block from fin into buf.
 * Returns the block length (possibly 0), or -1 on EOF / buffer full.
 */
static int read_block(char *buf, int buf_size, FILE *fin)
{
	char *curr = buf, *const buf_end = buf + buf_size;

	while (buf_end - curr > 1 && fgets(curr, buf_end - curr, fin)) {
		if (*curr == '\n') /* empty line */
			return curr - buf;
		curr += strlen(curr);
	}

	return -1; /* EOF or no space left in buf. */
}

/* qsort comparator: lexicographic by record text. */
static int compare_txt(const void *p1, const void *p2)
{
	const struct block_list *l1 = p1, *l2 = p2;

	return strcmp(l1->txt, l2->txt);
}

/*
 * qsort comparator: descending by count.  Uses the comparison idiom
 * rather than subtraction, which could overflow int.
 */
static int compare_num(const void *p1, const void *p2)
{
	const struct block_list *l1 = p1, *l2 = p2;

	return (l2->num > l1->num) - (l2->num < l1->num);
}

/*
 * Append a record to list, or bump the count when it repeats the
 * previous record (full dedup happens later after sorting).
 */
static void add_list(char *buf, int len)
{
	if (list_size != 0 &&
	    len == list[list_size-1].len &&
	    memcmp(buf, list[list_size-1].txt, len) == 0) {
		list[list_size-1].num++;
		return;
	}
	if (list_size == max_size) {
		fprintf(stderr, "max_size too small??\n");
		exit(1);
	}
	list[list_size].txt = malloc(len+1);
	if (!list[list_size].txt) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	list[list_size].len = len;
	list[list_size].num = 1;
	memcpy(list[list_size].txt, buf, len);
	list[list_size].txt[len] = 0;
	list_size++;
	if (list_size % 1000 == 0) {
		printf("loaded %d\r", list_size);
		fflush(stdout);
	}
}
81
82#define BUF_SIZE 1024
83
84int main(int argc, char **argv)
85{
86 FILE *fin, *fout;
87 char buf[BUF_SIZE];
88 int ret, i, count;
89 struct block_list *list2;
90 struct stat st;
91
92 if (argc < 3) {
93 printf("Usage: ./program <input> <output>\n");
94 perror("open: ");
95 exit(1);
96 }
97
98 fin = fopen(argv[1], "r");
99 fout = fopen(argv[2], "w");
100 if (!fin || !fout) {
101 printf("Usage: ./program <input> <output>\n");
102 perror("open: ");
103 exit(1);
104 }
105
106 fstat(fileno(fin), &st);
107 max_size = st.st_size / 100; /* hack ... */
108
109 list = malloc(max_size * sizeof(*list));
110
111 for ( ; ; ) {
112 ret = read_block(buf, BUF_SIZE, fin);
113 if (ret < 0)
114 break;
115
116 add_list(buf, ret);
117 }
118
119 printf("loaded %d\n", list_size);
120
121 printf("sorting ....\n");
122
123 qsort(list, list_size, sizeof(list[0]), compare_txt);
124
125 list2 = malloc(sizeof(*list) * list_size);
126
127 printf("culling\n");
128
129 for (i = count = 0; i < list_size; i++) {
130 if (count == 0 ||
131 strcmp(list2[count-1].txt, list[i].txt) != 0) {
132 list2[count++] = list[i];
133 } else {
134 list2[count-1].num += list[i].num;
135 }
136 }
137
138 qsort(list2, count, sizeof(list[0]), compare_num);
139
140 for (i = 0; i < count; i++)
141 fprintf(fout, "%d times:\n%s\n", list2[i].num, list2[i].txt);
142
143 return 0;
144}