-rw-r--r--   kernel/trace/Kconfig      2
-rw-r--r--   mm/Makefile               1
-rw-r--r--   mm/kmemtrace.c          333
3 files changed, 0 insertions, 336 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cc9f91e7daf4..1c0b7504cab3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -267,8 +267,6 @@ config HW_BRANCH_TRACER
 config KMEMTRACE
        bool "Trace SLAB allocations"
        select TRACING
-       select MARKERS
-       select RELAY
        help
          kmemtrace provides tracing for slab allocator functions, such as
          kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
diff --git a/mm/Makefile b/mm/Makefile
index c92e8af13206..51c27709cc7c 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -35,4 +35,3 @@ obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
-obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
diff --git a/mm/kmemtrace.c b/mm/kmemtrace.c
deleted file mode 100644
index 0573b5080cc4..000000000000
--- a/mm/kmemtrace.c
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
- * Copyright (C) 2008 Pekka Enberg, Eduard - Gabriel Munteanu
- *
- * This file is released under GPL version 2.
- */
-
-#include <linux/string.h>
-#include <linux/debugfs.h>
-#include <linux/relay.h>
-#include <linux/module.h>
-#include <linux/marker.h>
-#include <linux/gfp.h>
-#include <trace/kmemtrace.h>
-
-#define KMEMTRACE_SUBBUF_SIZE           524288
-#define KMEMTRACE_DEF_N_SUBBUFS         20
-
-static struct rchan *kmemtrace_chan;
-static u32 kmemtrace_buf_overruns;
-
-static unsigned int kmemtrace_n_subbufs;
-
-/* disabled by default */
-static unsigned int kmemtrace_enabled;
-
-/*
- * The sequence number is used for reordering kmemtrace packets
- * in userspace, since they are logged as per-CPU data.
- *
- * atomic_t should always be a 32-bit signed integer. Wraparound is not
- * likely to occur, but userspace can deal with it by expecting a certain
- * sequence number in the next packet that will be read.
- */
-static atomic_t kmemtrace_seq_num;
-
-#define KMEMTRACE_ABI_VERSION           1
-
-static u32 kmemtrace_abi_version __read_mostly = KMEMTRACE_ABI_VERSION;
-
-enum kmemtrace_event_id {
-        KMEMTRACE_EVENT_ALLOC = 0,
-        KMEMTRACE_EVENT_FREE,
-};
-
-struct kmemtrace_event {
-        u8 event_id;
-        u8 type_id;
-        u16 event_size;
-        s32 seq_num;
-        u64 call_site;
-        u64 ptr;
-} __attribute__ ((__packed__));
-
-struct kmemtrace_stats_alloc {
-        u64 bytes_req;
-        u64 bytes_alloc;
-        u32 gfp_flags;
-        s32 numa_node;
-} __attribute__ ((__packed__));
-
-static void kmemtrace_probe_alloc(void *probe_data, void *call_data,
-                                  const char *format, va_list *args)
-{
-        unsigned long flags;
-        struct kmemtrace_event *ev;
-        struct kmemtrace_stats_alloc *stats;
-        void *buf;
-
-        local_irq_save(flags);
-
-        buf = relay_reserve(kmemtrace_chan,
-                            sizeof(struct kmemtrace_event) +
-                            sizeof(struct kmemtrace_stats_alloc));
-        if (!buf)
-                goto failed;
-
-        /*
-         * Don't convert this to use structure initializers,
-         * C99 does not guarantee the rvalues evaluation order.
-         */
-
-        ev = buf;
-        ev->event_id = KMEMTRACE_EVENT_ALLOC;
-        ev->type_id = va_arg(*args, int);
-        ev->event_size = sizeof(struct kmemtrace_event) +
-                         sizeof(struct kmemtrace_stats_alloc);
-        ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
-        ev->call_site = va_arg(*args, unsigned long);
-        ev->ptr = va_arg(*args, unsigned long);
-
-        stats = buf + sizeof(struct kmemtrace_event);
-        stats->bytes_req = va_arg(*args, unsigned long);
-        stats->bytes_alloc = va_arg(*args, unsigned long);
-        stats->gfp_flags = va_arg(*args, unsigned long);
-        stats->numa_node = va_arg(*args, int);
-
-failed:
-        local_irq_restore(flags);
-}
-
-static void kmemtrace_probe_free(void *probe_data, void *call_data,
-                                 const char *format, va_list *args)
-{
-        unsigned long flags;
-        struct kmemtrace_event *ev;
-
-        local_irq_save(flags);
-
-        ev = relay_reserve(kmemtrace_chan, sizeof(struct kmemtrace_event));
-        if (!ev)
-                goto failed;
-
-        /*
-         * Don't convert this to use structure initializers,
-         * C99 does not guarantee the rvalues evaluation order.
-         */
-        ev->event_id = KMEMTRACE_EVENT_FREE;
-        ev->type_id = va_arg(*args, int);
-        ev->event_size = sizeof(struct kmemtrace_event);
-        ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
-        ev->call_site = va_arg(*args, unsigned long);
-        ev->ptr = va_arg(*args, unsigned long);
-
-failed:
-        local_irq_restore(flags);
-}
-
-static struct dentry *
-kmemtrace_create_buf_file(const char *filename, struct dentry *parent,
-                          int mode, struct rchan_buf *buf, int *is_global)
-{
-        return debugfs_create_file(filename, mode, parent, buf,
-                                   &relay_file_operations);
-}
-
-static int kmemtrace_remove_buf_file(struct dentry *dentry)
-{
-        debugfs_remove(dentry);
-
-        return 0;
-}
-
-static int kmemtrace_subbuf_start(struct rchan_buf *buf,
-                                  void *subbuf,
-                                  void *prev_subbuf,
-                                  size_t prev_padding)
-{
-        if (relay_buf_full(buf)) {
-                /*
-                 * We know it's not SMP-safe, but neither
-                 * debugfs_create_u32() is.
-                 */
-                kmemtrace_buf_overruns++;
-                return 0;
-        }
-
-        return 1;
-}
-
-static struct rchan_callbacks relay_callbacks = {
-        .create_buf_file = kmemtrace_create_buf_file,
-        .remove_buf_file = kmemtrace_remove_buf_file,
-        .subbuf_start = kmemtrace_subbuf_start,
-};
-
-static struct dentry *kmemtrace_dir;
-static struct dentry *kmemtrace_overruns_dentry;
-static struct dentry *kmemtrace_abi_version_dentry;
-
-static struct dentry *kmemtrace_enabled_dentry;
-
-static int kmemtrace_start_probes(void)
-{
-        int err;
-
-        err = marker_probe_register("kmemtrace_alloc", "type_id %d "
-                                    "call_site %lu ptr %lu "
-                                    "bytes_req %lu bytes_alloc %lu "
-                                    "gfp_flags %lu node %d",
-                                    kmemtrace_probe_alloc, NULL);
-        if (err)
-                return err;
-        err = marker_probe_register("kmemtrace_free", "type_id %d "
-                                    "call_site %lu ptr %lu",
-                                    kmemtrace_probe_free, NULL);
-
-        return err;
-}
-
-static void kmemtrace_stop_probes(void)
-{
-        marker_probe_unregister("kmemtrace_alloc",
-                                kmemtrace_probe_alloc, NULL);
-        marker_probe_unregister("kmemtrace_free",
-                                kmemtrace_probe_free, NULL);
-}
-
-static int kmemtrace_enabled_get(void *data, u64 *val)
-{
-        *val = *((int *) data);
-
-        return 0;
-}
-
-static int kmemtrace_enabled_set(void *data, u64 val)
-{
-        u64 old_val = kmemtrace_enabled;
-
-        *((int *) data) = !!val;
-
-        if (old_val == val)
-                return 0;
-        if (val)
-                kmemtrace_start_probes();
-        else
-                kmemtrace_stop_probes();
-
-        return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(kmemtrace_enabled_fops,
-                        kmemtrace_enabled_get,
-                        kmemtrace_enabled_set, "%llu\n");
-
-static void kmemtrace_cleanup(void)
-{
-        if (kmemtrace_enabled_dentry)
-                debugfs_remove(kmemtrace_enabled_dentry);
-
-        kmemtrace_stop_probes();
-
-        if (kmemtrace_abi_version_dentry)
-                debugfs_remove(kmemtrace_abi_version_dentry);
-        if (kmemtrace_overruns_dentry)
-                debugfs_remove(kmemtrace_overruns_dentry);
-
-        relay_close(kmemtrace_chan);
-        kmemtrace_chan = NULL;
-
-        if (kmemtrace_dir)
-                debugfs_remove(kmemtrace_dir);
-}
-
-static int __init kmemtrace_setup_late(void)
-{
-        if (!kmemtrace_chan)
-                goto failed;
-
-        kmemtrace_dir = debugfs_create_dir("kmemtrace", NULL);
-        if (!kmemtrace_dir)
-                goto cleanup;
-
-        kmemtrace_abi_version_dentry =
-                debugfs_create_u32("abi_version", S_IRUSR,
-                                   kmemtrace_dir, &kmemtrace_abi_version);
-        kmemtrace_overruns_dentry =
-                debugfs_create_u32("total_overruns", S_IRUSR,
-                                   kmemtrace_dir, &kmemtrace_buf_overruns);
-        if (!kmemtrace_overruns_dentry || !kmemtrace_abi_version_dentry)
-                goto cleanup;
-
-        kmemtrace_enabled_dentry =
-                debugfs_create_file("enabled", S_IRUSR | S_IWUSR,
-                                    kmemtrace_dir, &kmemtrace_enabled,
-                                    &kmemtrace_enabled_fops);
-        if (!kmemtrace_enabled_dentry)
-                goto cleanup;
-
-        if (relay_late_setup_files(kmemtrace_chan, "cpu", kmemtrace_dir))
-                goto cleanup;
-
-        printk(KERN_INFO "kmemtrace: fully up.\n");
-
-        return 0;
-
-cleanup:
-        kmemtrace_cleanup();
-failed:
-        return 1;
-}
-late_initcall(kmemtrace_setup_late);
-
-static int __init kmemtrace_set_boot_enabled(char *str)
-{
-        if (!str)
-                return -EINVAL;
-
-        if (!strcmp(str, "yes"))
-                kmemtrace_enabled = 1;
-        else if (!strcmp(str, "no"))
-                kmemtrace_enabled = 0;
-        else
-                return -EINVAL;
-
-        return 0;
-}
-early_param("kmemtrace.enable", kmemtrace_set_boot_enabled);
-
-static int __init kmemtrace_set_subbufs(char *str)
-{
-        get_option(&str, &kmemtrace_n_subbufs);
-        return 0;
-}
-early_param("kmemtrace.subbufs", kmemtrace_set_subbufs);
-
-void kmemtrace_init(void)
-{
-        if (!kmemtrace_n_subbufs)
-                kmemtrace_n_subbufs = KMEMTRACE_DEF_N_SUBBUFS;
-
-        kmemtrace_chan = relay_open(NULL, NULL, KMEMTRACE_SUBBUF_SIZE,
-                                    kmemtrace_n_subbufs, &relay_callbacks,
-                                    NULL);
-        if (!kmemtrace_chan) {
-                printk(KERN_ERR "kmemtrace: could not open relay channel.\n");
-                return;
-        }
-
-        if (!kmemtrace_enabled) {
-                printk(KERN_INFO "kmemtrace: disabled. Pass "
-                        "kemtrace.enable=yes as kernel parameter for "
-                        "boot-time tracing.\n");
-                return;
-        }
-        if (kmemtrace_start_probes()) {
-                printk(KERN_ERR "kmemtrace: could not register marker probes!\n");
-                kmemtrace_cleanup();
-                return;
-        }
-
-        printk(KERN_INFO "kmemtrace: enabled.\n");
-}
-