author	Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>	2008-08-10 13:14:03 -0400
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2008-12-29 08:34:01 -0500
commit	b9ce08c01020eb28bfbfa6faf1c740281c5f418e (patch)
tree	b42bbda9a44a9e62d952816482b340bc4b70870b /mm
parent	35995a4d815586bc968a857f7235707940a2f755 (diff)
kmemtrace: Core implementation.
kmemtrace provides tracing for slab allocator functions, such as kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected data is then fed to a userspace application in order to analyse allocation hotspots, internal fragmentation and so on, making it possible to see how well an allocator performs, as well as to debug and profile kernel code.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
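[Editorial note: for context, a minimal userspace sketch of how a consumer might decode the packed event headers read from the per-CPU relay files that this patch exposes under debugfs. This program is not part of the patch; the file path (/sys/kernel/debug/kmemtrace/cpu0), the program itself and the assumption that relay delivers records back-to-back are illustrative. The record layout mirrors struct kmemtrace_event / struct kmemtrace_stats_alloc from mm/kmemtrace.c below.]

/* Illustrative only: decode ABI v1 kmemtrace records from a relay file. */
#include <stdio.h>
#include <stdint.h>

struct kmemtrace_event {
	uint8_t  event_id;	/* 0 = alloc, 1 = free */
	uint8_t  type_id;	/* allocator type, see <linux/kmemtrace.h> */
	uint16_t event_size;	/* header plus optional alloc stats */
	int32_t  seq_num;	/* reorder key across per-CPU streams */
	uint64_t call_site;
	uint64_t ptr;
} __attribute__ ((__packed__));

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/sys/kernel/debug/kmemtrace/cpu0";
	unsigned char payload[256];
	struct kmemtrace_event ev;
	FILE *f = fopen(path, "rb");

	if (!f)
		return 1;
	while (fread(&ev, sizeof(ev), 1, f) == 1) {
		size_t extra = ev.event_size > sizeof(ev) ?
			       ev.event_size - sizeof(ev) : 0;

		printf("seq=%d %s ptr=0x%llx call_site=0x%llx\n",
		       (int)ev.seq_num, ev.event_id ? "free " : "alloc",
		       (unsigned long long)ev.ptr,
		       (unsigned long long)ev.call_site);
		/* Consume the allocation stats that follow alloc events. */
		if (extra && (extra > sizeof(payload) ||
			      fread(payload, 1, extra, f) != extra))
			break;
	}
	fclose(f);
	return 0;
}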
Diffstat (limited to 'mm')
-rw-r--r--	mm/Makefile	1
-rw-r--r--	mm/kmemtrace.c	335
2 files changed, 336 insertions(+), 0 deletions(-)
diff --git a/mm/Makefile b/mm/Makefile
index c06b45a1ff5f..3782eb66d4b3 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
+obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
diff --git a/mm/kmemtrace.c b/mm/kmemtrace.c
new file mode 100644
index 000000000000..83ad1cc71a92
--- /dev/null
+++ b/mm/kmemtrace.c
@@ -0,0 +1,335 @@
/*
 * Copyright (C) 2008 Pekka Enberg, Eduard - Gabriel Munteanu
 *
 * This file is released under GPL version 2.
 */

#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/relay.h>
#include <linux/module.h>
#include <linux/marker.h>
#include <linux/gfp.h>
#include <linux/kmemtrace.h>

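/*
 * Default relay geometry: each CPU gets kmemtrace_n_subbufs sub-buffers of
 * KMEMTRACE_SUBBUF_SIZE bytes (20 x 512 KiB, roughly 10 MiB per CPU, unless
 * overridden with the kmemtrace.subbufs= boot parameter below).
 */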
#define KMEMTRACE_SUBBUF_SIZE		524288
#define KMEMTRACE_DEF_N_SUBBUFS		20

static struct rchan *kmemtrace_chan;
static u32 kmemtrace_buf_overruns;

static unsigned int kmemtrace_n_subbufs;
#ifdef CONFIG_KMEMTRACE_DEFAULT_ENABLED
static unsigned int kmemtrace_enabled = 1;
#else
static unsigned int kmemtrace_enabled = 0;
#endif

/*
 * The sequence number is used for reordering kmemtrace packets
 * in userspace, since they are logged as per-CPU data.
 *
 * atomic_t should always be a 32-bit signed integer. Wraparound is not
 * likely to occur, but userspace can deal with it by expecting a certain
 * sequence number in the next packet that will be read.
 */
static atomic_t kmemtrace_seq_num;

#define KMEMTRACE_ABI_VERSION		1

static u32 kmemtrace_abi_version __read_mostly = KMEMTRACE_ABI_VERSION;

enum kmemtrace_event_id {
	KMEMTRACE_EVENT_ALLOC = 0,
	KMEMTRACE_EVENT_FREE,
};

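/*
 * Fixed header written to the relay channel for every event (ABI version 1).
 * Allocation events are immediately followed by struct kmemtrace_stats_alloc.
 */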
struct kmemtrace_event {
	u8	event_id;
	u8	type_id;
	u16	event_size;
	s32	seq_num;
	u64	call_site;
	u64	ptr;
} __attribute__ ((__packed__));

struct kmemtrace_stats_alloc {
	u64	bytes_req;
	u64	bytes_alloc;
	u32	gfp_flags;
	s32	numa_node;
} __attribute__ ((__packed__));

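/*
 * Probe handler for the "kmemtrace_alloc" marker: the va_list is consumed in
 * the same order as the marker's format string (type_id, call_site, ptr,
 * bytes_req, bytes_alloc, gfp_flags, node). Interrupts are disabled so that
 * relay_reserve() and the subsequent stores complete without the event being
 * torn on this CPU.
 */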
static void kmemtrace_probe_alloc(void *probe_data, void *call_data,
				  const char *format, va_list *args)
{
	unsigned long flags;
	struct kmemtrace_event *ev;
	struct kmemtrace_stats_alloc *stats;
	void *buf;

	local_irq_save(flags);

	buf = relay_reserve(kmemtrace_chan,
			    sizeof(struct kmemtrace_event) +
			    sizeof(struct kmemtrace_stats_alloc));
	if (!buf)
		goto failed;

	/*
	 * Don't convert this to use structure initializers,
	 * C99 does not guarantee the rvalues evaluation order.
	 */

	ev = buf;
	ev->event_id = KMEMTRACE_EVENT_ALLOC;
	ev->type_id = va_arg(*args, int);
	ev->event_size = sizeof(struct kmemtrace_event) +
			 sizeof(struct kmemtrace_stats_alloc);
	ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
	ev->call_site = va_arg(*args, unsigned long);
	ev->ptr = va_arg(*args, unsigned long);

	stats = buf + sizeof(struct kmemtrace_event);
	stats->bytes_req = va_arg(*args, unsigned long);
	stats->bytes_alloc = va_arg(*args, unsigned long);
	stats->gfp_flags = va_arg(*args, unsigned long);
	stats->numa_node = va_arg(*args, int);

failed:
	local_irq_restore(flags);
}

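/*
 * Probe handler for the "kmemtrace_free" marker: logs a header-only event,
 * since frees carry no size or GFP information.
 */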
static void kmemtrace_probe_free(void *probe_data, void *call_data,
				 const char *format, va_list *args)
{
	unsigned long flags;
	struct kmemtrace_event *ev;

	local_irq_save(flags);

	ev = relay_reserve(kmemtrace_chan, sizeof(struct kmemtrace_event));
	if (!ev)
		goto failed;

	/*
	 * Don't convert this to use structure initializers,
	 * C99 does not guarantee the rvalues evaluation order.
	 */
	ev->event_id = KMEMTRACE_EVENT_FREE;
	ev->type_id = va_arg(*args, int);
	ev->event_size = sizeof(struct kmemtrace_event);
	ev->seq_num = atomic_add_return(1, &kmemtrace_seq_num);
	ev->call_site = va_arg(*args, unsigned long);
	ev->ptr = va_arg(*args, unsigned long);

failed:
	local_irq_restore(flags);
}

static struct dentry *
kmemtrace_create_buf_file(const char *filename, struct dentry *parent,
			  int mode, struct rchan_buf *buf, int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int kmemtrace_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

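/*
 * Called by relay when switching to a new sub-buffer; returning 0 while the
 * buffer is full makes relay drop the data, which we account as an overrun.
 */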
static int kmemtrace_subbuf_start(struct rchan_buf *buf,
				  void *subbuf,
				  void *prev_subbuf,
				  size_t prev_padding)
{
	if (relay_buf_full(buf)) {
		/*
		 * We know this is not SMP-safe, but neither is
		 * debugfs_create_u32().
		 */
		kmemtrace_buf_overruns++;
		return 0;
	}

	return 1;
}

static struct rchan_callbacks relay_callbacks = {
	.create_buf_file = kmemtrace_create_buf_file,
	.remove_buf_file = kmemtrace_remove_buf_file,
	.subbuf_start = kmemtrace_subbuf_start,
};

static struct dentry *kmemtrace_dir;
static struct dentry *kmemtrace_overruns_dentry;
static struct dentry *kmemtrace_abi_version_dentry;

static struct dentry *kmemtrace_enabled_dentry;

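/*
 * Register the marker probes. The format strings here must match the
 * trace_mark() sites placed at the allocator call paths (added separately),
 * since the markers API dispatches arguments according to them.
 */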
static int kmemtrace_start_probes(void)
{
	int err;

	err = marker_probe_register("kmemtrace_alloc", "type_id %d "
				    "call_site %lu ptr %lu "
				    "bytes_req %lu bytes_alloc %lu "
				    "gfp_flags %lu node %d",
				    kmemtrace_probe_alloc, NULL);
	if (err)
		return err;
	err = marker_probe_register("kmemtrace_free", "type_id %d "
				    "call_site %lu ptr %lu",
				    kmemtrace_probe_free, NULL);

	return err;
}

static void kmemtrace_stop_probes(void)
{
	marker_probe_unregister("kmemtrace_alloc",
				kmemtrace_probe_alloc, NULL);
	marker_probe_unregister("kmemtrace_free",
				kmemtrace_probe_free, NULL);
}

static int kmemtrace_enabled_get(void *data, u64 *val)
{
	*val = *((int *) data);

	return 0;
}

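/*
 * Writing to the debugfs "enabled" file toggles tracing at runtime by
 * registering or unregistering the marker probes.
 */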
static int kmemtrace_enabled_set(void *data, u64 val)
{
	u64 old_val = kmemtrace_enabled;

	*((int *) data) = !!val;

	if (old_val == val)
		return 0;
	if (val)
		kmemtrace_start_probes();
	else
		kmemtrace_stop_probes();

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(kmemtrace_enabled_fops,
			kmemtrace_enabled_get,
			kmemtrace_enabled_set, "%llu\n");

static void kmemtrace_cleanup(void)
{
	if (kmemtrace_enabled_dentry)
		debugfs_remove(kmemtrace_enabled_dentry);

	kmemtrace_stop_probes();

	if (kmemtrace_abi_version_dentry)
		debugfs_remove(kmemtrace_abi_version_dentry);
	if (kmemtrace_overruns_dentry)
		debugfs_remove(kmemtrace_overruns_dentry);

	relay_close(kmemtrace_chan);
	kmemtrace_chan = NULL;

	if (kmemtrace_dir)
		debugfs_remove(kmemtrace_dir);
}

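/*
 * Late initcall: once debugfs is usable, create the kmemtrace/ directory with
 * the abi_version, total_overruns and enabled files, and attach the per-CPU
 * relay buffers as "cpuN" files via relay_late_setup_files().
 */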
static int __init kmemtrace_setup_late(void)
{
	if (!kmemtrace_chan)
		goto failed;

	kmemtrace_dir = debugfs_create_dir("kmemtrace", NULL);
	if (!kmemtrace_dir)
		goto cleanup;

	kmemtrace_abi_version_dentry =
		debugfs_create_u32("abi_version", S_IRUSR,
				   kmemtrace_dir, &kmemtrace_abi_version);
	kmemtrace_overruns_dentry =
		debugfs_create_u32("total_overruns", S_IRUSR,
				   kmemtrace_dir, &kmemtrace_buf_overruns);
	if (!kmemtrace_overruns_dentry || !kmemtrace_abi_version_dentry)
		goto cleanup;

	kmemtrace_enabled_dentry =
		debugfs_create_file("enabled", S_IRUSR | S_IWUSR,
				    kmemtrace_dir, &kmemtrace_enabled,
				    &kmemtrace_enabled_fops);
	if (!kmemtrace_enabled_dentry)
		goto cleanup;

	if (relay_late_setup_files(kmemtrace_chan, "cpu", kmemtrace_dir))
		goto cleanup;

	printk(KERN_INFO "kmemtrace: fully up.\n");

	return 0;

cleanup:
	kmemtrace_cleanup();
failed:
	return 1;
}
late_initcall(kmemtrace_setup_late);

static int __init kmemtrace_set_boot_enabled(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "yes"))
		kmemtrace_enabled = 1;
	else if (!strcmp(str, "no"))
		kmemtrace_enabled = 0;
	else
		return -EINVAL;

	return 0;
}
early_param("kmemtrace.enable", kmemtrace_set_boot_enabled);

static int __init kmemtrace_set_subbufs(char *str)
{
	get_option(&str, &kmemtrace_n_subbufs);
	return 0;
}
early_param("kmemtrace.subbufs", kmemtrace_set_subbufs);

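/*
 * Early initialization entry point (its prototype is expected to live in
 * <linux/kmemtrace.h>): opens the relay channel and registers the probes so
 * events can be captured well before kmemtrace_setup_late() runs.
 */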
void kmemtrace_init(void)
{
	if (!kmemtrace_enabled)
		return;

	if (!kmemtrace_n_subbufs)
		kmemtrace_n_subbufs = KMEMTRACE_DEF_N_SUBBUFS;

	kmemtrace_chan = relay_open(NULL, NULL, KMEMTRACE_SUBBUF_SIZE,
				    kmemtrace_n_subbufs, &relay_callbacks,
				    NULL);
	if (unlikely(!kmemtrace_chan)) {
		printk(KERN_ERR "kmemtrace: could not open relay channel.\n");
		return;
	}

	if (unlikely(kmemtrace_start_probes()))
		goto probe_fail;

	printk(KERN_INFO "kmemtrace: early init successful.\n");

	return;

probe_fail:
	printk(KERN_ERR "kmemtrace: could not register marker probes!\n");
	kmemtrace_cleanup();
}