diff options
author | Li Zefan <lizf@cn.fujitsu.com> | 2009-11-20 02:53:25 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-11-20 03:51:41 -0500 |
commit | ba77c9e11111a172c9e8687fe16a6a173a61916f (patch) | |
tree | 54dae585a10100f977c525399b7598855b3ca7df /tools/perf/builtin-kmem.c | |
parent | ce64c62074d945fe5f8a7f01bdc30125f994ea67 (diff) |
perf: Add 'perf kmem' tool
This tool is mostly a perf version of kmemtrace-user.
The following information is provided by this tool:
- the total amount of memory allocated and fragmentation per
call-site
- the total amount of memory allocated and fragmentation per
allocation
- total memory allocated and fragmentation in the collected
dataset - ...
Sample output:
# ./perf kmem record
^C
# ./perf kmem --stat caller --stat alloc -l 10
------------------------------------------------------------------------------
Callsite | Total_alloc/Per | Total_req/Per | Hit | Fragmentation
------------------------------------------------------------------------------
0xc052f37a | 790528/4096 | 790528/4096 | 193 | 0.000%
0xc0541d70 | 524288/4096 | 524288/4096 | 128 | 0.000%
0xc051cc68 | 481600/200 | 481600/200 | 2408 | 0.000%
0xc0572623 | 297444/676 | 297440/676 | 440 | 0.001%
0xc05399f1 | 73476/164 | 73472/164 | 448 | 0.005%
0xc05243bf | 51456/256 | 51456/256 | 201 | 0.000%
0xc0730d0e | 31844/497 | 31808/497 | 64 | 0.113%
0xc0734c4e | 17152/256 | 17152/256 | 67 | 0.000%
0xc0541a6d | 16384/128 | 16384/128 | 128 | 0.000%
0xc059c217 | 13120/40 | 13120/40 | 328 | 0.000%
0xc0501ee6 | 11264/88 | 11264/88 | 128 | 0.000%
0xc04daef0 | 7504/682 | 7128/648 | 11 | 5.011%
0xc04e14a3 | 4216/191 | 4216/191 | 22 | 0.000%
0xc05041ca | 3524/44 | 3520/44 | 80 | 0.114%
0xc0734fa3 | 2104/701 | 1620/540 | 3 | 23.004%
0xc05ec9f1 | 2024/289 | 2016/288 | 7 | 0.395%
0xc06a1999 | 1792/256 | 1792/256 | 7 | 0.000%
0xc0463b9a | 1584/144 | 1584/144 | 11 | 0.000%
0xc0541eb0 | 1024/16 | 1024/16 | 64 | 0.000%
0xc06a19ac | 896/128 | 896/128 | 7 | 0.000%
0xc05721c0 | 772/12 | 768/12 | 64 | 0.518%
0xc054d1e6 | 288/57 | 280/56 | 5 | 2.778%
0xc04b562e | 157/31 | 154/30 | 5 | 1.911%
0xc04b536f | 80/16 | 80/16 | 5 | 0.000%
0xc05855a0 | 64/64 | 36/36 | 1 | 43.750%
------------------------------------------------------------------------------
------------------------------------------------------------------------------
Alloc Ptr | Total_alloc/Per | Total_req/Per | Hit | Fragmentation
------------------------------------------------------------------------------
0xda884000 | 1052672/4096 | 1052672/4096 | 257 | 0.000%
0xda886000 | 262144/4096 | 262144/4096 | 64 | 0.000%
0xf60c7c00 | 16512/128 | 16512/128 | 129 | 0.000%
0xf59a4118 | 13120/40 | 13120/40 | 328 | 0.000%
0xdfd4b2c0 | 11264/88 | 11264/88 | 128 | 0.000%
0xf5274600 | 7680/256 | 7680/256 | 30 | 0.000%
0xe8395000 | 5948/594 | 5464/546 | 10 | 8.137%
0xe59c3c00 | 5748/479 | 5712/476 | 12 | 0.626%
0xf4cd1a80 | 3524/44 | 3520/44 | 80 | 0.114%
0xe5bd1600 | 2892/482 | 2856/476 | 6 | 1.245%
... | ... | ... | ... | ...
------------------------------------------------------------------------------
SUMMARY
=======
Total bytes requested: 2333626
Total bytes allocated: 2353712
Total bytes wasted on internal fragmentation: 20086
Internal fragmentation: 0.853375%
TODO:
- show sym+offset in 'callsite' column
- show cross node allocation stats
- collect more useful stats?
- ...
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Cc: linux-mm@kvack.org <linux-mm@kvack.org>
LKML-Reference: <4B064AF5.9060208@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'tools/perf/builtin-kmem.c')
-rw-r--r-- | tools/perf/builtin-kmem.c | 578 |
1 files changed, 578 insertions, 0 deletions
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c new file mode 100644 index 000000000000..f315b052f819 --- /dev/null +++ b/tools/perf/builtin-kmem.c | |||
@@ -0,0 +1,578 @@ | |||
1 | #include "builtin.h" | ||
2 | #include "perf.h" | ||
3 | |||
4 | #include "util/util.h" | ||
5 | #include "util/cache.h" | ||
6 | #include "util/symbol.h" | ||
7 | #include "util/thread.h" | ||
8 | #include "util/header.h" | ||
9 | |||
10 | #include "util/parse-options.h" | ||
11 | #include "util/trace-event.h" | ||
12 | |||
13 | #include "util/debug.h" | ||
14 | #include "util/data_map.h" | ||
15 | |||
16 | #include <linux/rbtree.h> | ||
17 | |||
struct alloc_stat;
/* Comparator for ordering alloc_stat entries in the sorted rb-trees. */
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

/* Input perf data file; overridable with -i/--input. */
static char const *input_name = "perf.data";

static struct perf_header *header;
static u64 sample_type;

/*
 * Non-zero when the corresponding --stat selector was given.  The larger
 * of the two marks which selector appeared last on the command line;
 * parse_sort_opt()/parse_line_opt() use that to decide which table a
 * following -s/-l applies to.
 */
static int alloc_flag;
static int caller_flag;

sort_fn_t alloc_sort_fn;
sort_fn_t caller_sort_fn;

/* Maximum rows to print per table; -1 means unlimited (set by -l/--line). */
static int alloc_lines = -1;
static int caller_lines = -1;

/* Working directory info filled in by mmap_dispatch_perf_file(). */
static char *cwd;
static int cwdlen;

/*
 * One statistics record, keyed either by allocation pointer or by call
 * site (the anonymous union); which member is live depends on which
 * tree the node is linked into.
 */
struct alloc_stat {
	union {
		struct {
			char *name;
			u64 call_site;
		};
		u64 ptr;
	};
	u64 bytes_req;		/* sum of bytes the callers asked for */
	u64 bytes_alloc;	/* sum of bytes the allocator handed out */
	u32 hit;		/* number of allocation events accumulated */

	struct rb_node node;
};

/* Trees keyed by ptr/call_site during collection, then re-sorted for output. */
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;

/* Raw tracepoint payload as embedded in a PERF_RECORD_SAMPLE. */
struct raw_event_sample {
	u32 size;
	char data[0];
};
64 | |||
65 | static int | ||
66 | process_comm_event(event_t *event, unsigned long offset, unsigned long head) | ||
67 | { | ||
68 | struct thread *thread = threads__findnew(event->comm.pid); | ||
69 | |||
70 | dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", | ||
71 | (void *)(offset + head), | ||
72 | (void *)(long)(event->header.size), | ||
73 | event->comm.comm, event->comm.pid); | ||
74 | |||
75 | if (thread == NULL || | ||
76 | thread__set_comm(thread, event->comm.comm)) { | ||
77 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); | ||
78 | return -1; | ||
79 | } | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static void insert_alloc_stat(unsigned long ptr, | ||
85 | int bytes_req, int bytes_alloc) | ||
86 | { | ||
87 | struct rb_node **node = &root_alloc_stat.rb_node; | ||
88 | struct rb_node *parent = NULL; | ||
89 | struct alloc_stat *data = NULL; | ||
90 | |||
91 | if (!alloc_flag) | ||
92 | return; | ||
93 | |||
94 | while (*node) { | ||
95 | parent = *node; | ||
96 | data = rb_entry(*node, struct alloc_stat, node); | ||
97 | |||
98 | if (ptr > data->ptr) | ||
99 | node = &(*node)->rb_right; | ||
100 | else if (ptr < data->ptr) | ||
101 | node = &(*node)->rb_left; | ||
102 | else | ||
103 | break; | ||
104 | } | ||
105 | |||
106 | if (data && data->ptr == ptr) { | ||
107 | data->hit++; | ||
108 | data->bytes_req += bytes_req; | ||
109 | data->bytes_alloc += bytes_req; | ||
110 | } else { | ||
111 | data = malloc(sizeof(*data)); | ||
112 | data->ptr = ptr; | ||
113 | data->hit = 1; | ||
114 | data->bytes_req = bytes_req; | ||
115 | data->bytes_alloc = bytes_alloc; | ||
116 | |||
117 | rb_link_node(&data->node, parent, node); | ||
118 | rb_insert_color(&data->node, &root_alloc_stat); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static void insert_caller_stat(unsigned long call_site, | ||
123 | int bytes_req, int bytes_alloc) | ||
124 | { | ||
125 | struct rb_node **node = &root_caller_stat.rb_node; | ||
126 | struct rb_node *parent = NULL; | ||
127 | struct alloc_stat *data = NULL; | ||
128 | |||
129 | if (!caller_flag) | ||
130 | return; | ||
131 | |||
132 | while (*node) { | ||
133 | parent = *node; | ||
134 | data = rb_entry(*node, struct alloc_stat, node); | ||
135 | |||
136 | if (call_site > data->call_site) | ||
137 | node = &(*node)->rb_right; | ||
138 | else if (call_site < data->call_site) | ||
139 | node = &(*node)->rb_left; | ||
140 | else | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | if (data && data->call_site == call_site) { | ||
145 | data->hit++; | ||
146 | data->bytes_req += bytes_req; | ||
147 | data->bytes_alloc += bytes_req; | ||
148 | } else { | ||
149 | data = malloc(sizeof(*data)); | ||
150 | data->call_site = call_site; | ||
151 | data->hit = 1; | ||
152 | data->bytes_req = bytes_req; | ||
153 | data->bytes_alloc = bytes_alloc; | ||
154 | |||
155 | rb_link_node(&data->node, parent, node); | ||
156 | rb_insert_color(&data->node, &root_caller_stat); | ||
157 | } | ||
158 | } | ||
159 | |||
160 | static void process_alloc_event(struct raw_event_sample *raw, | ||
161 | struct event *event, | ||
162 | int cpu __used, | ||
163 | u64 timestamp __used, | ||
164 | struct thread *thread __used, | ||
165 | int node __used) | ||
166 | { | ||
167 | unsigned long call_site; | ||
168 | unsigned long ptr; | ||
169 | int bytes_req; | ||
170 | int bytes_alloc; | ||
171 | |||
172 | ptr = raw_field_value(event, "ptr", raw->data); | ||
173 | call_site = raw_field_value(event, "call_site", raw->data); | ||
174 | bytes_req = raw_field_value(event, "bytes_req", raw->data); | ||
175 | bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data); | ||
176 | |||
177 | insert_alloc_stat(ptr, bytes_req, bytes_alloc); | ||
178 | insert_caller_stat(call_site, bytes_req, bytes_alloc); | ||
179 | |||
180 | total_requested += bytes_req; | ||
181 | total_allocated += bytes_alloc; | ||
182 | } | ||
183 | |||
/*
 * Placeholder: free events are recorded by __cmd_record() but not yet
 * accounted; keeping the stub makes the raw-event dispatch uniform.
 */
static void process_free_event(struct raw_event_sample *raw __used,
			       struct event *event __used,
			       int cpu __used,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
}
191 | |||
192 | static void | ||
193 | process_raw_event(event_t *raw_event __used, void *more_data, | ||
194 | int cpu, u64 timestamp, struct thread *thread) | ||
195 | { | ||
196 | struct raw_event_sample *raw = more_data; | ||
197 | struct event *event; | ||
198 | int type; | ||
199 | |||
200 | type = trace_parse_common_type(raw->data); | ||
201 | event = trace_find_event(type); | ||
202 | |||
203 | if (!strcmp(event->name, "kmalloc") || | ||
204 | !strcmp(event->name, "kmem_cache_alloc")) { | ||
205 | process_alloc_event(raw, event, cpu, timestamp, thread, 0); | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | if (!strcmp(event->name, "kmalloc_node") || | ||
210 | !strcmp(event->name, "kmem_cache_alloc_node")) { | ||
211 | process_alloc_event(raw, event, cpu, timestamp, thread, 1); | ||
212 | return; | ||
213 | } | ||
214 | |||
215 | if (!strcmp(event->name, "kfree") || | ||
216 | !strcmp(event->name, "kmem_cache_free")) { | ||
217 | process_free_event(raw, event, cpu, timestamp, thread); | ||
218 | return; | ||
219 | } | ||
220 | } | ||
221 | |||
/*
 * Decode a PERF_RECORD_SAMPLE: step over the optional sample fields
 * selected by sample_type to locate the raw tracepoint payload, then
 * hand it to process_raw_event().  Returns 0 on success, -1 if the
 * owning thread cannot be resolved.
 */
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	struct thread *thread = threads__findnew(event->ip.pid);

	/* Optional fields appear in this fixed order after the ip header;
	 * the order must match how perf record laid them out. */
	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	/* more_data now points at the raw_event_sample payload. */
	process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}
268 | |||
269 | static int sample_type_check(u64 type) | ||
270 | { | ||
271 | sample_type = type; | ||
272 | |||
273 | if (!(sample_type & PERF_SAMPLE_RAW)) { | ||
274 | fprintf(stderr, | ||
275 | "No trace sample to read. Did you call perf record " | ||
276 | "without -R?"); | ||
277 | return -1; | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
/* Callbacks wired into the generic perf.data reader. */
static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= process_comm_event,
	.sample_type_check	= sample_type_check,
};
288 | |||
/*
 * Register our handlers and stream the whole input file through them.
 * Returns whatever mmap_dispatch_perf_file() reports.
 */
static int read_events(void)
{
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0,
				       &cwdlen, &cwd);
}
297 | |||
/*
 * Internal fragmentation as a percentage: the share of allocated bytes
 * that the callers never asked for.  Defined as 0.0 when nothing was
 * allocated, to avoid dividing by zero.
 */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (!n_alloc)
		return 0.0;

	return 100.0 - (100.0 * n_req / n_alloc);
}
305 | |||
306 | static void __print_result(struct rb_root *root, int n_lines, int is_caller) | ||
307 | { | ||
308 | struct rb_node *next; | ||
309 | |||
310 | printf("\n ------------------------------------------------------------------------------\n"); | ||
311 | if (is_caller) | ||
312 | printf(" Callsite |"); | ||
313 | else | ||
314 | printf(" Alloc Ptr |"); | ||
315 | printf(" Total_alloc/Per | Total_req/Per | Hit | Fragmentation\n"); | ||
316 | printf(" ------------------------------------------------------------------------------\n"); | ||
317 | |||
318 | next = rb_first(root); | ||
319 | |||
320 | while (next && n_lines--) { | ||
321 | struct alloc_stat *data; | ||
322 | |||
323 | data = rb_entry(next, struct alloc_stat, node); | ||
324 | |||
325 | printf(" %-16p | %8llu/%-6lu | %8llu/%-6lu | %6lu | %8.3f%%\n", | ||
326 | is_caller ? (void *)(unsigned long)data->call_site : | ||
327 | (void *)(unsigned long)data->ptr, | ||
328 | (unsigned long long)data->bytes_alloc, | ||
329 | (unsigned long)data->bytes_alloc / data->hit, | ||
330 | (unsigned long long)data->bytes_req, | ||
331 | (unsigned long)data->bytes_req / data->hit, | ||
332 | (unsigned long)data->hit, | ||
333 | fragmentation(data->bytes_req, data->bytes_alloc)); | ||
334 | |||
335 | next = rb_next(next); | ||
336 | } | ||
337 | |||
338 | if (n_lines == -1) | ||
339 | printf(" ... | ... | ... | ... | ... \n"); | ||
340 | |||
341 | printf(" ------------------------------------------------------------------------------\n"); | ||
342 | } | ||
343 | |||
/* Print the aggregate totals and the overall internal fragmentation. */
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
}
354 | |||
/* Emit the selected tables (caller first, then alloc) and the summary. */
static void print_result(void)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, alloc_lines, 0);
	print_summary();
}
363 | |||
364 | static void sort_insert(struct rb_root *root, struct alloc_stat *data, | ||
365 | sort_fn_t sort_fn) | ||
366 | { | ||
367 | struct rb_node **new = &(root->rb_node); | ||
368 | struct rb_node *parent = NULL; | ||
369 | |||
370 | while (*new) { | ||
371 | struct alloc_stat *this; | ||
372 | int cmp; | ||
373 | |||
374 | this = rb_entry(*new, struct alloc_stat, node); | ||
375 | parent = *new; | ||
376 | |||
377 | cmp = sort_fn(data, this); | ||
378 | |||
379 | if (cmp > 0) | ||
380 | new = &((*new)->rb_left); | ||
381 | else | ||
382 | new = &((*new)->rb_right); | ||
383 | } | ||
384 | |||
385 | rb_link_node(&data->node, parent, new); | ||
386 | rb_insert_color(&data->node, root); | ||
387 | } | ||
388 | |||
389 | static void __sort_result(struct rb_root *root, struct rb_root *root_sorted, | ||
390 | sort_fn_t sort_fn) | ||
391 | { | ||
392 | struct rb_node *node; | ||
393 | struct alloc_stat *data; | ||
394 | |||
395 | for (;;) { | ||
396 | node = rb_first(root); | ||
397 | if (!node) | ||
398 | break; | ||
399 | |||
400 | rb_erase(node, root); | ||
401 | data = rb_entry(node, struct alloc_stat, node); | ||
402 | sort_insert(root_sorted, data, sort_fn); | ||
403 | } | ||
404 | } | ||
405 | |||
/* Re-sort both stat trees with the currently selected comparators. */
static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, alloc_sort_fn);
	__sort_result(&root_caller_stat, &root_caller_sorted, caller_sort_fn);
}
411 | |||
/*
 * Main analysis path: read the perf data file, sort the collected
 * statistics and print them.  Returns 0 on success, the read_events()
 * error otherwise.
 */
static int __cmd_kmem(void)
{
	int err;

	setup_pager();

	/* bug fix: the return value was ignored, so a failed read
	 * silently produced an empty report */
	err = read_events();
	if (err)
		return err;

	sort_result();
	print_result();

	return 0;
}
421 | |||
/* Usage string shown by -h and usage_with_options(). */
static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record}",
	NULL
};
426 | |||
427 | |||
428 | static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
429 | { | ||
430 | if (l->ptr < r->ptr) | ||
431 | return -1; | ||
432 | else if (l->ptr > r->ptr) | ||
433 | return 1; | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
438 | { | ||
439 | if (l->call_site < r->call_site) | ||
440 | return -1; | ||
441 | else if (l->call_site > r->call_site) | ||
442 | return 1; | ||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
447 | { | ||
448 | if (l->bytes_alloc < r->bytes_alloc) | ||
449 | return -1; | ||
450 | else if (l->bytes_alloc > r->bytes_alloc) | ||
451 | return 1; | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static int parse_sort_opt(const struct option *opt __used, | ||
456 | const char *arg, int unset __used) | ||
457 | { | ||
458 | sort_fn_t sort_fn; | ||
459 | |||
460 | if (!arg) | ||
461 | return -1; | ||
462 | |||
463 | if (strcmp(arg, "ptr") == 0) | ||
464 | sort_fn = ptr_cmp; | ||
465 | else if (strcmp(arg, "call_site") == 0) | ||
466 | sort_fn = callsite_cmp; | ||
467 | else if (strcmp(arg, "bytes") == 0) | ||
468 | sort_fn = bytes_cmp; | ||
469 | else | ||
470 | return -1; | ||
471 | |||
472 | if (caller_flag > alloc_flag) | ||
473 | caller_sort_fn = sort_fn; | ||
474 | else | ||
475 | alloc_sort_fn = sort_fn; | ||
476 | |||
477 | return 0; | ||
478 | } | ||
479 | |||
480 | static int parse_stat_opt(const struct option *opt __used, | ||
481 | const char *arg, int unset __used) | ||
482 | { | ||
483 | if (!arg) | ||
484 | return -1; | ||
485 | |||
486 | if (strcmp(arg, "alloc") == 0) | ||
487 | alloc_flag = (caller_flag + 1); | ||
488 | else if (strcmp(arg, "caller") == 0) | ||
489 | caller_flag = (alloc_flag + 1); | ||
490 | else | ||
491 | return -1; | ||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | static int parse_line_opt(const struct option *opt __used, | ||
496 | const char *arg, int unset __used) | ||
497 | { | ||
498 | int lines; | ||
499 | |||
500 | if (!arg) | ||
501 | return -1; | ||
502 | |||
503 | lines = strtoul(arg, NULL, 10); | ||
504 | |||
505 | if (caller_flag > alloc_flag) | ||
506 | caller_lines = lines; | ||
507 | else | ||
508 | alloc_lines = lines; | ||
509 | |||
510 | return 0; | ||
511 | } | ||
512 | |||
513 | static const struct option kmem_options[] = { | ||
514 | OPT_STRING('i', "input", &input_name, "file", | ||
515 | "input file name"), | ||
516 | OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>", | ||
517 | "stat selector, Pass 'alloc' or 'caller'.", | ||
518 | parse_stat_opt), | ||
519 | OPT_CALLBACK('s', "sort", NULL, "key", | ||
520 | "sort by key: ptr, call_site, hit, bytes", | ||
521 | parse_sort_opt), | ||
522 | OPT_CALLBACK('l', "line", NULL, "num", | ||
523 | "show n lins", | ||
524 | parse_line_opt), | ||
525 | OPT_END() | ||
526 | }; | ||
527 | |||
/*
 * Fixed argv prefix handed to cmd_record() by __cmd_record(); any
 * user-supplied arguments are appended after these.  Records all kmem
 * allocation/free tracepoints with raw sample data (-R).
 */
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
542 | |||
543 | static int __cmd_record(int argc, const char **argv) | ||
544 | { | ||
545 | unsigned int rec_argc, i, j; | ||
546 | const char **rec_argv; | ||
547 | |||
548 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; | ||
549 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | ||
550 | |||
551 | for (i = 0; i < ARRAY_SIZE(record_args); i++) | ||
552 | rec_argv[i] = strdup(record_args[i]); | ||
553 | |||
554 | for (j = 1; j < (unsigned int)argc; j++, i++) | ||
555 | rec_argv[i] = argv[j]; | ||
556 | |||
557 | return cmd_record(i, rec_argv, NULL); | ||
558 | } | ||
559 | |||
560 | int cmd_kmem(int argc, const char **argv, const char *prefix __used) | ||
561 | { | ||
562 | symbol__init(0); | ||
563 | |||
564 | argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); | ||
565 | |||
566 | if (argc && !strncmp(argv[0], "rec", 3)) | ||
567 | return __cmd_record(argc, argv); | ||
568 | else if (argc) | ||
569 | usage_with_options(kmem_usage, kmem_options); | ||
570 | |||
571 | if (!alloc_sort_fn) | ||
572 | alloc_sort_fn = bytes_cmp; | ||
573 | if (!caller_sort_fn) | ||
574 | caller_sort_fn = bytes_cmp; | ||
575 | |||
576 | return __cmd_kmem(); | ||
577 | } | ||
578 | |||