path: root/tools
author    Li Zefan <lizf@cn.fujitsu.com>    2009-11-24 00:26:10 -0500
committer Ingo Molnar <mingo@elte.hu>       2009-11-24 02:49:49 -0500
commit    29b3e15289eb66788a0bf5ea4903f9fbeb1ec751 (patch)
tree      a7169a54cb24ac140b6b821a014db30a4ee292b7 /tools
parent    7707b6b6f8d9188b612f9fc88c65411264b1ed57 (diff)
perf kmem: Default to sort by fragmentation
Make the output sort by fragmentation by default. Also make the usage
of the "--sort" option consistent with other perf tools; that is, we
now support multiple keys: "--sort key1[,key2]...".

 # ./perf kmem --stat caller
 ------------------------------------------------------------------------------
 Callsite                   | Total_alloc/Per | Total_req/Per | Hit | Frag
 ------------------------------------------------------------------------------
 __netdev_alloc_skb+23      |       5048/1682 |     4564/1521 |   3 | 9.588%
 perf_event_alloc.clone.0+0 |        7504/682 |      7128/648 |  11 | 5.011%
 tracepoint_add_probe+32e   |          157/31 |        154/30 |   5 | 1.911%
 alloc_buffer_head+16       |          456/57 |        448/56 |   8 | 1.754%
 radix_tree_preload+51      |         584/292 |       576/288 |   2 | 1.370%
 ...

TODO:
 - Extract the duplicated code in builtin-kmem.c and builtin-sched.c
   into util/sort.c.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: linux-mm@kvack.org <linux-mm@kvack.org>
LKML-Reference: <4B0B6E72.7010200@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
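As an illustration of the new syntax (a hypothetical invocation, not part of the commit
message): the new default ordering should be equivalent to spelling out the keys of
default_sort_order by hand, e.g.

 # ./perf kmem --stat caller --sort frag,hit,bytes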
Diffstat (limited to 'tools')
-rw-r--r--    tools/perf/builtin-kmem.c    142
1 files changed, 108 insertions, 34 deletions
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 1ef43c212d9a..dc86f1e64b66 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -26,14 +26,13 @@ static u64 sample_type;
 static int alloc_flag;
 static int caller_flag;
 
-sort_fn_t alloc_sort_fn;
-sort_fn_t caller_sort_fn;
-
 static int alloc_lines = -1;
 static int caller_lines = -1;
 
 static bool raw_ip;
 
+static char default_sort_order[] = "frag,hit,bytes";
+
 static char *cwd;
 static int cwdlen;
 
@@ -371,20 +370,34 @@ static void print_result(void)
 	print_summary();
 }
 
+struct sort_dimension {
+	const char name[20];
+	sort_fn_t cmp;
+	struct list_head list;
+};
+
+static LIST_HEAD(caller_sort);
+static LIST_HEAD(alloc_sort);
+
 static void sort_insert(struct rb_root *root, struct alloc_stat *data,
-			sort_fn_t sort_fn)
+			struct list_head *sort_list)
 {
 	struct rb_node **new = &(root->rb_node);
 	struct rb_node *parent = NULL;
+	struct sort_dimension *sort;
 
 	while (*new) {
 		struct alloc_stat *this;
-		int cmp;
+		int cmp = 0;
 
 		this = rb_entry(*new, struct alloc_stat, node);
 		parent = *new;
 
-		cmp = sort_fn(data, this);
+		list_for_each_entry(sort, sort_list, list) {
+			cmp = sort->cmp(data, this);
+			if (cmp)
+				break;
+		}
 
 		if (cmp > 0)
 			new = &((*new)->rb_left);
@@ -397,7 +410,7 @@ static void sort_insert(struct rb_root *root, struct alloc_stat *data,
 }
 
 static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
-			  sort_fn_t sort_fn)
+			  struct list_head *sort_list)
 {
 	struct rb_node *node;
 	struct alloc_stat *data;
@@ -409,14 +422,14 @@ static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
 
 		rb_erase(node, root);
 		data = rb_entry(node, struct alloc_stat, node);
-		sort_insert(root_sorted, data, sort_fn);
+		sort_insert(root_sorted, data, sort_list);
 	}
 }
 
 static void sort_result(void)
 {
-	__sort_result(&root_alloc_stat, &root_alloc_sorted, alloc_sort_fn);
-	__sort_result(&root_caller_stat, &root_caller_sorted, caller_sort_fn);
+	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
+	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
 }
 
 static int __cmd_kmem(void)
@@ -434,7 +447,6 @@ static const char * const kmem_usage[] = {
 	NULL
 };
 
-
 static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
 {
 	if (l->ptr < r->ptr)
@@ -444,6 +456,11 @@ static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static struct sort_dimension ptr_sort_dimension = {
+	.name = "ptr",
+	.cmp = ptr_cmp,
+};
+
 static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
 {
 	if (l->call_site < r->call_site)
@@ -453,6 +470,11 @@ static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static struct sort_dimension callsite_sort_dimension = {
+	.name = "callsite",
+	.cmp = callsite_cmp,
+};
+
 static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
 {
 	if (l->hit < r->hit)
@@ -462,6 +484,11 @@ static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static struct sort_dimension hit_sort_dimension = {
+	.name = "hit",
+	.cmp = hit_cmp,
+};
+
 static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
 {
 	if (l->bytes_alloc < r->bytes_alloc)
@@ -471,6 +498,11 @@ static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static struct sort_dimension bytes_sort_dimension = {
+	.name = "bytes",
+	.cmp = bytes_cmp,
+};
+
 static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
 {
 	double x, y;
@@ -485,31 +517,73 @@ static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
 	return 0;
 }
 
+static struct sort_dimension frag_sort_dimension = {
+	.name = "frag",
+	.cmp = frag_cmp,
+};
+
+static struct sort_dimension *avail_sorts[] = {
+	&ptr_sort_dimension,
+	&callsite_sort_dimension,
+	&hit_sort_dimension,
+	&bytes_sort_dimension,
+	&frag_sort_dimension,
+};
+
+#define NUM_AVAIL_SORTS	\
+	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
+
+static int sort_dimension__add(const char *tok, struct list_head *list)
+{
+	struct sort_dimension *sort;
+	int i;
+
+	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
+		if (!strcmp(avail_sorts[i]->name, tok)) {
+			sort = malloc(sizeof(*sort));
+			if (!sort)
+				die("malloc");
+			memcpy(sort, avail_sorts[i], sizeof(*sort));
+			list_add_tail(&sort->list, list);
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+static int setup_sorting(struct list_head *sort_list, const char *arg)
+{
+	char *tok;
+	char *str = strdup(arg);
+
+	if (!str)
+		die("strdup");
+
+	while (true) {
+		tok = strsep(&str, ",");
+		if (!tok)
+			break;
+		if (sort_dimension__add(tok, sort_list) < 0) {
+			error("Unknown --sort key: '%s'", tok);
+			return -1;
+		}
+	}
+
+	free(str);
+	return 0;
+}
+
 static int parse_sort_opt(const struct option *opt __used,
 			  const char *arg, int unset __used)
 {
-	sort_fn_t sort_fn;
-
 	if (!arg)
 		return -1;
 
-	if (strcmp(arg, "ptr") == 0)
-		sort_fn = ptr_cmp;
-	else if (strcmp(arg, "call_site") == 0)
-		sort_fn = callsite_cmp;
-	else if (strcmp(arg, "hit") == 0)
-		sort_fn = hit_cmp;
-	else if (strcmp(arg, "bytes") == 0)
-		sort_fn = bytes_cmp;
-	else if (strcmp(arg, "frag") == 0)
-		sort_fn = frag_cmp;
-	else
-		return -1;
-
 	if (caller_flag > alloc_flag)
-		caller_sort_fn = sort_fn;
+		return setup_sorting(&caller_sort, arg);
 	else
-		alloc_sort_fn = sort_fn;
+		return setup_sorting(&alloc_sort, arg);
 
 	return 0;
 }
@@ -553,8 +627,8 @@ static const struct option kmem_options[] = {
 	OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>",
 		     "stat selector, Pass 'alloc' or 'caller'.",
 		     parse_stat_opt),
-	OPT_CALLBACK('s', "sort", NULL, "key",
-		     "sort by key: ptr, call_site, hit, bytes, frag",
+	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
+		     "sort by key(s): ptr, call_site, bytes, hit, frag",
 		     parse_sort_opt),
 	OPT_CALLBACK('l', "line", NULL, "num",
 		     "show n lins",
@@ -606,10 +680,10 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used)
 	else if (argc)
 		usage_with_options(kmem_usage, kmem_options);
 
-	if (!alloc_sort_fn)
-		alloc_sort_fn = bytes_cmp;
-	if (!caller_sort_fn)
-		caller_sort_fn = bytes_cmp;
+	if (list_empty(&caller_sort))
+		setup_sorting(&caller_sort, default_sort_order);
+	if (list_empty(&alloc_sort))
+		setup_sorting(&alloc_sort, default_sort_order);
 
 	return __cmd_kmem();
 }
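The list_for_each_entry() loop added to sort_insert() above is a standard multi-key
comparison: each later key only breaks ties left by the earlier ones. A minimal
standalone sketch of that idea, with invented names (stat_entry, key_cmp, multi_cmp)
and a plain array in place of the kernel's list_head machinery:

#include <stdio.h>

struct stat_entry {
	unsigned long hit;
	unsigned long bytes_alloc;
};

typedef int (*key_cmp)(const struct stat_entry *, const struct stat_entry *);

static int hit_cmp(const struct stat_entry *l, const struct stat_entry *r)
{
	return (l->hit > r->hit) - (l->hit < r->hit);
}

static int bytes_cmp(const struct stat_entry *l, const struct stat_entry *r)
{
	return (l->bytes_alloc > r->bytes_alloc) - (l->bytes_alloc < r->bytes_alloc);
}

/* Try each selected key in order; the first non-zero result wins,
 * just like the list_for_each_entry() loop in sort_insert(). */
static int multi_cmp(const struct stat_entry *l, const struct stat_entry *r,
		     key_cmp *keys, int nr_keys)
{
	int i, cmp = 0;

	for (i = 0; i < nr_keys; i++) {
		cmp = keys[i](l, r);
		if (cmp)
			break;
	}
	return cmp;
}

int main(void)
{
	struct stat_entry a = { .hit = 3, .bytes_alloc = 512 };
	struct stat_entry b = { .hit = 3, .bytes_alloc = 128 };
	key_cmp keys[] = { hit_cmp, bytes_cmp };	/* think "--sort hit,bytes" */

	printf("%d\n", multi_cmp(&a, &b, keys, 2));	/* hits tie, bytes decide: 1 */
	return 0;
}

Built with any C compiler and run, this prints 1: the hit counts tie, so bytes_alloc
decides, mirroring how a "hit,bytes" key list would order two call sites.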