diff options
Diffstat (limited to 'tools/perf/builtin-kmem.c')
| -rw-r--r-- | tools/perf/builtin-kmem.c | 807 |
1 file changed, 807 insertions, 0 deletions
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c new file mode 100644 index 000000000000..047fef74bd52 --- /dev/null +++ b/tools/perf/builtin-kmem.c | |||
| @@ -0,0 +1,807 @@ | |||
| 1 | #include "builtin.h" | ||
| 2 | #include "perf.h" | ||
| 3 | |||
| 4 | #include "util/util.h" | ||
| 5 | #include "util/cache.h" | ||
| 6 | #include "util/symbol.h" | ||
| 7 | #include "util/thread.h" | ||
| 8 | #include "util/header.h" | ||
| 9 | |||
| 10 | #include "util/parse-options.h" | ||
| 11 | #include "util/trace-event.h" | ||
| 12 | |||
| 13 | #include "util/debug.h" | ||
| 14 | #include "util/data_map.h" | ||
| 15 | |||
| 16 | #include <linux/rbtree.h> | ||
| 17 | |||
| 18 | struct alloc_stat; | ||
| 19 | typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); | ||
| 20 | |||
| 21 | static char const *input_name = "perf.data"; | ||
| 22 | |||
| 23 | static struct perf_header *header; | ||
| 24 | static u64 sample_type; | ||
| 25 | |||
| 26 | static int alloc_flag; | ||
| 27 | static int caller_flag; | ||
| 28 | |||
| 29 | static int alloc_lines = -1; | ||
| 30 | static int caller_lines = -1; | ||
| 31 | |||
| 32 | static bool raw_ip; | ||
| 33 | |||
| 34 | static char default_sort_order[] = "frag,hit,bytes"; | ||
| 35 | |||
| 36 | static int *cpunode_map; | ||
| 37 | static int max_cpu_num; | ||
| 38 | |||
/*
 * One statistics node, used in two trees: keyed by ptr in the
 * allocation tree and by call_site in the caller tree.
 */
struct alloc_stat {
	u64 call_site;		/* ip of the allocation call site */
	u64 ptr;		/* address returned by the allocator */
	u64 bytes_req;		/* cumulative bytes requested */
	u64 bytes_alloc;	/* cumulative bytes actually allocated */
	u32 hit;		/* number of allocations folded into this node */
	u32 pingpong;		/* frees that happened on a different cpu */

	short alloc_cpu;	/* cpu of the last allocation, -1 after free */

	struct rb_node node;
};
| 51 | |||
| 52 | static struct rb_root root_alloc_stat; | ||
| 53 | static struct rb_root root_alloc_sorted; | ||
| 54 | static struct rb_root root_caller_stat; | ||
| 55 | static struct rb_root root_caller_sorted; | ||
| 56 | |||
| 57 | static unsigned long total_requested, total_allocated; | ||
| 58 | static unsigned long nr_allocs, nr_cross_allocs; | ||
| 59 | |||
/*
 * Layout of a PERF_SAMPLE_RAW payload: 32-bit size followed by the raw
 * tracepoint record (pre-C99 flexible array idiom).
 */
struct raw_event_sample {
	u32 size;
	char data[0];
};
| 64 | |||
| 65 | #define PATH_SYS_NODE "/sys/devices/system/node" | ||
| 66 | |||
| 67 | static void init_cpunode_map(void) | ||
| 68 | { | ||
| 69 | FILE *fp; | ||
| 70 | int i; | ||
| 71 | |||
| 72 | fp = fopen("/sys/devices/system/cpu/kernel_max", "r"); | ||
| 73 | if (!fp) { | ||
| 74 | max_cpu_num = 4096; | ||
| 75 | return; | ||
| 76 | } | ||
| 77 | |||
| 78 | if (fscanf(fp, "%d", &max_cpu_num) < 1) | ||
| 79 | die("Failed to read 'kernel_max' from sysfs"); | ||
| 80 | max_cpu_num++; | ||
| 81 | |||
| 82 | cpunode_map = calloc(max_cpu_num, sizeof(int)); | ||
| 83 | if (!cpunode_map) | ||
| 84 | die("calloc"); | ||
| 85 | for (i = 0; i < max_cpu_num; i++) | ||
| 86 | cpunode_map[i] = -1; | ||
| 87 | fclose(fp); | ||
| 88 | } | ||
| 89 | |||
| 90 | static void setup_cpunode_map(void) | ||
| 91 | { | ||
| 92 | struct dirent *dent1, *dent2; | ||
| 93 | DIR *dir1, *dir2; | ||
| 94 | unsigned int cpu, mem; | ||
| 95 | char buf[PATH_MAX]; | ||
| 96 | |||
| 97 | init_cpunode_map(); | ||
| 98 | |||
| 99 | dir1 = opendir(PATH_SYS_NODE); | ||
| 100 | if (!dir1) | ||
| 101 | return; | ||
| 102 | |||
| 103 | while (true) { | ||
| 104 | dent1 = readdir(dir1); | ||
| 105 | if (!dent1) | ||
| 106 | break; | ||
| 107 | |||
| 108 | if (sscanf(dent1->d_name, "node%u", &mem) < 1) | ||
| 109 | continue; | ||
| 110 | |||
| 111 | snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name); | ||
| 112 | dir2 = opendir(buf); | ||
| 113 | if (!dir2) | ||
| 114 | continue; | ||
| 115 | while (true) { | ||
| 116 | dent2 = readdir(dir2); | ||
| 117 | if (!dent2) | ||
| 118 | break; | ||
| 119 | if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1) | ||
| 120 | continue; | ||
| 121 | cpunode_map[cpu] = mem; | ||
| 122 | } | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, | ||
| 127 | int bytes_req, int bytes_alloc, int cpu) | ||
| 128 | { | ||
| 129 | struct rb_node **node = &root_alloc_stat.rb_node; | ||
| 130 | struct rb_node *parent = NULL; | ||
| 131 | struct alloc_stat *data = NULL; | ||
| 132 | |||
| 133 | while (*node) { | ||
| 134 | parent = *node; | ||
| 135 | data = rb_entry(*node, struct alloc_stat, node); | ||
| 136 | |||
| 137 | if (ptr > data->ptr) | ||
| 138 | node = &(*node)->rb_right; | ||
| 139 | else if (ptr < data->ptr) | ||
| 140 | node = &(*node)->rb_left; | ||
| 141 | else | ||
| 142 | break; | ||
| 143 | } | ||
| 144 | |||
| 145 | if (data && data->ptr == ptr) { | ||
| 146 | data->hit++; | ||
| 147 | data->bytes_req += bytes_req; | ||
| 148 | data->bytes_alloc += bytes_req; | ||
| 149 | } else { | ||
| 150 | data = malloc(sizeof(*data)); | ||
| 151 | if (!data) | ||
| 152 | die("malloc"); | ||
| 153 | data->ptr = ptr; | ||
| 154 | data->pingpong = 0; | ||
| 155 | data->hit = 1; | ||
| 156 | data->bytes_req = bytes_req; | ||
| 157 | data->bytes_alloc = bytes_alloc; | ||
| 158 | |||
| 159 | rb_link_node(&data->node, parent, node); | ||
| 160 | rb_insert_color(&data->node, &root_alloc_stat); | ||
| 161 | } | ||
| 162 | data->call_site = call_site; | ||
| 163 | data->alloc_cpu = cpu; | ||
| 164 | } | ||
| 165 | |||
| 166 | static void insert_caller_stat(unsigned long call_site, | ||
| 167 | int bytes_req, int bytes_alloc) | ||
| 168 | { | ||
| 169 | struct rb_node **node = &root_caller_stat.rb_node; | ||
| 170 | struct rb_node *parent = NULL; | ||
| 171 | struct alloc_stat *data = NULL; | ||
| 172 | |||
| 173 | while (*node) { | ||
| 174 | parent = *node; | ||
| 175 | data = rb_entry(*node, struct alloc_stat, node); | ||
| 176 | |||
| 177 | if (call_site > data->call_site) | ||
| 178 | node = &(*node)->rb_right; | ||
| 179 | else if (call_site < data->call_site) | ||
| 180 | node = &(*node)->rb_left; | ||
| 181 | else | ||
| 182 | break; | ||
| 183 | } | ||
| 184 | |||
| 185 | if (data && data->call_site == call_site) { | ||
| 186 | data->hit++; | ||
| 187 | data->bytes_req += bytes_req; | ||
| 188 | data->bytes_alloc += bytes_req; | ||
| 189 | } else { | ||
| 190 | data = malloc(sizeof(*data)); | ||
| 191 | if (!data) | ||
| 192 | die("malloc"); | ||
| 193 | data->call_site = call_site; | ||
| 194 | data->pingpong = 0; | ||
| 195 | data->hit = 1; | ||
| 196 | data->bytes_req = bytes_req; | ||
| 197 | data->bytes_alloc = bytes_alloc; | ||
| 198 | |||
| 199 | rb_link_node(&data->node, parent, node); | ||
| 200 | rb_insert_color(&data->node, &root_caller_stat); | ||
| 201 | } | ||
| 202 | } | ||
| 203 | |||
/*
 * Account one allocation tracepoint (kmalloc/kmem_cache_alloc and the
 * *_node variants) in both the per-pointer and per-callsite trees, and
 * update the global totals.
 *
 * @node: non-zero for the *_node tracepoints, which carry a "node"
 *        field; a mismatch between the sampling cpu's node and the
 *        requested node counts as a cross-node allocation.
 */
static void process_alloc_event(struct raw_event_sample *raw,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;
	int node1, node2;

	/* decode the raw tracepoint payload by field name */
	ptr = raw_field_value(event, "ptr", raw->data);
	call_site = raw_field_value(event, "call_site", raw->data);
	bytes_req = raw_field_value(event, "bytes_req", raw->data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		/* NOTE(review): assumes cpu < max_cpu_num; cpunode_map is
		 * sized from sysfs kernel_max — confirm against sample data */
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", raw->data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}
| 236 | |||
| 237 | static int ptr_cmp(struct alloc_stat *, struct alloc_stat *); | ||
| 238 | static int callsite_cmp(struct alloc_stat *, struct alloc_stat *); | ||
| 239 | |||
| 240 | static struct alloc_stat *search_alloc_stat(unsigned long ptr, | ||
| 241 | unsigned long call_site, | ||
| 242 | struct rb_root *root, | ||
| 243 | sort_fn_t sort_fn) | ||
| 244 | { | ||
| 245 | struct rb_node *node = root->rb_node; | ||
| 246 | struct alloc_stat key = { .ptr = ptr, .call_site = call_site }; | ||
| 247 | |||
| 248 | while (node) { | ||
| 249 | struct alloc_stat *data; | ||
| 250 | int cmp; | ||
| 251 | |||
| 252 | data = rb_entry(node, struct alloc_stat, node); | ||
| 253 | |||
| 254 | cmp = sort_fn(&key, data); | ||
| 255 | if (cmp < 0) | ||
| 256 | node = node->rb_left; | ||
| 257 | else if (cmp > 0) | ||
| 258 | node = node->rb_right; | ||
| 259 | else | ||
| 260 | return data; | ||
| 261 | } | ||
| 262 | return NULL; | ||
| 263 | } | ||
| 264 | |||
/*
 * Account one free tracepoint.  Looks up the allocation by pointer; a
 * free on a different cpu than the allocating one counts a "ping-pong"
 * against both the allocation and its call site.  alloc_cpu is then
 * poisoned to -1 so a reused pointer value starts a fresh comparison.
 */
static void process_free_event(struct raw_event_sample *raw,
			       struct event *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", raw->data);

	/* frees for pointers we never saw allocated are simply ignored */
	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		/* every alloc insert also inserted a caller entry, so this
		 * lookup must succeed */
		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		assert(s_caller);
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;
}
| 290 | |||
| 291 | static void | ||
| 292 | process_raw_event(event_t *raw_event __used, void *more_data, | ||
| 293 | int cpu, u64 timestamp, struct thread *thread) | ||
| 294 | { | ||
| 295 | struct raw_event_sample *raw = more_data; | ||
| 296 | struct event *event; | ||
| 297 | int type; | ||
| 298 | |||
| 299 | type = trace_parse_common_type(raw->data); | ||
| 300 | event = trace_find_event(type); | ||
| 301 | |||
| 302 | if (!strcmp(event->name, "kmalloc") || | ||
| 303 | !strcmp(event->name, "kmem_cache_alloc")) { | ||
| 304 | process_alloc_event(raw, event, cpu, timestamp, thread, 0); | ||
| 305 | return; | ||
| 306 | } | ||
| 307 | |||
| 308 | if (!strcmp(event->name, "kmalloc_node") || | ||
| 309 | !strcmp(event->name, "kmem_cache_alloc_node")) { | ||
| 310 | process_alloc_event(raw, event, cpu, timestamp, thread, 1); | ||
| 311 | return; | ||
| 312 | } | ||
| 313 | |||
| 314 | if (!strcmp(event->name, "kfree") || | ||
| 315 | !strcmp(event->name, "kmem_cache_free")) { | ||
| 316 | process_free_event(raw, event, cpu, timestamp, thread); | ||
| 317 | return; | ||
| 318 | } | ||
| 319 | } | ||
| 320 | |||
/*
 * Per-sample callback from the perf.data dispatcher.
 *
 * Walks the variable tail of the sample in the order fixed by
 * sample_type (TIME, then CPU plus a reserved pad word, then PERIOD)
 * to locate the raw tracepoint payload, then hands it to
 * process_raw_event().  Returns 0 on success, -1 when the owning
 * thread cannot be resolved.
 */
static int process_sample_event(event_t *event)
{
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;
	struct thread *thread = threads__findnew(event->ip.pid);

	/* optional fields appear in this exact order in the sample */
	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}
| 364 | |||
| 365 | static int sample_type_check(u64 type) | ||
| 366 | { | ||
| 367 | sample_type = type; | ||
| 368 | |||
| 369 | if (!(sample_type & PERF_SAMPLE_RAW)) { | ||
| 370 | fprintf(stderr, | ||
| 371 | "No trace sample to read. Did you call perf record " | ||
| 372 | "without -R?"); | ||
| 373 | return -1; | ||
| 374 | } | ||
| 375 | |||
| 376 | return 0; | ||
| 377 | } | ||
| 378 | |||
/* Dispatch table for the perf.data reader: we only need samples, comm
 * events (thread names), and the sample-type sanity check. */
static struct perf_file_handler file_handler = {
	.process_sample_event = process_sample_event,
	.process_comm_event = event__process_comm,
	.sample_type_check = sample_type_check,
};
| 384 | |||
/*
 * Register our handlers and stream the whole input file through them.
 * Returns the dispatcher's status (0 on success, negative on error).
 */
static int read_events(void)
{
	register_idle_thread();
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0,
				       &event__cwdlen, &event__cwd);
}
| 393 | |||
/*
 * Internal fragmentation in percent: the share of allocated bytes that
 * was not actually requested.  Defined as 0% when nothing was allocated.
 */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	return n_alloc ? 100.0 - (100.0 * n_req / n_alloc) : 0.0;
}
| 401 | |||
/*
 * Print one sorted result table (callsites or allocations), at most
 * n_lines rows; a negative n_lines means unlimited.
 *
 * Columns: key | total_alloc/per-hit | total_req/per-hit | hits |
 * ping-pongs | fragmentation%.
 */
static void __print_result(struct rb_root *root, int n_lines, int is_caller)
{
	struct rb_node *next;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	/* n_lines-- stays truthy "forever" when the caller passed -1 */
	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = thread__find_function(kthread, addr, NULL);
		} else
			addr = data->ptr;

		/* symbol+offset when resolvable, raw address otherwise */
		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
				 addr - sym->start);
		else
			snprintf(buf, sizeof(buf), "%#Lx", addr);
		printf(" %-34s |", buf);

		/* per-hit averages are safe: hit starts at 1 and only grows */
		printf(" %9llu/%-5lu | %9llu/%-5lu | %6lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	/* n_lines lands on exactly -1 only when the limit truncated the
	 * list (NOTE(review): also fires for an empty tree with n_lines
	 * passed as -1 — confirm whether that corner case matters) */
	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}
| 451 | |||
/* Print the global totals accumulated while processing the samples. */
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
| 463 | |||
/* Print whichever tables were requested via --stat, then the summary. */
static void print_result(void)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, alloc_lines, 0);
	print_summary();
}
| 472 | |||
/*
 * A selectable sort key.  Keys on a sort list are compared in order;
 * the first dimension whose comparator returns non-zero decides.
 */
struct sort_dimension {
	const char name[20];	/* key name as typed after -s */
	sort_fn_t cmp;		/* three-way comparator */
	struct list_head list;
};
| 478 | |||
| 479 | static LIST_HEAD(caller_sort); | ||
| 480 | static LIST_HEAD(alloc_sort); | ||
| 481 | |||
| 482 | static void sort_insert(struct rb_root *root, struct alloc_stat *data, | ||
| 483 | struct list_head *sort_list) | ||
| 484 | { | ||
| 485 | struct rb_node **new = &(root->rb_node); | ||
| 486 | struct rb_node *parent = NULL; | ||
| 487 | struct sort_dimension *sort; | ||
| 488 | |||
| 489 | while (*new) { | ||
| 490 | struct alloc_stat *this; | ||
| 491 | int cmp = 0; | ||
| 492 | |||
| 493 | this = rb_entry(*new, struct alloc_stat, node); | ||
| 494 | parent = *new; | ||
| 495 | |||
| 496 | list_for_each_entry(sort, sort_list, list) { | ||
| 497 | cmp = sort->cmp(data, this); | ||
| 498 | if (cmp) | ||
| 499 | break; | ||
| 500 | } | ||
| 501 | |||
| 502 | if (cmp > 0) | ||
| 503 | new = &((*new)->rb_left); | ||
| 504 | else | ||
| 505 | new = &((*new)->rb_right); | ||
| 506 | } | ||
| 507 | |||
| 508 | rb_link_node(&data->node, parent, new); | ||
| 509 | rb_insert_color(&data->node, root); | ||
| 510 | } | ||
| 511 | |||
| 512 | static void __sort_result(struct rb_root *root, struct rb_root *root_sorted, | ||
| 513 | struct list_head *sort_list) | ||
| 514 | { | ||
| 515 | struct rb_node *node; | ||
| 516 | struct alloc_stat *data; | ||
| 517 | |||
| 518 | for (;;) { | ||
| 519 | node = rb_first(root); | ||
| 520 | if (!node) | ||
| 521 | break; | ||
| 522 | |||
| 523 | rb_erase(node, root); | ||
| 524 | data = rb_entry(node, struct alloc_stat, node); | ||
| 525 | sort_insert(root_sorted, data, sort_list); | ||
| 526 | } | ||
| 527 | } | ||
| 528 | |||
/* Re-sort both stat trees into their output (display) ordering. */
static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
| 534 | |||
/*
 * Analysis mode: read the input file, sort, and print.  The original
 * ignored read_events()'s return value, so a broken or missing
 * perf.data would print an empty report and exit 0; errors are now
 * propagated to the caller.
 */
static int __cmd_kmem(void)
{
	int err;

	setup_pager();
	err = read_events();
	if (err)
		return err;
	sort_result();
	print_result();

	return 0;
}
| 544 | |||
| 545 | static const char * const kmem_usage[] = { | ||
| 546 | "perf kmem [<options>] {record}", | ||
| 547 | NULL | ||
| 548 | }; | ||
| 549 | |||
| 550 | static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
| 551 | { | ||
| 552 | if (l->ptr < r->ptr) | ||
| 553 | return -1; | ||
| 554 | else if (l->ptr > r->ptr) | ||
| 555 | return 1; | ||
| 556 | return 0; | ||
| 557 | } | ||
| 558 | |||
| 559 | static struct sort_dimension ptr_sort_dimension = { | ||
| 560 | .name = "ptr", | ||
| 561 | .cmp = ptr_cmp, | ||
| 562 | }; | ||
| 563 | |||
| 564 | static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
| 565 | { | ||
| 566 | if (l->call_site < r->call_site) | ||
| 567 | return -1; | ||
| 568 | else if (l->call_site > r->call_site) | ||
| 569 | return 1; | ||
| 570 | return 0; | ||
| 571 | } | ||
| 572 | |||
| 573 | static struct sort_dimension callsite_sort_dimension = { | ||
| 574 | .name = "callsite", | ||
| 575 | .cmp = callsite_cmp, | ||
| 576 | }; | ||
| 577 | |||
| 578 | static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
| 579 | { | ||
| 580 | if (l->hit < r->hit) | ||
| 581 | return -1; | ||
| 582 | else if (l->hit > r->hit) | ||
| 583 | return 1; | ||
| 584 | return 0; | ||
| 585 | } | ||
| 586 | |||
| 587 | static struct sort_dimension hit_sort_dimension = { | ||
| 588 | .name = "hit", | ||
| 589 | .cmp = hit_cmp, | ||
| 590 | }; | ||
| 591 | |||
| 592 | static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
| 593 | { | ||
| 594 | if (l->bytes_alloc < r->bytes_alloc) | ||
| 595 | return -1; | ||
| 596 | else if (l->bytes_alloc > r->bytes_alloc) | ||
| 597 | return 1; | ||
| 598 | return 0; | ||
| 599 | } | ||
| 600 | |||
| 601 | static struct sort_dimension bytes_sort_dimension = { | ||
| 602 | .name = "bytes", | ||
| 603 | .cmp = bytes_cmp, | ||
| 604 | }; | ||
| 605 | |||
| 606 | static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
| 607 | { | ||
| 608 | double x, y; | ||
| 609 | |||
| 610 | x = fragmentation(l->bytes_req, l->bytes_alloc); | ||
| 611 | y = fragmentation(r->bytes_req, r->bytes_alloc); | ||
| 612 | |||
| 613 | if (x < y) | ||
| 614 | return -1; | ||
| 615 | else if (x > y) | ||
| 616 | return 1; | ||
| 617 | return 0; | ||
| 618 | } | ||
| 619 | |||
| 620 | static struct sort_dimension frag_sort_dimension = { | ||
| 621 | .name = "frag", | ||
| 622 | .cmp = frag_cmp, | ||
| 623 | }; | ||
| 624 | |||
| 625 | static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r) | ||
| 626 | { | ||
| 627 | if (l->pingpong < r->pingpong) | ||
| 628 | return -1; | ||
| 629 | else if (l->pingpong > r->pingpong) | ||
| 630 | return 1; | ||
| 631 | return 0; | ||
| 632 | } | ||
| 633 | |||
| 634 | static struct sort_dimension pingpong_sort_dimension = { | ||
| 635 | .name = "pingpong", | ||
| 636 | .cmp = pingpong_cmp, | ||
| 637 | }; | ||
| 638 | |||
| 639 | static struct sort_dimension *avail_sorts[] = { | ||
| 640 | &ptr_sort_dimension, | ||
| 641 | &callsite_sort_dimension, | ||
| 642 | &hit_sort_dimension, | ||
| 643 | &bytes_sort_dimension, | ||
| 644 | &frag_sort_dimension, | ||
| 645 | &pingpong_sort_dimension, | ||
| 646 | }; | ||
| 647 | |||
| 648 | #define NUM_AVAIL_SORTS \ | ||
| 649 | (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *)) | ||
| 650 | |||
| 651 | static int sort_dimension__add(const char *tok, struct list_head *list) | ||
| 652 | { | ||
| 653 | struct sort_dimension *sort; | ||
| 654 | int i; | ||
| 655 | |||
| 656 | for (i = 0; i < NUM_AVAIL_SORTS; i++) { | ||
| 657 | if (!strcmp(avail_sorts[i]->name, tok)) { | ||
| 658 | sort = malloc(sizeof(*sort)); | ||
| 659 | if (!sort) | ||
| 660 | die("malloc"); | ||
| 661 | memcpy(sort, avail_sorts[i], sizeof(*sort)); | ||
| 662 | list_add_tail(&sort->list, list); | ||
| 663 | return 0; | ||
| 664 | } | ||
| 665 | } | ||
| 666 | |||
| 667 | return -1; | ||
| 668 | } | ||
| 669 | |||
/*
 * Parse a comma-separated sort-key string into @sort_list.
 *
 * strsep() advances (and finally NULLs) the pointer it is given, so the
 * original "free(str)" at the end freed NULL and leaked the strdup'd
 * buffer — and leaked it again on the error path.  Keep the original
 * pointer separately so both paths free the real allocation.
 */
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *str = strdup(arg);
	char *pos = str;	/* cursor for strsep; str keeps the base */
	char *tok;

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}
| 691 | |||
| 692 | static int parse_sort_opt(const struct option *opt __used, | ||
| 693 | const char *arg, int unset __used) | ||
| 694 | { | ||
| 695 | if (!arg) | ||
| 696 | return -1; | ||
| 697 | |||
| 698 | if (caller_flag > alloc_flag) | ||
| 699 | return setup_sorting(&caller_sort, arg); | ||
| 700 | else | ||
| 701 | return setup_sorting(&alloc_sort, arg); | ||
| 702 | |||
| 703 | return 0; | ||
| 704 | } | ||
| 705 | |||
| 706 | static int parse_stat_opt(const struct option *opt __used, | ||
| 707 | const char *arg, int unset __used) | ||
| 708 | { | ||
| 709 | if (!arg) | ||
| 710 | return -1; | ||
| 711 | |||
| 712 | if (strcmp(arg, "alloc") == 0) | ||
| 713 | alloc_flag = (caller_flag + 1); | ||
| 714 | else if (strcmp(arg, "caller") == 0) | ||
| 715 | caller_flag = (alloc_flag + 1); | ||
| 716 | else | ||
| 717 | return -1; | ||
| 718 | return 0; | ||
| 719 | } | ||
| 720 | |||
| 721 | static int parse_line_opt(const struct option *opt __used, | ||
| 722 | const char *arg, int unset __used) | ||
| 723 | { | ||
| 724 | int lines; | ||
| 725 | |||
| 726 | if (!arg) | ||
| 727 | return -1; | ||
| 728 | |||
| 729 | lines = strtoul(arg, NULL, 10); | ||
| 730 | |||
| 731 | if (caller_flag > alloc_flag) | ||
| 732 | caller_lines = lines; | ||
| 733 | else | ||
| 734 | alloc_lines = lines; | ||
| 735 | |||
| 736 | return 0; | ||
| 737 | } | ||
| 738 | |||
| 739 | static const struct option kmem_options[] = { | ||
| 740 | OPT_STRING('i', "input", &input_name, "file", | ||
| 741 | "input file name"), | ||
| 742 | OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>", | ||
| 743 | "stat selector, Pass 'alloc' or 'caller'.", | ||
| 744 | parse_stat_opt), | ||
| 745 | OPT_CALLBACK('s', "sort", NULL, "key[,key2...]", | ||
| 746 | "sort by keys: ptr, call_site, bytes, hit, pingpong, frag", | ||
| 747 | parse_sort_opt), | ||
| 748 | OPT_CALLBACK('l', "line", NULL, "num", | ||
| 749 | "show n lins", | ||
| 750 | parse_line_opt), | ||
| 751 | OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"), | ||
| 752 | OPT_END() | ||
| 753 | }; | ||
| 754 | |||
| 755 | static const char *record_args[] = { | ||
| 756 | "record", | ||
| 757 | "-a", | ||
| 758 | "-R", | ||
| 759 | "-M", | ||
| 760 | "-f", | ||
| 761 | "-c", "1", | ||
| 762 | "-e", "kmem:kmalloc", | ||
| 763 | "-e", "kmem:kmalloc_node", | ||
| 764 | "-e", "kmem:kfree", | ||
| 765 | "-e", "kmem:kmem_cache_alloc", | ||
| 766 | "-e", "kmem:kmem_cache_alloc_node", | ||
| 767 | "-e", "kmem:kmem_cache_free", | ||
| 768 | }; | ||
| 769 | |||
| 770 | static int __cmd_record(int argc, const char **argv) | ||
| 771 | { | ||
| 772 | unsigned int rec_argc, i, j; | ||
| 773 | const char **rec_argv; | ||
| 774 | |||
| 775 | rec_argc = ARRAY_SIZE(record_args) + argc - 1; | ||
| 776 | rec_argv = calloc(rec_argc + 1, sizeof(char *)); | ||
| 777 | |||
| 778 | for (i = 0; i < ARRAY_SIZE(record_args); i++) | ||
| 779 | rec_argv[i] = strdup(record_args[i]); | ||
| 780 | |||
| 781 | for (j = 1; j < (unsigned int)argc; j++, i++) | ||
| 782 | rec_argv[i] = argv[j]; | ||
| 783 | |||
| 784 | return cmd_record(i, rec_argv, NULL); | ||
| 785 | } | ||
| 786 | |||
/*
 * Entry point for "perf kmem".  "perf kmem record ..." re-execs perf
 * record with the kmem tracepoints; with no subcommand, perf.data is
 * analysed.  Any sort list the user left unset gets the default order.
 */
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	symbol__init(0);

	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	/* "rec", "record", "recording", ... all select record mode */
	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(kmem_usage, kmem_options);

	if (list_empty(&caller_sort))
		setup_sorting(&caller_sort, default_sort_order);
	if (list_empty(&alloc_sort))
		setup_sorting(&alloc_sort, default_sort_order);

	setup_cpunode_map();

	return __cmd_kmem();
}
| 807 | |||
