author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2009-12-07 00:04:04 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-07 00:26:25 -0500
commit	f48f669d42e133db839af16656fd720107ef6742 (patch)
tree	0fbc78abb79390ba22c40b48e1684f1f8b5badcb /tools/perf
parent	d9541ed3241bb6c2b805d3ea0e87563cf2a0c5c3 (diff)
perf_event: Eliminate raw->size
raw->size is not used; this patch just cleans it up.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
LKML-Reference: <4B1C8CC4.4050007@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
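For context, a minimal standalone sketch of the pattern this patch removes (stand-in types and a demo main() only, not code from the tree): every raw sample used to be copied into a heap-allocated struct raw_event_sample whose size field was written but never read back — and, as the removed lines below show, never freed either. After the patch, the sample data is parsed in place.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mirrors the struct the patch deletes; the tree used a zero-length
 * array (char data[0]), spelled as a C99 flexible array member here. */
struct raw_event_sample {
	uint32_t size;	/* written once below, never read: the dead field */
	char data[];
};

/* Before: copy every sample into a heap wrapper first. */
static void process_before(void *data, uint32_t size)
{
	struct raw_event_sample *raw = malloc(sizeof(*raw) + size);

	if (!raw)
		exit(EXIT_FAILURE);
	raw->size = size;
	memcpy(raw->data, data, size);
	printf("before: parsing %u bytes via a copy\n", size);
	free(raw);	/* the original code had no free() at all */
}

/* After: parse the sample where it already sits in the buffer. */
static void process_after(void *data, uint32_t size)
{
	printf("after: parsing %u bytes in place at %p\n", size, data);
}

int main(void)
{
	char sample[] = "fake raw trace record";

	process_before(sample, sizeof(sample));
	process_after(sample, sizeof(sample));
	return 0;
}

Besides deleting the dead field, dropping the wrapper saves one malloc+memcpy per trace event and closes the per-event leak.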
Diffstat (limited to 'tools/perf')
-rw-r--r--	tools/perf/builtin-kmem.c	38
-rw-r--r--	tools/perf/builtin-sched.c	94
2 files changed, 56 insertions(+), 76 deletions(-)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index f84d7a3db681..7551a5f834b8 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -57,11 +57,6 @@ static struct rb_root root_caller_sorted;
 static unsigned long total_requested, total_allocated;
 static unsigned long nr_allocs, nr_cross_allocs;
 
-struct raw_event_sample {
-	u32 size;
-	char data[0];
-};
-
 #define PATH_SYS_NODE	"/sys/devices/system/node"
 
 static void init_cpunode_map(void)
@@ -201,7 +196,7 @@ static void insert_caller_stat(unsigned long call_site,
 	}
 }
 
-static void process_alloc_event(struct raw_event_sample *raw,
+static void process_alloc_event(void *data,
 				struct event *event,
 				int cpu,
 				u64 timestamp __used,
@@ -214,10 +209,10 @@ static void process_alloc_event(struct raw_event_sample *raw,
 	int bytes_alloc;
 	int node1, node2;
 
-	ptr = raw_field_value(event, "ptr", raw->data);
-	call_site = raw_field_value(event, "call_site", raw->data);
-	bytes_req = raw_field_value(event, "bytes_req", raw->data);
-	bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);
+	ptr = raw_field_value(event, "ptr", data);
+	call_site = raw_field_value(event, "call_site", data);
+	bytes_req = raw_field_value(event, "bytes_req", data);
+	bytes_alloc = raw_field_value(event, "bytes_alloc", data);
 
 	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
 	insert_caller_stat(call_site, bytes_req, bytes_alloc);
@@ -227,7 +222,7 @@ static void process_alloc_event(struct raw_event_sample *raw,
 
 	if (node) {
 		node1 = cpunode_map[cpu];
-		node2 = raw_field_value(event, "node", raw->data);
+		node2 = raw_field_value(event, "node", data);
 		if (node1 != node2)
 			nr_cross_allocs++;
 	}
@@ -262,7 +257,7 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
 	return NULL;
 }
 
-static void process_free_event(struct raw_event_sample *raw,
+static void process_free_event(void *data,
 			       struct event *event,
 			       int cpu,
 			       u64 timestamp __used,
@@ -271,7 +266,7 @@ static void process_free_event(struct raw_event_sample *raw,
 	unsigned long ptr;
 	struct alloc_stat *s_alloc, *s_caller;
 
-	ptr = raw_field_value(event, "ptr", raw->data);
+	ptr = raw_field_value(event, "ptr", data);
 
 	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
 	if (!s_alloc)
@@ -289,35 +284,30 @@ static void process_free_event(struct raw_event_sample *raw,
 }
 
 static void
-process_raw_event(event_t *raw_event __used, u32 size, void *data,
+process_raw_event(event_t *raw_event __used, void *data,
 		  int cpu, u64 timestamp, struct thread *thread)
 {
-	struct raw_event_sample *raw;
 	struct event *event;
 	int type;
 
-	raw = malloc_or_die(sizeof(*raw)+size);
-	raw->size = size;
-	memcpy(raw->data, data, size);
-
-	type = trace_parse_common_type(raw->data);
+	type = trace_parse_common_type(data);
 	event = trace_find_event(type);
 
 	if (!strcmp(event->name, "kmalloc") ||
 	    !strcmp(event->name, "kmem_cache_alloc")) {
-		process_alloc_event(raw, event, cpu, timestamp, thread, 0);
+		process_alloc_event(data, event, cpu, timestamp, thread, 0);
 		return;
 	}
 
 	if (!strcmp(event->name, "kmalloc_node") ||
 	    !strcmp(event->name, "kmem_cache_alloc_node")) {
-		process_alloc_event(raw, event, cpu, timestamp, thread, 1);
+		process_alloc_event(data, event, cpu, timestamp, thread, 1);
 		return;
 	}
 
 	if (!strcmp(event->name, "kfree") ||
 	    !strcmp(event->name, "kmem_cache_free")) {
-		process_free_event(raw, event, cpu, timestamp, thread);
+		process_free_event(data, event, cpu, timestamp, thread);
 		return;
 	}
 }
@@ -349,7 +339,7 @@ static int process_sample_event(event_t *event)
 
 	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
 
-	process_raw_event(event, data.raw_size, data.raw_data, data.cpu,
+	process_raw_event(event, data.raw_data, data.cpu,
 			  data.time, thread);
 
 	return 0;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 4655e16b929b..19f43faa9f81 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -628,11 +628,6 @@ static void test_calibrations(void)
 	printf("the sleep test took %Ld nsecs\n", T1-T0);
 }
 
-struct raw_event_sample {
-	u32 size;
-	char data[0];
-};
-
 #define FILL_FIELD(ptr, field, event, data)	\
 	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
 
@@ -1356,7 +1351,7 @@ static void sort_lat(void)
 static struct trace_sched_handler *trace_handler;
 
 static void
-process_sched_wakeup_event(struct raw_event_sample *raw,
+process_sched_wakeup_event(void *data,
 			   struct event *event,
 			   int cpu __used,
 			   u64 timestamp __used,
@@ -1364,13 +1359,13 @@ process_sched_wakeup_event(struct raw_event_sample *raw,
 {
 	struct trace_wakeup_event wakeup_event;
 
-	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);
+	FILL_COMMON_FIELDS(wakeup_event, event, data);
 
-	FILL_ARRAY(wakeup_event, comm, event, raw->data);
-	FILL_FIELD(wakeup_event, pid, event, raw->data);
-	FILL_FIELD(wakeup_event, prio, event, raw->data);
-	FILL_FIELD(wakeup_event, success, event, raw->data);
-	FILL_FIELD(wakeup_event, cpu, event, raw->data);
+	FILL_ARRAY(wakeup_event, comm, event, data);
+	FILL_FIELD(wakeup_event, pid, event, data);
+	FILL_FIELD(wakeup_event, prio, event, data);
+	FILL_FIELD(wakeup_event, success, event, data);
+	FILL_FIELD(wakeup_event, cpu, event, data);
 
 	if (trace_handler->wakeup_event)
 		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
@@ -1469,7 +1464,7 @@ map_switch_event(struct trace_switch_event *switch_event,
 
 
 static void
-process_sched_switch_event(struct raw_event_sample *raw,
+process_sched_switch_event(void *data,
 			   struct event *event,
 			   int this_cpu,
 			   u64 timestamp __used,
@@ -1477,15 +1472,15 @@ process_sched_switch_event(struct raw_event_sample *raw,
 {
 	struct trace_switch_event switch_event;
 
-	FILL_COMMON_FIELDS(switch_event, event, raw->data);
+	FILL_COMMON_FIELDS(switch_event, event, data);
 
-	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
-	FILL_FIELD(switch_event, prev_pid, event, raw->data);
-	FILL_FIELD(switch_event, prev_prio, event, raw->data);
-	FILL_FIELD(switch_event, prev_state, event, raw->data);
-	FILL_ARRAY(switch_event, next_comm, event, raw->data);
-	FILL_FIELD(switch_event, next_pid, event, raw->data);
-	FILL_FIELD(switch_event, next_prio, event, raw->data);
+	FILL_ARRAY(switch_event, prev_comm, event, data);
+	FILL_FIELD(switch_event, prev_pid, event, data);
+	FILL_FIELD(switch_event, prev_prio, event, data);
+	FILL_FIELD(switch_event, prev_state, event, data);
+	FILL_ARRAY(switch_event, next_comm, event, data);
+	FILL_FIELD(switch_event, next_pid, event, data);
+	FILL_FIELD(switch_event, next_prio, event, data);
 
 	if (curr_pid[this_cpu] != (u32)-1) {
 		/*
@@ -1502,7 +1497,7 @@ process_sched_switch_event(struct raw_event_sample *raw,
 }
 
 static void
-process_sched_runtime_event(struct raw_event_sample *raw,
+process_sched_runtime_event(void *data,
 			    struct event *event,
 			    int cpu __used,
 			    u64 timestamp __used,
@@ -1510,17 +1505,17 @@ process_sched_runtime_event(struct raw_event_sample *raw,
 {
 	struct trace_runtime_event runtime_event;
 
-	FILL_ARRAY(runtime_event, comm, event, raw->data);
-	FILL_FIELD(runtime_event, pid, event, raw->data);
-	FILL_FIELD(runtime_event, runtime, event, raw->data);
-	FILL_FIELD(runtime_event, vruntime, event, raw->data);
+	FILL_ARRAY(runtime_event, comm, event, data);
+	FILL_FIELD(runtime_event, pid, event, data);
+	FILL_FIELD(runtime_event, runtime, event, data);
+	FILL_FIELD(runtime_event, vruntime, event, data);
 
 	if (trace_handler->runtime_event)
 		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
 }
 
 static void
-process_sched_fork_event(struct raw_event_sample *raw,
+process_sched_fork_event(void *data,
 			 struct event *event,
 			 int cpu __used,
 			 u64 timestamp __used,
@@ -1528,12 +1523,12 @@ process_sched_fork_event(struct raw_event_sample *raw,
 {
 	struct trace_fork_event fork_event;
 
-	FILL_COMMON_FIELDS(fork_event, event, raw->data);
+	FILL_COMMON_FIELDS(fork_event, event, data);
 
-	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
-	FILL_FIELD(fork_event, parent_pid, event, raw->data);
-	FILL_ARRAY(fork_event, child_comm, event, raw->data);
-	FILL_FIELD(fork_event, child_pid, event, raw->data);
+	FILL_ARRAY(fork_event, parent_comm, event, data);
+	FILL_FIELD(fork_event, parent_pid, event, data);
+	FILL_ARRAY(fork_event, child_comm, event, data);
+	FILL_FIELD(fork_event, child_pid, event, data);
 
 	if (trace_handler->fork_event)
 		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
@@ -1550,7 +1545,7 @@ process_sched_exit_event(struct event *event,
 }
 
 static void
-process_sched_migrate_task_event(struct raw_event_sample *raw,
+process_sched_migrate_task_event(void *data,
 				 struct event *event,
 				 int cpu __used,
 				 u64 timestamp __used,
@@ -1558,46 +1553,42 @@ process_sched_migrate_task_event(struct raw_event_sample *raw,
 {
 	struct trace_migrate_task_event migrate_task_event;
 
-	FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);
+	FILL_COMMON_FIELDS(migrate_task_event, event, data);
 
-	FILL_ARRAY(migrate_task_event, comm, event, raw->data);
-	FILL_FIELD(migrate_task_event, pid, event, raw->data);
-	FILL_FIELD(migrate_task_event, prio, event, raw->data);
-	FILL_FIELD(migrate_task_event, cpu, event, raw->data);
+	FILL_ARRAY(migrate_task_event, comm, event, data);
+	FILL_FIELD(migrate_task_event, pid, event, data);
+	FILL_FIELD(migrate_task_event, prio, event, data);
+	FILL_FIELD(migrate_task_event, cpu, event, data);
 
 	if (trace_handler->migrate_task_event)
 		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
 }
 
 static void
-process_raw_event(event_t *raw_event __used, u32 size, void *data,
+process_raw_event(event_t *raw_event __used, void *data,
 		  int cpu, u64 timestamp, struct thread *thread)
 {
-	struct raw_event_sample *raw;
 	struct event *event;
 	int type;
 
-	raw = malloc_or_die(sizeof(*raw)+size);
-	raw->size = size;
-	memcpy(raw->data, data, size);
 
-	type = trace_parse_common_type(raw->data);
+	type = trace_parse_common_type(data);
 	event = trace_find_event(type);
 
 	if (!strcmp(event->name, "sched_switch"))
-		process_sched_switch_event(raw, event, cpu, timestamp, thread);
+		process_sched_switch_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_stat_runtime"))
-		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
+		process_sched_runtime_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup"))
-		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_wakeup_new"))
-		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
+		process_sched_wakeup_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_fork"))
-		process_sched_fork_event(raw, event, cpu, timestamp, thread);
+		process_sched_fork_event(data, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_exit"))
 		process_sched_exit_event(event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_migrate_task"))
-		process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
+		process_sched_migrate_task_event(data, event, cpu, timestamp, thread);
 }
 
 static int process_sample_event(event_t *event)
@@ -1633,8 +1624,7 @@ static int process_sample_event(event_t *event)
 	if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
 		return 0;
 
-	process_raw_event(event, data.raw_size, data.raw_data, data.cpu,
-		data.time, thread);
+	process_raw_event(event, data.raw_data, data.cpu, data.time, thread);
 
 	return 0;
 }
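The sched conversion above keeps feeding the FILL_FIELD/FILL_ARRAY macros, now handing them the raw pointer directly instead of raw->data. For readers unfamiliar with the idiom, here is a minimal compilable sketch of how FILL_FIELD works; raw_field_value() below is a simplified stand-in for the perf helper of the same name, and GNU C typeof is assumed (as in the tree).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event;	/* opaque stand-in for perf's parsed event description */

/* Simplified stand-in: the real helper looks the field up by name in the
 * event's format description and decodes it from the raw record. */
static uint64_t raw_field_value(struct event *event, const char *name, void *data)
{
	(void)event;
	if (!strcmp(name, "pid"))
		return *(uint32_t *)data;
	return 0;
}

/* The macro from builtin-sched.c: #field stringizes the member name for
 * the lookup, typeof() casts the u64 result back to the member's type. */
#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

struct trace_wakeup_event {
	uint32_t pid;
};

int main(void)
{
	uint32_t record = 4242;	/* fake raw sample containing only a pid */
	struct trace_wakeup_event wakeup_event;

	FILL_FIELD(wakeup_event, pid, NULL, &record);
	printf("pid = %u\n", wakeup_event.pid);
	return 0;
}

Because the macros only ever pass the data pointer through to raw_field_value(), the wrapper's size field had no consumer anywhere — which is why the patch can delete it without changing behavior.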