aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Makefile7
-rw-r--r--arch/alpha/kernel/perf_event.c3
-rw-r--r--arch/arm/configs/bcmring_defconfig2
-rw-r--r--arch/arm/kernel/perf_event_v6.c4
-rw-r--r--arch/arm/kernel/perf_event_v7.c4
-rw-r--r--arch/arm/kernel/perf_event_xscale.c8
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c2
-rw-r--r--arch/powerpc/configs/chroma_defconfig2
-rw-r--r--arch/powerpc/configs/gamecube_defconfig2
-rw-r--r--arch/powerpc/configs/wii_defconfig2
-rw-r--r--arch/powerpc/perf/core-book3s.c3
-rw-r--r--arch/powerpc/perf/core-fsl-emb.c3
-rw-r--r--arch/sh/configs/sh7785lcr_32bit_defconfig2
-rw-r--r--arch/sparc/configs/sparc64_defconfig2
-rw-r--r--arch/sparc/kernel/perf_event.c4
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/ftrace.h3
-rw-r--r--arch/x86/include/asm/msr-index.h5
-rw-r--r--arch/x86/include/asm/perf_event.h12
-rw-r--r--arch/x86/kernel/cpu/perf_event.c7
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c570
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c6
-rw-r--r--arch/x86/kernel/ftrace.c500
-rw-r--r--arch/x86/kernel/nmi.c10
-rw-r--r--arch/x86/kernel/traps.c8
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/linux/ftrace.h19
-rw-r--r--include/linux/kernel.h13
-rw-r--r--include/linux/perf_event.h29
-rw-r--r--include/linux/ring_buffer.h6
-rw-r--r--init/Kconfig14
-rw-r--r--kernel/events/core.c25
-rw-r--r--kernel/sched/core.c9
-rw-r--r--kernel/trace/Kconfig1
-rw-r--r--kernel/trace/ftrace.c242
-rw-r--r--kernel/trace/ring_buffer.c585
-rw-r--r--kernel/trace/trace.c503
-rw-r--r--kernel/trace/trace.h4
-rw-r--r--kernel/trace/trace_printk.c4
-rw-r--r--tools/Makefile77
-rw-r--r--tools/lib/traceevent/Makefile303
-rw-r--r--tools/lib/traceevent/event-parse.c5065
-rw-r--r--tools/lib/traceevent/event-parse.h804
-rw-r--r--tools/lib/traceevent/event-utils.h80
-rw-r--r--tools/lib/traceevent/parse-filter.c2262
-rw-r--r--tools/lib/traceevent/parse-utils.c110
-rw-r--r--tools/lib/traceevent/trace-seq.c200
-rw-r--r--tools/perf/Documentation/perfconfig.example1
-rw-r--r--tools/perf/Makefile117
-rw-r--r--tools/perf/builtin-kmem.c6
-rw-r--r--tools/perf/builtin-lock.c26
-rw-r--r--tools/perf/builtin-record.c63
-rw-r--r--tools/perf/builtin-report.c21
-rw-r--r--tools/perf/builtin-sched.c42
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-stat.c86
-rw-r--r--tools/perf/builtin-test.c13
-rw-r--r--tools/perf/builtin-top.c54
-rw-r--r--tools/perf/perf.h8
-rw-r--r--tools/perf/ui/browser.c (renamed from tools/perf/util/ui/browser.c)116
-rw-r--r--tools/perf/ui/browser.h (renamed from tools/perf/util/ui/browser.h)9
-rw-r--r--tools/perf/ui/browsers/annotate.c867
-rw-r--r--tools/perf/ui/browsers/hists.c (renamed from tools/perf/util/ui/browsers/hists.c)26
-rw-r--r--tools/perf/ui/browsers/map.c (renamed from tools/perf/util/ui/browsers/map.c)6
-rw-r--r--tools/perf/ui/browsers/map.h (renamed from tools/perf/util/ui/browsers/map.h)0
-rw-r--r--tools/perf/ui/gtk/browser.c (renamed from tools/perf/util/gtk/browser.c)31
-rw-r--r--tools/perf/ui/gtk/gtk.h (renamed from tools/perf/util/gtk/gtk.h)0
-rw-r--r--tools/perf/ui/gtk/setup.c12
-rw-r--r--tools/perf/ui/helpline.c (renamed from tools/perf/util/ui/helpline.c)0
-rw-r--r--tools/perf/ui/helpline.h (renamed from tools/perf/util/ui/helpline.h)0
-rw-r--r--tools/perf/ui/keysyms.h (renamed from tools/perf/util/ui/keysyms.h)0
-rw-r--r--tools/perf/ui/libslang.h (renamed from tools/perf/util/ui/libslang.h)0
-rw-r--r--tools/perf/ui/progress.c (renamed from tools/perf/util/ui/progress.c)0
-rw-r--r--tools/perf/ui/progress.h (renamed from tools/perf/util/ui/progress.h)0
-rw-r--r--tools/perf/ui/setup.c45
-rw-r--r--tools/perf/ui/tui/setup.c (renamed from tools/perf/util/ui/setup.c)77
-rw-r--r--tools/perf/ui/ui.h (renamed from tools/perf/util/ui/ui.h)0
-rw-r--r--tools/perf/ui/util.c (renamed from tools/perf/util/ui/util.c)0
-rw-r--r--tools/perf/ui/util.h (renamed from tools/perf/util/ui/util.h)0
-rw-r--r--tools/perf/util/annotate.c599
-rw-r--r--tools/perf/util/annotate.h67
-rw-r--r--tools/perf/util/cache.h24
-rw-r--r--tools/perf/util/debug.c1
-rw-r--r--tools/perf/util/debug.h2
-rw-r--r--tools/perf/util/evlist.c16
-rw-r--r--tools/perf/util/evlist.h4
-rw-r--r--tools/perf/util/evsel.c11
-rw-r--r--tools/perf/util/header.c17
-rw-r--r--tools/perf/util/hist.c2
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/parse-events.c45
-rw-r--r--tools/perf/util/parse-events.h23
-rw-r--r--tools/perf/util/parse-events.y16
-rw-r--r--tools/perf/util/pmu.c70
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c16
-rw-r--r--tools/perf/util/session.c30
-rw-r--r--tools/perf/util/symbol.h5
-rw-r--r--tools/perf/util/target.c142
-rw-r--r--tools/perf/util/target.h65
-rw-r--r--tools/perf/util/thread_map.h2
-rw-r--r--tools/perf/util/top.c19
-rw-r--r--tools/perf/util/top.h6
-rw-r--r--tools/perf/util/trace-event-info.c4
-rw-r--r--tools/perf/util/trace-event-parse.c3142
-rw-r--r--tools/perf/util/trace-event-read.c44
-rw-r--r--tools/perf/util/trace-event.h269
-rw-r--r--tools/perf/util/ui/browsers/annotate.c433
-rw-r--r--tools/perf/util/usage.c38
-rw-r--r--tools/perf/util/util.c10
-rw-r--r--tools/perf/util/util.h5
-rw-r--r--tools/scripts/Makefile.include58
114 files changed, 13400 insertions, 4965 deletions
diff --git a/Makefile b/Makefile
index a6879630a3ea..0e7a44eaadd5 100644
--- a/Makefile
+++ b/Makefile
@@ -1471,6 +1471,13 @@ kernelrelease:
1471kernelversion: 1471kernelversion:
1472 @echo $(KERNELVERSION) 1472 @echo $(KERNELVERSION)
1473 1473
1474# Clear a bunch of variables before executing the submake
1475tools/: FORCE
1476 $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/
1477
1478tools/%: FORCE
1479 $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/ $*
1480
1474# Single targets 1481# Single targets
1475# --------------------------------------------------------------------------- 1482# ---------------------------------------------------------------------------
1476# Single targets are compatible with: 1483# Single targets are compatible with:
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 0dae252f7a33..d821b17047e0 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -824,7 +824,6 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
824 824
825 idx = la_ptr; 825 idx = la_ptr;
826 826
827 perf_sample_data_init(&data, 0);
828 for (j = 0; j < cpuc->n_events; j++) { 827 for (j = 0; j < cpuc->n_events; j++) {
829 if (cpuc->current_idx[j] == idx) 828 if (cpuc->current_idx[j] == idx)
830 break; 829 break;
@@ -848,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
848 847
849 hwc = &event->hw; 848 hwc = &event->hw;
850 alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); 849 alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
851 data.period = event->hw.last_period; 850 perf_sample_data_init(&data, 0, hwc->last_period);
852 851
853 if (alpha_perf_event_set_period(event, hwc, idx)) { 852 if (alpha_perf_event_set_period(event, hwc, idx)) {
854 if (perf_event_overflow(event, &data, regs)) { 853 if (perf_event_overflow(event, &data, regs)) {
diff --git a/arch/arm/configs/bcmring_defconfig b/arch/arm/configs/bcmring_defconfig
index 795374d48f81..9e6a8fe13164 100644
--- a/arch/arm/configs/bcmring_defconfig
+++ b/arch/arm/configs/bcmring_defconfig
@@ -11,7 +11,7 @@ CONFIG_KALLSYMS_EXTRA_PASS=y
11# CONFIG_TIMERFD is not set 11# CONFIG_TIMERFD is not set
12# CONFIG_EVENTFD is not set 12# CONFIG_EVENTFD is not set
13# CONFIG_AIO is not set 13# CONFIG_AIO is not set
14CONFIG_PERF_COUNTERS=y 14CONFIG_PERF_EVENTS=y
15# CONFIG_VM_EVENT_COUNTERS is not set 15# CONFIG_VM_EVENT_COUNTERS is not set
16# CONFIG_SLUB_DEBUG is not set 16# CONFIG_SLUB_DEBUG is not set
17# CONFIG_COMPAT_BRK is not set 17# CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index b78af0cc6ef3..ab627a740fa3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -489,8 +489,6 @@ armv6pmu_handle_irq(int irq_num,
489 */ 489 */
490 armv6_pmcr_write(pmcr); 490 armv6_pmcr_write(pmcr);
491 491
492 perf_sample_data_init(&data, 0);
493
494 cpuc = &__get_cpu_var(cpu_hw_events); 492 cpuc = &__get_cpu_var(cpu_hw_events);
495 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 493 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
496 struct perf_event *event = cpuc->events[idx]; 494 struct perf_event *event = cpuc->events[idx];
@@ -509,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
509 507
510 hwc = &event->hw; 508 hwc = &event->hw;
511 armpmu_event_update(event, hwc, idx); 509 armpmu_event_update(event, hwc, idx);
512 data.period = event->hw.last_period; 510 perf_sample_data_init(&data, 0, hwc->last_period);
513 if (!armpmu_event_set_period(event, hwc, idx)) 511 if (!armpmu_event_set_period(event, hwc, idx))
514 continue; 512 continue;
515 513
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 00755d82e2f2..d3c536068162 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1077,8 +1077,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1077 */ 1077 */
1078 regs = get_irq_regs(); 1078 regs = get_irq_regs();
1079 1079
1080 perf_sample_data_init(&data, 0);
1081
1082 cpuc = &__get_cpu_var(cpu_hw_events); 1080 cpuc = &__get_cpu_var(cpu_hw_events);
1083 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 1081 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1084 struct perf_event *event = cpuc->events[idx]; 1082 struct perf_event *event = cpuc->events[idx];
@@ -1097,7 +1095,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1097 1095
1098 hwc = &event->hw; 1096 hwc = &event->hw;
1099 armpmu_event_update(event, hwc, idx); 1097 armpmu_event_update(event, hwc, idx);
1100 data.period = event->hw.last_period; 1098 perf_sample_data_init(&data, 0, hwc->last_period);
1101 if (!armpmu_event_set_period(event, hwc, idx)) 1099 if (!armpmu_event_set_period(event, hwc, idx))
1102 continue; 1100 continue;
1103 1101
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 71a21e6712f5..e34e7254e652 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -248,8 +248,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
248 248
249 regs = get_irq_regs(); 249 regs = get_irq_regs();
250 250
251 perf_sample_data_init(&data, 0);
252
253 cpuc = &__get_cpu_var(cpu_hw_events); 251 cpuc = &__get_cpu_var(cpu_hw_events);
254 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 252 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
255 struct perf_event *event = cpuc->events[idx]; 253 struct perf_event *event = cpuc->events[idx];
@@ -263,7 +261,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
263 261
264 hwc = &event->hw; 262 hwc = &event->hw;
265 armpmu_event_update(event, hwc, idx); 263 armpmu_event_update(event, hwc, idx);
266 data.period = event->hw.last_period; 264 perf_sample_data_init(&data, 0, hwc->last_period);
267 if (!armpmu_event_set_period(event, hwc, idx)) 265 if (!armpmu_event_set_period(event, hwc, idx))
268 continue; 266 continue;
269 267
@@ -588,8 +586,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
588 586
589 regs = get_irq_regs(); 587 regs = get_irq_regs();
590 588
591 perf_sample_data_init(&data, 0);
592
593 cpuc = &__get_cpu_var(cpu_hw_events); 589 cpuc = &__get_cpu_var(cpu_hw_events);
594 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 590 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
595 struct perf_event *event = cpuc->events[idx]; 591 struct perf_event *event = cpuc->events[idx];
@@ -603,7 +599,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
603 599
604 hwc = &event->hw; 600 hwc = &event->hw;
605 armpmu_event_update(event, hwc, idx); 601 armpmu_event_update(event, hwc, idx);
606 data.period = event->hw.last_period; 602 perf_sample_data_init(&data, 0, hwc->last_period);
607 if (!armpmu_event_set_period(event, hwc, idx)) 603 if (!armpmu_event_set_period(event, hwc, idx))
608 continue; 604 continue;
609 605
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 811084f4e422..ab73fa2fb9b5 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1325,7 +1325,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
1325 1325
1326 regs = get_irq_regs(); 1326 regs = get_irq_regs();
1327 1327
1328 perf_sample_data_init(&data, 0); 1328 perf_sample_data_init(&data, 0, 0);
1329 1329
1330 switch (counters) { 1330 switch (counters) {
1331#define HANDLE_COUNTER(n) \ 1331#define HANDLE_COUNTER(n) \
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index f104ccde6b53..b1f9597fe312 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -32,7 +32,7 @@ CONFIG_RD_LZMA=y
32CONFIG_INITRAMFS_COMPRESSION_GZIP=y 32CONFIG_INITRAMFS_COMPRESSION_GZIP=y
33CONFIG_KALLSYMS_ALL=y 33CONFIG_KALLSYMS_ALL=y
34CONFIG_EMBEDDED=y 34CONFIG_EMBEDDED=y
35CONFIG_PERF_COUNTERS=y 35CONFIG_PERF_EVENTS=y
36CONFIG_PROFILING=y 36CONFIG_PROFILING=y
37CONFIG_OPROFILE=y 37CONFIG_OPROFILE=y
38CONFIG_KPROBES=y 38CONFIG_KPROBES=y
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index e74d3a483705..9ef2cc13e1b4 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
8# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 8# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
9CONFIG_EXPERT=y 9CONFIG_EXPERT=y
10# CONFIG_ELF_CORE is not set 10# CONFIG_ELF_CORE is not set
11CONFIG_PERF_COUNTERS=y 11CONFIG_PERF_EVENTS=y
12# CONFIG_VM_EVENT_COUNTERS is not set 12# CONFIG_VM_EVENT_COUNTERS is not set
13CONFIG_SLAB=y 13CONFIG_SLAB=y
14CONFIG_MODULES=y 14CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 175295fbf4f3..1e2b7d062aa4 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -9,7 +9,7 @@ CONFIG_BLK_DEV_INITRD=y
9# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 9# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
10CONFIG_EXPERT=y 10CONFIG_EXPERT=y
11# CONFIG_ELF_CORE is not set 11# CONFIG_ELF_CORE is not set
12CONFIG_PERF_COUNTERS=y 12CONFIG_PERF_EVENTS=y
13# CONFIG_VM_EVENT_COUNTERS is not set 13# CONFIG_VM_EVENT_COUNTERS is not set
14CONFIG_SLAB=y 14CONFIG_SLAB=y
15CONFIG_MODULES=y 15CONFIG_MODULES=y
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 02aee03e713c..8f84bcba18da 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1299,8 +1299,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1299 if (record) { 1299 if (record) {
1300 struct perf_sample_data data; 1300 struct perf_sample_data data;
1301 1301
1302 perf_sample_data_init(&data, ~0ULL); 1302 perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
1303 data.period = event->hw.last_period;
1304 1303
1305 if (event->attr.sample_type & PERF_SAMPLE_ADDR) 1304 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1306 perf_get_data_addr(regs, &data.addr); 1305 perf_get_data_addr(regs, &data.addr);
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 0a6d2a9d569c..106c53354675 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -613,8 +613,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
613 if (record) { 613 if (record) {
614 struct perf_sample_data data; 614 struct perf_sample_data data;
615 615
616 perf_sample_data_init(&data, 0); 616 perf_sample_data_init(&data, 0, event->hw.last_period);
617 data.period = event->hw.last_period;
618 617
619 if (perf_event_overflow(event, &data, regs)) 618 if (perf_event_overflow(event, &data, regs))
620 fsl_emb_pmu_stop(event, 0); 619 fsl_emb_pmu_stop(event, 0);
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index 7b9c696ac5e0..9bdcf72ec06a 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -5,7 +5,7 @@ CONFIG_BSD_PROCESS_ACCT=y
5CONFIG_IKCONFIG=y 5CONFIG_IKCONFIG=y
6CONFIG_IKCONFIG_PROC=y 6CONFIG_IKCONFIG_PROC=y
7CONFIG_LOG_BUF_SHIFT=16 7CONFIG_LOG_BUF_SHIFT=16
8CONFIG_PERF_COUNTERS=y 8CONFIG_PERF_EVENTS=y
9# CONFIG_COMPAT_BRK is not set 9# CONFIG_COMPAT_BRK is not set
10CONFIG_SLAB=y 10CONFIG_SLAB=y
11CONFIG_PROFILING=y 11CONFIG_PROFILING=y
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 3c1e85807403..9d8521b8c854 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
6CONFIG_LOG_BUF_SHIFT=18 6CONFIG_LOG_BUF_SHIFT=18
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_PERF_COUNTERS=y 8CONFIG_PERF_EVENTS=y
9# CONFIG_COMPAT_BRK is not set 9# CONFIG_COMPAT_BRK is not set
10CONFIG_SLAB=y 10CONFIG_SLAB=y
11CONFIG_PROFILING=y 11CONFIG_PROFILING=y
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 28559ce5eeb5..5713957dcb8a 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1296,8 +1296,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
1296 1296
1297 regs = args->regs; 1297 regs = args->regs;
1298 1298
1299 perf_sample_data_init(&data, 0);
1300
1301 cpuc = &__get_cpu_var(cpu_hw_events); 1299 cpuc = &__get_cpu_var(cpu_hw_events);
1302 1300
1303 /* If the PMU has the TOE IRQ enable bits, we need to do a 1301 /* If the PMU has the TOE IRQ enable bits, we need to do a
@@ -1321,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
1321 if (val & (1ULL << 31)) 1319 if (val & (1ULL << 31))
1322 continue; 1320 continue;
1323 1321
1324 data.period = event->hw.last_period; 1322 perf_sample_data_init(&data, 0, hwc->last_period);
1325 if (!sparc_perf_event_set_period(event, hwc, idx)) 1323 if (!sparc_perf_event_set_period(event, hwc, idx))
1326 continue; 1324 continue;
1327 1325
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2787fbec7aed..7b383d8da7b9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -40,7 +40,6 @@ config X86
40 select HAVE_FUNCTION_GRAPH_TRACER 40 select HAVE_FUNCTION_GRAPH_TRACER
41 select HAVE_FUNCTION_GRAPH_FP_TEST 41 select HAVE_FUNCTION_GRAPH_FP_TEST
42 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 42 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
43 select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
44 select HAVE_SYSCALL_TRACEPOINTS 43 select HAVE_SYSCALL_TRACEPOINTS
45 select HAVE_KVM 44 select HAVE_KVM
46 select HAVE_ARCH_KGDB 45 select HAVE_ARCH_KGDB
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 268c783ab1c0..18d9005d9e4f 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -34,6 +34,7 @@
34 34
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36extern void mcount(void); 36extern void mcount(void);
37extern int modifying_ftrace_code;
37 38
38static inline unsigned long ftrace_call_adjust(unsigned long addr) 39static inline unsigned long ftrace_call_adjust(unsigned long addr)
39{ 40{
@@ -50,6 +51,8 @@ struct dyn_arch_ftrace {
50 /* No extra data needed for x86 */ 51 /* No extra data needed for x86 */
51}; 52};
52 53
54int ftrace_int3_handler(struct pt_regs *regs);
55
53#endif /* CONFIG_DYNAMIC_FTRACE */ 56#endif /* CONFIG_DYNAMIC_FTRACE */
54#endif /* __ASSEMBLY__ */ 57#endif /* __ASSEMBLY__ */
55#endif /* CONFIG_FUNCTION_TRACER */ 58#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ccb805966f68..957ec87385af 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -134,6 +134,8 @@
134#define MSR_AMD64_IBSFETCHCTL 0xc0011030 134#define MSR_AMD64_IBSFETCHCTL 0xc0011030
135#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 135#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
136#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 136#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
137#define MSR_AMD64_IBSFETCH_REG_COUNT 3
138#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
137#define MSR_AMD64_IBSOPCTL 0xc0011033 139#define MSR_AMD64_IBSOPCTL 0xc0011033
138#define MSR_AMD64_IBSOPRIP 0xc0011034 140#define MSR_AMD64_IBSOPRIP 0xc0011034
139#define MSR_AMD64_IBSOPDATA 0xc0011035 141#define MSR_AMD64_IBSOPDATA 0xc0011035
@@ -141,8 +143,11 @@
141#define MSR_AMD64_IBSOPDATA3 0xc0011037 143#define MSR_AMD64_IBSOPDATA3 0xc0011037
142#define MSR_AMD64_IBSDCLINAD 0xc0011038 144#define MSR_AMD64_IBSDCLINAD 0xc0011038
143#define MSR_AMD64_IBSDCPHYSAD 0xc0011039 145#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
146#define MSR_AMD64_IBSOP_REG_COUNT 7
147#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
144#define MSR_AMD64_IBSCTL 0xc001103a 148#define MSR_AMD64_IBSCTL 0xc001103a
145#define MSR_AMD64_IBSBRTARGET 0xc001103b 149#define MSR_AMD64_IBSBRTARGET 0xc001103b
150#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
146 151
147/* Fam 15h MSRs */ 152/* Fam 15h MSRs */
148#define MSR_F15H_PERF_CTL 0xc0010200 153#define MSR_F15H_PERF_CTL 0xc0010200
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 2291895b1836..588f52ea810e 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -158,6 +158,7 @@ struct x86_pmu_capability {
158#define IBS_CAPS_OPCNT (1U<<4) 158#define IBS_CAPS_OPCNT (1U<<4)
159#define IBS_CAPS_BRNTRGT (1U<<5) 159#define IBS_CAPS_BRNTRGT (1U<<5)
160#define IBS_CAPS_OPCNTEXT (1U<<6) 160#define IBS_CAPS_OPCNTEXT (1U<<6)
161#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
161 162
162#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \ 163#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
163 | IBS_CAPS_FETCHSAM \ 164 | IBS_CAPS_FETCHSAM \
@@ -170,21 +171,28 @@ struct x86_pmu_capability {
170#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) 171#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
171#define IBSCTL_LVT_OFFSET_MASK 0x0F 172#define IBSCTL_LVT_OFFSET_MASK 0x0F
172 173
173/* IbsFetchCtl bits/masks */ 174/* ibs fetch bits/masks */
174#define IBS_FETCH_RAND_EN (1ULL<<57) 175#define IBS_FETCH_RAND_EN (1ULL<<57)
175#define IBS_FETCH_VAL (1ULL<<49) 176#define IBS_FETCH_VAL (1ULL<<49)
176#define IBS_FETCH_ENABLE (1ULL<<48) 177#define IBS_FETCH_ENABLE (1ULL<<48)
177#define IBS_FETCH_CNT 0xFFFF0000ULL 178#define IBS_FETCH_CNT 0xFFFF0000ULL
178#define IBS_FETCH_MAX_CNT 0x0000FFFFULL 179#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
179 180
180/* IbsOpCtl bits */ 181/* ibs op bits/masks */
182/* lower 4 bits of the current count are ignored: */
183#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
181#define IBS_OP_CNT_CTL (1ULL<<19) 184#define IBS_OP_CNT_CTL (1ULL<<19)
182#define IBS_OP_VAL (1ULL<<18) 185#define IBS_OP_VAL (1ULL<<18)
183#define IBS_OP_ENABLE (1ULL<<17) 186#define IBS_OP_ENABLE (1ULL<<17)
184#define IBS_OP_MAX_CNT 0x0000FFFFULL 187#define IBS_OP_MAX_CNT 0x0000FFFFULL
185#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */ 188#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
189#define IBS_RIP_INVALID (1ULL<<38)
186 190
191#ifdef CONFIG_X86_LOCAL_APIC
187extern u32 get_ibs_caps(void); 192extern u32 get_ibs_caps(void);
193#else
194static inline u32 get_ibs_caps(void) { return 0; }
195#endif
188 196
189#ifdef CONFIG_PERF_EVENTS 197#ifdef CONFIG_PERF_EVENTS
190extern void perf_events_lapic_init(void); 198extern void perf_events_lapic_init(void);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index bb8e03407e18..e049d6da0183 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -484,9 +484,6 @@ static int __x86_pmu_event_init(struct perf_event *event)
484 484
485 /* mark unused */ 485 /* mark unused */
486 event->hw.extra_reg.idx = EXTRA_REG_NONE; 486 event->hw.extra_reg.idx = EXTRA_REG_NONE;
487
488 /* mark not used */
489 event->hw.extra_reg.idx = EXTRA_REG_NONE;
490 event->hw.branch_reg.idx = EXTRA_REG_NONE; 487 event->hw.branch_reg.idx = EXTRA_REG_NONE;
491 488
492 return x86_pmu.hw_config(event); 489 return x86_pmu.hw_config(event);
@@ -1186,8 +1183,6 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
1186 int idx, handled = 0; 1183 int idx, handled = 0;
1187 u64 val; 1184 u64 val;
1188 1185
1189 perf_sample_data_init(&data, 0);
1190
1191 cpuc = &__get_cpu_var(cpu_hw_events); 1186 cpuc = &__get_cpu_var(cpu_hw_events);
1192 1187
1193 /* 1188 /*
@@ -1222,7 +1217,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
1222 * event overflow 1217 * event overflow
1223 */ 1218 */
1224 handled++; 1219 handled++;
1225 data.period = event->hw.last_period; 1220 perf_sample_data_init(&data, 0, event->hw.last_period);
1226 1221
1227 if (!x86_perf_event_set_period(event)) 1222 if (!x86_perf_event_set_period(event))
1228 continue; 1223 continue;
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 95e7fe1c5f0b..65652265fffd 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -134,8 +134,13 @@ static u64 amd_pmu_event_map(int hw_event)
134 134
135static int amd_pmu_hw_config(struct perf_event *event) 135static int amd_pmu_hw_config(struct perf_event *event)
136{ 136{
137 int ret = x86_pmu_hw_config(event); 137 int ret;
138 138
139 /* pass precise event sampling to ibs: */
140 if (event->attr.precise_ip && get_ibs_caps())
141 return -ENOENT;
142
143 ret = x86_pmu_hw_config(event);
139 if (ret) 144 if (ret)
140 return ret; 145 return ret;
141 146
@@ -205,10 +210,8 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
205 * when we come here 210 * when we come here
206 */ 211 */
207 for (i = 0; i < x86_pmu.num_counters; i++) { 212 for (i = 0; i < x86_pmu.num_counters; i++) {
208 if (nb->owners[i] == event) { 213 if (cmpxchg(nb->owners + i, event, NULL) == event)
209 cmpxchg(nb->owners+i, event, NULL);
210 break; 214 break;
211 }
212 } 215 }
213} 216}
214 217
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 3b8a2d30d14e..da9bcdcd9856 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -9,6 +9,7 @@
9#include <linux/perf_event.h> 9#include <linux/perf_event.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/ptrace.h>
12 13
13#include <asm/apic.h> 14#include <asm/apic.h>
14 15
@@ -16,36 +17,591 @@ static u32 ibs_caps;
16 17
17#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) 18#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
18 19
19static struct pmu perf_ibs; 20#include <linux/kprobes.h>
21#include <linux/hardirq.h>
22
23#include <asm/nmi.h>
24
25#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
26#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
27
28enum ibs_states {
29 IBS_ENABLED = 0,
30 IBS_STARTED = 1,
31 IBS_STOPPING = 2,
32
33 IBS_MAX_STATES,
34};
35
36struct cpu_perf_ibs {
37 struct perf_event *event;
38 unsigned long state[BITS_TO_LONGS(IBS_MAX_STATES)];
39};
40
41struct perf_ibs {
42 struct pmu pmu;
43 unsigned int msr;
44 u64 config_mask;
45 u64 cnt_mask;
46 u64 enable_mask;
47 u64 valid_mask;
48 u64 max_period;
49 unsigned long offset_mask[1];
50 int offset_max;
51 struct cpu_perf_ibs __percpu *pcpu;
52 u64 (*get_count)(u64 config);
53};
54
55struct perf_ibs_data {
56 u32 size;
57 union {
58 u32 data[0]; /* data buffer starts here */
59 u32 caps;
60 };
61 u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
62};
63
64static int
65perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
66{
67 s64 left = local64_read(&hwc->period_left);
68 s64 period = hwc->sample_period;
69 int overflow = 0;
70
71 /*
72 * If we are way outside a reasonable range then just skip forward:
73 */
74 if (unlikely(left <= -period)) {
75 left = period;
76 local64_set(&hwc->period_left, left);
77 hwc->last_period = period;
78 overflow = 1;
79 }
80
81 if (unlikely(left < (s64)min)) {
82 left += period;
83 local64_set(&hwc->period_left, left);
84 hwc->last_period = period;
85 overflow = 1;
86 }
87
88 /*
89 * If the hw period that triggers the sw overflow is too short
90 * we might hit the irq handler. This biases the results.
91 * Thus we shorten the next-to-last period and set the last
92 * period to the max period.
93 */
94 if (left > max) {
95 left -= max;
96 if (left > max)
97 left = max;
98 else if (left < min)
99 left = min;
100 }
101
102 *hw_period = (u64)left;
103
104 return overflow;
105}
106
107static int
108perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
109{
110 struct hw_perf_event *hwc = &event->hw;
111 int shift = 64 - width;
112 u64 prev_raw_count;
113 u64 delta;
114
115 /*
116 * Careful: an NMI might modify the previous event value.
117 *
118 * Our tactic to handle this is to first atomically read and
119 * exchange a new raw count - then add that new-prev delta
120 * count to the generic event atomically:
121 */
122 prev_raw_count = local64_read(&hwc->prev_count);
123 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
124 new_raw_count) != prev_raw_count)
125 return 0;
126
127 /*
128 * Now we have the new raw value and have updated the prev
129 * timestamp already. We can now calculate the elapsed delta
130 * (event-)time and add that to the generic event.
131 *
132 * Careful, not all hw sign-extends above the physical width
133 * of the count.
134 */
135 delta = (new_raw_count << shift) - (prev_raw_count << shift);
136 delta >>= shift;
137
138 local64_add(delta, &event->count);
139 local64_sub(delta, &hwc->period_left);
140
141 return 1;
142}
143
144static struct perf_ibs perf_ibs_fetch;
145static struct perf_ibs perf_ibs_op;
146
147static struct perf_ibs *get_ibs_pmu(int type)
148{
149 if (perf_ibs_fetch.pmu.type == type)
150 return &perf_ibs_fetch;
151 if (perf_ibs_op.pmu.type == type)
152 return &perf_ibs_op;
153 return NULL;
154}
155
156/*
157 * Use IBS for precise event sampling:
158 *
159 * perf record -a -e cpu-cycles:p ... # use ibs op counting cycle count
160 * perf record -a -e r076:p ... # same as -e cpu-cycles:p
161 * perf record -a -e r0C1:p ... # use ibs op counting micro-ops
162 *
163 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
164 * MSRC001_1033) is used to select either cycle or micro-ops counting
165 * mode.
166 *
167 * The rip of IBS samples has skid 0. Thus, IBS supports precise
168 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
169 * rip is invalid when IBS was not able to record the rip correctly.
170 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
171 *
172 */
173static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
174{
175 switch (event->attr.precise_ip) {
176 case 0:
177 return -ENOENT;
178 case 1:
179 case 2:
180 break;
181 default:
182 return -EOPNOTSUPP;
183 }
184
185 switch (event->attr.type) {
186 case PERF_TYPE_HARDWARE:
187 switch (event->attr.config) {
188 case PERF_COUNT_HW_CPU_CYCLES:
189 *config = 0;
190 return 0;
191 }
192 break;
193 case PERF_TYPE_RAW:
194 switch (event->attr.config) {
195 case 0x0076:
196 *config = 0;
197 return 0;
198 case 0x00C1:
199 *config = IBS_OP_CNT_CTL;
200 return 0;
201 }
202 break;
203 default:
204 return -ENOENT;
205 }
206
207 return -EOPNOTSUPP;
208}
20 209
21static int perf_ibs_init(struct perf_event *event) 210static int perf_ibs_init(struct perf_event *event)
22{ 211{
23 if (perf_ibs.type != event->attr.type) 212 struct hw_perf_event *hwc = &event->hw;
213 struct perf_ibs *perf_ibs;
214 u64 max_cnt, config;
215 int ret;
216
217 perf_ibs = get_ibs_pmu(event->attr.type);
218 if (perf_ibs) {
219 config = event->attr.config;
220 } else {
221 perf_ibs = &perf_ibs_op;
222 ret = perf_ibs_precise_event(event, &config);
223 if (ret)
224 return ret;
225 }
226
227 if (event->pmu != &perf_ibs->pmu)
24 return -ENOENT; 228 return -ENOENT;
229
230 if (config & ~perf_ibs->config_mask)
231 return -EINVAL;
232
233 if (hwc->sample_period) {
234 if (config & perf_ibs->cnt_mask)
235 /* raw max_cnt may not be set */
236 return -EINVAL;
237 if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
238 /*
239 * lower 4 bits can not be set in ibs max cnt,
240 * but allowing it in case we adjust the
241 * sample period to set a frequency.
242 */
243 return -EINVAL;
244 hwc->sample_period &= ~0x0FULL;
245 if (!hwc->sample_period)
246 hwc->sample_period = 0x10;
247 } else {
248 max_cnt = config & perf_ibs->cnt_mask;
249 config &= ~perf_ibs->cnt_mask;
250 event->attr.sample_period = max_cnt << 4;
251 hwc->sample_period = event->attr.sample_period;
252 }
253
254 if (!hwc->sample_period)
255 return -EINVAL;
256
257 /*
258 * If we modify hwc->sample_period, we also need to update
259 * hwc->last_period and hwc->period_left.
260 */
261 hwc->last_period = hwc->sample_period;
262 local64_set(&hwc->period_left, hwc->sample_period);
263
264 hwc->config_base = perf_ibs->msr;
265 hwc->config = config;
266
25 return 0; 267 return 0;
26} 268}
27 269
270static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
271 struct hw_perf_event *hwc, u64 *period)
272{
273 int overflow;
274
275 /* ignore lower 4 bits in min count: */
276 overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
277 local64_set(&hwc->prev_count, 0);
278
279 return overflow;
280}
281
282static u64 get_ibs_fetch_count(u64 config)
283{
284 return (config & IBS_FETCH_CNT) >> 12;
285}
286
287static u64 get_ibs_op_count(u64 config)
288{
289 u64 count = 0;
290
291 if (config & IBS_OP_VAL)
292 count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */
293
294 if (ibs_caps & IBS_CAPS_RDWROPCNT)
295 count += (config & IBS_OP_CUR_CNT) >> 32;
296
297 return count;
298}
299
300static void
301perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
302 u64 *config)
303{
304 u64 count = perf_ibs->get_count(*config);
305
306 /*
307 * Set width to 64 since we do not overflow on max width but
308 * instead on max count. In perf_ibs_set_period() we clear
309 * prev count manually on overflow.
310 */
311 while (!perf_event_try_update(event, count, 64)) {
312 rdmsrl(event->hw.config_base, *config);
313 count = perf_ibs->get_count(*config);
314 }
315}
316
317static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
318 struct hw_perf_event *hwc, u64 config)
319{
320 wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
321}
322
323/*
324 * Erratum #420 Instruction-Based Sampling Engine May Generate
325 * Interrupt that Cannot Be Cleared:
326 *
327 * Must clear counter mask first, then clear the enable bit. See
328 * Revision Guide for AMD Family 10h Processors, Publication #41322.
329 */
330static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
331 struct hw_perf_event *hwc, u64 config)
332{
333 config &= ~perf_ibs->cnt_mask;
334 wrmsrl(hwc->config_base, config);
335 config &= ~perf_ibs->enable_mask;
336 wrmsrl(hwc->config_base, config);
337}
338
339/*
340 * We cannot restore the ibs pmu state, so we always needs to update
341 * the event while stopping it and then reset the state when starting
342 * again. Thus, ignoring PERF_EF_RELOAD and PERF_EF_UPDATE flags in
343 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
344 */
345static void perf_ibs_start(struct perf_event *event, int flags)
346{
347 struct hw_perf_event *hwc = &event->hw;
348 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
349 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
350 u64 period;
351
352 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
353 return;
354
355 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
356 hwc->state = 0;
357
358 perf_ibs_set_period(perf_ibs, hwc, &period);
359 set_bit(IBS_STARTED, pcpu->state);
360 perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
361
362 perf_event_update_userpage(event);
363}
364
365static void perf_ibs_stop(struct perf_event *event, int flags)
366{
367 struct hw_perf_event *hwc = &event->hw;
368 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
369 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
370 u64 config;
371 int stopping;
372
373 stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);
374
375 if (!stopping && (hwc->state & PERF_HES_UPTODATE))
376 return;
377
378 rdmsrl(hwc->config_base, config);
379
380 if (stopping) {
381 set_bit(IBS_STOPPING, pcpu->state);
382 perf_ibs_disable_event(perf_ibs, hwc, config);
383 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
384 hwc->state |= PERF_HES_STOPPED;
385 }
386
387 if (hwc->state & PERF_HES_UPTODATE)
388 return;
389
390 /*
391 * Clear valid bit to not count rollovers on update, rollovers
392 * are only updated in the irq handler.
393 */
394 config &= ~perf_ibs->valid_mask;
395
396 perf_ibs_event_update(perf_ibs, event, &config);
397 hwc->state |= PERF_HES_UPTODATE;
398}
399
28static int perf_ibs_add(struct perf_event *event, int flags) 400static int perf_ibs_add(struct perf_event *event, int flags)
29{ 401{
402 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
403 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
404
405 if (test_and_set_bit(IBS_ENABLED, pcpu->state))
406 return -ENOSPC;
407
408 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
409
410 pcpu->event = event;
411
412 if (flags & PERF_EF_START)
413 perf_ibs_start(event, PERF_EF_RELOAD);
414
30 return 0; 415 return 0;
31} 416}
32 417
33static void perf_ibs_del(struct perf_event *event, int flags) 418static void perf_ibs_del(struct perf_event *event, int flags)
34{ 419{
420 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
421 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
422
423 if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
424 return;
425
426 perf_ibs_stop(event, PERF_EF_UPDATE);
427
428 pcpu->event = NULL;
429
430 perf_event_update_userpage(event);
35} 431}
36 432
37static struct pmu perf_ibs = { 433static void perf_ibs_read(struct perf_event *event) { }
38 .event_init= perf_ibs_init, 434
39 .add= perf_ibs_add, 435static struct perf_ibs perf_ibs_fetch = {
40 .del= perf_ibs_del, 436 .pmu = {
437 .task_ctx_nr = perf_invalid_context,
438
439 .event_init = perf_ibs_init,
440 .add = perf_ibs_add,
441 .del = perf_ibs_del,
442 .start = perf_ibs_start,
443 .stop = perf_ibs_stop,
444 .read = perf_ibs_read,
445 },
446 .msr = MSR_AMD64_IBSFETCHCTL,
447 .config_mask = IBS_FETCH_CONFIG_MASK,
448 .cnt_mask = IBS_FETCH_MAX_CNT,
449 .enable_mask = IBS_FETCH_ENABLE,
450 .valid_mask = IBS_FETCH_VAL,
451 .max_period = IBS_FETCH_MAX_CNT << 4,
452 .offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
453 .offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
454
455 .get_count = get_ibs_fetch_count,
41}; 456};
42 457
458static struct perf_ibs perf_ibs_op = {
459 .pmu = {
460 .task_ctx_nr = perf_invalid_context,
461
462 .event_init = perf_ibs_init,
463 .add = perf_ibs_add,
464 .del = perf_ibs_del,
465 .start = perf_ibs_start,
466 .stop = perf_ibs_stop,
467 .read = perf_ibs_read,
468 },
469 .msr = MSR_AMD64_IBSOPCTL,
470 .config_mask = IBS_OP_CONFIG_MASK,
471 .cnt_mask = IBS_OP_MAX_CNT,
472 .enable_mask = IBS_OP_ENABLE,
473 .valid_mask = IBS_OP_VAL,
474 .max_period = IBS_OP_MAX_CNT << 4,
475 .offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
476 .offset_max = MSR_AMD64_IBSOP_REG_COUNT,
477
478 .get_count = get_ibs_op_count,
479};
480
481static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
482{
483 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
484 struct perf_event *event = pcpu->event;
485 struct hw_perf_event *hwc = &event->hw;
486 struct perf_sample_data data;
487 struct perf_raw_record raw;
488 struct pt_regs regs;
489 struct perf_ibs_data ibs_data;
490 int offset, size, check_rip, offset_max, throttle = 0;
491 unsigned int msr;
492 u64 *buf, *config, period;
493
494 if (!test_bit(IBS_STARTED, pcpu->state)) {
495 /*
496 * Catch spurious interrupts after stopping IBS: After
497 * disabling IBS there could be still incomming NMIs
498 * with samples that even have the valid bit cleared.
499 * Mark all this NMIs as handled.
500 */
501 return test_and_clear_bit(IBS_STOPPING, pcpu->state) ? 1 : 0;
502 }
503
504 msr = hwc->config_base;
505 buf = ibs_data.regs;
506 rdmsrl(msr, *buf);
507 if (!(*buf++ & perf_ibs->valid_mask))
508 return 0;
509
510 config = &ibs_data.regs[0];
511 perf_ibs_event_update(perf_ibs, event, config);
512 perf_sample_data_init(&data, 0, hwc->last_period);
513 if (!perf_ibs_set_period(perf_ibs, hwc, &period))
514 goto out; /* no sw counter overflow */
515
516 ibs_data.caps = ibs_caps;
517 size = 1;
518 offset = 1;
519 check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
520 if (event->attr.sample_type & PERF_SAMPLE_RAW)
521 offset_max = perf_ibs->offset_max;
522 else if (check_rip)
523 offset_max = 2;
524 else
525 offset_max = 1;
526 do {
527 rdmsrl(msr + offset, *buf++);
528 size++;
529 offset = find_next_bit(perf_ibs->offset_mask,
530 perf_ibs->offset_max,
531 offset + 1);
532 } while (offset < offset_max);
533 ibs_data.size = sizeof(u64) * size;
534
535 regs = *iregs;
536 if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
537 regs.flags &= ~PERF_EFLAGS_EXACT;
538 } else {
539 instruction_pointer_set(&regs, ibs_data.regs[1]);
540 regs.flags |= PERF_EFLAGS_EXACT;
541 }
542
543 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
544 raw.size = sizeof(u32) + ibs_data.size;
545 raw.data = ibs_data.data;
546 data.raw = &raw;
547 }
548
549 throttle = perf_event_overflow(event, &data, &regs);
550out:
551 if (throttle)
552 perf_ibs_disable_event(perf_ibs, hwc, *config);
553 else
554 perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
555
556 perf_event_update_userpage(event);
557
558 return 1;
559}
560
561static int __kprobes
562perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
563{
564 int handled = 0;
565
566 handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
567 handled += perf_ibs_handle_irq(&perf_ibs_op, regs);
568
569 if (handled)
570 inc_irq_stat(apic_perf_irqs);
571
572 return handled;
573}
574
575static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
576{
577 struct cpu_perf_ibs __percpu *pcpu;
578 int ret;
579
580 pcpu = alloc_percpu(struct cpu_perf_ibs);
581 if (!pcpu)
582 return -ENOMEM;
583
584 perf_ibs->pcpu = pcpu;
585
586 ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
587 if (ret) {
588 perf_ibs->pcpu = NULL;
589 free_percpu(pcpu);
590 }
591
592 return ret;
593}
594
43static __init int perf_event_ibs_init(void) 595static __init int perf_event_ibs_init(void)
44{ 596{
45 if (!ibs_caps) 597 if (!ibs_caps)
46 return -ENODEV; /* ibs not supported by the cpu */ 598 return -ENODEV; /* ibs not supported by the cpu */
47 599
48 perf_pmu_register(&perf_ibs, "ibs", -1); 600 perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
601 if (ibs_caps & IBS_CAPS_OPCNT)
602 perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
603 perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
604 register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
49 printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps); 605 printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
50 606
51 return 0; 607 return 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 26b3e2fef104..166546ec6aef 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1027,8 +1027,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1027 u64 status; 1027 u64 status;
1028 int handled; 1028 int handled;
1029 1029
1030 perf_sample_data_init(&data, 0);
1031
1032 cpuc = &__get_cpu_var(cpu_hw_events); 1030 cpuc = &__get_cpu_var(cpu_hw_events);
1033 1031
1034 /* 1032 /*
@@ -1082,7 +1080,7 @@ again:
1082 if (!intel_pmu_save_and_restart(event)) 1080 if (!intel_pmu_save_and_restart(event))
1083 continue; 1081 continue;
1084 1082
1085 data.period = event->hw.last_period; 1083 perf_sample_data_init(&data, 0, event->hw.last_period);
1086 1084
1087 if (has_branch_stack(event)) 1085 if (has_branch_stack(event))
1088 data.br_stack = &cpuc->lbr_stack; 1086 data.br_stack = &cpuc->lbr_stack;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 7f64df19e7dd..5a3edc27f6e5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -316,8 +316,7 @@ int intel_pmu_drain_bts_buffer(void)
316 316
317 ds->bts_index = ds->bts_buffer_base; 317 ds->bts_index = ds->bts_buffer_base;
318 318
319 perf_sample_data_init(&data, 0); 319 perf_sample_data_init(&data, 0, event->hw.last_period);
320 data.period = event->hw.last_period;
321 regs.ip = 0; 320 regs.ip = 0;
322 321
323 /* 322 /*
@@ -564,8 +563,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
564 if (!intel_pmu_save_and_restart(event)) 563 if (!intel_pmu_save_and_restart(event))
565 return; 564 return;
566 565
567 perf_sample_data_init(&data, 0); 566 perf_sample_data_init(&data, 0, event->hw.last_period);
568 data.period = event->hw.last_period;
569 567
570 /* 568 /*
571 * We use the interrupt regs as a base because the PEBS record 569 * We use the interrupt regs as a base because the PEBS record
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index a2dfacfd7103..47124a73dd73 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1005,8 +1005,6 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
1005 int idx, handled = 0; 1005 int idx, handled = 0;
1006 u64 val; 1006 u64 val;
1007 1007
1008 perf_sample_data_init(&data, 0);
1009
1010 cpuc = &__get_cpu_var(cpu_hw_events); 1008 cpuc = &__get_cpu_var(cpu_hw_events);
1011 1009
1012 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1010 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1034,10 +1032,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
1034 handled += overflow; 1032 handled += overflow;
1035 1033
1036 /* event overflow for sure */ 1034 /* event overflow for sure */
1037 data.period = event->hw.last_period; 1035 perf_sample_data_init(&data, 0, hwc->last_period);
1038 1036
1039 if (!x86_perf_event_set_period(event)) 1037 if (!x86_perf_event_set_period(event))
1040 continue; 1038 continue;
1039
1040
1041 if (perf_event_overflow(event, &data, regs)) 1041 if (perf_event_overflow(event, &data, regs))
1042 x86_pmu_stop(event, 0); 1042 x86_pmu_stop(event, 0);
1043 } 1043 }
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index c9a281f272fd..32ff36596ab1 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -24,40 +24,21 @@
24#include <trace/syscall.h> 24#include <trace/syscall.h>
25 25
26#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
27#include <asm/kprobes.h>
27#include <asm/ftrace.h> 28#include <asm/ftrace.h>
28#include <asm/nops.h> 29#include <asm/nops.h>
29#include <asm/nmi.h>
30
31 30
32#ifdef CONFIG_DYNAMIC_FTRACE 31#ifdef CONFIG_DYNAMIC_FTRACE
33 32
34/*
35 * modifying_code is set to notify NMIs that they need to use
36 * memory barriers when entering or exiting. But we don't want
37 * to burden NMIs with unnecessary memory barriers when code
38 * modification is not being done (which is most of the time).
39 *
40 * A mutex is already held when ftrace_arch_code_modify_prepare
41 * and post_process are called. No locks need to be taken here.
42 *
43 * Stop machine will make sure currently running NMIs are done
44 * and new NMIs will see the updated variable before we need
45 * to worry about NMIs doing memory barriers.
46 */
47static int modifying_code __read_mostly;
48static DEFINE_PER_CPU(int, save_modifying_code);
49
50int ftrace_arch_code_modify_prepare(void) 33int ftrace_arch_code_modify_prepare(void)
51{ 34{
52 set_kernel_text_rw(); 35 set_kernel_text_rw();
53 set_all_modules_text_rw(); 36 set_all_modules_text_rw();
54 modifying_code = 1;
55 return 0; 37 return 0;
56} 38}
57 39
58int ftrace_arch_code_modify_post_process(void) 40int ftrace_arch_code_modify_post_process(void)
59{ 41{
60 modifying_code = 0;
61 set_all_modules_text_ro(); 42 set_all_modules_text_ro();
62 set_kernel_text_ro(); 43 set_kernel_text_ro();
63 return 0; 44 return 0;
@@ -90,134 +71,6 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
90 return calc.code; 71 return calc.code;
91} 72}
92 73
93/*
94 * Modifying code must take extra care. On an SMP machine, if
95 * the code being modified is also being executed on another CPU
96 * that CPU will have undefined results and possibly take a GPF.
97 * We use kstop_machine to stop other CPUS from exectuing code.
98 * But this does not stop NMIs from happening. We still need
99 * to protect against that. We separate out the modification of
100 * the code to take care of this.
101 *
102 * Two buffers are added: An IP buffer and a "code" buffer.
103 *
104 * 1) Put the instruction pointer into the IP buffer
105 * and the new code into the "code" buffer.
106 * 2) Wait for any running NMIs to finish and set a flag that says
107 * we are modifying code, it is done in an atomic operation.
108 * 3) Write the code
109 * 4) clear the flag.
110 * 5) Wait for any running NMIs to finish.
111 *
112 * If an NMI is executed, the first thing it does is to call
113 * "ftrace_nmi_enter". This will check if the flag is set to write
114 * and if it is, it will write what is in the IP and "code" buffers.
115 *
116 * The trick is, it does not matter if everyone is writing the same
117 * content to the code location. Also, if a CPU is executing code
118 * it is OK to write to that code location if the contents being written
119 * are the same as what exists.
120 */
121
122#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
123static atomic_t nmi_running = ATOMIC_INIT(0);
124static int mod_code_status; /* holds return value of text write */
125static void *mod_code_ip; /* holds the IP to write to */
126static const void *mod_code_newcode; /* holds the text to write to the IP */
127
128static unsigned nmi_wait_count;
129static atomic_t nmi_update_count = ATOMIC_INIT(0);
130
131int ftrace_arch_read_dyn_info(char *buf, int size)
132{
133 int r;
134
135 r = snprintf(buf, size, "%u %u",
136 nmi_wait_count,
137 atomic_read(&nmi_update_count));
138 return r;
139}
140
141static void clear_mod_flag(void)
142{
143 int old = atomic_read(&nmi_running);
144
145 for (;;) {
146 int new = old & ~MOD_CODE_WRITE_FLAG;
147
148 if (old == new)
149 break;
150
151 old = atomic_cmpxchg(&nmi_running, old, new);
152 }
153}
154
155static void ftrace_mod_code(void)
156{
157 /*
158 * Yes, more than one CPU process can be writing to mod_code_status.
159 * (and the code itself)
160 * But if one were to fail, then they all should, and if one were
161 * to succeed, then they all should.
162 */
163 mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
164 MCOUNT_INSN_SIZE);
165
166 /* if we fail, then kill any new writers */
167 if (mod_code_status)
168 clear_mod_flag();
169}
170
171void ftrace_nmi_enter(void)
172{
173 __this_cpu_write(save_modifying_code, modifying_code);
174
175 if (!__this_cpu_read(save_modifying_code))
176 return;
177
178 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
179 smp_rmb();
180 ftrace_mod_code();
181 atomic_inc(&nmi_update_count);
182 }
183 /* Must have previous changes seen before executions */
184 smp_mb();
185}
186
187void ftrace_nmi_exit(void)
188{
189 if (!__this_cpu_read(save_modifying_code))
190 return;
191
192 /* Finish all executions before clearing nmi_running */
193 smp_mb();
194 atomic_dec(&nmi_running);
195}
196
197static void wait_for_nmi_and_set_mod_flag(void)
198{
199 if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
200 return;
201
202 do {
203 cpu_relax();
204 } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
205
206 nmi_wait_count++;
207}
208
209static void wait_for_nmi(void)
210{
211 if (!atomic_read(&nmi_running))
212 return;
213
214 do {
215 cpu_relax();
216 } while (atomic_read(&nmi_running));
217
218 nmi_wait_count++;
219}
220
221static inline int 74static inline int
222within(unsigned long addr, unsigned long start, unsigned long end) 75within(unsigned long addr, unsigned long start, unsigned long end)
223{ 76{
@@ -238,26 +91,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
238 if (within(ip, (unsigned long)_text, (unsigned long)_etext)) 91 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
239 ip = (unsigned long)__va(__pa(ip)); 92 ip = (unsigned long)__va(__pa(ip));
240 93
241 mod_code_ip = (void *)ip; 94 return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
242 mod_code_newcode = new_code;
243
244 /* The buffers need to be visible before we let NMIs write them */
245 smp_mb();
246
247 wait_for_nmi_and_set_mod_flag();
248
249 /* Make sure all running NMIs have finished before we write the code */
250 smp_mb();
251
252 ftrace_mod_code();
253
254 /* Make sure the write happens before clearing the bit */
255 smp_mb();
256
257 clear_mod_flag();
258 wait_for_nmi();
259
260 return mod_code_status;
261} 95}
262 96
263static const unsigned char *ftrace_nop_replace(void) 97static const unsigned char *ftrace_nop_replace(void)
@@ -334,6 +168,336 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
334 return ret; 168 return ret;
335} 169}
336 170
171int modifying_ftrace_code __read_mostly;
172
173/*
174 * A breakpoint was added to the code address we are about to
175 * modify, and this is the handle that will just skip over it.
176 * We are either changing a nop into a trace call, or a trace
177 * call to a nop. While the change is taking place, we treat
178 * it just like it was a nop.
179 */
180int ftrace_int3_handler(struct pt_regs *regs)
181{
182 if (WARN_ON_ONCE(!regs))
183 return 0;
184
185 if (!ftrace_location(regs->ip - 1))
186 return 0;
187
188 regs->ip += MCOUNT_INSN_SIZE - 1;
189
190 return 1;
191}
192
193static int ftrace_write(unsigned long ip, const char *val, int size)
194{
195 /*
196 * On x86_64, kernel text mappings are mapped read-only with
197 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
198 * of the kernel text mapping to modify the kernel text.
199 *
200 * For 32bit kernels, these mappings are same and we can use
201 * kernel identity mapping to modify code.
202 */
203 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
204 ip = (unsigned long)__va(__pa(ip));
205
206 return probe_kernel_write((void *)ip, val, size);
207}
208
209static int add_break(unsigned long ip, const char *old)
210{
211 unsigned char replaced[MCOUNT_INSN_SIZE];
212 unsigned char brk = BREAKPOINT_INSTRUCTION;
213
214 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
215 return -EFAULT;
216
217 /* Make sure it is what we expect it to be */
218 if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
219 return -EINVAL;
220
221 if (ftrace_write(ip, &brk, 1))
222 return -EPERM;
223
224 return 0;
225}
226
227static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
228{
229 unsigned const char *old;
230 unsigned long ip = rec->ip;
231
232 old = ftrace_call_replace(ip, addr);
233
234 return add_break(rec->ip, old);
235}
236
237
238static int add_brk_on_nop(struct dyn_ftrace *rec)
239{
240 unsigned const char *old;
241
242 old = ftrace_nop_replace();
243
244 return add_break(rec->ip, old);
245}
246
247static int add_breakpoints(struct dyn_ftrace *rec, int enable)
248{
249 unsigned long ftrace_addr;
250 int ret;
251
252 ret = ftrace_test_record(rec, enable);
253
254 ftrace_addr = (unsigned long)FTRACE_ADDR;
255
256 switch (ret) {
257 case FTRACE_UPDATE_IGNORE:
258 return 0;
259
260 case FTRACE_UPDATE_MAKE_CALL:
261 /* converting nop to call */
262 return add_brk_on_nop(rec);
263
264 case FTRACE_UPDATE_MAKE_NOP:
265 /* converting a call to a nop */
266 return add_brk_on_call(rec, ftrace_addr);
267 }
268 return 0;
269}
270
271/*
272 * On error, we need to remove breakpoints. This needs to
273 * be done caefully. If the address does not currently have a
274 * breakpoint, we know we are done. Otherwise, we look at the
275 * remaining 4 bytes of the instruction. If it matches a nop
276 * we replace the breakpoint with the nop. Otherwise we replace
277 * it with the call instruction.
278 */
279static int remove_breakpoint(struct dyn_ftrace *rec)
280{
281 unsigned char ins[MCOUNT_INSN_SIZE];
282 unsigned char brk = BREAKPOINT_INSTRUCTION;
283 const unsigned char *nop;
284 unsigned long ftrace_addr;
285 unsigned long ip = rec->ip;
286
287 /* If we fail the read, just give up */
288 if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
289 return -EFAULT;
290
291 /* If this does not have a breakpoint, we are done */
292 if (ins[0] != brk)
293 return -1;
294
295 nop = ftrace_nop_replace();
296
297 /*
298 * If the last 4 bytes of the instruction do not match
299 * a nop, then we assume that this is a call to ftrace_addr.
300 */
301 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
302 /*
303 * For extra paranoidism, we check if the breakpoint is on
304 * a call that would actually jump to the ftrace_addr.
305 * If not, don't touch the breakpoint, we make just create
306 * a disaster.
307 */
308 ftrace_addr = (unsigned long)FTRACE_ADDR;
309 nop = ftrace_call_replace(ip, ftrace_addr);
310
311 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
312 return -EINVAL;
313 }
314
315 return probe_kernel_write((void *)ip, &nop[0], 1);
316}
317
318static int add_update_code(unsigned long ip, unsigned const char *new)
319{
320 /* skip breakpoint */
321 ip++;
322 new++;
323 if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
324 return -EPERM;
325 return 0;
326}
327
328static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
329{
330 unsigned long ip = rec->ip;
331 unsigned const char *new;
332
333 new = ftrace_call_replace(ip, addr);
334 return add_update_code(ip, new);
335}
336
337static int add_update_nop(struct dyn_ftrace *rec)
338{
339 unsigned long ip = rec->ip;
340 unsigned const char *new;
341
342 new = ftrace_nop_replace();
343 return add_update_code(ip, new);
344}
345
346static int add_update(struct dyn_ftrace *rec, int enable)
347{
348 unsigned long ftrace_addr;
349 int ret;
350
351 ret = ftrace_test_record(rec, enable);
352
353 ftrace_addr = (unsigned long)FTRACE_ADDR;
354
355 switch (ret) {
356 case FTRACE_UPDATE_IGNORE:
357 return 0;
358
359 case FTRACE_UPDATE_MAKE_CALL:
360 /* converting nop to call */
361 return add_update_call(rec, ftrace_addr);
362
363 case FTRACE_UPDATE_MAKE_NOP:
364 /* converting a call to a nop */
365 return add_update_nop(rec);
366 }
367
368 return 0;
369}
370
371static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
372{
373 unsigned long ip = rec->ip;
374 unsigned const char *new;
375
376 new = ftrace_call_replace(ip, addr);
377
378 if (ftrace_write(ip, new, 1))
379 return -EPERM;
380
381 return 0;
382}
383
384static int finish_update_nop(struct dyn_ftrace *rec)
385{
386 unsigned long ip = rec->ip;
387 unsigned const char *new;
388
389 new = ftrace_nop_replace();
390
391 if (ftrace_write(ip, new, 1))
392 return -EPERM;
393 return 0;
394}
395
396static int finish_update(struct dyn_ftrace *rec, int enable)
397{
398 unsigned long ftrace_addr;
399 int ret;
400
401 ret = ftrace_update_record(rec, enable);
402
403 ftrace_addr = (unsigned long)FTRACE_ADDR;
404
405 switch (ret) {
406 case FTRACE_UPDATE_IGNORE:
407 return 0;
408
409 case FTRACE_UPDATE_MAKE_CALL:
410 /* converting nop to call */
411 return finish_update_call(rec, ftrace_addr);
412
413 case FTRACE_UPDATE_MAKE_NOP:
414 /* converting a call to a nop */
415 return finish_update_nop(rec);
416 }
417
418 return 0;
419}
420
421static void do_sync_core(void *data)
422{
423 sync_core();
424}
425
426static void run_sync(void)
427{
428 int enable_irqs = irqs_disabled();
429
430 /* We may be called with interrupts disbled (on bootup). */
431 if (enable_irqs)
432 local_irq_enable();
433 on_each_cpu(do_sync_core, NULL, 1);
434 if (enable_irqs)
435 local_irq_disable();
436}
437
438void ftrace_replace_code(int enable)
439{
440 struct ftrace_rec_iter *iter;
441 struct dyn_ftrace *rec;
442 const char *report = "adding breakpoints";
443 int count = 0;
444 int ret;
445
446 for_ftrace_rec_iter(iter) {
447 rec = ftrace_rec_iter_record(iter);
448
449 ret = add_breakpoints(rec, enable);
450 if (ret)
451 goto remove_breakpoints;
452 count++;
453 }
454
455 run_sync();
456
457 report = "updating code";
458
459 for_ftrace_rec_iter(iter) {
460 rec = ftrace_rec_iter_record(iter);
461
462 ret = add_update(rec, enable);
463 if (ret)
464 goto remove_breakpoints;
465 }
466
467 run_sync();
468
469 report = "removing breakpoints";
470
471 for_ftrace_rec_iter(iter) {
472 rec = ftrace_rec_iter_record(iter);
473
474 ret = finish_update(rec, enable);
475 if (ret)
476 goto remove_breakpoints;
477 }
478
479 run_sync();
480
481 return;
482
483 remove_breakpoints:
484 ftrace_bug(ret, rec ? rec->ip : 0);
485 printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
486 for_ftrace_rec_iter(iter) {
487 rec = ftrace_rec_iter_record(iter);
488 remove_breakpoint(rec);
489 }
490}
491
492void arch_ftrace_update_code(int command)
493{
494 modifying_ftrace_code++;
495
496 ftrace_modify_all_code(command);
497
498 modifying_ftrace_code--;
499}
500
337int __init ftrace_dyn_arch_init(void *data) 501int __init ftrace_dyn_arch_init(void *data)
338{ 502{
339 /* The return code is retured via data */ 503 /* The return code is retured via data */
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 585be4bd71a5..a1faed5ac6a2 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -84,7 +84,7 @@ __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
84 84
85#define nmi_to_desc(type) (&nmi_desc[type]) 85#define nmi_to_desc(type) (&nmi_desc[type])
86 86
87static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) 87static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
88{ 88{
89 struct nmi_desc *desc = nmi_to_desc(type); 89 struct nmi_desc *desc = nmi_to_desc(type);
90 struct nmiaction *a; 90 struct nmiaction *a;
@@ -166,7 +166,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
166} 166}
167EXPORT_SYMBOL_GPL(unregister_nmi_handler); 167EXPORT_SYMBOL_GPL(unregister_nmi_handler);
168 168
169static notrace __kprobes void 169static __kprobes void
170pci_serr_error(unsigned char reason, struct pt_regs *regs) 170pci_serr_error(unsigned char reason, struct pt_regs *regs)
171{ 171{
172 /* check to see if anyone registered against these types of errors */ 172 /* check to see if anyone registered against these types of errors */
@@ -197,7 +197,7 @@ pci_serr_error(unsigned char reason, struct pt_regs *regs)
197 outb(reason, NMI_REASON_PORT); 197 outb(reason, NMI_REASON_PORT);
198} 198}
199 199
200static notrace __kprobes void 200static __kprobes void
201io_check_error(unsigned char reason, struct pt_regs *regs) 201io_check_error(unsigned char reason, struct pt_regs *regs)
202{ 202{
203 unsigned long i; 203 unsigned long i;
@@ -228,7 +228,7 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
228 outb(reason, NMI_REASON_PORT); 228 outb(reason, NMI_REASON_PORT);
229} 229}
230 230
231static notrace __kprobes void 231static __kprobes void
232unknown_nmi_error(unsigned char reason, struct pt_regs *regs) 232unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
233{ 233{
234 int handled; 234 int handled;
@@ -270,7 +270,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
270static DEFINE_PER_CPU(bool, swallow_nmi); 270static DEFINE_PER_CPU(bool, swallow_nmi);
271static DEFINE_PER_CPU(unsigned long, last_nmi_rip); 271static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
272 272
273static notrace __kprobes void default_do_nmi(struct pt_regs *regs) 273static __kprobes void default_do_nmi(struct pt_regs *regs)
274{ 274{
275 unsigned char reason = 0; 275 unsigned char reason = 0;
276 int handled; 276 int handled;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ff9281f16029..92d5756d85fc 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -50,6 +50,7 @@
50#include <asm/processor.h> 50#include <asm/processor.h>
51#include <asm/debugreg.h> 51#include <asm/debugreg.h>
52#include <linux/atomic.h> 52#include <linux/atomic.h>
53#include <asm/ftrace.h>
53#include <asm/traps.h> 54#include <asm/traps.h>
54#include <asm/desc.h> 55#include <asm/desc.h>
55#include <asm/i387.h> 56#include <asm/i387.h>
@@ -303,8 +304,13 @@ gp_in_kernel:
303} 304}
304 305
305/* May run on IST stack. */ 306/* May run on IST stack. */
306dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) 307dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
307{ 308{
309#ifdef CONFIG_DYNAMIC_FTRACE
310 /* ftrace must be first, everything else may cause a recursive crash */
311 if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs))
312 return;
313#endif
308#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 314#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
309 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 315 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
310 SIGTRAP) == NOTIFY_STOP) 316 SIGTRAP) == NOTIFY_STOP)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8aeadf6b553a..4e2e1cc505ab 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -486,8 +486,8 @@
486 CPU_DISCARD(init.data) \ 486 CPU_DISCARD(init.data) \
487 MEM_DISCARD(init.data) \ 487 MEM_DISCARD(init.data) \
488 KERNEL_CTORS() \ 488 KERNEL_CTORS() \
489 *(.init.rodata) \
490 MCOUNT_REC() \ 489 MCOUNT_REC() \
490 *(.init.rodata) \
491 FTRACE_EVENTS() \ 491 FTRACE_EVENTS() \
492 TRACE_SYSCALLS() \ 492 TRACE_SYSCALLS() \
493 DEV_DISCARD(init.rodata) \ 493 DEV_DISCARD(init.rodata) \
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 72a6cabb4d5b..55e6d63d46d0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -286,10 +286,16 @@ struct ftrace_rec_iter *ftrace_rec_iter_start(void);
286struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); 286struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
287struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); 287struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
288 288
289#define for_ftrace_rec_iter(iter) \
290 for (iter = ftrace_rec_iter_start(); \
291 iter; \
292 iter = ftrace_rec_iter_next(iter))
293
294
289int ftrace_update_record(struct dyn_ftrace *rec, int enable); 295int ftrace_update_record(struct dyn_ftrace *rec, int enable);
290int ftrace_test_record(struct dyn_ftrace *rec, int enable); 296int ftrace_test_record(struct dyn_ftrace *rec, int enable);
291void ftrace_run_stop_machine(int command); 297void ftrace_run_stop_machine(int command);
292int ftrace_location(unsigned long ip); 298unsigned long ftrace_location(unsigned long ip);
293 299
294extern ftrace_func_t ftrace_trace_function; 300extern ftrace_func_t ftrace_trace_function;
295 301
@@ -308,11 +314,14 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
308/* defined in arch */ 314/* defined in arch */
309extern int ftrace_ip_converted(unsigned long ip); 315extern int ftrace_ip_converted(unsigned long ip);
310extern int ftrace_dyn_arch_init(void *data); 316extern int ftrace_dyn_arch_init(void *data);
317extern void ftrace_replace_code(int enable);
311extern int ftrace_update_ftrace_func(ftrace_func_t func); 318extern int ftrace_update_ftrace_func(ftrace_func_t func);
312extern void ftrace_caller(void); 319extern void ftrace_caller(void);
313extern void ftrace_call(void); 320extern void ftrace_call(void);
314extern void mcount_call(void); 321extern void mcount_call(void);
315 322
323void ftrace_modify_all_code(int command);
324
316#ifndef FTRACE_ADDR 325#ifndef FTRACE_ADDR
317#define FTRACE_ADDR ((unsigned long)ftrace_caller) 326#define FTRACE_ADDR ((unsigned long)ftrace_caller)
318#endif 327#endif
@@ -485,8 +494,12 @@ static inline void __ftrace_enabled_restore(int enabled)
485 extern void trace_preempt_on(unsigned long a0, unsigned long a1); 494 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
486 extern void trace_preempt_off(unsigned long a0, unsigned long a1); 495 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
487#else 496#else
488 static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { } 497/*
489 static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { } 498 * Use defines instead of static inlines because some arches will make code out
499 * of the CALLER_ADDR, when we really want these to be a real nop.
500 */
501# define trace_preempt_on(a0, a1) do { } while (0)
502# define trace_preempt_off(a0, a1) do { } while (0)
490#endif 503#endif
491 504
492#ifdef CONFIG_FTRACE_MCOUNT_RECORD 505#ifdef CONFIG_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 645231c373c8..c0d34420a913 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -480,15 +480,16 @@ do { \
480 480
481#define trace_printk(fmt, args...) \ 481#define trace_printk(fmt, args...) \
482do { \ 482do { \
483 static const char *trace_printk_fmt \
484 __attribute__((section("__trace_printk_fmt"))) = \
485 __builtin_constant_p(fmt) ? fmt : NULL; \
486 \
483 __trace_printk_check_format(fmt, ##args); \ 487 __trace_printk_check_format(fmt, ##args); \
484 if (__builtin_constant_p(fmt)) { \
485 static const char *trace_printk_fmt \
486 __attribute__((section("__trace_printk_fmt"))) = \
487 __builtin_constant_p(fmt) ? fmt : NULL; \
488 \ 488 \
489 if (__builtin_constant_p(fmt)) \
489 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ 490 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
490 } else \ 491 else \
491 __trace_printk(_THIS_IP_, fmt, ##args); \ 492 __trace_printk(_THIS_IP_, fmt, ##args); \
492} while (0) 493} while (0)
493 494
494extern __printf(2, 3) 495extern __printf(2, 3)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ddbb6a901f65..8adf70e9e3cc 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1084,10 +1084,8 @@ extern void perf_pmu_unregister(struct pmu *pmu);
1084 1084
1085extern int perf_num_counters(void); 1085extern int perf_num_counters(void);
1086extern const char *perf_pmu_name(void); 1086extern const char *perf_pmu_name(void);
1087extern void __perf_event_task_sched_in(struct task_struct *prev, 1087extern void __perf_event_task_sched(struct task_struct *prev,
1088 struct task_struct *task); 1088 struct task_struct *next);
1089extern void __perf_event_task_sched_out(struct task_struct *prev,
1090 struct task_struct *next);
1091extern int perf_event_init_task(struct task_struct *child); 1089extern int perf_event_init_task(struct task_struct *child);
1092extern void perf_event_exit_task(struct task_struct *child); 1090extern void perf_event_exit_task(struct task_struct *child);
1093extern void perf_event_free_task(struct task_struct *task); 1091extern void perf_event_free_task(struct task_struct *task);
@@ -1132,11 +1130,14 @@ struct perf_sample_data {
1132 struct perf_branch_stack *br_stack; 1130 struct perf_branch_stack *br_stack;
1133}; 1131};
1134 1132
1135static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) 1133static inline void perf_sample_data_init(struct perf_sample_data *data,
1134 u64 addr, u64 period)
1136{ 1135{
1136 /* remaining struct members initialized in perf_prepare_sample() */
1137 data->addr = addr; 1137 data->addr = addr;
1138 data->raw = NULL; 1138 data->raw = NULL;
1139 data->br_stack = NULL; 1139 data->br_stack = NULL;
1140 data->period = period;
1140} 1141}
1141 1142
1142extern void perf_output_sample(struct perf_output_handle *handle, 1143extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1204,20 +1205,13 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1204 1205
1205extern struct static_key_deferred perf_sched_events; 1206extern struct static_key_deferred perf_sched_events;
1206 1207
1207static inline void perf_event_task_sched_in(struct task_struct *prev, 1208static inline void perf_event_task_sched(struct task_struct *prev,
1208 struct task_struct *task) 1209 struct task_struct *task)
1209{ 1210{
1210 if (static_key_false(&perf_sched_events.key))
1211 __perf_event_task_sched_in(prev, task);
1212}
1213
1214static inline void perf_event_task_sched_out(struct task_struct *prev,
1215 struct task_struct *next)
1216{
1217 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); 1211 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
1218 1212
1219 if (static_key_false(&perf_sched_events.key)) 1213 if (static_key_false(&perf_sched_events.key))
1220 __perf_event_task_sched_out(prev, next); 1214 __perf_event_task_sched(prev, task);
1221} 1215}
1222 1216
1223extern void perf_event_mmap(struct vm_area_struct *vma); 1217extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1292,11 +1286,8 @@ extern void perf_event_disable(struct perf_event *event);
1292extern void perf_event_task_tick(void); 1286extern void perf_event_task_tick(void);
1293#else 1287#else
1294static inline void 1288static inline void
1295perf_event_task_sched_in(struct task_struct *prev, 1289perf_event_task_sched(struct task_struct *prev,
1296 struct task_struct *task) { } 1290 struct task_struct *task) { }
1297static inline void
1298perf_event_task_sched_out(struct task_struct *prev,
1299 struct task_struct *next) { }
1300static inline int perf_event_init_task(struct task_struct *child) { return 0; } 1291static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1301static inline void perf_event_exit_task(struct task_struct *child) { } 1292static inline void perf_event_exit_task(struct task_struct *child) { }
1302static inline void perf_event_free_task(struct task_struct *task) { } 1293static inline void perf_event_free_task(struct task_struct *task) { }
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 7be2e88f23fd..6c8835f74f79 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -96,9 +96,11 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
96 __ring_buffer_alloc((size), (flags), &__key); \ 96 __ring_buffer_alloc((size), (flags), &__key); \
97}) 97})
98 98
99#define RING_BUFFER_ALL_CPUS -1
100
99void ring_buffer_free(struct ring_buffer *buffer); 101void ring_buffer_free(struct ring_buffer *buffer);
100 102
101int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); 103int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
102 104
103void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val); 105void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
104 106
@@ -129,7 +131,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
129void ring_buffer_iter_reset(struct ring_buffer_iter *iter); 131void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
130int ring_buffer_iter_empty(struct ring_buffer_iter *iter); 132int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
131 133
132unsigned long ring_buffer_size(struct ring_buffer *buffer); 134unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
133 135
134void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); 136void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
135void ring_buffer_reset(struct ring_buffer *buffer); 137void ring_buffer_reset(struct ring_buffer *buffer);
diff --git a/init/Kconfig b/init/Kconfig
index 6d18ef8071b5..da2d2d2e3f07 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1198,7 +1198,7 @@ menu "Kernel Performance Events And Counters"
1198 1198
1199config PERF_EVENTS 1199config PERF_EVENTS
1200 bool "Kernel performance events and counters" 1200 bool "Kernel performance events and counters"
1201 default y if (PROFILING || PERF_COUNTERS) 1201 default y if PROFILING
1202 depends on HAVE_PERF_EVENTS 1202 depends on HAVE_PERF_EVENTS
1203 select ANON_INODES 1203 select ANON_INODES
1204 select IRQ_WORK 1204 select IRQ_WORK
@@ -1225,18 +1225,6 @@ config PERF_EVENTS
1225 1225
1226 Say Y if unsure. 1226 Say Y if unsure.
1227 1227
1228config PERF_COUNTERS
1229 bool "Kernel performance counters (old config option)"
1230 depends on HAVE_PERF_EVENTS
1231 help
1232 This config has been obsoleted by the PERF_EVENTS
1233 config option - please see that one for details.
1234
1235 It has no effect on the kernel whether you enable
1236 it or not, it is a compatibility placeholder.
1237
1238 Say N if unsure.
1239
1240config DEBUG_PERF_USE_VMALLOC 1228config DEBUG_PERF_USE_VMALLOC
1241 default n 1229 default n
1242 bool "Debug: use vmalloc to back perf mmap() buffers" 1230 bool "Debug: use vmalloc to back perf mmap() buffers"
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fd126f82b57c..91a445925855 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2039,8 +2039,8 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2039 * accessing the event control register. If a NMI hits, then it will 2039 * accessing the event control register. If a NMI hits, then it will
2040 * not restart the event. 2040 * not restart the event.
2041 */ 2041 */
2042void __perf_event_task_sched_out(struct task_struct *task, 2042static void __perf_event_task_sched_out(struct task_struct *task,
2043 struct task_struct *next) 2043 struct task_struct *next)
2044{ 2044{
2045 int ctxn; 2045 int ctxn;
2046 2046
@@ -2279,8 +2279,8 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
2279 * accessing the event control register. If a NMI hits, then it will 2279 * accessing the event control register. If a NMI hits, then it will
2280 * keep the event running. 2280 * keep the event running.
2281 */ 2281 */
2282void __perf_event_task_sched_in(struct task_struct *prev, 2282static void __perf_event_task_sched_in(struct task_struct *prev,
2283 struct task_struct *task) 2283 struct task_struct *task)
2284{ 2284{
2285 struct perf_event_context *ctx; 2285 struct perf_event_context *ctx;
2286 int ctxn; 2286 int ctxn;
@@ -2305,6 +2305,12 @@ void __perf_event_task_sched_in(struct task_struct *prev,
2305 perf_branch_stack_sched_in(prev, task); 2305 perf_branch_stack_sched_in(prev, task);
2306} 2306}
2307 2307
2308void __perf_event_task_sched(struct task_struct *prev, struct task_struct *next)
2309{
2310 __perf_event_task_sched_out(prev, next);
2311 __perf_event_task_sched_in(prev, next);
2312}
2313
2308static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 2314static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2309{ 2315{
2310 u64 frequency = event->attr.sample_freq; 2316 u64 frequency = event->attr.sample_freq;
@@ -4957,7 +4963,7 @@ void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
4957 if (rctx < 0) 4963 if (rctx < 0)
4958 return; 4964 return;
4959 4965
4960 perf_sample_data_init(&data, addr); 4966 perf_sample_data_init(&data, addr, 0);
4961 4967
4962 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 4968 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
4963 4969
@@ -5215,7 +5221,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5215 .data = record, 5221 .data = record,
5216 }; 5222 };
5217 5223
5218 perf_sample_data_init(&data, addr); 5224 perf_sample_data_init(&data, addr, 0);
5219 data.raw = &raw; 5225 data.raw = &raw;
5220 5226
5221 hlist_for_each_entry_rcu(event, node, head, hlist_entry) { 5227 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
@@ -5318,7 +5324,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
5318 struct perf_sample_data sample; 5324 struct perf_sample_data sample;
5319 struct pt_regs *regs = data; 5325 struct pt_regs *regs = data;
5320 5326
5321 perf_sample_data_init(&sample, bp->attr.bp_addr); 5327 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
5322 5328
5323 if (!bp->hw.state && !perf_exclude_event(bp, regs)) 5329 if (!bp->hw.state && !perf_exclude_event(bp, regs))
5324 perf_swevent_event(bp, 1, &sample, regs); 5330 perf_swevent_event(bp, 1, &sample, regs);
@@ -5344,13 +5350,12 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5344 5350
5345 event->pmu->read(event); 5351 event->pmu->read(event);
5346 5352
5347 perf_sample_data_init(&data, 0); 5353 perf_sample_data_init(&data, 0, event->hw.last_period);
5348 data.period = event->hw.last_period;
5349 regs = get_irq_regs(); 5354 regs = get_irq_regs();
5350 5355
5351 if (regs && !perf_exclude_event(event, regs)) { 5356 if (regs && !perf_exclude_event(event, regs)) {
5352 if (!(event->attr.exclude_idle && is_idle_task(current))) 5357 if (!(event->attr.exclude_idle && is_idle_task(current)))
5353 if (perf_event_overflow(event, &data, regs)) 5358 if (__perf_event_overflow(event, 1, &data, regs))
5354 ret = HRTIMER_NORESTART; 5359 ret = HRTIMER_NORESTART;
5355 } 5360 }
5356 5361
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 03667c3fdb33..d2e2e173d8f7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1914,7 +1914,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
1914 struct task_struct *next) 1914 struct task_struct *next)
1915{ 1915{
1916 sched_info_switch(prev, next); 1916 sched_info_switch(prev, next);
1917 perf_event_task_sched_out(prev, next); 1917 perf_event_task_sched(prev, next);
1918 fire_sched_out_preempt_notifiers(prev, next); 1918 fire_sched_out_preempt_notifiers(prev, next);
1919 prepare_lock_switch(rq, next); 1919 prepare_lock_switch(rq, next);
1920 prepare_arch_switch(next); 1920 prepare_arch_switch(next);
@@ -1957,13 +1957,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1957 */ 1957 */
1958 prev_state = prev->state; 1958 prev_state = prev->state;
1959 finish_arch_switch(prev); 1959 finish_arch_switch(prev);
1960#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1961 local_irq_disable();
1962#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1963 perf_event_task_sched_in(prev, current);
1964#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1965 local_irq_enable();
1966#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1967 finish_lock_switch(rq, prev); 1960 finish_lock_switch(rq, prev);
1968 finish_arch_post_lock_switch(); 1961 finish_arch_post_lock_switch();
1969 1962
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index a1d2849f2473..d81a1a532994 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,7 +141,6 @@ if FTRACE
141config FUNCTION_TRACER 141config FUNCTION_TRACER
142 bool "Kernel Function Tracer" 142 bool "Kernel Function Tracer"
143 depends on HAVE_FUNCTION_TRACER 143 depends on HAVE_FUNCTION_TRACER
144 select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
145 select KALLSYMS 144 select KALLSYMS
146 select GENERIC_TRACER 145 select GENERIC_TRACER
147 select CONTEXT_SWITCH_TRACER 146 select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0fa92f677c92..a008663d86c8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1383,44 +1383,73 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1383 1383
1384static int ftrace_cmp_recs(const void *a, const void *b) 1384static int ftrace_cmp_recs(const void *a, const void *b)
1385{ 1385{
1386 const struct dyn_ftrace *reca = a; 1386 const struct dyn_ftrace *key = a;
1387 const struct dyn_ftrace *recb = b; 1387 const struct dyn_ftrace *rec = b;
1388 1388
1389 if (reca->ip > recb->ip) 1389 if (key->flags < rec->ip)
1390 return 1;
1391 if (reca->ip < recb->ip)
1392 return -1; 1390 return -1;
1391 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1392 return 1;
1393 return 0; 1393 return 0;
1394} 1394}
1395 1395
1396/** 1396static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1397 * ftrace_location - return true if the ip giving is a traced location
1398 * @ip: the instruction pointer to check
1399 *
1400 * Returns 1 if @ip given is a pointer to a ftrace location.
1401 * That is, the instruction that is either a NOP or call to
1402 * the function tracer. It checks the ftrace internal tables to
1403 * determine if the address belongs or not.
1404 */
1405int ftrace_location(unsigned long ip)
1406{ 1397{
1407 struct ftrace_page *pg; 1398 struct ftrace_page *pg;
1408 struct dyn_ftrace *rec; 1399 struct dyn_ftrace *rec;
1409 struct dyn_ftrace key; 1400 struct dyn_ftrace key;
1410 1401
1411 key.ip = ip; 1402 key.ip = start;
1403 key.flags = end; /* overload flags, as it is unsigned long */
1412 1404
1413 for (pg = ftrace_pages_start; pg; pg = pg->next) { 1405 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1406 if (end < pg->records[0].ip ||
1407 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1408 continue;
1414 rec = bsearch(&key, pg->records, pg->index, 1409 rec = bsearch(&key, pg->records, pg->index,
1415 sizeof(struct dyn_ftrace), 1410 sizeof(struct dyn_ftrace),
1416 ftrace_cmp_recs); 1411 ftrace_cmp_recs);
1417 if (rec) 1412 if (rec)
1418 return 1; 1413 return rec->ip;
1419 } 1414 }
1420 1415
1421 return 0; 1416 return 0;
1422} 1417}
1423 1418
1419/**
 1420 * ftrace_location - return true if the ip given is a traced location
1421 * @ip: the instruction pointer to check
1422 *
1423 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1424 * That is, the instruction that is either a NOP or call to
1425 * the function tracer. It checks the ftrace internal tables to
1426 * determine if the address belongs or not.
1427 */
1428unsigned long ftrace_location(unsigned long ip)
1429{
1430 return ftrace_location_range(ip, ip);
1431}
1432
1433/**
1434 * ftrace_text_reserved - return true if range contains an ftrace location
1435 * @start: start of range to search
1436 * @end: end of range to search (inclusive). @end points to the last byte to check.
1437 *
1438 * Returns 1 if @start and @end contains a ftrace location.
1439 * That is, the instruction that is either a NOP or call to
1440 * the function tracer. It checks the ftrace internal tables to
1441 * determine if the address belongs or not.
1442 */
1443int ftrace_text_reserved(void *start, void *end)
1444{
1445 unsigned long ret;
1446
1447 ret = ftrace_location_range((unsigned long)start,
1448 (unsigned long)end);
1449
1450 return (int)!!ret;
1451}
1452
1424static void __ftrace_hash_rec_update(struct ftrace_ops *ops, 1453static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1425 int filter_hash, 1454 int filter_hash,
1426 bool inc) 1455 bool inc)
@@ -1520,35 +1549,6 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1520 __ftrace_hash_rec_update(ops, filter_hash, 1); 1549 __ftrace_hash_rec_update(ops, filter_hash, 1);
1521} 1550}
1522 1551
1523static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1524{
1525 if (ftrace_pages->index == ftrace_pages->size) {
1526 /* We should have allocated enough */
1527 if (WARN_ON(!ftrace_pages->next))
1528 return NULL;
1529 ftrace_pages = ftrace_pages->next;
1530 }
1531
1532 return &ftrace_pages->records[ftrace_pages->index++];
1533}
1534
1535static struct dyn_ftrace *
1536ftrace_record_ip(unsigned long ip)
1537{
1538 struct dyn_ftrace *rec;
1539
1540 if (ftrace_disabled)
1541 return NULL;
1542
1543 rec = ftrace_alloc_dyn_node(ip);
1544 if (!rec)
1545 return NULL;
1546
1547 rec->ip = ip;
1548
1549 return rec;
1550}
1551
1552static void print_ip_ins(const char *fmt, unsigned char *p) 1552static void print_ip_ins(const char *fmt, unsigned char *p)
1553{ 1553{
1554 int i; 1554 int i;
@@ -1598,21 +1598,6 @@ void ftrace_bug(int failed, unsigned long ip)
1598 } 1598 }
1599} 1599}
1600 1600
1601
1602/* Return 1 if the address range is reserved for ftrace */
1603int ftrace_text_reserved(void *start, void *end)
1604{
1605 struct dyn_ftrace *rec;
1606 struct ftrace_page *pg;
1607
1608 do_for_each_ftrace_rec(pg, rec) {
1609 if (rec->ip <= (unsigned long)end &&
1610 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1611 return 1;
1612 } while_for_each_ftrace_rec();
1613 return 0;
1614}
1615
1616static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) 1601static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1617{ 1602{
1618 unsigned long flag = 0UL; 1603 unsigned long flag = 0UL;
@@ -1698,7 +1683,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 1698 return -1; /* unknown ftrace bug */ 1683 return -1; /* unknown ftrace bug */
1699} 1684}
1700 1685
1701static void ftrace_replace_code(int update) 1686void __weak ftrace_replace_code(int enable)
1702{ 1687{
1703 struct dyn_ftrace *rec; 1688 struct dyn_ftrace *rec;
1704 struct ftrace_page *pg; 1689 struct ftrace_page *pg;
@@ -1708,7 +1693,7 @@ static void ftrace_replace_code(int update)
1708 return; 1693 return;
1709 1694
1710 do_for_each_ftrace_rec(pg, rec) { 1695 do_for_each_ftrace_rec(pg, rec) {
1711 failed = __ftrace_replace_code(rec, update); 1696 failed = __ftrace_replace_code(rec, enable);
1712 if (failed) { 1697 if (failed) {
1713 ftrace_bug(failed, rec->ip); 1698 ftrace_bug(failed, rec->ip);
1714 /* Stop processing */ 1699 /* Stop processing */
@@ -1826,22 +1811,27 @@ int __weak ftrace_arch_code_modify_post_process(void)
1826 return 0; 1811 return 0;
1827} 1812}
1828 1813
1829static int __ftrace_modify_code(void *data) 1814void ftrace_modify_all_code(int command)
1830{ 1815{
1831 int *command = data; 1816 if (command & FTRACE_UPDATE_CALLS)
1832
1833 if (*command & FTRACE_UPDATE_CALLS)
1834 ftrace_replace_code(1); 1817 ftrace_replace_code(1);
1835 else if (*command & FTRACE_DISABLE_CALLS) 1818 else if (command & FTRACE_DISABLE_CALLS)
1836 ftrace_replace_code(0); 1819 ftrace_replace_code(0);
1837 1820
1838 if (*command & FTRACE_UPDATE_TRACE_FUNC) 1821 if (command & FTRACE_UPDATE_TRACE_FUNC)
1839 ftrace_update_ftrace_func(ftrace_trace_function); 1822 ftrace_update_ftrace_func(ftrace_trace_function);
1840 1823
1841 if (*command & FTRACE_START_FUNC_RET) 1824 if (command & FTRACE_START_FUNC_RET)
1842 ftrace_enable_ftrace_graph_caller(); 1825 ftrace_enable_ftrace_graph_caller();
1843 else if (*command & FTRACE_STOP_FUNC_RET) 1826 else if (command & FTRACE_STOP_FUNC_RET)
1844 ftrace_disable_ftrace_graph_caller(); 1827 ftrace_disable_ftrace_graph_caller();
1828}
1829
1830static int __ftrace_modify_code(void *data)
1831{
1832 int *command = data;
1833
1834 ftrace_modify_all_code(*command);
1845 1835
1846 return 0; 1836 return 0;
1847} 1837}
@@ -2469,57 +2459,35 @@ static int
2469ftrace_avail_open(struct inode *inode, struct file *file) 2459ftrace_avail_open(struct inode *inode, struct file *file)
2470{ 2460{
2471 struct ftrace_iterator *iter; 2461 struct ftrace_iterator *iter;
2472 int ret;
2473 2462
2474 if (unlikely(ftrace_disabled)) 2463 if (unlikely(ftrace_disabled))
2475 return -ENODEV; 2464 return -ENODEV;
2476 2465
2477 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 2466 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2478 if (!iter) 2467 if (iter) {
2479 return -ENOMEM; 2468 iter->pg = ftrace_pages_start;
2480 2469 iter->ops = &global_ops;
2481 iter->pg = ftrace_pages_start;
2482 iter->ops = &global_ops;
2483
2484 ret = seq_open(file, &show_ftrace_seq_ops);
2485 if (!ret) {
2486 struct seq_file *m = file->private_data;
2487
2488 m->private = iter;
2489 } else {
2490 kfree(iter);
2491 } 2470 }
2492 2471
2493 return ret; 2472 return iter ? 0 : -ENOMEM;
2494} 2473}
2495 2474
2496static int 2475static int
2497ftrace_enabled_open(struct inode *inode, struct file *file) 2476ftrace_enabled_open(struct inode *inode, struct file *file)
2498{ 2477{
2499 struct ftrace_iterator *iter; 2478 struct ftrace_iterator *iter;
2500 int ret;
2501 2479
2502 if (unlikely(ftrace_disabled)) 2480 if (unlikely(ftrace_disabled))
2503 return -ENODEV; 2481 return -ENODEV;
2504 2482
2505 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 2483 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2506 if (!iter) 2484 if (iter) {
2507 return -ENOMEM; 2485 iter->pg = ftrace_pages_start;
2508 2486 iter->flags = FTRACE_ITER_ENABLED;
2509 iter->pg = ftrace_pages_start; 2487 iter->ops = &global_ops;
2510 iter->flags = FTRACE_ITER_ENABLED;
2511 iter->ops = &global_ops;
2512
2513 ret = seq_open(file, &show_ftrace_seq_ops);
2514 if (!ret) {
2515 struct seq_file *m = file->private_data;
2516
2517 m->private = iter;
2518 } else {
2519 kfree(iter);
2520 } 2488 }
2521 2489
2522 return ret; 2490 return iter ? 0 : -ENOMEM;
2523} 2491}
2524 2492
2525static void ftrace_filter_reset(struct ftrace_hash *hash) 2493static void ftrace_filter_reset(struct ftrace_hash *hash)
@@ -3688,22 +3656,36 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3688 return 0; 3656 return 0;
3689} 3657}
3690 3658
3691static void ftrace_swap_recs(void *a, void *b, int size) 3659static int ftrace_cmp_ips(const void *a, const void *b)
3660{
3661 const unsigned long *ipa = a;
3662 const unsigned long *ipb = b;
3663
3664 if (*ipa > *ipb)
3665 return 1;
3666 if (*ipa < *ipb)
3667 return -1;
3668 return 0;
3669}
3670
3671static void ftrace_swap_ips(void *a, void *b, int size)
3692{ 3672{
3693 struct dyn_ftrace *reca = a; 3673 unsigned long *ipa = a;
3694 struct dyn_ftrace *recb = b; 3674 unsigned long *ipb = b;
3695 struct dyn_ftrace t; 3675 unsigned long t;
3696 3676
3697 t = *reca; 3677 t = *ipa;
3698 *reca = *recb; 3678 *ipa = *ipb;
3699 *recb = t; 3679 *ipb = t;
3700} 3680}
3701 3681
3702static int ftrace_process_locs(struct module *mod, 3682static int ftrace_process_locs(struct module *mod,
3703 unsigned long *start, 3683 unsigned long *start,
3704 unsigned long *end) 3684 unsigned long *end)
3705{ 3685{
3686 struct ftrace_page *start_pg;
3706 struct ftrace_page *pg; 3687 struct ftrace_page *pg;
3688 struct dyn_ftrace *rec;
3707 unsigned long count; 3689 unsigned long count;
3708 unsigned long *p; 3690 unsigned long *p;
3709 unsigned long addr; 3691 unsigned long addr;
@@ -3715,8 +3697,11 @@ static int ftrace_process_locs(struct module *mod,
3715 if (!count) 3697 if (!count)
3716 return 0; 3698 return 0;
3717 3699
3718 pg = ftrace_allocate_pages(count); 3700 sort(start, count, sizeof(*start),
3719 if (!pg) 3701 ftrace_cmp_ips, ftrace_swap_ips);
3702
3703 start_pg = ftrace_allocate_pages(count);
3704 if (!start_pg)
3720 return -ENOMEM; 3705 return -ENOMEM;
3721 3706
3722 mutex_lock(&ftrace_lock); 3707 mutex_lock(&ftrace_lock);
@@ -3729,7 +3714,7 @@ static int ftrace_process_locs(struct module *mod,
3729 if (!mod) { 3714 if (!mod) {
3730 WARN_ON(ftrace_pages || ftrace_pages_start); 3715 WARN_ON(ftrace_pages || ftrace_pages_start);
3731 /* First initialization */ 3716 /* First initialization */
3732 ftrace_pages = ftrace_pages_start = pg; 3717 ftrace_pages = ftrace_pages_start = start_pg;
3733 } else { 3718 } else {
3734 if (!ftrace_pages) 3719 if (!ftrace_pages)
3735 goto out; 3720 goto out;
@@ -3740,11 +3725,11 @@ static int ftrace_process_locs(struct module *mod,
3740 ftrace_pages = ftrace_pages->next; 3725 ftrace_pages = ftrace_pages->next;
3741 } 3726 }
3742 3727
3743 ftrace_pages->next = pg; 3728 ftrace_pages->next = start_pg;
3744 ftrace_pages = pg;
3745 } 3729 }
3746 3730
3747 p = start; 3731 p = start;
3732 pg = start_pg;
3748 while (p < end) { 3733 while (p < end) {
3749 addr = ftrace_call_adjust(*p++); 3734 addr = ftrace_call_adjust(*p++);
3750 /* 3735 /*
@@ -3755,17 +3740,26 @@ static int ftrace_process_locs(struct module *mod,
3755 */ 3740 */
3756 if (!addr) 3741 if (!addr)
3757 continue; 3742 continue;
3758 if (!ftrace_record_ip(addr)) 3743
3759 break; 3744 if (pg->index == pg->size) {
3745 /* We should have allocated enough */
3746 if (WARN_ON(!pg->next))
3747 break;
3748 pg = pg->next;
3749 }
3750
3751 rec = &pg->records[pg->index++];
3752 rec->ip = addr;
3760 } 3753 }
3761 3754
3762 /* These new locations need to be initialized */ 3755 /* We should have used all pages */
3763 ftrace_new_pgs = pg; 3756 WARN_ON(pg->next);
3757
3758 /* Assign the last page to ftrace_pages */
3759 ftrace_pages = pg;
3764 3760
3765 /* Make each individual set of pages sorted by ips */ 3761 /* These new locations need to be initialized */
3766 for (; pg; pg = pg->next) 3762 ftrace_new_pgs = start_pg;
3767 sort(pg->records, pg->index, sizeof(struct dyn_ftrace),
3768 ftrace_cmp_recs, ftrace_swap_recs);
3769 3763
3770 /* 3764 /*
3771 * We only need to disable interrupts on start up 3765 * We only need to disable interrupts on start up
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cf8d11e91efd..6420cda62336 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -23,6 +23,8 @@
23#include <asm/local.h> 23#include <asm/local.h>
24#include "trace.h" 24#include "trace.h"
25 25
26static void update_pages_handler(struct work_struct *work);
27
26/* 28/*
27 * The ring buffer header is special. We must manually up keep it. 29 * The ring buffer header is special. We must manually up keep it.
28 */ 30 */
@@ -449,6 +451,7 @@ struct ring_buffer_per_cpu {
449 raw_spinlock_t reader_lock; /* serialize readers */ 451 raw_spinlock_t reader_lock; /* serialize readers */
450 arch_spinlock_t lock; 452 arch_spinlock_t lock;
451 struct lock_class_key lock_key; 453 struct lock_class_key lock_key;
454 unsigned int nr_pages;
452 struct list_head *pages; 455 struct list_head *pages;
453 struct buffer_page *head_page; /* read from head */ 456 struct buffer_page *head_page; /* read from head */
454 struct buffer_page *tail_page; /* write to tail */ 457 struct buffer_page *tail_page; /* write to tail */
@@ -466,13 +469,18 @@ struct ring_buffer_per_cpu {
466 unsigned long read_bytes; 469 unsigned long read_bytes;
467 u64 write_stamp; 470 u64 write_stamp;
468 u64 read_stamp; 471 u64 read_stamp;
472 /* ring buffer pages to update, > 0 to add, < 0 to remove */
473 int nr_pages_to_update;
474 struct list_head new_pages; /* new pages to add */
475 struct work_struct update_pages_work;
476 struct completion update_done;
469}; 477};
470 478
471struct ring_buffer { 479struct ring_buffer {
472 unsigned pages;
473 unsigned flags; 480 unsigned flags;
474 int cpus; 481 int cpus;
475 atomic_t record_disabled; 482 atomic_t record_disabled;
483 atomic_t resize_disabled;
476 cpumask_var_t cpumask; 484 cpumask_var_t cpumask;
477 485
478 struct lock_class_key *reader_lock_key; 486 struct lock_class_key *reader_lock_key;
@@ -937,6 +945,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
937 struct list_head *head = cpu_buffer->pages; 945 struct list_head *head = cpu_buffer->pages;
938 struct buffer_page *bpage, *tmp; 946 struct buffer_page *bpage, *tmp;
939 947
948 /* Reset the head page if it exists */
949 if (cpu_buffer->head_page)
950 rb_set_head_page(cpu_buffer);
951
940 rb_head_page_deactivate(cpu_buffer); 952 rb_head_page_deactivate(cpu_buffer);
941 953
942 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) 954 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
@@ -963,14 +975,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
963 return 0; 975 return 0;
964} 976}
965 977
966static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 978static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
967 unsigned nr_pages)
968{ 979{
980 int i;
969 struct buffer_page *bpage, *tmp; 981 struct buffer_page *bpage, *tmp;
970 LIST_HEAD(pages);
971 unsigned i;
972
973 WARN_ON(!nr_pages);
974 982
975 for (i = 0; i < nr_pages; i++) { 983 for (i = 0; i < nr_pages; i++) {
976 struct page *page; 984 struct page *page;
@@ -981,15 +989,13 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
981 */ 989 */
982 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 990 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
983 GFP_KERNEL | __GFP_NORETRY, 991 GFP_KERNEL | __GFP_NORETRY,
984 cpu_to_node(cpu_buffer->cpu)); 992 cpu_to_node(cpu));
985 if (!bpage) 993 if (!bpage)
986 goto free_pages; 994 goto free_pages;
987 995
988 rb_check_bpage(cpu_buffer, bpage); 996 list_add(&bpage->list, pages);
989 997
990 list_add(&bpage->list, &pages); 998 page = alloc_pages_node(cpu_to_node(cpu),
991
992 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
993 GFP_KERNEL | __GFP_NORETRY, 0); 999 GFP_KERNEL | __GFP_NORETRY, 0);
994 if (!page) 1000 if (!page)
995 goto free_pages; 1001 goto free_pages;
@@ -997,6 +1003,27 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
997 rb_init_page(bpage->page); 1003 rb_init_page(bpage->page);
998 } 1004 }
999 1005
1006 return 0;
1007
1008free_pages:
1009 list_for_each_entry_safe(bpage, tmp, pages, list) {
1010 list_del_init(&bpage->list);
1011 free_buffer_page(bpage);
1012 }
1013
1014 return -ENOMEM;
1015}
1016
1017static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1018 unsigned nr_pages)
1019{
1020 LIST_HEAD(pages);
1021
1022 WARN_ON(!nr_pages);
1023
1024 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1025 return -ENOMEM;
1026
1000 /* 1027 /*
1001 * The ring buffer page list is a circular list that does not 1028 * The ring buffer page list is a circular list that does not
1002 * start and end with a list head. All page list items point to 1029 * start and end with a list head. All page list items point to
@@ -1005,20 +1032,15 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1005 cpu_buffer->pages = pages.next; 1032 cpu_buffer->pages = pages.next;
1006 list_del(&pages); 1033 list_del(&pages);
1007 1034
1035 cpu_buffer->nr_pages = nr_pages;
1036
1008 rb_check_pages(cpu_buffer); 1037 rb_check_pages(cpu_buffer);
1009 1038
1010 return 0; 1039 return 0;
1011
1012 free_pages:
1013 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1014 list_del_init(&bpage->list);
1015 free_buffer_page(bpage);
1016 }
1017 return -ENOMEM;
1018} 1040}
1019 1041
1020static struct ring_buffer_per_cpu * 1042static struct ring_buffer_per_cpu *
1021rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) 1043rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1022{ 1044{
1023 struct ring_buffer_per_cpu *cpu_buffer; 1045 struct ring_buffer_per_cpu *cpu_buffer;
1024 struct buffer_page *bpage; 1046 struct buffer_page *bpage;
@@ -1035,6 +1057,8 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
1035 raw_spin_lock_init(&cpu_buffer->reader_lock); 1057 raw_spin_lock_init(&cpu_buffer->reader_lock);
1036 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1058 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1037 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1059 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1060 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1061 init_completion(&cpu_buffer->update_done);
1038 1062
1039 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1063 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1040 GFP_KERNEL, cpu_to_node(cpu)); 1064 GFP_KERNEL, cpu_to_node(cpu));
@@ -1052,7 +1076,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
1052 1076
1053 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 1077 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1054 1078
1055 ret = rb_allocate_pages(cpu_buffer, buffer->pages); 1079 ret = rb_allocate_pages(cpu_buffer, nr_pages);
1056 if (ret < 0) 1080 if (ret < 0)
1057 goto fail_free_reader; 1081 goto fail_free_reader;
1058 1082
@@ -1113,7 +1137,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1113{ 1137{
1114 struct ring_buffer *buffer; 1138 struct ring_buffer *buffer;
1115 int bsize; 1139 int bsize;
1116 int cpu; 1140 int cpu, nr_pages;
1117 1141
1118 /* keep it in its own cache line */ 1142 /* keep it in its own cache line */
1119 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 1143 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1124,14 +1148,14 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1124 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 1148 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1125 goto fail_free_buffer; 1149 goto fail_free_buffer;
1126 1150
1127 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1151 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1128 buffer->flags = flags; 1152 buffer->flags = flags;
1129 buffer->clock = trace_clock_local; 1153 buffer->clock = trace_clock_local;
1130 buffer->reader_lock_key = key; 1154 buffer->reader_lock_key = key;
1131 1155
1132 /* need at least two pages */ 1156 /* need at least two pages */
1133 if (buffer->pages < 2) 1157 if (nr_pages < 2)
1134 buffer->pages = 2; 1158 nr_pages = 2;
1135 1159
1136 /* 1160 /*
1137 * In case of non-hotplug cpu, if the ring-buffer is allocated 1161 * In case of non-hotplug cpu, if the ring-buffer is allocated
@@ -1154,7 +1178,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1154 1178
1155 for_each_buffer_cpu(buffer, cpu) { 1179 for_each_buffer_cpu(buffer, cpu) {
1156 buffer->buffers[cpu] = 1180 buffer->buffers[cpu] =
1157 rb_allocate_cpu_buffer(buffer, cpu); 1181 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1158 if (!buffer->buffers[cpu]) 1182 if (!buffer->buffers[cpu])
1159 goto fail_free_buffers; 1183 goto fail_free_buffers;
1160 } 1184 }
@@ -1222,58 +1246,222 @@ void ring_buffer_set_clock(struct ring_buffer *buffer,
1222 1246
1223static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 1247static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1224 1248
1225static void 1249static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1226rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1227{ 1250{
1228 struct buffer_page *bpage; 1251 return local_read(&bpage->entries) & RB_WRITE_MASK;
1229 struct list_head *p; 1252}
1230 unsigned i; 1253
1254static inline unsigned long rb_page_write(struct buffer_page *bpage)
1255{
1256 return local_read(&bpage->write) & RB_WRITE_MASK;
1257}
1258
1259static int
1260rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1261{
1262 struct list_head *tail_page, *to_remove, *next_page;
1263 struct buffer_page *to_remove_page, *tmp_iter_page;
1264 struct buffer_page *last_page, *first_page;
1265 unsigned int nr_removed;
1266 unsigned long head_bit;
1267 int page_entries;
1268
1269 head_bit = 0;
1231 1270
1232 raw_spin_lock_irq(&cpu_buffer->reader_lock); 1271 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1233 rb_head_page_deactivate(cpu_buffer); 1272 atomic_inc(&cpu_buffer->record_disabled);
1273 /*
1274 * We don't race with the readers since we have acquired the reader
1275 * lock. We also don't race with writers after disabling recording.
1276 * This makes it easy to figure out the first and the last page to be
1277 * removed from the list. We unlink all the pages in between including
1278 * the first and last pages. This is done in a busy loop so that we
1279 * lose the least number of traces.
1280 * The pages are freed after we restart recording and unlock readers.
1281 */
1282 tail_page = &cpu_buffer->tail_page->list;
1234 1283
1235 for (i = 0; i < nr_pages; i++) { 1284 /*
1236 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) 1285 * tail page might be on reader page, we remove the next page
1237 goto out; 1286 * from the ring buffer
1238 p = cpu_buffer->pages->next; 1287 */
1239 bpage = list_entry(p, struct buffer_page, list); 1288 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1240 list_del_init(&bpage->list); 1289 tail_page = rb_list_head(tail_page->next);
1241 free_buffer_page(bpage); 1290 to_remove = tail_page;
1291
1292 /* start of pages to remove */
1293 first_page = list_entry(rb_list_head(to_remove->next),
1294 struct buffer_page, list);
1295
1296 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1297 to_remove = rb_list_head(to_remove)->next;
1298 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1242 } 1299 }
1243 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1244 goto out;
1245 1300
1246 rb_reset_cpu(cpu_buffer); 1301 next_page = rb_list_head(to_remove)->next;
1247 rb_check_pages(cpu_buffer);
1248 1302
1249out: 1303 /*
1304 * Now we remove all pages between tail_page and next_page.
1305 * Make sure that we have head_bit value preserved for the
1306 * next page
1307 */
1308 tail_page->next = (struct list_head *)((unsigned long)next_page |
1309 head_bit);
1310 next_page = rb_list_head(next_page);
1311 next_page->prev = tail_page;
1312
1313 /* make sure pages points to a valid page in the ring buffer */
1314 cpu_buffer->pages = next_page;
1315
1316 /* update head page */
1317 if (head_bit)
1318 cpu_buffer->head_page = list_entry(next_page,
1319 struct buffer_page, list);
1320
1321 /*
1322 * change read pointer to make sure any read iterators reset
1323 * themselves
1324 */
1325 cpu_buffer->read = 0;
1326
1327 /* pages are removed, resume tracing and then free the pages */
1328 atomic_dec(&cpu_buffer->record_disabled);
1250 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1329 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1330
1331 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1332
1333 /* last buffer page to remove */
1334 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1335 list);
1336 tmp_iter_page = first_page;
1337
1338 do {
1339 to_remove_page = tmp_iter_page;
1340 rb_inc_page(cpu_buffer, &tmp_iter_page);
1341
1342 /* update the counters */
1343 page_entries = rb_page_entries(to_remove_page);
1344 if (page_entries) {
1345 /*
1346 * If something was added to this page, it was full
1347 * since it is not the tail page. So we deduct the
1348 * bytes consumed in ring buffer from here.
1349 * No need to update overruns, since this page is
1350 * deleted from ring buffer and its entries are
1351 * already accounted for.
1352 */
1353 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1354 }
1355
1356 /*
1357 * We have already removed references to this list item, just
1358 * free up the buffer_page and its page
1359 */
1360 free_buffer_page(to_remove_page);
1361 nr_removed--;
1362
1363 } while (to_remove_page != last_page);
1364
1365 RB_WARN_ON(cpu_buffer, nr_removed);
1366
1367 return nr_removed == 0;
1251} 1368}
1252 1369
1253static void 1370static int
1254rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, 1371rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1255 struct list_head *pages, unsigned nr_pages)
1256{ 1372{
1257 struct buffer_page *bpage; 1373 struct list_head *pages = &cpu_buffer->new_pages;
1258 struct list_head *p; 1374 int retries, success;
1259 unsigned i;
1260 1375
1261 raw_spin_lock_irq(&cpu_buffer->reader_lock); 1376 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1262 rb_head_page_deactivate(cpu_buffer); 1377 /*
1378 * We are holding the reader lock, so the reader page won't be swapped
1379 * in the ring buffer. Now we are racing with the writer trying to
1380 * move head page and the tail page.
1381 * We are going to adapt the reader page update process where:
1382 * 1. We first splice the start and end of list of new pages between
1383 * the head page and its previous page.
1384 * 2. We cmpxchg the prev_page->next to point from head page to the
1385 * start of new pages list.
1386 * 3. Finally, we update the head->prev to the end of new list.
1387 *
1388 * We will try this process 10 times, to make sure that we don't keep
1389 * spinning.
1390 */
1391 retries = 10;
1392 success = 0;
1393 while (retries--) {
1394 struct list_head *head_page, *prev_page, *r;
1395 struct list_head *last_page, *first_page;
1396 struct list_head *head_page_with_bit;
1263 1397
1264 for (i = 0; i < nr_pages; i++) { 1398 head_page = &rb_set_head_page(cpu_buffer)->list;
1265 if (RB_WARN_ON(cpu_buffer, list_empty(pages))) 1399 prev_page = head_page->prev;
1266 goto out; 1400
1267 p = pages->next; 1401 first_page = pages->next;
1268 bpage = list_entry(p, struct buffer_page, list); 1402 last_page = pages->prev;
1269 list_del_init(&bpage->list); 1403
1270 list_add_tail(&bpage->list, cpu_buffer->pages); 1404 head_page_with_bit = (struct list_head *)
1405 ((unsigned long)head_page | RB_PAGE_HEAD);
1406
1407 last_page->next = head_page_with_bit;
1408 first_page->prev = prev_page;
1409
1410 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1411
1412 if (r == head_page_with_bit) {
1413 /*
1414 * yay, we replaced the page pointer to our new list,
1415 * now, we just have to update to head page's prev
1416 * pointer to point to end of list
1417 */
1418 head_page->prev = last_page;
1419 success = 1;
1420 break;
1421 }
1271 } 1422 }
1272 rb_reset_cpu(cpu_buffer);
1273 rb_check_pages(cpu_buffer);
1274 1423
1275out: 1424 if (success)
1425 INIT_LIST_HEAD(pages);
1426 /*
1427 * If we weren't successful in adding in new pages, warn and stop
1428 * tracing
1429 */
1430 RB_WARN_ON(cpu_buffer, !success);
1276 raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1431 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1432
1433 /* free pages if they weren't inserted */
1434 if (!success) {
1435 struct buffer_page *bpage, *tmp;
1436 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1437 list) {
1438 list_del_init(&bpage->list);
1439 free_buffer_page(bpage);
1440 }
1441 }
1442 return success;
1443}
1444
1445static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1446{
1447 int success;
1448
1449 if (cpu_buffer->nr_pages_to_update > 0)
1450 success = rb_insert_pages(cpu_buffer);
1451 else
1452 success = rb_remove_pages(cpu_buffer,
1453 -cpu_buffer->nr_pages_to_update);
1454
1455 if (success)
1456 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1457}
1458
1459static void update_pages_handler(struct work_struct *work)
1460{
1461 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1462 struct ring_buffer_per_cpu, update_pages_work);
1463 rb_update_pages(cpu_buffer);
1464 complete(&cpu_buffer->update_done);
1277} 1465}
1278 1466
1279/** 1467/**
@@ -1283,16 +1471,14 @@ out:
1283 * 1471 *
1284 * Minimum size is 2 * BUF_PAGE_SIZE. 1472 * Minimum size is 2 * BUF_PAGE_SIZE.
1285 * 1473 *
1286 * Returns -1 on failure. 1474 * Returns 0 on success and < 0 on failure.
1287 */ 1475 */
1288int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) 1476int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1477 int cpu_id)
1289{ 1478{
1290 struct ring_buffer_per_cpu *cpu_buffer; 1479 struct ring_buffer_per_cpu *cpu_buffer;
1291 unsigned nr_pages, rm_pages, new_pages; 1480 unsigned nr_pages;
1292 struct buffer_page *bpage, *tmp; 1481 int cpu, err = 0;
1293 unsigned long buffer_size;
1294 LIST_HEAD(pages);
1295 int i, cpu;
1296 1482
1297 /* 1483 /*
1298 * Always succeed at resizing a non-existent buffer: 1484 * Always succeed at resizing a non-existent buffer:
@@ -1302,113 +1488,154 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1302 1488
1303 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 1489 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1304 size *= BUF_PAGE_SIZE; 1490 size *= BUF_PAGE_SIZE;
1305 buffer_size = buffer->pages * BUF_PAGE_SIZE;
1306 1491
1307 /* we need a minimum of two pages */ 1492 /* we need a minimum of two pages */
1308 if (size < BUF_PAGE_SIZE * 2) 1493 if (size < BUF_PAGE_SIZE * 2)
1309 size = BUF_PAGE_SIZE * 2; 1494 size = BUF_PAGE_SIZE * 2;
1310 1495
1311 if (size == buffer_size) 1496 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1312 return size;
1313
1314 atomic_inc(&buffer->record_disabled);
1315 1497
1316 /* Make sure all writers are done with this buffer. */ 1498 /*
1317 synchronize_sched(); 1499 * Don't succeed if resizing is disabled, as a reader might be
1500 * manipulating the ring buffer and is expecting a sane state while
1501 * this is true.
1502 */
1503 if (atomic_read(&buffer->resize_disabled))
1504 return -EBUSY;
1318 1505
1506 /* prevent another thread from changing buffer sizes */
1319 mutex_lock(&buffer->mutex); 1507 mutex_lock(&buffer->mutex);
1320 get_online_cpus();
1321
1322 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1323 1508
1324 if (size < buffer_size) { 1509 if (cpu_id == RING_BUFFER_ALL_CPUS) {
1510 /* calculate the pages to update */
1511 for_each_buffer_cpu(buffer, cpu) {
1512 cpu_buffer = buffer->buffers[cpu];
1325 1513
1326 /* easy case, just free pages */ 1514 cpu_buffer->nr_pages_to_update = nr_pages -
1327 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) 1515 cpu_buffer->nr_pages;
1328 goto out_fail; 1516 /*
1517 * nothing more to do for removing pages or no update
1518 */
1519 if (cpu_buffer->nr_pages_to_update <= 0)
1520 continue;
1521 /*
1522 * to add pages, make sure all new pages can be
1523 * allocated without receiving ENOMEM
1524 */
1525 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1526 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1527 &cpu_buffer->new_pages, cpu)) {
1528 /* not enough memory for new pages */
1529 err = -ENOMEM;
1530 goto out_err;
1531 }
1532 }
1329 1533
1330 rm_pages = buffer->pages - nr_pages; 1534 get_online_cpus();
1535 /*
1536 * Fire off all the required work handlers
1537 * We can't schedule on offline CPUs, but it's not necessary
1538 * since we can change their buffer sizes without any race.
1539 */
1540 for_each_buffer_cpu(buffer, cpu) {
1541 cpu_buffer = buffer->buffers[cpu];
1542 if (!cpu_buffer->nr_pages_to_update)
1543 continue;
1544
1545 if (cpu_online(cpu))
1546 schedule_work_on(cpu,
1547 &cpu_buffer->update_pages_work);
1548 else
1549 rb_update_pages(cpu_buffer);
1550 }
1331 1551
1552 /* wait for all the updates to complete */
1332 for_each_buffer_cpu(buffer, cpu) { 1553 for_each_buffer_cpu(buffer, cpu) {
1333 cpu_buffer = buffer->buffers[cpu]; 1554 cpu_buffer = buffer->buffers[cpu];
1334 rb_remove_pages(cpu_buffer, rm_pages); 1555 if (!cpu_buffer->nr_pages_to_update)
1556 continue;
1557
1558 if (cpu_online(cpu))
1559 wait_for_completion(&cpu_buffer->update_done);
1560 cpu_buffer->nr_pages_to_update = 0;
1335 } 1561 }
1336 goto out;
1337 }
1338 1562
1339 /* 1563 put_online_cpus();
1340 * This is a bit more difficult. We only want to add pages 1564 } else {
1341 * when we can allocate enough for all CPUs. We do this 1565 cpu_buffer = buffer->buffers[cpu_id];
1342 * by allocating all the pages and storing them on a local
1343 * link list. If we succeed in our allocation, then we
1344 * add these pages to the cpu_buffers. Otherwise we just free
1345 * them all and return -ENOMEM;
1346 */
1347 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1348 goto out_fail;
1349 1566
1350 new_pages = nr_pages - buffer->pages; 1567 if (nr_pages == cpu_buffer->nr_pages)
1568 goto out;
1351 1569
1352 for_each_buffer_cpu(buffer, cpu) { 1570 cpu_buffer->nr_pages_to_update = nr_pages -
1353 for (i = 0; i < new_pages; i++) { 1571 cpu_buffer->nr_pages;
1354 struct page *page; 1572
1355 /* 1573 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1356 * __GFP_NORETRY flag makes sure that the allocation 1574 if (cpu_buffer->nr_pages_to_update > 0 &&
1357 * fails gracefully without invoking oom-killer and 1575 __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1358 * the system is not destabilized. 1576 &cpu_buffer->new_pages, cpu_id)) {
1359 */ 1577 err = -ENOMEM;
1360 bpage = kzalloc_node(ALIGN(sizeof(*bpage), 1578 goto out_err;
1361 cache_line_size()),
1362 GFP_KERNEL | __GFP_NORETRY,
1363 cpu_to_node(cpu));
1364 if (!bpage)
1365 goto free_pages;
1366 list_add(&bpage->list, &pages);
1367 page = alloc_pages_node(cpu_to_node(cpu),
1368 GFP_KERNEL | __GFP_NORETRY, 0);
1369 if (!page)
1370 goto free_pages;
1371 bpage->page = page_address(page);
1372 rb_init_page(bpage->page);
1373 } 1579 }
1374 }
1375 1580
1376 for_each_buffer_cpu(buffer, cpu) { 1581 get_online_cpus();
1377 cpu_buffer = buffer->buffers[cpu];
1378 rb_insert_pages(cpu_buffer, &pages, new_pages);
1379 }
1380 1582
1381 if (RB_WARN_ON(buffer, !list_empty(&pages))) 1583 if (cpu_online(cpu_id)) {
1382 goto out_fail; 1584 schedule_work_on(cpu_id,
1585 &cpu_buffer->update_pages_work);
1586 wait_for_completion(&cpu_buffer->update_done);
1587 } else
1588 rb_update_pages(cpu_buffer);
1589
1590 cpu_buffer->nr_pages_to_update = 0;
1591 put_online_cpus();
1592 }
1383 1593
1384 out: 1594 out:
1385 buffer->pages = nr_pages; 1595 /*
1386 put_online_cpus(); 1596 * The ring buffer resize can happen with the ring buffer
1597 * enabled, so that the update disturbs the tracing as little
1598 * as possible. But if the buffer is disabled, we do not need
1599 * to worry about that, and we can take the time to verify
1600 * that the buffer is not corrupt.
1601 */
1602 if (atomic_read(&buffer->record_disabled)) {
1603 atomic_inc(&buffer->record_disabled);
1604 /*
1605 * Even though the buffer was disabled, we must make sure
1606 * that it is truly disabled before calling rb_check_pages.
1607 * There could have been a race between checking
1608 * record_disable and incrementing it.
1609 */
1610 synchronize_sched();
1611 for_each_buffer_cpu(buffer, cpu) {
1612 cpu_buffer = buffer->buffers[cpu];
1613 rb_check_pages(cpu_buffer);
1614 }
1615 atomic_dec(&buffer->record_disabled);
1616 }
1617
1387 mutex_unlock(&buffer->mutex); 1618 mutex_unlock(&buffer->mutex);
1619 return size;
1388 1620
1389 atomic_dec(&buffer->record_disabled); 1621 out_err:
1622 for_each_buffer_cpu(buffer, cpu) {
1623 struct buffer_page *bpage, *tmp;
1390 1624
1391 return size; 1625 cpu_buffer = buffer->buffers[cpu];
1626 cpu_buffer->nr_pages_to_update = 0;
1392 1627
1393 free_pages: 1628 if (list_empty(&cpu_buffer->new_pages))
1394 list_for_each_entry_safe(bpage, tmp, &pages, list) { 1629 continue;
1395 list_del_init(&bpage->list);
1396 free_buffer_page(bpage);
1397 }
1398 put_online_cpus();
1399 mutex_unlock(&buffer->mutex);
1400 atomic_dec(&buffer->record_disabled);
1401 return -ENOMEM;
1402 1630
1403 /* 1631 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1404 * Something went totally wrong, and we are too paranoid 1632 list) {
1405 * to even clean up the mess. 1633 list_del_init(&bpage->list);
1406 */ 1634 free_buffer_page(bpage);
1407 out_fail: 1635 }
1408 put_online_cpus(); 1636 }
1409 mutex_unlock(&buffer->mutex); 1637 mutex_unlock(&buffer->mutex);
1410 atomic_dec(&buffer->record_disabled); 1638 return err;
1411 return -1;
1412} 1639}
1413EXPORT_SYMBOL_GPL(ring_buffer_resize); 1640EXPORT_SYMBOL_GPL(ring_buffer_resize);
1414 1641
@@ -1447,21 +1674,11 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
1447 return __rb_page_index(iter->head_page, iter->head); 1674 return __rb_page_index(iter->head_page, iter->head);
1448} 1675}
1449 1676
1450static inline unsigned long rb_page_write(struct buffer_page *bpage)
1451{
1452 return local_read(&bpage->write) & RB_WRITE_MASK;
1453}
1454
1455static inline unsigned rb_page_commit(struct buffer_page *bpage) 1677static inline unsigned rb_page_commit(struct buffer_page *bpage)
1456{ 1678{
1457 return local_read(&bpage->page->commit); 1679 return local_read(&bpage->page->commit);
1458} 1680}
1459 1681
1460static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1461{
1462 return local_read(&bpage->entries) & RB_WRITE_MASK;
1463}
1464
1465/* Size is determined by what has been committed */ 1682/* Size is determined by what has been committed */
1466static inline unsigned rb_page_size(struct buffer_page *bpage) 1683static inline unsigned rb_page_size(struct buffer_page *bpage)
1467{ 1684{
@@ -1510,7 +1727,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1510 * assign the commit to the tail. 1727 * assign the commit to the tail.
1511 */ 1728 */
1512 again: 1729 again:
1513 max_count = cpu_buffer->buffer->pages * 100; 1730 max_count = cpu_buffer->nr_pages * 100;
1514 1731
1515 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 1732 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1516 if (RB_WARN_ON(cpu_buffer, !(--max_count))) 1733 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
@@ -3486,6 +3703,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3486 3703
3487 iter->cpu_buffer = cpu_buffer; 3704 iter->cpu_buffer = cpu_buffer;
3488 3705
3706 atomic_inc(&buffer->resize_disabled);
3489 atomic_inc(&cpu_buffer->record_disabled); 3707 atomic_inc(&cpu_buffer->record_disabled);
3490 3708
3491 return iter; 3709 return iter;
@@ -3548,7 +3766,14 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
3548{ 3766{
3549 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3767 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3550 3768
3769 /*
3770 * Ring buffer is disabled from recording, here's a good place
3771 * to check the integrity of the ring buffer.
3772 */
3773 rb_check_pages(cpu_buffer);
3774
3551 atomic_dec(&cpu_buffer->record_disabled); 3775 atomic_dec(&cpu_buffer->record_disabled);
3776 atomic_dec(&cpu_buffer->buffer->resize_disabled);
3552 kfree(iter); 3777 kfree(iter);
3553} 3778}
3554EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 3779EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
@@ -3588,9 +3813,18 @@ EXPORT_SYMBOL_GPL(ring_buffer_read);
3588 * ring_buffer_size - return the size of the ring buffer (in bytes) 3813 * ring_buffer_size - return the size of the ring buffer (in bytes)
3589 * @buffer: The ring buffer. 3814 * @buffer: The ring buffer.
3590 */ 3815 */
3591unsigned long ring_buffer_size(struct ring_buffer *buffer) 3816unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
3592{ 3817{
3593 return BUF_PAGE_SIZE * buffer->pages; 3818 /*
3819 * Earlier, this method returned
3820 * BUF_PAGE_SIZE * buffer->nr_pages
3821 * Since the nr_pages field is now removed, we have converted this to
3822 * return the per cpu buffer value.
3823 */
3824 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3825 return 0;
3826
3827 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
3594} 3828}
3595EXPORT_SYMBOL_GPL(ring_buffer_size); 3829EXPORT_SYMBOL_GPL(ring_buffer_size);
3596 3830
@@ -3611,6 +3845,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3611 cpu_buffer->commit_page = cpu_buffer->head_page; 3845 cpu_buffer->commit_page = cpu_buffer->head_page;
3612 3846
3613 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 3847 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3848 INIT_LIST_HEAD(&cpu_buffer->new_pages);
3614 local_set(&cpu_buffer->reader_page->write, 0); 3849 local_set(&cpu_buffer->reader_page->write, 0);
3615 local_set(&cpu_buffer->reader_page->entries, 0); 3850 local_set(&cpu_buffer->reader_page->entries, 0);
3616 local_set(&cpu_buffer->reader_page->page->commit, 0); 3851 local_set(&cpu_buffer->reader_page->page->commit, 0);
@@ -3647,8 +3882,12 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3647 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3882 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3648 return; 3883 return;
3649 3884
3885 atomic_inc(&buffer->resize_disabled);
3650 atomic_inc(&cpu_buffer->record_disabled); 3886 atomic_inc(&cpu_buffer->record_disabled);
3651 3887
3888 /* Make sure all commits have finished */
3889 synchronize_sched();
3890
3652 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3891 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3653 3892
3654 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 3893 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
@@ -3664,6 +3903,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3664 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3903 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3665 3904
3666 atomic_dec(&cpu_buffer->record_disabled); 3905 atomic_dec(&cpu_buffer->record_disabled);
3906 atomic_dec(&buffer->resize_disabled);
3667} 3907}
3668EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 3908EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
3669 3909
@@ -3765,8 +4005,11 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3765 !cpumask_test_cpu(cpu, buffer_b->cpumask)) 4005 !cpumask_test_cpu(cpu, buffer_b->cpumask))
3766 goto out; 4006 goto out;
3767 4007
4008 cpu_buffer_a = buffer_a->buffers[cpu];
4009 cpu_buffer_b = buffer_b->buffers[cpu];
4010
3768 /* At least make sure the two buffers are somewhat the same */ 4011 /* At least make sure the two buffers are somewhat the same */
3769 if (buffer_a->pages != buffer_b->pages) 4012 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
3770 goto out; 4013 goto out;
3771 4014
3772 ret = -EAGAIN; 4015 ret = -EAGAIN;
@@ -3780,9 +4023,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3780 if (atomic_read(&buffer_b->record_disabled)) 4023 if (atomic_read(&buffer_b->record_disabled))
3781 goto out; 4024 goto out;
3782 4025
3783 cpu_buffer_a = buffer_a->buffers[cpu];
3784 cpu_buffer_b = buffer_b->buffers[cpu];
3785
3786 if (atomic_read(&cpu_buffer_a->record_disabled)) 4026 if (atomic_read(&cpu_buffer_a->record_disabled))
3787 goto out; 4027 goto out;
3788 4028
@@ -4071,6 +4311,8 @@ static int rb_cpu_notify(struct notifier_block *self,
4071 struct ring_buffer *buffer = 4311 struct ring_buffer *buffer =
4072 container_of(self, struct ring_buffer, cpu_notify); 4312 container_of(self, struct ring_buffer, cpu_notify);
4073 long cpu = (long)hcpu; 4313 long cpu = (long)hcpu;
4314 int cpu_i, nr_pages_same;
4315 unsigned int nr_pages;
4074 4316
4075 switch (action) { 4317 switch (action) {
4076 case CPU_UP_PREPARE: 4318 case CPU_UP_PREPARE:
@@ -4078,8 +4320,23 @@ static int rb_cpu_notify(struct notifier_block *self,
4078 if (cpumask_test_cpu(cpu, buffer->cpumask)) 4320 if (cpumask_test_cpu(cpu, buffer->cpumask))
4079 return NOTIFY_OK; 4321 return NOTIFY_OK;
4080 4322
4323 nr_pages = 0;
4324 nr_pages_same = 1;
4325 /* check if all cpu sizes are same */
4326 for_each_buffer_cpu(buffer, cpu_i) {
4327 /* fill in the size from first enabled cpu */
4328 if (nr_pages == 0)
4329 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4330 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4331 nr_pages_same = 0;
4332 break;
4333 }
4334 }
4335 /* allocate minimum pages, user can later expand it */
4336 if (!nr_pages_same)
4337 nr_pages = 2;
4081 buffer->buffers[cpu] = 4338 buffer->buffers[cpu] =
4082 rb_allocate_cpu_buffer(buffer, cpu); 4339 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4083 if (!buffer->buffers[cpu]) { 4340 if (!buffer->buffers[cpu]) {
4084 WARN(1, "failed to allocate ring buffer on CPU %ld\n", 4341 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4085 cpu); 4342 cpu);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2a22255c1010..68032c6177db 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,18 +87,6 @@ static int tracing_disabled = 1;
87 87
88DEFINE_PER_CPU(int, ftrace_cpu_disabled); 88DEFINE_PER_CPU(int, ftrace_cpu_disabled);
89 89
90static inline void ftrace_disable_cpu(void)
91{
92 preempt_disable();
93 __this_cpu_inc(ftrace_cpu_disabled);
94}
95
96static inline void ftrace_enable_cpu(void)
97{
98 __this_cpu_dec(ftrace_cpu_disabled);
99 preempt_enable();
100}
101
102cpumask_var_t __read_mostly tracing_buffer_mask; 90cpumask_var_t __read_mostly tracing_buffer_mask;
103 91
104/* 92/*
@@ -629,7 +617,6 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
629static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) 617static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
630{ 618{
631 int len; 619 int len;
632 void *ret;
633 620
634 if (s->len <= s->readpos) 621 if (s->len <= s->readpos)
635 return -EBUSY; 622 return -EBUSY;
@@ -637,9 +624,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
637 len = s->len - s->readpos; 624 len = s->len - s->readpos;
638 if (cnt > len) 625 if (cnt > len)
639 cnt = len; 626 cnt = len;
640 ret = memcpy(buf, s->buffer + s->readpos, cnt); 627 memcpy(buf, s->buffer + s->readpos, cnt);
641 if (!ret)
642 return -EFAULT;
643 628
644 s->readpos += cnt; 629 s->readpos += cnt;
645 return cnt; 630 return cnt;
@@ -751,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
751 736
752 arch_spin_lock(&ftrace_max_lock); 737 arch_spin_lock(&ftrace_max_lock);
753 738
754 ftrace_disable_cpu();
755
756 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); 739 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
757 740
758 if (ret == -EBUSY) { 741 if (ret == -EBUSY) {
@@ -766,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
766 "Failed to swap buffers due to commit in progress\n"); 749 "Failed to swap buffers due to commit in progress\n");
767 } 750 }
768 751
769 ftrace_enable_cpu();
770
771 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 752 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
772 753
773 __update_max_tr(tr, tsk, cpu); 754 __update_max_tr(tr, tsk, cpu);
@@ -782,8 +763,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
782 * Register a new plugin tracer. 763 * Register a new plugin tracer.
783 */ 764 */
784int register_tracer(struct tracer *type) 765int register_tracer(struct tracer *type)
785__releases(kernel_lock)
786__acquires(kernel_lock)
787{ 766{
788 struct tracer *t; 767 struct tracer *t;
789 int ret = 0; 768 int ret = 0;
@@ -841,7 +820,8 @@ __acquires(kernel_lock)
841 820
842 /* If we expanded the buffers, make sure the max is expanded too */ 821 /* If we expanded the buffers, make sure the max is expanded too */
843 if (ring_buffer_expanded && type->use_max_tr) 822 if (ring_buffer_expanded && type->use_max_tr)
844 ring_buffer_resize(max_tr.buffer, trace_buf_size); 823 ring_buffer_resize(max_tr.buffer, trace_buf_size,
824 RING_BUFFER_ALL_CPUS);
845 825
846 /* the test is responsible for initializing and enabling */ 826 /* the test is responsible for initializing and enabling */
847 pr_info("Testing tracer %s: ", type->name); 827 pr_info("Testing tracer %s: ", type->name);
@@ -857,7 +837,8 @@ __acquires(kernel_lock)
857 837
858 /* Shrink the max buffer again */ 838 /* Shrink the max buffer again */
859 if (ring_buffer_expanded && type->use_max_tr) 839 if (ring_buffer_expanded && type->use_max_tr)
860 ring_buffer_resize(max_tr.buffer, 1); 840 ring_buffer_resize(max_tr.buffer, 1,
841 RING_BUFFER_ALL_CPUS);
861 842
862 printk(KERN_CONT "PASSED\n"); 843 printk(KERN_CONT "PASSED\n");
863 } 844 }
@@ -917,13 +898,6 @@ out:
917 mutex_unlock(&trace_types_lock); 898 mutex_unlock(&trace_types_lock);
918} 899}
919 900
920static void __tracing_reset(struct ring_buffer *buffer, int cpu)
921{
922 ftrace_disable_cpu();
923 ring_buffer_reset_cpu(buffer, cpu);
924 ftrace_enable_cpu();
925}
926
927void tracing_reset(struct trace_array *tr, int cpu) 901void tracing_reset(struct trace_array *tr, int cpu)
928{ 902{
929 struct ring_buffer *buffer = tr->buffer; 903 struct ring_buffer *buffer = tr->buffer;
@@ -932,7 +906,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
932 906
933 /* Make sure all commits have finished */ 907 /* Make sure all commits have finished */
934 synchronize_sched(); 908 synchronize_sched();
935 __tracing_reset(buffer, cpu); 909 ring_buffer_reset_cpu(buffer, cpu);
936 910
937 ring_buffer_record_enable(buffer); 911 ring_buffer_record_enable(buffer);
938} 912}
@@ -950,7 +924,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
950 tr->time_start = ftrace_now(tr->cpu); 924 tr->time_start = ftrace_now(tr->cpu);
951 925
952 for_each_online_cpu(cpu) 926 for_each_online_cpu(cpu)
953 __tracing_reset(buffer, cpu); 927 ring_buffer_reset_cpu(buffer, cpu);
954 928
955 ring_buffer_record_enable(buffer); 929 ring_buffer_record_enable(buffer);
956} 930}
@@ -1498,25 +1472,119 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1498 1472
1499#endif /* CONFIG_STACKTRACE */ 1473#endif /* CONFIG_STACKTRACE */
1500 1474
1475/* created for use with alloc_percpu */
1476struct trace_buffer_struct {
1477 char buffer[TRACE_BUF_SIZE];
1478};
1479
1480static struct trace_buffer_struct *trace_percpu_buffer;
1481static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1482static struct trace_buffer_struct *trace_percpu_irq_buffer;
1483static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1484
1485/*
1486 * The buffer used is dependent on the context. There is a per cpu
1487 * buffer for normal context, softirq contex, hard irq context and
1488 * for NMI context. Thise allows for lockless recording.
1489 *
1490 * Note, if the buffers failed to be allocated, then this returns NULL
1491 */
1492static char *get_trace_buf(void)
1493{
1494 struct trace_buffer_struct *percpu_buffer;
1495 struct trace_buffer_struct *buffer;
1496
1497 /*
1498 * If we have allocated per cpu buffers, then we do not
1499 * need to do any locking.
1500 */
1501 if (in_nmi())
1502 percpu_buffer = trace_percpu_nmi_buffer;
1503 else if (in_irq())
1504 percpu_buffer = trace_percpu_irq_buffer;
1505 else if (in_softirq())
1506 percpu_buffer = trace_percpu_sirq_buffer;
1507 else
1508 percpu_buffer = trace_percpu_buffer;
1509
1510 if (!percpu_buffer)
1511 return NULL;
1512
1513 buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
1514
1515 return buffer->buffer;
1516}
1517
1518static int alloc_percpu_trace_buffer(void)
1519{
1520 struct trace_buffer_struct *buffers;
1521 struct trace_buffer_struct *sirq_buffers;
1522 struct trace_buffer_struct *irq_buffers;
1523 struct trace_buffer_struct *nmi_buffers;
1524
1525 buffers = alloc_percpu(struct trace_buffer_struct);
1526 if (!buffers)
1527 goto err_warn;
1528
1529 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1530 if (!sirq_buffers)
1531 goto err_sirq;
1532
1533 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1534 if (!irq_buffers)
1535 goto err_irq;
1536
1537 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1538 if (!nmi_buffers)
1539 goto err_nmi;
1540
1541 trace_percpu_buffer = buffers;
1542 trace_percpu_sirq_buffer = sirq_buffers;
1543 trace_percpu_irq_buffer = irq_buffers;
1544 trace_percpu_nmi_buffer = nmi_buffers;
1545
1546 return 0;
1547
1548 err_nmi:
1549 free_percpu(irq_buffers);
1550 err_irq:
1551 free_percpu(sirq_buffers);
1552 err_sirq:
1553 free_percpu(buffers);
1554 err_warn:
1555 WARN(1, "Could not allocate percpu trace_printk buffer");
1556 return -ENOMEM;
1557}
1558
1559void trace_printk_init_buffers(void)
1560{
1561 static int buffers_allocated;
1562
1563 if (buffers_allocated)
1564 return;
1565
1566 if (alloc_percpu_trace_buffer())
1567 return;
1568
1569 pr_info("ftrace: Allocated trace_printk buffers\n");
1570
1571 buffers_allocated = 1;
1572}
1573
1501/** 1574/**
1502 * trace_vbprintk - write binary msg to tracing buffer 1575 * trace_vbprintk - write binary msg to tracing buffer
1503 * 1576 *
1504 */ 1577 */
1505int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 1578int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1506{ 1579{
1507 static arch_spinlock_t trace_buf_lock =
1508 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1509 static u32 trace_buf[TRACE_BUF_SIZE];
1510
1511 struct ftrace_event_call *call = &event_bprint; 1580 struct ftrace_event_call *call = &event_bprint;
1512 struct ring_buffer_event *event; 1581 struct ring_buffer_event *event;
1513 struct ring_buffer *buffer; 1582 struct ring_buffer *buffer;
1514 struct trace_array *tr = &global_trace; 1583 struct trace_array *tr = &global_trace;
1515 struct trace_array_cpu *data;
1516 struct bprint_entry *entry; 1584 struct bprint_entry *entry;
1517 unsigned long flags; 1585 unsigned long flags;
1518 int disable; 1586 char *tbuffer;
1519 int cpu, len = 0, size, pc; 1587 int len = 0, size, pc;
1520 1588
1521 if (unlikely(tracing_selftest_running || tracing_disabled)) 1589 if (unlikely(tracing_selftest_running || tracing_disabled))
1522 return 0; 1590 return 0;
@@ -1526,43 +1594,36 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1526 1594
1527 pc = preempt_count(); 1595 pc = preempt_count();
1528 preempt_disable_notrace(); 1596 preempt_disable_notrace();
1529 cpu = raw_smp_processor_id();
1530 data = tr->data[cpu];
1531 1597
1532 disable = atomic_inc_return(&data->disabled); 1598 tbuffer = get_trace_buf();
1533 if (unlikely(disable != 1)) 1599 if (!tbuffer) {
1600 len = 0;
1534 goto out; 1601 goto out;
1602 }
1535 1603
1536 /* Lockdep uses trace_printk for lock tracing */ 1604 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1537 local_irq_save(flags);
1538 arch_spin_lock(&trace_buf_lock);
1539 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1540 1605
1541 if (len > TRACE_BUF_SIZE || len < 0) 1606 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1542 goto out_unlock; 1607 goto out;
1543 1608
1609 local_save_flags(flags);
1544 size = sizeof(*entry) + sizeof(u32) * len; 1610 size = sizeof(*entry) + sizeof(u32) * len;
1545 buffer = tr->buffer; 1611 buffer = tr->buffer;
1546 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, 1612 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1547 flags, pc); 1613 flags, pc);
1548 if (!event) 1614 if (!event)
1549 goto out_unlock; 1615 goto out;
1550 entry = ring_buffer_event_data(event); 1616 entry = ring_buffer_event_data(event);
1551 entry->ip = ip; 1617 entry->ip = ip;
1552 entry->fmt = fmt; 1618 entry->fmt = fmt;
1553 1619
1554 memcpy(entry->buf, trace_buf, sizeof(u32) * len); 1620 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
1555 if (!filter_check_discard(call, entry, buffer, event)) { 1621 if (!filter_check_discard(call, entry, buffer, event)) {
1556 ring_buffer_unlock_commit(buffer, event); 1622 ring_buffer_unlock_commit(buffer, event);
1557 ftrace_trace_stack(buffer, flags, 6, pc); 1623 ftrace_trace_stack(buffer, flags, 6, pc);
1558 } 1624 }
1559 1625
1560out_unlock:
1561 arch_spin_unlock(&trace_buf_lock);
1562 local_irq_restore(flags);
1563
1564out: 1626out:
1565 atomic_dec_return(&data->disabled);
1566 preempt_enable_notrace(); 1627 preempt_enable_notrace();
1567 unpause_graph_tracing(); 1628 unpause_graph_tracing();
1568 1629
@@ -1588,58 +1649,53 @@ int trace_array_printk(struct trace_array *tr,
1588int trace_array_vprintk(struct trace_array *tr, 1649int trace_array_vprintk(struct trace_array *tr,
1589 unsigned long ip, const char *fmt, va_list args) 1650 unsigned long ip, const char *fmt, va_list args)
1590{ 1651{
1591 static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1592 static char trace_buf[TRACE_BUF_SIZE];
1593
1594 struct ftrace_event_call *call = &event_print; 1652 struct ftrace_event_call *call = &event_print;
1595 struct ring_buffer_event *event; 1653 struct ring_buffer_event *event;
1596 struct ring_buffer *buffer; 1654 struct ring_buffer *buffer;
1597 struct trace_array_cpu *data; 1655 int len = 0, size, pc;
1598 int cpu, len = 0, size, pc;
1599 struct print_entry *entry; 1656 struct print_entry *entry;
1600 unsigned long irq_flags; 1657 unsigned long flags;
1601 int disable; 1658 char *tbuffer;
1602 1659
1603 if (tracing_disabled || tracing_selftest_running) 1660 if (tracing_disabled || tracing_selftest_running)
1604 return 0; 1661 return 0;
1605 1662
1663 /* Don't pollute graph traces with trace_vprintk internals */
1664 pause_graph_tracing();
1665
1606 pc = preempt_count(); 1666 pc = preempt_count();
1607 preempt_disable_notrace(); 1667 preempt_disable_notrace();
1608 cpu = raw_smp_processor_id();
1609 data = tr->data[cpu];
1610 1668
1611 disable = atomic_inc_return(&data->disabled); 1669
1612 if (unlikely(disable != 1)) 1670 tbuffer = get_trace_buf();
1671 if (!tbuffer) {
1672 len = 0;
1613 goto out; 1673 goto out;
1674 }
1614 1675
1615 pause_graph_tracing(); 1676 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1616 raw_local_irq_save(irq_flags); 1677 if (len > TRACE_BUF_SIZE)
1617 arch_spin_lock(&trace_buf_lock); 1678 goto out;
1618 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1619 1679
1680 local_save_flags(flags);
1620 size = sizeof(*entry) + len + 1; 1681 size = sizeof(*entry) + len + 1;
1621 buffer = tr->buffer; 1682 buffer = tr->buffer;
1622 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 1683 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1623 irq_flags, pc); 1684 flags, pc);
1624 if (!event) 1685 if (!event)
1625 goto out_unlock; 1686 goto out;
1626 entry = ring_buffer_event_data(event); 1687 entry = ring_buffer_event_data(event);
1627 entry->ip = ip; 1688 entry->ip = ip;
1628 1689
1629 memcpy(&entry->buf, trace_buf, len); 1690 memcpy(&entry->buf, tbuffer, len);
1630 entry->buf[len] = '\0'; 1691 entry->buf[len] = '\0';
1631 if (!filter_check_discard(call, entry, buffer, event)) { 1692 if (!filter_check_discard(call, entry, buffer, event)) {
1632 ring_buffer_unlock_commit(buffer, event); 1693 ring_buffer_unlock_commit(buffer, event);
1633 ftrace_trace_stack(buffer, irq_flags, 6, pc); 1694 ftrace_trace_stack(buffer, flags, 6, pc);
1634 } 1695 }
1635
1636 out_unlock:
1637 arch_spin_unlock(&trace_buf_lock);
1638 raw_local_irq_restore(irq_flags);
1639 unpause_graph_tracing();
1640 out: 1696 out:
1641 atomic_dec_return(&data->disabled);
1642 preempt_enable_notrace(); 1697 preempt_enable_notrace();
1698 unpause_graph_tracing();
1643 1699
1644 return len; 1700 return len;
1645} 1701}
@@ -1652,14 +1708,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
1652 1708
1653static void trace_iterator_increment(struct trace_iterator *iter) 1709static void trace_iterator_increment(struct trace_iterator *iter)
1654{ 1710{
1655 /* Don't allow ftrace to trace into the ring buffers */
1656 ftrace_disable_cpu();
1657
1658 iter->idx++; 1711 iter->idx++;
1659 if (iter->buffer_iter[iter->cpu]) 1712 if (iter->buffer_iter[iter->cpu])
1660 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); 1713 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1661
1662 ftrace_enable_cpu();
1663} 1714}
1664 1715
1665static struct trace_entry * 1716static struct trace_entry *
@@ -1669,17 +1720,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1669 struct ring_buffer_event *event; 1720 struct ring_buffer_event *event;
1670 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; 1721 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1671 1722
1672 /* Don't allow ftrace to trace into the ring buffers */
1673 ftrace_disable_cpu();
1674
1675 if (buf_iter) 1723 if (buf_iter)
1676 event = ring_buffer_iter_peek(buf_iter, ts); 1724 event = ring_buffer_iter_peek(buf_iter, ts);
1677 else 1725 else
1678 event = ring_buffer_peek(iter->tr->buffer, cpu, ts, 1726 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1679 lost_events); 1727 lost_events);
1680 1728
1681 ftrace_enable_cpu();
1682
1683 if (event) { 1729 if (event) {
1684 iter->ent_size = ring_buffer_event_length(event); 1730 iter->ent_size = ring_buffer_event_length(event);
1685 return ring_buffer_event_data(event); 1731 return ring_buffer_event_data(event);
@@ -1769,11 +1815,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
1769 1815
1770static void trace_consume(struct trace_iterator *iter) 1816static void trace_consume(struct trace_iterator *iter)
1771{ 1817{
1772 /* Don't allow ftrace to trace into the ring buffers */
1773 ftrace_disable_cpu();
1774 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, 1818 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1775 &iter->lost_events); 1819 &iter->lost_events);
1776 ftrace_enable_cpu();
1777} 1820}
1778 1821
1779static void *s_next(struct seq_file *m, void *v, loff_t *pos) 1822static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1862,16 +1905,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1862 iter->cpu = 0; 1905 iter->cpu = 0;
1863 iter->idx = -1; 1906 iter->idx = -1;
1864 1907
1865 ftrace_disable_cpu();
1866
1867 if (cpu_file == TRACE_PIPE_ALL_CPU) { 1908 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1868 for_each_tracing_cpu(cpu) 1909 for_each_tracing_cpu(cpu)
1869 tracing_iter_reset(iter, cpu); 1910 tracing_iter_reset(iter, cpu);
1870 } else 1911 } else
1871 tracing_iter_reset(iter, cpu_file); 1912 tracing_iter_reset(iter, cpu_file);
1872 1913
1873 ftrace_enable_cpu();
1874
1875 iter->leftover = 0; 1914 iter->leftover = 0;
1876 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 1915 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1877 ; 1916 ;
@@ -2332,15 +2371,13 @@ static struct trace_iterator *
2332__tracing_open(struct inode *inode, struct file *file) 2371__tracing_open(struct inode *inode, struct file *file)
2333{ 2372{
2334 long cpu_file = (long) inode->i_private; 2373 long cpu_file = (long) inode->i_private;
2335 void *fail_ret = ERR_PTR(-ENOMEM);
2336 struct trace_iterator *iter; 2374 struct trace_iterator *iter;
2337 struct seq_file *m; 2375 int cpu;
2338 int cpu, ret;
2339 2376
2340 if (tracing_disabled) 2377 if (tracing_disabled)
2341 return ERR_PTR(-ENODEV); 2378 return ERR_PTR(-ENODEV);
2342 2379
2343 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 2380 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2344 if (!iter) 2381 if (!iter)
2345 return ERR_PTR(-ENOMEM); 2382 return ERR_PTR(-ENOMEM);
2346 2383
@@ -2397,32 +2434,15 @@ __tracing_open(struct inode *inode, struct file *file)
2397 tracing_iter_reset(iter, cpu); 2434 tracing_iter_reset(iter, cpu);
2398 } 2435 }
2399 2436
2400 ret = seq_open(file, &tracer_seq_ops);
2401 if (ret < 0) {
2402 fail_ret = ERR_PTR(ret);
2403 goto fail_buffer;
2404 }
2405
2406 m = file->private_data;
2407 m->private = iter;
2408
2409 mutex_unlock(&trace_types_lock); 2437 mutex_unlock(&trace_types_lock);
2410 2438
2411 return iter; 2439 return iter;
2412 2440
2413 fail_buffer:
2414 for_each_tracing_cpu(cpu) {
2415 if (iter->buffer_iter[cpu])
2416 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2417 }
2418 free_cpumask_var(iter->started);
2419 tracing_start();
2420 fail: 2441 fail:
2421 mutex_unlock(&trace_types_lock); 2442 mutex_unlock(&trace_types_lock);
2422 kfree(iter->trace); 2443 kfree(iter->trace);
2423 kfree(iter); 2444 seq_release_private(inode, file);
2424 2445 return ERR_PTR(-ENOMEM);
2425 return fail_ret;
2426} 2446}
2427 2447
2428int tracing_open_generic(struct inode *inode, struct file *filp) 2448int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2458,11 +2478,10 @@ static int tracing_release(struct inode *inode, struct file *file)
2458 tracing_start(); 2478 tracing_start();
2459 mutex_unlock(&trace_types_lock); 2479 mutex_unlock(&trace_types_lock);
2460 2480
2461 seq_release(inode, file);
2462 mutex_destroy(&iter->mutex); 2481 mutex_destroy(&iter->mutex);
2463 free_cpumask_var(iter->started); 2482 free_cpumask_var(iter->started);
2464 kfree(iter->trace); 2483 kfree(iter->trace);
2465 kfree(iter); 2484 seq_release_private(inode, file);
2466 return 0; 2485 return 0;
2467} 2486}
2468 2487
@@ -2648,10 +2667,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2648 if (cpumask_test_cpu(cpu, tracing_cpumask) && 2667 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2649 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2668 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2650 atomic_inc(&global_trace.data[cpu]->disabled); 2669 atomic_inc(&global_trace.data[cpu]->disabled);
2670 ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
2651 } 2671 }
2652 if (!cpumask_test_cpu(cpu, tracing_cpumask) && 2672 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2653 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2673 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2654 atomic_dec(&global_trace.data[cpu]->disabled); 2674 atomic_dec(&global_trace.data[cpu]->disabled);
2675 ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
2655 } 2676 }
2656 } 2677 }
2657 arch_spin_unlock(&ftrace_max_lock); 2678 arch_spin_unlock(&ftrace_max_lock);
@@ -2974,7 +2995,14 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
2974 return t->init(tr); 2995 return t->init(tr);
2975} 2996}
2976 2997
2977static int __tracing_resize_ring_buffer(unsigned long size) 2998static void set_buffer_entries(struct trace_array *tr, unsigned long val)
2999{
3000 int cpu;
3001 for_each_tracing_cpu(cpu)
3002 tr->data[cpu]->entries = val;
3003}
3004
3005static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
2978{ 3006{
2979 int ret; 3007 int ret;
2980 3008
@@ -2985,19 +3013,32 @@ static int __tracing_resize_ring_buffer(unsigned long size)
2985 */ 3013 */
2986 ring_buffer_expanded = 1; 3014 ring_buffer_expanded = 1;
2987 3015
2988 ret = ring_buffer_resize(global_trace.buffer, size); 3016 ret = ring_buffer_resize(global_trace.buffer, size, cpu);
2989 if (ret < 0) 3017 if (ret < 0)
2990 return ret; 3018 return ret;
2991 3019
2992 if (!current_trace->use_max_tr) 3020 if (!current_trace->use_max_tr)
2993 goto out; 3021 goto out;
2994 3022
2995 ret = ring_buffer_resize(max_tr.buffer, size); 3023 ret = ring_buffer_resize(max_tr.buffer, size, cpu);
2996 if (ret < 0) { 3024 if (ret < 0) {
2997 int r; 3025 int r = 0;
3026
3027 if (cpu == RING_BUFFER_ALL_CPUS) {
3028 int i;
3029 for_each_tracing_cpu(i) {
3030 r = ring_buffer_resize(global_trace.buffer,
3031 global_trace.data[i]->entries,
3032 i);
3033 if (r < 0)
3034 break;
3035 }
3036 } else {
3037 r = ring_buffer_resize(global_trace.buffer,
3038 global_trace.data[cpu]->entries,
3039 cpu);
3040 }
2998 3041
2999 r = ring_buffer_resize(global_trace.buffer,
3000 global_trace.entries);
3001 if (r < 0) { 3042 if (r < 0) {
3002 /* 3043 /*
3003 * AARGH! We are left with different 3044 * AARGH! We are left with different
@@ -3019,43 +3060,39 @@ static int __tracing_resize_ring_buffer(unsigned long size)
3019 return ret; 3060 return ret;
3020 } 3061 }
3021 3062
3022 max_tr.entries = size; 3063 if (cpu == RING_BUFFER_ALL_CPUS)
3064 set_buffer_entries(&max_tr, size);
3065 else
3066 max_tr.data[cpu]->entries = size;
3067
3023 out: 3068 out:
3024 global_trace.entries = size; 3069 if (cpu == RING_BUFFER_ALL_CPUS)
3070 set_buffer_entries(&global_trace, size);
3071 else
3072 global_trace.data[cpu]->entries = size;
3025 3073
3026 return ret; 3074 return ret;
3027} 3075}
3028 3076
3029static ssize_t tracing_resize_ring_buffer(unsigned long size) 3077static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
3030{ 3078{
3031 int cpu, ret = size; 3079 int ret = size;
3032 3080
3033 mutex_lock(&trace_types_lock); 3081 mutex_lock(&trace_types_lock);
3034 3082
3035 tracing_stop(); 3083 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3036 3084 /* make sure, this cpu is enabled in the mask */
3037 /* disable all cpu buffers */ 3085 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3038 for_each_tracing_cpu(cpu) { 3086 ret = -EINVAL;
3039 if (global_trace.data[cpu]) 3087 goto out;
3040 atomic_inc(&global_trace.data[cpu]->disabled); 3088 }
3041 if (max_tr.data[cpu])
3042 atomic_inc(&max_tr.data[cpu]->disabled);
3043 } 3089 }
3044 3090
3045 if (size != global_trace.entries) 3091 ret = __tracing_resize_ring_buffer(size, cpu_id);
3046 ret = __tracing_resize_ring_buffer(size);
3047
3048 if (ret < 0) 3092 if (ret < 0)
3049 ret = -ENOMEM; 3093 ret = -ENOMEM;
3050 3094
3051 for_each_tracing_cpu(cpu) { 3095out:
3052 if (global_trace.data[cpu])
3053 atomic_dec(&global_trace.data[cpu]->disabled);
3054 if (max_tr.data[cpu])
3055 atomic_dec(&max_tr.data[cpu]->disabled);
3056 }
3057
3058 tracing_start();
3059 mutex_unlock(&trace_types_lock); 3096 mutex_unlock(&trace_types_lock);
3060 3097
3061 return ret; 3098 return ret;
@@ -3078,7 +3115,8 @@ int tracing_update_buffers(void)
3078 3115
3079 mutex_lock(&trace_types_lock); 3116 mutex_lock(&trace_types_lock);
3080 if (!ring_buffer_expanded) 3117 if (!ring_buffer_expanded)
3081 ret = __tracing_resize_ring_buffer(trace_buf_size); 3118 ret = __tracing_resize_ring_buffer(trace_buf_size,
3119 RING_BUFFER_ALL_CPUS);
3082 mutex_unlock(&trace_types_lock); 3120 mutex_unlock(&trace_types_lock);
3083 3121
3084 return ret; 3122 return ret;
@@ -3102,7 +3140,8 @@ static int tracing_set_tracer(const char *buf)
3102 mutex_lock(&trace_types_lock); 3140 mutex_lock(&trace_types_lock);
3103 3141
3104 if (!ring_buffer_expanded) { 3142 if (!ring_buffer_expanded) {
3105 ret = __tracing_resize_ring_buffer(trace_buf_size); 3143 ret = __tracing_resize_ring_buffer(trace_buf_size,
3144 RING_BUFFER_ALL_CPUS);
3106 if (ret < 0) 3145 if (ret < 0)
3107 goto out; 3146 goto out;
3108 ret = 0; 3147 ret = 0;
@@ -3128,8 +3167,8 @@ static int tracing_set_tracer(const char *buf)
3128 * The max_tr ring buffer has some state (e.g. ring->clock) and 3167 * The max_tr ring buffer has some state (e.g. ring->clock) and
3129 * we want preserve it. 3168 * we want preserve it.
3130 */ 3169 */
3131 ring_buffer_resize(max_tr.buffer, 1); 3170 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
3132 max_tr.entries = 1; 3171 set_buffer_entries(&max_tr, 1);
3133 } 3172 }
3134 destroy_trace_option_files(topts); 3173 destroy_trace_option_files(topts);
3135 3174
@@ -3137,10 +3176,17 @@ static int tracing_set_tracer(const char *buf)
3137 3176
3138 topts = create_trace_option_files(current_trace); 3177 topts = create_trace_option_files(current_trace);
3139 if (current_trace->use_max_tr) { 3178 if (current_trace->use_max_tr) {
3140 ret = ring_buffer_resize(max_tr.buffer, global_trace.entries); 3179 int cpu;
3141 if (ret < 0) 3180 /* we need to make per cpu buffer sizes equivalent */
3142 goto out; 3181 for_each_tracing_cpu(cpu) {
3143 max_tr.entries = global_trace.entries; 3182 ret = ring_buffer_resize(max_tr.buffer,
3183 global_trace.data[cpu]->entries,
3184 cpu);
3185 if (ret < 0)
3186 goto out;
3187 max_tr.data[cpu]->entries =
3188 global_trace.data[cpu]->entries;
3189 }
3144 } 3190 }
3145 3191
3146 if (t->init) { 3192 if (t->init) {
@@ -3642,30 +3688,82 @@ out_err:
3642 goto out; 3688 goto out;
3643} 3689}
3644 3690
3691struct ftrace_entries_info {
3692 struct trace_array *tr;
3693 int cpu;
3694};
3695
3696static int tracing_entries_open(struct inode *inode, struct file *filp)
3697{
3698 struct ftrace_entries_info *info;
3699
3700 if (tracing_disabled)
3701 return -ENODEV;
3702
3703 info = kzalloc(sizeof(*info), GFP_KERNEL);
3704 if (!info)
3705 return -ENOMEM;
3706
3707 info->tr = &global_trace;
3708 info->cpu = (unsigned long)inode->i_private;
3709
3710 filp->private_data = info;
3711
3712 return 0;
3713}
3714
3645static ssize_t 3715static ssize_t
3646tracing_entries_read(struct file *filp, char __user *ubuf, 3716tracing_entries_read(struct file *filp, char __user *ubuf,
3647 size_t cnt, loff_t *ppos) 3717 size_t cnt, loff_t *ppos)
3648{ 3718{
3649 struct trace_array *tr = filp->private_data; 3719 struct ftrace_entries_info *info = filp->private_data;
3650 char buf[96]; 3720 struct trace_array *tr = info->tr;
3651 int r; 3721 char buf[64];
3722 int r = 0;
3723 ssize_t ret;
3652 3724
3653 mutex_lock(&trace_types_lock); 3725 mutex_lock(&trace_types_lock);
3654 if (!ring_buffer_expanded) 3726
3655 r = sprintf(buf, "%lu (expanded: %lu)\n", 3727 if (info->cpu == RING_BUFFER_ALL_CPUS) {
3656 tr->entries >> 10, 3728 int cpu, buf_size_same;
3657 trace_buf_size >> 10); 3729 unsigned long size;
3658 else 3730
3659 r = sprintf(buf, "%lu\n", tr->entries >> 10); 3731 size = 0;
3732 buf_size_same = 1;
3733 /* check if all cpu sizes are same */
3734 for_each_tracing_cpu(cpu) {
3735 /* fill in the size from first enabled cpu */
3736 if (size == 0)
3737 size = tr->data[cpu]->entries;
3738 if (size != tr->data[cpu]->entries) {
3739 buf_size_same = 0;
3740 break;
3741 }
3742 }
3743
3744 if (buf_size_same) {
3745 if (!ring_buffer_expanded)
3746 r = sprintf(buf, "%lu (expanded: %lu)\n",
3747 size >> 10,
3748 trace_buf_size >> 10);
3749 else
3750 r = sprintf(buf, "%lu\n", size >> 10);
3751 } else
3752 r = sprintf(buf, "X\n");
3753 } else
3754 r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
3755
3660 mutex_unlock(&trace_types_lock); 3756 mutex_unlock(&trace_types_lock);
3661 3757
3662 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3758 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3759 return ret;
3663} 3760}
3664 3761
3665static ssize_t 3762static ssize_t
3666tracing_entries_write(struct file *filp, const char __user *ubuf, 3763tracing_entries_write(struct file *filp, const char __user *ubuf,
3667 size_t cnt, loff_t *ppos) 3764 size_t cnt, loff_t *ppos)
3668{ 3765{
3766 struct ftrace_entries_info *info = filp->private_data;
3669 unsigned long val; 3767 unsigned long val;
3670 int ret; 3768 int ret;
3671 3769
@@ -3680,7 +3778,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3680 /* value is in KB */ 3778 /* value is in KB */
3681 val <<= 10; 3779 val <<= 10;
3682 3780
3683 ret = tracing_resize_ring_buffer(val); 3781 ret = tracing_resize_ring_buffer(val, info->cpu);
3684 if (ret < 0) 3782 if (ret < 0)
3685 return ret; 3783 return ret;
3686 3784
@@ -3689,6 +3787,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3689 return cnt; 3787 return cnt;
3690} 3788}
3691 3789
3790static int
3791tracing_entries_release(struct inode *inode, struct file *filp)
3792{
3793 struct ftrace_entries_info *info = filp->private_data;
3794
3795 kfree(info);
3796
3797 return 0;
3798}
3799
3692static ssize_t 3800static ssize_t
3693tracing_total_entries_read(struct file *filp, char __user *ubuf, 3801tracing_total_entries_read(struct file *filp, char __user *ubuf,
3694 size_t cnt, loff_t *ppos) 3802 size_t cnt, loff_t *ppos)
@@ -3700,7 +3808,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
3700 3808
3701 mutex_lock(&trace_types_lock); 3809 mutex_lock(&trace_types_lock);
3702 for_each_tracing_cpu(cpu) { 3810 for_each_tracing_cpu(cpu) {
3703 size += tr->entries >> 10; 3811 size += tr->data[cpu]->entries >> 10;
3704 if (!ring_buffer_expanded) 3812 if (!ring_buffer_expanded)
3705 expanded_size += trace_buf_size >> 10; 3813 expanded_size += trace_buf_size >> 10;
3706 } 3814 }
@@ -3734,7 +3842,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
3734 if (trace_flags & TRACE_ITER_STOP_ON_FREE) 3842 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3735 tracing_off(); 3843 tracing_off();
3736 /* resize the ring buffer to 0 */ 3844 /* resize the ring buffer to 0 */
3737 tracing_resize_ring_buffer(0); 3845 tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
3738 3846
3739 return 0; 3847 return 0;
3740} 3848}
@@ -3749,14 +3857,14 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3749 struct print_entry *entry; 3857 struct print_entry *entry;
3750 unsigned long irq_flags; 3858 unsigned long irq_flags;
3751 struct page *pages[2]; 3859 struct page *pages[2];
3860 void *map_page[2];
3752 int nr_pages = 1; 3861 int nr_pages = 1;
3753 ssize_t written; 3862 ssize_t written;
3754 void *page1;
3755 void *page2;
3756 int offset; 3863 int offset;
3757 int size; 3864 int size;
3758 int len; 3865 int len;
3759 int ret; 3866 int ret;
3867 int i;
3760 3868
3761 if (tracing_disabled) 3869 if (tracing_disabled)
3762 return -EINVAL; 3870 return -EINVAL;
@@ -3795,9 +3903,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3795 goto out; 3903 goto out;
3796 } 3904 }
3797 3905
3798 page1 = kmap_atomic(pages[0]); 3906 for (i = 0; i < nr_pages; i++)
3799 if (nr_pages == 2) 3907 map_page[i] = kmap_atomic(pages[i]);
3800 page2 = kmap_atomic(pages[1]);
3801 3908
3802 local_save_flags(irq_flags); 3909 local_save_flags(irq_flags);
3803 size = sizeof(*entry) + cnt + 2; /* possible \n added */ 3910 size = sizeof(*entry) + cnt + 2; /* possible \n added */
@@ -3815,10 +3922,10 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3815 3922
3816 if (nr_pages == 2) { 3923 if (nr_pages == 2) {
3817 len = PAGE_SIZE - offset; 3924 len = PAGE_SIZE - offset;
3818 memcpy(&entry->buf, page1 + offset, len); 3925 memcpy(&entry->buf, map_page[0] + offset, len);
3819 memcpy(&entry->buf[len], page2, cnt - len); 3926 memcpy(&entry->buf[len], map_page[1], cnt - len);
3820 } else 3927 } else
3821 memcpy(&entry->buf, page1 + offset, cnt); 3928 memcpy(&entry->buf, map_page[0] + offset, cnt);
3822 3929
3823 if (entry->buf[cnt - 1] != '\n') { 3930 if (entry->buf[cnt - 1] != '\n') {
3824 entry->buf[cnt] = '\n'; 3931 entry->buf[cnt] = '\n';
@@ -3833,11 +3940,10 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3833 *fpos += written; 3940 *fpos += written;
3834 3941
3835 out_unlock: 3942 out_unlock:
3836 if (nr_pages == 2) 3943 for (i = 0; i < nr_pages; i++){
3837 kunmap_atomic(page2); 3944 kunmap_atomic(map_page[i]);
3838 kunmap_atomic(page1); 3945 put_page(pages[i]);
3839 while (nr_pages > 0) 3946 }
3840 put_page(pages[--nr_pages]);
3841 out: 3947 out:
3842 return written; 3948 return written;
3843} 3949}
@@ -3933,9 +4039,10 @@ static const struct file_operations tracing_pipe_fops = {
3933}; 4039};
3934 4040
3935static const struct file_operations tracing_entries_fops = { 4041static const struct file_operations tracing_entries_fops = {
3936 .open = tracing_open_generic, 4042 .open = tracing_entries_open,
3937 .read = tracing_entries_read, 4043 .read = tracing_entries_read,
3938 .write = tracing_entries_write, 4044 .write = tracing_entries_write,
4045 .release = tracing_entries_release,
3939 .llseek = generic_file_llseek, 4046 .llseek = generic_file_llseek,
3940}; 4047};
3941 4048
@@ -4367,6 +4474,9 @@ static void tracing_init_debugfs_percpu(long cpu)
4367 struct dentry *d_cpu; 4474 struct dentry *d_cpu;
4368 char cpu_dir[30]; /* 30 characters should be more than enough */ 4475 char cpu_dir[30]; /* 30 characters should be more than enough */
4369 4476
4477 if (!d_percpu)
4478 return;
4479
4370 snprintf(cpu_dir, 30, "cpu%ld", cpu); 4480 snprintf(cpu_dir, 30, "cpu%ld", cpu);
4371 d_cpu = debugfs_create_dir(cpu_dir, d_percpu); 4481 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4372 if (!d_cpu) { 4482 if (!d_cpu) {
@@ -4387,6 +4497,9 @@ static void tracing_init_debugfs_percpu(long cpu)
4387 4497
4388 trace_create_file("stats", 0444, d_cpu, 4498 trace_create_file("stats", 0444, d_cpu,
4389 (void *) cpu, &tracing_stats_fops); 4499 (void *) cpu, &tracing_stats_fops);
4500
4501 trace_create_file("buffer_size_kb", 0444, d_cpu,
4502 (void *) cpu, &tracing_entries_fops);
4390} 4503}
4391 4504
4392#ifdef CONFIG_FTRACE_SELFTEST 4505#ifdef CONFIG_FTRACE_SELFTEST
@@ -4718,7 +4831,7 @@ static __init int tracer_init_debugfs(void)
4718 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); 4831 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4719 4832
4720 trace_create_file("buffer_size_kb", 0644, d_tracer, 4833 trace_create_file("buffer_size_kb", 0644, d_tracer,
4721 &global_trace, &tracing_entries_fops); 4834 (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
4722 4835
4723 trace_create_file("buffer_total_size_kb", 0444, d_tracer, 4836 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
4724 &global_trace, &tracing_total_entries_fops); 4837 &global_trace, &tracing_total_entries_fops);
@@ -4957,6 +5070,10 @@ __init static int tracer_alloc_buffers(void)
4957 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 5070 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
4958 goto out_free_buffer_mask; 5071 goto out_free_buffer_mask;
4959 5072
5073 /* Only allocate trace_printk buffers if a trace_printk exists */
5074 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
5075 trace_printk_init_buffers();
5076
4960 /* To save memory, keep the ring buffer size to its minimum */ 5077 /* To save memory, keep the ring buffer size to its minimum */
4961 if (ring_buffer_expanded) 5078 if (ring_buffer_expanded)
4962 ring_buf_size = trace_buf_size; 5079 ring_buf_size = trace_buf_size;
@@ -4975,7 +5092,6 @@ __init static int tracer_alloc_buffers(void)
4975 WARN_ON(1); 5092 WARN_ON(1);
4976 goto out_free_cpumask; 5093 goto out_free_cpumask;
4977 } 5094 }
4978 global_trace.entries = ring_buffer_size(global_trace.buffer);
4979 if (global_trace.buffer_disabled) 5095 if (global_trace.buffer_disabled)
4980 tracing_off(); 5096 tracing_off();
4981 5097
@@ -4988,7 +5104,6 @@ __init static int tracer_alloc_buffers(void)
4988 ring_buffer_free(global_trace.buffer); 5104 ring_buffer_free(global_trace.buffer);
4989 goto out_free_cpumask; 5105 goto out_free_cpumask;
4990 } 5106 }
4991 max_tr.entries = 1;
4992#endif 5107#endif
4993 5108
4994 /* Allocate the first page for all buffers */ 5109 /* Allocate the first page for all buffers */
@@ -4997,6 +5112,12 @@ __init static int tracer_alloc_buffers(void)
4997 max_tr.data[i] = &per_cpu(max_tr_data, i); 5112 max_tr.data[i] = &per_cpu(max_tr_data, i);
4998 } 5113 }
4999 5114
5115 set_buffer_entries(&global_trace,
5116 ring_buffer_size(global_trace.buffer, 0));
5117#ifdef CONFIG_TRACER_MAX_TRACE
5118 set_buffer_entries(&max_tr, 1);
5119#endif
5120
5000 trace_init_cmdlines(); 5121 trace_init_cmdlines();
5001 5122
5002 register_tracer(&nop_trace); 5123 register_tracer(&nop_trace);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f95d65da6db8..6c6f7933eede 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -131,6 +131,7 @@ struct trace_array_cpu {
131 atomic_t disabled; 131 atomic_t disabled;
132 void *buffer_page; /* ring buffer spare */ 132 void *buffer_page; /* ring buffer spare */
133 133
134 unsigned long entries;
134 unsigned long saved_latency; 135 unsigned long saved_latency;
135 unsigned long critical_start; 136 unsigned long critical_start;
136 unsigned long critical_end; 137 unsigned long critical_end;
@@ -152,7 +153,6 @@ struct trace_array_cpu {
152 */ 153 */
153struct trace_array { 154struct trace_array {
154 struct ring_buffer *buffer; 155 struct ring_buffer *buffer;
155 unsigned long entries;
156 int cpu; 156 int cpu;
157 int buffer_disabled; 157 int buffer_disabled;
158 cycle_t time_start; 158 cycle_t time_start;
@@ -826,6 +826,8 @@ extern struct list_head ftrace_events;
826extern const char *__start___trace_bprintk_fmt[]; 826extern const char *__start___trace_bprintk_fmt[];
827extern const char *__stop___trace_bprintk_fmt[]; 827extern const char *__stop___trace_bprintk_fmt[];
828 828
829void trace_printk_init_buffers(void);
830
829#undef FTRACE_ENTRY 831#undef FTRACE_ENTRY
830#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ 832#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
831 extern struct ftrace_event_call \ 833 extern struct ftrace_event_call \
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 6fd4ffd042f9..a9077c1b4ad3 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -51,6 +51,10 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
51 const char **iter; 51 const char **iter;
52 char *fmt; 52 char *fmt;
53 53
54 /* allocate the trace_printk per cpu buffers */
55 if (start != end)
56 trace_printk_init_buffers();
57
54 mutex_lock(&btrace_mutex); 58 mutex_lock(&btrace_mutex);
55 for (iter = start; iter < end; iter++) { 59 for (iter = start; iter < end; iter++) {
56 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter); 60 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
diff --git a/tools/Makefile b/tools/Makefile
new file mode 100644
index 000000000000..3ae43947a171
--- /dev/null
+++ b/tools/Makefile
@@ -0,0 +1,77 @@
1include scripts/Makefile.include
2
3help:
4 @echo 'Possible targets:'
5 @echo ''
6 @echo ' cpupower - a tool for all things x86 CPU power'
7 @echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
8 @echo ' lguest - a minimal 32-bit x86 hypervisor'
9 @echo ' perf - Linux performance measurement and analysis tool'
10 @echo ' selftests - various kernel selftests'
11 @echo ' turbostat - Intel CPU idle stats and freq reporting tool'
12 @echo ' usb - USB testing tools'
13 @echo ' virtio - vhost test module'
14 @echo ' vm - misc vm tools'
15 @echo ' x86_energy_perf_policy - Intel energy policy tool'
16 @echo ''
17 @echo 'You can do:'
18 @echo ' $$ make -C tools/<tool>_install'
19 @echo ''
20 @echo ' from the kernel command line to build and install one of'
21 @echo ' the tools above'
22 @echo ''
23 @echo ' $$ make tools/install'
24 @echo ''
25 @echo ' installs all tools.'
26 @echo ''
27 @echo 'Cleaning targets:'
28 @echo ''
29 @echo ' all of the above with the "_clean" string appended cleans'
30 @echo ' the respective build directory.'
31 @echo ' clean: a summary clean target to clean _all_ folders'
32
33cpupower: FORCE
34 $(QUIET_SUBDIR0)power/$@/ $(QUIET_SUBDIR1)
35
36firewire lguest perf usb virtio vm: FORCE
37 $(QUIET_SUBDIR0)$@/ $(QUIET_SUBDIR1)
38
39selftests: FORCE
40 $(QUIET_SUBDIR0)testing/$@/ $(QUIET_SUBDIR1)
41
42turbostat x86_energy_perf_policy: FORCE
43 $(QUIET_SUBDIR0)power/x86/$@/ $(QUIET_SUBDIR1)
44
45cpupower_install:
46 $(QUIET_SUBDIR0)power/$(@:_install=)/ $(QUIET_SUBDIR1) install
47
48firewire_install lguest_install perf_install usb_install virtio_install vm_install:
49 $(QUIET_SUBDIR0)$(@:_install=)/ $(QUIET_SUBDIR1) install
50
51selftests_install:
52 $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) install
53
54turbostat_install x86_energy_perf_policy_install:
55 $(QUIET_SUBDIR0)power/x86/$(@:_install=)/ $(QUIET_SUBDIR1) install
56
57install: cpupower_install firewire_install lguest_install perf_install \
58 selftests_install turbostat_install usb_install virtio_install \
59 vm_install x86_energy_perf_policy_install
60
61cpupower_clean:
62 $(QUIET_SUBDIR0)power/cpupower/ $(QUIET_SUBDIR1) clean
63
64firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
65 $(QUIET_SUBDIR0)$(@:_clean=)/ $(QUIET_SUBDIR1) clean
66
67selftests_clean:
68 $(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
69
70turbostat_clean x86_energy_perf_policy_clean:
71 $(QUIET_SUBDIR0)power/x86/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
72
73clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \
74 turbostat_clean usb_clean virtio_clean vm_clean \
75 x86_energy_perf_policy_clean
76
77.PHONY: FORCE
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
new file mode 100644
index 000000000000..3d69aa9ff51e
--- /dev/null
+++ b/tools/lib/traceevent/Makefile
@@ -0,0 +1,303 @@
1# trace-cmd version
2EP_VERSION = 1
3EP_PATCHLEVEL = 1
4EP_EXTRAVERSION = 0
5
6# file format version
7FILE_VERSION = 6
8
9MAKEFLAGS += --no-print-directory
10
11
12# Makefiles suck: This macro sets a default value of $(2) for the
13# variable named by $(1), unless the variable has been set by
14# environment or command line. This is necessary for CC and AR
15# because make sets default values, so the simpler ?= approach
16# won't work as expected.
17define allow-override
18 $(if $(or $(findstring environment,$(origin $(1))),\
19 $(findstring command line,$(origin $(1)))),,\
20 $(eval $(1) = $(2)))
21endef
22
23# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
24$(call allow-override,CC,$(CROSS_COMPILE)gcc)
25$(call allow-override,AR,$(CROSS_COMPILE)ar)
26
27EXT = -std=gnu99
28INSTALL = install
29
30# Use DESTDIR for installing into a different root directory.
31# This is useful for building a package. The program will be
32# installed in this directory as if it was the root directory.
33# Then the build tool can move it later.
34DESTDIR ?=
35DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
36
37prefix ?= /usr/local
38bindir_relative = bin
39bindir = $(prefix)/$(bindir_relative)
40man_dir = $(prefix)/share/man
41man_dir_SQ = '$(subst ','\'',$(man_dir))'
42html_install = $(prefix)/share/kernelshark/html
43html_install_SQ = '$(subst ','\'',$(html_install))'
44img_install = $(prefix)/share/kernelshark/html/images
45img_install_SQ = '$(subst ','\'',$(img_install))'
46
47export man_dir man_dir_SQ html_install html_install_SQ INSTALL
48export img_install img_install_SQ
49export DESTDIR DESTDIR_SQ
50
51# copy a bit from Linux kbuild
52
53ifeq ("$(origin V)", "command line")
54 VERBOSE = $(V)
55endif
56ifndef VERBOSE
57 VERBOSE = 0
58endif
59
60ifeq ("$(origin O)", "command line")
61 BUILD_OUTPUT := $(O)
62endif
63
64ifeq ($(BUILD_SRC),)
65ifneq ($(BUILD_OUTPUT),)
66
67define build_output
68 $(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \
69 BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1
70endef
71
72saved-output := $(BUILD_OUTPUT)
73BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd)
74$(if $(BUILD_OUTPUT),, \
75 $(error output directory "$(saved-output)" does not exist))
76
77all: sub-make
78
79gui: force
80 $(call build_output, all_cmd)
81
82$(filter-out gui,$(MAKECMDGOALS)): sub-make
83
84sub-make: force
85 $(call build_output, $(MAKECMDGOALS))
86
87
88# Leave processing to above invocation of make
89skip-makefile := 1
90
91endif # BUILD_OUTPUT
92endif # BUILD_SRC
93
94# We process the rest of the Makefile if this is the final invocation of make
95ifeq ($(skip-makefile),)
96
97srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
98objtree := $(CURDIR)
99src := $(srctree)
100obj := $(objtree)
101
102export prefix bindir src obj
103
104# Shell quotes
105bindir_SQ = $(subst ','\'',$(bindir))
106bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
107
108LIB_FILE = libtraceevent.a libtraceevent.so
109
110CONFIG_INCLUDES =
111CONFIG_LIBS =
112CONFIG_FLAGS =
113
114VERSION = $(EP_VERSION)
115PATCHLEVEL = $(EP_PATCHLEVEL)
116EXTRAVERSION = $(EP_EXTRAVERSION)
117
118OBJ = $@
119N =
120
121export Q VERBOSE
122
123EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
124
125INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES)
126
127# Set compile option CFLAGS if not set elsewhere
128CFLAGS ?= -g -Wall
129
130# Append required CFLAGS
131override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
132override CFLAGS += $(udis86-flags)
133
134ifeq ($(VERBOSE),1)
135 Q =
136 print_compile =
137 print_app_build =
138 print_fpic_compile =
139 print_shared_lib_compile =
140 print_plugin_obj_compile =
141 print_plugin_build =
142 print_install =
143else
144 Q = @
145 print_compile = echo ' CC '$(OBJ);
146 print_app_build = echo ' BUILD '$(OBJ);
147 print_fpic_compile = echo ' CC FPIC '$(OBJ);
148 print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
149 print_plugin_obj_compile = echo ' CC PLUGIN OBJ '$(OBJ);
150 print_plugin_build = echo ' CC PLUGI '$(OBJ);
151 print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
152 print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
153endif
154
155do_fpic_compile = \
156 ($(print_fpic_compile) \
157 $(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@)
158
159do_app_build = \
160 ($(print_app_build) \
161 $(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS))
162
163do_compile_shared_library = \
164 ($(print_shared_lib_compile) \
165 $(CC) --shared $^ -o $@)
166
167do_compile_plugin_obj = \
168 ($(print_plugin_obj_compile) \
169 $(CC) -c $(CFLAGS) -fPIC -o $@ $<)
170
171do_plugin_build = \
172 ($(print_plugin_build) \
173 $(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<)
174
175do_build_static_lib = \
176 ($(print_static_lib_build) \
177 $(RM) $@; $(AR) rcs $@ $^)
178
179
180define do_compile
181 $(print_compile) \
182 $(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
183endef
184
185$(obj)/%.o: $(src)/%.c
186 $(Q)$(call do_compile)
187
188%.o: $(src)/%.c
189 $(Q)$(call do_compile)
190
191PEVENT_LIB_OBJS = event-parse.o trace-seq.o parse-filter.o parse-utils.o
192
193ALL_OBJS = $(PEVENT_LIB_OBJS)
194
195CMD_TARGETS = $(LIB_FILE)
196
197TARGETS = $(CMD_TARGETS)
198
199
200all: all_cmd
201
202all_cmd: $(CMD_TARGETS)
203
204libtraceevent.so: $(PEVENT_LIB_OBJS)
205 $(Q)$(do_compile_shared_library)
206
207libtraceevent.a: $(PEVENT_LIB_OBJS)
208 $(Q)$(do_build_static_lib)
209
210$(PEVENT_LIB_OBJS): %.o: $(src)/%.c
211 $(Q)$(do_fpic_compile)
212
213define make_version.h
214 (echo '/* This file is automatically generated. Do not modify. */'; \
215 echo \#define VERSION_CODE $(shell \
216 expr $(VERSION) \* 256 + $(PATCHLEVEL)); \
217 echo '#define EXTRAVERSION ' $(EXTRAVERSION); \
218 echo '#define VERSION_STRING "'$(VERSION).$(PATCHLEVEL).$(EXTRAVERSION)'"'; \
219 echo '#define FILE_VERSION '$(FILE_VERSION); \
220 ) > $1
221endef
222
223define update_version.h
224 ($(call make_version.h, $@.tmp); \
225 if [ -r $@ ] && cmp -s $@ $@.tmp; then \
226 rm -f $@.tmp; \
227 else \
228 echo ' UPDATE $@'; \
229 mv -f $@.tmp $@; \
230 fi);
231endef
232
233ep_version.h: force
234 $(Q)$(N)$(call update_version.h)
235
236VERSION_FILES = ep_version.h
237
238define update_dir
239 (echo $1 > $@.tmp; \
240 if [ -r $@ ] && cmp -s $@ $@.tmp; then \
241 rm -f $@.tmp; \
242 else \
243 echo ' UPDATE $@'; \
244 mv -f $@.tmp $@; \
245 fi);
246endef
247
248## make deps
249
250all_objs := $(sort $(ALL_OBJS))
251all_deps := $(all_objs:%.o=.%.d)
252
253define check_deps
254 $(CC) -M $(CFLAGS) $< > $@;
255endef
256
257$(gui_deps): ks_version.h
258$(non_gui_deps): tc_version.h
259
260$(all_deps): .%.d: $(src)/%.c
261 $(Q)$(call check_deps)
262
263$(all_objs) : %.o : .%.d
264
265dep_includes := $(wildcard $(all_deps))
266
267ifneq ($(dep_includes),)
268 include $(dep_includes)
269endif
270
271tags: force
272 $(RM) tags
273 find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px
274
275TAGS: force
276 $(RM) TAGS
277 find . -name '*.[ch]' | xargs etags
278
279define do_install
280 $(print_install) \
281 if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
282 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
283 fi; \
284 $(INSTALL) $1 '$(DESTDIR_SQ)$2'
285endef
286
287install_lib: all_cmd install_plugins install_python
288 $(Q)$(call do_install,$(LIB_FILE),$(bindir_SQ))
289
290install: install_lib
291
292clean:
293 $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES).*.d
294 $(RM) tags TAGS
295
296endif # skip-makefile
297
298PHONY += force
299force:
300
301# Declare the contents of the .PHONY variable as phony. We keep that
302# information in a variable so we can use it in if_changed and friends.
303.PHONY: $(PHONY)
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
new file mode 100644
index 000000000000..998534992197
--- /dev/null
+++ b/tools/lib/traceevent/event-parse.c
@@ -0,0 +1,5065 @@
1/*
2 * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation;
8 * version 2.1 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 *
21 * The parts for function graph printing was taken and modified from the
22 * Linux Kernel that were written by
23 * - Copyright (C) 2009 Frederic Weisbecker,
24 * Frederic Weisbecker gave his permission to relicense the code to
25 * the Lesser General Public License.
26 */
27#define _GNU_SOURCE
28#include <stdio.h>
29#include <stdlib.h>
30#include <string.h>
31#include <stdarg.h>
32#include <ctype.h>
33#include <errno.h>
34
35#include "event-parse.h"
36#include "event-utils.h"
37
38static const char *input_buf;
39static unsigned long long input_buf_ptr;
40static unsigned long long input_buf_siz;
41
42static int is_flag_field;
43static int is_symbolic_field;
44
45static int show_warning = 1;
46
47#define do_warning(fmt, ...) \
48 do { \
49 if (show_warning) \
50 warning(fmt, ##__VA_ARGS__); \
51 } while (0)
52
53static void init_input_buf(const char *buf, unsigned long long size)
54{
55 input_buf = buf;
56 input_buf_siz = size;
57 input_buf_ptr = 0;
58}
59
60const char *pevent_get_input_buf(void)
61{
62 return input_buf;
63}
64
65unsigned long long pevent_get_input_buf_ptr(void)
66{
67 return input_buf_ptr;
68}
69
70struct event_handler {
71 struct event_handler *next;
72 int id;
73 const char *sys_name;
74 const char *event_name;
75 pevent_event_handler_func func;
76 void *context;
77};
78
79struct pevent_func_params {
80 struct pevent_func_params *next;
81 enum pevent_func_arg_type type;
82};
83
84struct pevent_function_handler {
85 struct pevent_function_handler *next;
86 enum pevent_func_arg_type ret_type;
87 char *name;
88 pevent_func_handler func;
89 struct pevent_func_params *params;
90 int nr_args;
91};
92
93static unsigned long long
94process_defined_func(struct trace_seq *s, void *data, int size,
95 struct event_format *event, struct print_arg *arg);
96
97static void free_func_handle(struct pevent_function_handler *func);
98
99/**
100 * pevent_buffer_init - init buffer for parsing
101 * @buf: buffer to parse
102 * @size: the size of the buffer
103 *
104 * For use with pevent_read_token(), this initializes the internal
105 * buffer that pevent_read_token() will parse.
106 */
107void pevent_buffer_init(const char *buf, unsigned long long size)
108{
109 init_input_buf(buf, size);
110}
111
112void breakpoint(void)
113{
114 static int x;
115 x++;
116}
117
118struct print_arg *alloc_arg(void)
119{
120 struct print_arg *arg;
121
122 arg = malloc_or_die(sizeof(*arg));
123 if (!arg)
124 return NULL;
125 memset(arg, 0, sizeof(*arg));
126
127 return arg;
128}
129
130struct cmdline {
131 char *comm;
132 int pid;
133};
134
135static int cmdline_cmp(const void *a, const void *b)
136{
137 const struct cmdline *ca = a;
138 const struct cmdline *cb = b;
139
140 if (ca->pid < cb->pid)
141 return -1;
142 if (ca->pid > cb->pid)
143 return 1;
144
145 return 0;
146}
147
148struct cmdline_list {
149 struct cmdline_list *next;
150 char *comm;
151 int pid;
152};
153
154static int cmdline_init(struct pevent *pevent)
155{
156 struct cmdline_list *cmdlist = pevent->cmdlist;
157 struct cmdline_list *item;
158 struct cmdline *cmdlines;
159 int i;
160
161 cmdlines = malloc_or_die(sizeof(*cmdlines) * pevent->cmdline_count);
162
163 i = 0;
164 while (cmdlist) {
165 cmdlines[i].pid = cmdlist->pid;
166 cmdlines[i].comm = cmdlist->comm;
167 i++;
168 item = cmdlist;
169 cmdlist = cmdlist->next;
170 free(item);
171 }
172
173 qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
174
175 pevent->cmdlines = cmdlines;
176 pevent->cmdlist = NULL;
177
178 return 0;
179}
180
181static char *find_cmdline(struct pevent *pevent, int pid)
182{
183 const struct cmdline *comm;
184 struct cmdline key;
185
186 if (!pid)
187 return "<idle>";
188
189 if (!pevent->cmdlines)
190 cmdline_init(pevent);
191
192 key.pid = pid;
193
194 comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
195 sizeof(*pevent->cmdlines), cmdline_cmp);
196
197 if (comm)
198 return comm->comm;
199 return "<...>";
200}
201
202/**
203 * pevent_pid_is_registered - return if a pid has a cmdline registered
204 * @pevent: handle for the pevent
205 * @pid: The pid to check if it has a cmdline registered with.
206 *
207 * Returns 1 if the pid has a cmdline mapped to it
208 * 0 otherwise.
209 */
210int pevent_pid_is_registered(struct pevent *pevent, int pid)
211{
212 const struct cmdline *comm;
213 struct cmdline key;
214
215 if (!pid)
216 return 1;
217
218 if (!pevent->cmdlines)
219 cmdline_init(pevent);
220
221 key.pid = pid;
222
223 comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
224 sizeof(*pevent->cmdlines), cmdline_cmp);
225
226 if (comm)
227 return 1;
228 return 0;
229}
230
231/*
232 * If the command lines have been converted to an array, then
233 * we must add this pid. This is much slower than when cmdlines
234 * are added before the array is initialized.
235 */
236static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
237{
238 struct cmdline *cmdlines = pevent->cmdlines;
239 const struct cmdline *cmdline;
240 struct cmdline key;
241
242 if (!pid)
243 return 0;
244
245 /* avoid duplicates */
246 key.pid = pid;
247
248 cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
249 sizeof(*pevent->cmdlines), cmdline_cmp);
250 if (cmdline) {
251 errno = EEXIST;
252 return -1;
253 }
254
255 cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1));
256 if (!cmdlines) {
257 errno = ENOMEM;
258 return -1;
259 }
260
261 cmdlines[pevent->cmdline_count].pid = pid;
262 cmdlines[pevent->cmdline_count].comm = strdup(comm);
263 if (!cmdlines[pevent->cmdline_count].comm)
264 die("malloc comm");
265
266 if (cmdlines[pevent->cmdline_count].comm)
267 pevent->cmdline_count++;
268
269 qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
270 pevent->cmdlines = cmdlines;
271
272 return 0;
273}
274
275/**
276 * pevent_register_comm - register a pid / comm mapping
277 * @pevent: handle for the pevent
278 * @comm: the command line to register
279 * @pid: the pid to map the command line to
280 *
281 * This adds a mapping to search for command line names with
282 * a given pid. The comm is duplicated.
283 */
284int pevent_register_comm(struct pevent *pevent, const char *comm, int pid)
285{
286 struct cmdline_list *item;
287
288 if (pevent->cmdlines)
289 return add_new_comm(pevent, comm, pid);
290
291 item = malloc_or_die(sizeof(*item));
292 item->comm = strdup(comm);
293 if (!item->comm)
294 die("malloc comm");
295 item->pid = pid;
296 item->next = pevent->cmdlist;
297
298 pevent->cmdlist = item;
299 pevent->cmdline_count++;
300
301 return 0;
302}
303
/* One entry of the sorted function-resolution array (pevent->func_map). */
struct func_map {
	unsigned long long addr;	/* start address of the function */
	char *func;			/* function name */
	char *mod;			/* owning module name, or NULL */
};

/* Node of the unsorted list functions are first collected on
 * (pevent->funclist); func_map_init() converts it into func_map[]. */
struct func_list {
	struct func_list *next;
	unsigned long long addr;
	char *func;
	char *mod;
};
316
317static int func_cmp(const void *a, const void *b)
318{
319 const struct func_map *fa = a;
320 const struct func_map *fb = b;
321
322 if (fa->addr < fb->addr)
323 return -1;
324 if (fa->addr > fb->addr)
325 return 1;
326
327 return 0;
328}
329
330/*
331 * We are searching for a record in between, not an exact
332 * match.
333 */
static int func_bcmp(const void *a, const void *b)
{
	const struct func_map *fa = a;	/* the search key */
	const struct func_map *fb = b;	/* a member of the sorted array */

	/*
	 * Match when the key address equals this entry's address, or falls
	 * between this entry and the next one.  Depends on the sentinel
	 * record func_map_init() appends, which makes fb+1 always valid.
	 */
	if ((fa->addr == fb->addr) ||

	    (fa->addr > fb->addr &&
	     fa->addr < (fb+1)->addr))
		return 0;

	if (fa->addr < fb->addr)
		return -1;

	return 1;
}
350
351static int func_map_init(struct pevent *pevent)
352{
353 struct func_list *funclist;
354 struct func_list *item;
355 struct func_map *func_map;
356 int i;
357
358 func_map = malloc_or_die(sizeof(*func_map) * (pevent->func_count + 1));
359 funclist = pevent->funclist;
360
361 i = 0;
362 while (funclist) {
363 func_map[i].func = funclist->func;
364 func_map[i].addr = funclist->addr;
365 func_map[i].mod = funclist->mod;
366 i++;
367 item = funclist;
368 funclist = funclist->next;
369 free(item);
370 }
371
372 qsort(func_map, pevent->func_count, sizeof(*func_map), func_cmp);
373
374 /*
375 * Add a special record at the end.
376 */
377 func_map[pevent->func_count].func = NULL;
378 func_map[pevent->func_count].addr = 0;
379 func_map[pevent->func_count].mod = NULL;
380
381 pevent->func_map = func_map;
382 pevent->funclist = NULL;
383
384 return 0;
385}
386
387static struct func_map *
388find_func(struct pevent *pevent, unsigned long long addr)
389{
390 struct func_map *func;
391 struct func_map key;
392
393 if (!pevent->func_map)
394 func_map_init(pevent);
395
396 key.addr = addr;
397
398 func = bsearch(&key, pevent->func_map, pevent->func_count,
399 sizeof(*pevent->func_map), func_bcmp);
400
401 return func;
402}
403
404/**
405 * pevent_find_function - find a function by a given address
406 * @pevent: handle for the pevent
407 * @addr: the address to find the function with
408 *
409 * Returns a pointer to the function stored that has the given
410 * address. Note, the address does not have to be exact, it
411 * will select the function that would contain the address.
412 */
413const char *pevent_find_function(struct pevent *pevent, unsigned long long addr)
414{
415 struct func_map *map;
416
417 map = find_func(pevent, addr);
418 if (!map)
419 return NULL;
420
421 return map->func;
422}
423
424/**
425 * pevent_find_function_address - find a function address by a given address
426 * @pevent: handle for the pevent
427 * @addr: the address to find the function with
428 *
429 * Returns the address the function starts at. This can be used in
430 * conjunction with pevent_find_function to print both the function
431 * name and the function offset.
432 */
433unsigned long long
434pevent_find_function_address(struct pevent *pevent, unsigned long long addr)
435{
436 struct func_map *map;
437
438 map = find_func(pevent, addr);
439 if (!map)
440 return 0;
441
442 return map->addr;
443}
444
445/**
446 * pevent_register_function - register a function with a given address
447 * @pevent: handle for the pevent
448 * @function: the function name to register
449 * @addr: the address the function starts at
450 * @mod: the kernel module the function may be in (NULL for none)
451 *
452 * This registers a function name with an address and module.
453 * The @func passed in is duplicated.
454 */
455int pevent_register_function(struct pevent *pevent, char *func,
456 unsigned long long addr, char *mod)
457{
458 struct func_list *item;
459
460 item = malloc_or_die(sizeof(*item));
461
462 item->next = pevent->funclist;
463 item->func = strdup(func);
464 if (mod)
465 item->mod = strdup(mod);
466 else
467 item->mod = NULL;
468 item->addr = addr;
469
470 pevent->funclist = item;
471
472 pevent->func_count++;
473
474 return 0;
475}
476
477/**
478 * pevent_print_funcs - print out the stored functions
479 * @pevent: handle for the pevent
480 *
481 * This prints out the stored functions.
482 */
483void pevent_print_funcs(struct pevent *pevent)
484{
485 int i;
486
487 if (!pevent->func_map)
488 func_map_init(pevent);
489
490 for (i = 0; i < (int)pevent->func_count; i++) {
491 printf("%016llx %s",
492 pevent->func_map[i].addr,
493 pevent->func_map[i].func);
494 if (pevent->func_map[i].mod)
495 printf(" [%s]\n", pevent->func_map[i].mod);
496 else
497 printf("\n");
498 }
499}
500
/* One entry of the sorted printk-format array (pevent->printk_map). */
struct printk_map {
	unsigned long long addr;	/* kernel address the string lived at */
	char *printk;			/* the format string */
};

/* Node of the unsorted collection list (pevent->printklist);
 * converted to printk_map[] by printk_map_init(). */
struct printk_list {
	struct printk_list *next;
	unsigned long long addr;
	char *printk;
};
511
512static int printk_cmp(const void *a, const void *b)
513{
514 const struct func_map *fa = a;
515 const struct func_map *fb = b;
516
517 if (fa->addr < fb->addr)
518 return -1;
519 if (fa->addr > fb->addr)
520 return 1;
521
522 return 0;
523}
524
525static void printk_map_init(struct pevent *pevent)
526{
527 struct printk_list *printklist;
528 struct printk_list *item;
529 struct printk_map *printk_map;
530 int i;
531
532 printk_map = malloc_or_die(sizeof(*printk_map) * (pevent->printk_count + 1));
533
534 printklist = pevent->printklist;
535
536 i = 0;
537 while (printklist) {
538 printk_map[i].printk = printklist->printk;
539 printk_map[i].addr = printklist->addr;
540 i++;
541 item = printklist;
542 printklist = printklist->next;
543 free(item);
544 }
545
546 qsort(printk_map, pevent->printk_count, sizeof(*printk_map), printk_cmp);
547
548 pevent->printk_map = printk_map;
549 pevent->printklist = NULL;
550}
551
552static struct printk_map *
553find_printk(struct pevent *pevent, unsigned long long addr)
554{
555 struct printk_map *printk;
556 struct printk_map key;
557
558 if (!pevent->printk_map)
559 printk_map_init(pevent);
560
561 key.addr = addr;
562
563 printk = bsearch(&key, pevent->printk_map, pevent->printk_count,
564 sizeof(*pevent->printk_map), printk_cmp);
565
566 return printk;
567}
568
569/**
570 * pevent_register_print_string - register a string by its address
571 * @pevent: handle for the pevent
572 * @fmt: the string format to register
573 * @addr: the address the string was located at
574 *
575 * This registers a string by the address it was stored in the kernel.
576 * The @fmt passed in is duplicated.
577 */
578int pevent_register_print_string(struct pevent *pevent, char *fmt,
579 unsigned long long addr)
580{
581 struct printk_list *item;
582
583 item = malloc_or_die(sizeof(*item));
584
585 item->next = pevent->printklist;
586 pevent->printklist = item;
587 item->printk = strdup(fmt);
588 item->addr = addr;
589
590 pevent->printk_count++;
591
592 return 0;
593}
594
595/**
596 * pevent_print_printk - print out the stored strings
597 * @pevent: handle for the pevent
598 *
599 * This prints the string formats that were stored.
600 */
601void pevent_print_printk(struct pevent *pevent)
602{
603 int i;
604
605 if (!pevent->printk_map)
606 printk_map_init(pevent);
607
608 for (i = 0; i < (int)pevent->printk_count; i++) {
609 printf("%016llx %s\n",
610 pevent->printk_map[i].addr,
611 pevent->printk_map[i].printk);
612 }
613}
614
615static struct event_format *alloc_event(void)
616{
617 struct event_format *event;
618
619 event = malloc_or_die(sizeof(*event));
620 memset(event, 0, sizeof(*event));
621
622 return event;
623}
624
/* Insert @event into pevent->events, kept sorted by ascending event id. */
static void add_event(struct pevent *pevent, struct event_format *event)
{
	int i;

	if (!pevent->events)
		pevent->events = malloc_or_die(sizeof(event));
	else
		pevent->events =
			realloc(pevent->events, sizeof(event) *
				(pevent->nr_events + 1));
	if (!pevent->events)
		die("Can not allocate events");

	/* find the first entry with a larger id; insert before it */
	for (i = 0; i < pevent->nr_events; i++) {
		if (pevent->events[i]->id > event->id)
			break;
	}
	/* shift the tail up one slot to open a hole at index i */
	if (i < pevent->nr_events)
		memmove(&pevent->events[i + 1],
			&pevent->events[i],
			sizeof(event) * (pevent->nr_events - i));

	pevent->events[i] = event;
	pevent->nr_events++;

	event->pevent = pevent;
}
652
653static int event_item_type(enum event_type type)
654{
655 switch (type) {
656 case EVENT_ITEM ... EVENT_SQUOTE:
657 return 1;
658 case EVENT_ERROR ... EVENT_DELIM:
659 default:
660 return 0;
661 }
662}
663
664static void free_flag_sym(struct print_flag_sym *fsym)
665{
666 struct print_flag_sym *next;
667
668 while (fsym) {
669 next = fsym->next;
670 free(fsym->value);
671 free(fsym->str);
672 free(fsym);
673 fsym = next;
674 }
675}
676
/* Recursively free a print_arg tree, including type-specific payloads. */
static void free_arg(struct print_arg *arg)
{
	struct print_arg *farg;

	/* NULL is accepted so callers can free unconditionally */
	if (!arg)
		return;

	switch (arg->type) {
	case PRINT_ATOM:
		free(arg->atom.atom);
		break;
	case PRINT_FIELD:
		free(arg->field.name);
		break;
	case PRINT_FLAGS:
		free_arg(arg->flags.field);
		free(arg->flags.delim);
		free_flag_sym(arg->flags.flags);
		break;
	case PRINT_SYMBOL:
		free_arg(arg->symbol.field);
		free_flag_sym(arg->symbol.symbols);
		break;
	case PRINT_TYPE:
		free(arg->typecast.type);
		free_arg(arg->typecast.item);
		break;
	case PRINT_STRING:
	case PRINT_BSTRING:
		free(arg->string.string);
		break;
	case PRINT_DYNAMIC_ARRAY:
		free(arg->dynarray.index);
		break;
	case PRINT_OP:
		free(arg->op.op);
		free_arg(arg->op.left);
		free_arg(arg->op.right);
		break;
	case PRINT_FUNC:
		/* unlink and free the argument list one node at a time */
		while (arg->func.args) {
			farg = arg->func.args;
			arg->func.args = farg->next;
			free_arg(farg);
		}
		break;

	case PRINT_NULL:
	default:
		break;
	}

	free(arg);
}
731
/*
 * Classify a character into the token type it begins.
 * The order of the tests matters: '\n' satisfies isspace(), so it must be
 * caught before the EVENT_SPACE test.  A -1 (end of buffer) fails
 * isprint() and therefore maps to EVENT_NONE.
 */
static enum event_type get_type(int ch)
{
	if (ch == '\n')
		return EVENT_NEWLINE;
	if (isspace(ch))
		return EVENT_SPACE;
	if (isalnum(ch) || ch == '_')
		return EVENT_ITEM;
	if (ch == '\'')
		return EVENT_SQUOTE;
	if (ch == '"')
		return EVENT_DQUOTE;
	if (!isprint(ch))
		return EVENT_NONE;
	if (ch == '(' || ch == ')' || ch == ',')
		return EVENT_DELIM;

	/* anything printable that is left is an operator character */
	return EVENT_OP;
}
751
752static int __read_char(void)
753{
754 if (input_buf_ptr >= input_buf_siz)
755 return -1;
756
757 return input_buf[input_buf_ptr++];
758}
759
760static int __peek_char(void)
761{
762 if (input_buf_ptr >= input_buf_siz)
763 return -1;
764
765 return input_buf[input_buf_ptr];
766}
767
768/**
769 * pevent_peek_char - peek at the next character that will be read
770 *
771 * Returns the next character read, or -1 if end of buffer.
772 */
int pevent_peek_char(void)
{
	/* thin public wrapper around the static helper; does not consume */
	return __peek_char();
}
777
778static enum event_type force_token(const char *str, char **tok);
779
/*
 * Read one token from the global input buffer into a newly allocated
 * string returned through @tok.  Returns the token's type, or EVENT_NONE
 * at end of input / on allocation failure (in which case *tok is NULL).
 * Tokens larger than BUFSIZ are accumulated chunk by chunk.
 */
static enum event_type __read_token(char **tok)
{
	char buf[BUFSIZ];
	int ch, last_ch, quote_ch, next_ch;
	int i = 0;
	int tok_size = 0;
	enum event_type type;

	*tok = NULL;


	ch = __read_char();
	if (ch < 0)
		return EVENT_NONE;

	type = get_type(ch);
	if (type == EVENT_NONE)
		return type;

	buf[i++] = ch;

	switch (type) {
	case EVENT_NEWLINE:
	case EVENT_DELIM:
		/* always a single character */
		*tok = malloc_or_die(2);
		(*tok)[0] = ch;
		(*tok)[1] = 0;
		return type;

	case EVENT_OP:
		/* greedily build multi-character operators (->, >>, ==, ...) */
		switch (ch) {
		case '-':
			next_ch = __peek_char();
			if (next_ch == '>') {
				buf[i++] = __read_char();
				break;
			}
			/* fall through */
		case '+':
		case '|':
		case '&':
		case '>':
		case '<':
			last_ch = ch;
			ch = __peek_char();
			if (ch != last_ch)
				goto test_equal;
			buf[i++] = __read_char();
			switch (last_ch) {
			case '>':
			case '<':
				/* '>>' or '<<' may still become '>>=' / '<<=' */
				goto test_equal;
			default:
				break;
			}
			break;
		case '!':
		case '=':
			goto test_equal;
		default: /* what should we do instead? */
			break;
		}
		buf[i] = 0;
		*tok = strdup(buf);
		return type;

 test_equal:
		/* consume a trailing '=' (>=, <=, ==, !=, ...) */
		ch = __peek_char();
		if (ch == '=')
			buf[i++] = __read_char();
		goto out;

	case EVENT_DQUOTE:
	case EVENT_SQUOTE:
		/* don't keep quotes */
		i--;
		quote_ch = ch;
		last_ch = 0;
 concat:
		do {
			/* flush buf into *tok when it fills up.
			 * NOTE(review): realloc failure overwrites *tok with
			 * NULL, leaking the old allocation — confirm and fix
			 * separately. */
			if (i == (BUFSIZ - 1)) {
				buf[i] = 0;
				if (*tok) {
					*tok = realloc(*tok, tok_size + BUFSIZ);
					if (!*tok)
						return EVENT_NONE;
					strcat(*tok, buf);
				} else
					*tok = strdup(buf);

				if (!*tok)
					return EVENT_NONE;
				tok_size += BUFSIZ;
				i = 0;
			}
			last_ch = ch;
			ch = __read_char();
			buf[i++] = ch;
			/* the '\' '\' will cancel itself */
			if (ch == '\\' && last_ch == '\\')
				last_ch = 0;
		} while (ch != quote_ch || last_ch == '\\');
		/* remove the last quote */
		i--;

		/*
		 * For strings (double quotes) check the next token.
		 * If it is another string, concatinate the two.
		 */
		if (type == EVENT_DQUOTE) {
			unsigned long long save_input_buf_ptr = input_buf_ptr;

			do {
				ch = __read_char();
			} while (isspace(ch));
			if (ch == '"')
				goto concat;
			/* not a string: rewind to before the lookahead */
			input_buf_ptr = save_input_buf_ptr;
		}

		goto out;

	case EVENT_ERROR ... EVENT_SPACE:
	case EVENT_ITEM:
	default:
		break;
	}

	/* accumulate consecutive characters of the same type */
	while (get_type(__peek_char()) == type) {
		if (i == (BUFSIZ - 1)) {
			buf[i] = 0;
			if (*tok) {
				*tok = realloc(*tok, tok_size + BUFSIZ);
				if (!*tok)
					return EVENT_NONE;
				strcat(*tok, buf);
			} else
				*tok = strdup(buf);

			if (!*tok)
				return EVENT_NONE;
			tok_size += BUFSIZ;
			i = 0;
		}
		ch = __read_char();
		buf[i++] = ch;
	}

 out:
	/* append whatever remains in buf to the token */
	buf[i] = 0;
	if (*tok) {
		*tok = realloc(*tok, tok_size + i);
		if (!*tok)
			return EVENT_NONE;
		strcat(*tok, buf);
	} else
		*tok = strdup(buf);
	if (!*tok)
		return EVENT_NONE;

	if (type == EVENT_ITEM) {
		/*
		 * Older versions of the kernel has a bug that
		 * creates invalid symbols and will break the mac80211
		 * parsing. This is a work around to that bug.
		 *
		 * See Linux kernel commit:
		 *     811cb50baf63461ce0bdb234927046131fc7fa8b
		 */
		if (strcmp(*tok, "LOCAL_PR_FMT") == 0) {
			free(*tok);
			*tok = NULL;
			return force_token("\"\%s\" ", tok);
		} else if (strcmp(*tok, "STA_PR_FMT") == 0) {
			free(*tok);
			*tok = NULL;
			return force_token("\" sta:%pM\" ", tok);
		} else if (strcmp(*tok, "VIF_PR_FMT") == 0) {
			free(*tok);
			*tok = NULL;
			return force_token("\" vif:%p(%d)\" ", tok);
		}
	}

	return type;
}
966
967static enum event_type force_token(const char *str, char **tok)
968{
969 const char *save_input_buf;
970 unsigned long long save_input_buf_ptr;
971 unsigned long long save_input_buf_siz;
972 enum event_type type;
973
974 /* save off the current input pointers */
975 save_input_buf = input_buf;
976 save_input_buf_ptr = input_buf_ptr;
977 save_input_buf_siz = input_buf_siz;
978
979 init_input_buf(str, strlen(str));
980
981 type = __read_token(tok);
982
983 /* reset back to original token */
984 input_buf = save_input_buf;
985 input_buf_ptr = save_input_buf_ptr;
986 input_buf_siz = save_input_buf_siz;
987
988 return type;
989}
990
991static void free_token(char *tok)
992{
993 if (tok)
994 free(tok);
995}
996
997static enum event_type read_token(char **tok)
998{
999 enum event_type type;
1000
1001 for (;;) {
1002 type = __read_token(tok);
1003 if (type != EVENT_SPACE)
1004 return type;
1005
1006 free_token(*tok);
1007 }
1008
1009 /* not reached */
1010 *tok = NULL;
1011 return EVENT_NONE;
1012}
1013
1014/**
1015 * pevent_read_token - access to utilites to use the pevent parser
1016 * @tok: The token to return
1017 *
1018 * This will parse tokens from the string given by
1019 * pevent_init_data().
1020 *
1021 * Returns the token type.
1022 */
enum event_type pevent_read_token(char **tok)
{
	/* public wrapper: whitespace-skipping behavior of read_token() */
	return read_token(tok);
}
1027
1028/**
1029 * pevent_free_token - free a token returned by pevent_read_token
1030 * @token: the token to free
1031 */
void pevent_free_token(char *token)
{
	/* public wrapper so external callers can release parser tokens */
	free_token(token);
}
1036
1037/* no newline */
1038static enum event_type read_token_item(char **tok)
1039{
1040 enum event_type type;
1041
1042 for (;;) {
1043 type = __read_token(tok);
1044 if (type != EVENT_SPACE && type != EVENT_NEWLINE)
1045 return type;
1046 free_token(*tok);
1047 *tok = NULL;
1048 }
1049
1050 /* not reached */
1051 *tok = NULL;
1052 return EVENT_NONE;
1053}
1054
1055static int test_type(enum event_type type, enum event_type expect)
1056{
1057 if (type != expect) {
1058 do_warning("Error: expected type %d but read %d",
1059 expect, type);
1060 return -1;
1061 }
1062 return 0;
1063}
1064
1065static int test_type_token(enum event_type type, const char *token,
1066 enum event_type expect, const char *expect_tok)
1067{
1068 if (type != expect) {
1069 do_warning("Error: expected type %d but read %d",
1070 expect, type);
1071 return -1;
1072 }
1073
1074 if (strcmp(token, expect_tok) != 0) {
1075 do_warning("Error: expected '%s' but read '%s'",
1076 expect_tok, token);
1077 return -1;
1078 }
1079 return 0;
1080}
1081
1082static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
1083{
1084 enum event_type type;
1085
1086 if (newline_ok)
1087 type = read_token(tok);
1088 else
1089 type = read_token_item(tok);
1090 return test_type(type, expect);
1091}
1092
static int read_expect_type(enum event_type expect, char **tok)
{
	/* newline_ok = 1: newlines are skipped like ordinary whitespace */
	return __read_expect_type(expect, tok, 1);
}
1097
1098static int __read_expected(enum event_type expect, const char *str,
1099 int newline_ok)
1100{
1101 enum event_type type;
1102 char *token;
1103 int ret;
1104
1105 if (newline_ok)
1106 type = read_token(&token);
1107 else
1108 type = read_token_item(&token);
1109
1110 ret = test_type_token(type, token, expect, str);
1111
1112 free_token(token);
1113
1114 return ret;
1115}
1116
static int read_expected(enum event_type expect, const char *str)
{
	/* newlines allowed between tokens */
	return __read_expected(expect, str, 1);
}
1121
static int read_expected_item(enum event_type expect, const char *str)
{
	/* newlines NOT allowed: the token must be on the current line */
	return __read_expected(expect, str, 0);
}
1126
1127static char *event_read_name(void)
1128{
1129 char *token;
1130
1131 if (read_expected(EVENT_ITEM, "name") < 0)
1132 return NULL;
1133
1134 if (read_expected(EVENT_OP, ":") < 0)
1135 return NULL;
1136
1137 if (read_expect_type(EVENT_ITEM, &token) < 0)
1138 goto fail;
1139
1140 return token;
1141
1142 fail:
1143 free_token(token);
1144 return NULL;
1145}
1146
1147static int event_read_id(void)
1148{
1149 char *token;
1150 int id;
1151
1152 if (read_expected_item(EVENT_ITEM, "ID") < 0)
1153 return -1;
1154
1155 if (read_expected(EVENT_OP, ":") < 0)
1156 return -1;
1157
1158 if (read_expect_type(EVENT_ITEM, &token) < 0)
1159 goto fail;
1160
1161 id = strtoul(token, NULL, 0);
1162 free_token(token);
1163 return id;
1164
1165 fail:
1166 free_token(token);
1167 return -1;
1168}
1169
1170static int field_is_string(struct format_field *field)
1171{
1172 if ((field->flags & FIELD_IS_ARRAY) &&
1173 (strstr(field->type, "char") || strstr(field->type, "u8") ||
1174 strstr(field->type, "s8")))
1175 return 1;
1176
1177 return 0;
1178}
1179
1180static int field_is_dynamic(struct format_field *field)
1181{
1182 if (strncmp(field->type, "__data_loc", 10) == 0)
1183 return 1;
1184
1185 return 0;
1186}
1187
1188static int field_is_long(struct format_field *field)
1189{
1190 /* includes long long */
1191 if (strstr(field->type, "long"))
1192 return 1;
1193
1194 return 0;
1195}
1196
/*
 * Parse the "field:" lines of an event format section into a linked list
 * appended at @fields.  Returns the number of fields parsed, or -1 on
 * error.  Parsing stops at the first empty line (EVENT_NEWLINE), which
 * separates the common fields from the event-specific ones.
 */
static int event_read_fields(struct event_format *event, struct format_field **fields)
{
	struct format_field *field = NULL;
	enum event_type type;
	char *token;
	char *last_token;
	int count = 0;

	do {
		type = read_token(&token);
		if (type == EVENT_NEWLINE) {
			/* blank line: end of this field section */
			free_token(token);
			return count;
		}

		count++;

		if (test_type_token(type, token, EVENT_ITEM, "field"))
			goto fail;
		free_token(token);

		type = read_token(&token);
		/*
		 * The ftrace fields may still use the "special" name.
		 * Just ignore it.
		 */
		if (event->flags & EVENT_FL_ISFTRACE &&
		    type == EVENT_ITEM && strcmp(token, "special") == 0) {
			free_token(token);
			type = read_token(&token);
		}

		if (test_type_token(type, token, EVENT_OP, ":") < 0)
			goto fail;

		free_token(token);
		if (read_expect_type(EVENT_ITEM, &token) < 0)
			goto fail;

		last_token = token;

		field = malloc_or_die(sizeof(*field));
		memset(field, 0, sizeof(*field));
		field->event = event;

		/* read the rest of the type */
		for (;;) {
			/*
			 * Multi-token types ("unsigned long", "char *"):
			 * keep appending previous tokens to field->type;
			 * the final token becomes the field name.
			 */
			type = read_token(&token);
			if (type == EVENT_ITEM ||
			    (type == EVENT_OP && strcmp(token, "*") == 0) ||
			    /*
			     * Some of the ftrace fields are broken and have
			     * an illegal "." in them.
			     */
			    (event->flags & EVENT_FL_ISFTRACE &&
			     type == EVENT_OP && strcmp(token, ".") == 0)) {

				if (strcmp(token, "*") == 0)
					field->flags |= FIELD_IS_POINTER;

				if (field->type) {
					field->type = realloc(field->type,
							      strlen(field->type) +
							      strlen(last_token) + 2);
					strcat(field->type, " ");
					strcat(field->type, last_token);
					free(last_token);
				} else
					field->type = last_token;
				last_token = token;
				continue;
			}

			break;
		}

		if (!field->type) {
			die("no type found");
			goto fail;
		}
		/* the token after the type tokens is the field's name */
		field->name = last_token;

		if (test_type(type, EVENT_OP))
			goto fail;

		if (strcmp(token, "[") == 0) {
			/* array field: collect everything up to "]" */
			enum event_type last_type = type;
			char *brackets = token;
			int len;

			field->flags |= FIELD_IS_ARRAY;

			type = read_token(&token);

			if (type == EVENT_ITEM)
				field->arraylen = strtoul(token, NULL, 0);
			else
				field->arraylen = 0;

			while (strcmp(token, "]") != 0) {
				/* two adjacent items need a separating space */
				if (last_type == EVENT_ITEM &&
				    type == EVENT_ITEM)
					len = 2;
				else
					len = 1;
				last_type = type;

				brackets = realloc(brackets,
						   strlen(brackets) +
						   strlen(token) + len);
				if (len == 2)
					strcat(brackets, " ");
				strcat(brackets, token);
				/* We only care about the last token */
				field->arraylen = strtoul(token, NULL, 0);
				free_token(token);
				type = read_token(&token);
				if (type == EVENT_NONE) {
					die("failed to find token");
					goto fail;
				}
			}

			free_token(token);

			brackets = realloc(brackets, strlen(brackets) + 2);
			strcat(brackets, "]");

			/* add brackets to type */

			type = read_token(&token);
			/*
			 * If the next token is not an OP, then it is of
			 * the format: type [] item;
			 */
			if (type == EVENT_ITEM) {
				/* fold the old name + brackets into the type;
				 * this token is the real name */
				field->type = realloc(field->type,
						      strlen(field->type) +
						      strlen(field->name) +
						      strlen(brackets) + 2);
				strcat(field->type, " ");
				strcat(field->type, field->name);
				free_token(field->name);
				strcat(field->type, brackets);
				field->name = token;
				type = read_token(&token);
			} else {
				field->type = realloc(field->type,
						      strlen(field->type) +
						      strlen(brackets) + 1);
				strcat(field->type, brackets);
			}
			free(brackets);
		}

		if (field_is_string(field))
			field->flags |= FIELD_IS_STRING;
		if (field_is_dynamic(field))
			field->flags |= FIELD_IS_DYNAMIC;
		if (field_is_long(field))
			field->flags |= FIELD_IS_LONG;

		if (test_type_token(type, token, EVENT_OP, ";"))
			goto fail;
		free_token(token);

		/* "offset:<n>;" */
		if (read_expected(EVENT_ITEM, "offset") < 0)
			goto fail_expect;

		if (read_expected(EVENT_OP, ":") < 0)
			goto fail_expect;

		if (read_expect_type(EVENT_ITEM, &token))
			goto fail;
		field->offset = strtoul(token, NULL, 0);
		free_token(token);

		if (read_expected(EVENT_OP, ";") < 0)
			goto fail_expect;

		/* "size:<n>;" */
		if (read_expected(EVENT_ITEM, "size") < 0)
			goto fail_expect;

		if (read_expected(EVENT_OP, ":") < 0)
			goto fail_expect;

		if (read_expect_type(EVENT_ITEM, &token))
			goto fail;
		field->size = strtoul(token, NULL, 0);
		free_token(token);

		if (read_expected(EVENT_OP, ";") < 0)
			goto fail_expect;

		type = read_token(&token);
		if (type != EVENT_NEWLINE) {
			/* newer versions of the kernel have a "signed" type */
			if (test_type_token(type, token, EVENT_ITEM, "signed"))
				goto fail;

			free_token(token);

			if (read_expected(EVENT_OP, ":") < 0)
				goto fail_expect;

			if (read_expect_type(EVENT_ITEM, &token))
				goto fail;

			/* add signed type */

			free_token(token);
			if (read_expected(EVENT_OP, ";") < 0)
				goto fail_expect;

			if (read_expect_type(EVENT_NEWLINE, &token))
				goto fail;
		}

		free_token(token);

		/* derive the per-element size for array fields */
		if (field->flags & FIELD_IS_ARRAY) {
			if (field->arraylen)
				field->elementsize = field->size / field->arraylen;
			else if (field->flags & FIELD_IS_STRING)
				field->elementsize = 1;
			else
				field->elementsize = event->pevent->long_size;
		} else
			field->elementsize = field->size;

		/* append to the caller's list and advance the tail pointer */
		*fields = field;
		fields = &field->next;

	} while (1);

	return 0;

fail:
	free_token(token);
fail_expect:
	if (field)
		free(field);
	return -1;
}
1441
1442static int event_read_format(struct event_format *event)
1443{
1444 char *token;
1445 int ret;
1446
1447 if (read_expected_item(EVENT_ITEM, "format") < 0)
1448 return -1;
1449
1450 if (read_expected(EVENT_OP, ":") < 0)
1451 return -1;
1452
1453 if (read_expect_type(EVENT_NEWLINE, &token))
1454 goto fail;
1455 free_token(token);
1456
1457 ret = event_read_fields(event, &event->format.common_fields);
1458 if (ret < 0)
1459 return ret;
1460 event->format.nr_common = ret;
1461
1462 ret = event_read_fields(event, &event->format.fields);
1463 if (ret < 0)
1464 return ret;
1465 event->format.nr_fields = ret;
1466
1467 return 0;
1468
1469 fail:
1470 free_token(token);
1471 return -1;
1472}
1473
1474static enum event_type
1475process_arg_token(struct event_format *event, struct print_arg *arg,
1476 char **tok, enum event_type type);
1477
/*
 * Read the next token and parse one print argument into @arg.
 * The token that terminated the argument is returned through @tok.
 */
static enum event_type
process_arg(struct event_format *event, struct print_arg *arg, char **tok)
{
	enum event_type type;
	char *token;

	type = read_token(&token);
	*tok = token;

	return process_arg_token(event, arg, tok, type);
}
1489
1490static enum event_type
1491process_op(struct event_format *event, struct print_arg *arg, char **tok);
1492
/*
 * Parse the "b : c" tail of a ternary "a ? b : c".  @top holds the '?'
 * op; its right child becomes a new ':' op node whose children are the
 * two branch arguments.
 */
static enum event_type
process_cond(struct event_format *event, struct print_arg *top, char **tok)
{
	struct print_arg *arg, *left, *right;
	enum event_type type;
	char *token = NULL;

	arg = alloc_arg();
	left = alloc_arg();
	right = alloc_arg();

	arg->type = PRINT_OP;
	arg->op.left = left;
	arg->op.right = right;

	*tok = NULL;
	type = process_arg(event, left, &token);

 again:
	/* Handle other operations in the arguments */
	if (type == EVENT_OP && strcmp(token, ":") != 0) {
		type = process_op(event, left, &token);
		goto again;
	}

	if (test_type_token(type, token, EVENT_OP, ":"))
		goto out_free;

	/* the ':' token is owned by the op node from here on */
	arg->op.op = token;

	type = process_arg(event, right, &token);

	top->op.right = arg;

	*tok = token;
	return type;

out_free:
	/* Top may point to itself */
	top->op.right = NULL;
	free_token(token);
	/* freeing arg also frees left and right via op.left/op.right */
	free_arg(arg);
	return EVENT_ERROR;
}
1537
1538static enum event_type
1539process_array(struct event_format *event, struct print_arg *top, char **tok)
1540{
1541 struct print_arg *arg;
1542 enum event_type type;
1543 char *token = NULL;
1544
1545 arg = alloc_arg();
1546
1547 *tok = NULL;
1548 type = process_arg(event, arg, &token);
1549 if (test_type_token(type, token, EVENT_OP, "]"))
1550 goto out_free;
1551
1552 top->op.right = arg;
1553
1554 free_token(token);
1555 type = read_token_item(&token);
1556 *tok = token;
1557
1558 return type;
1559
1560out_free:
1561 free_token(*tok);
1562 *tok = NULL;
1563 free_arg(arg);
1564 return EVENT_ERROR;
1565}
1566
/*
 * Map an operator token to its C precedence level (lower binds tighter).
 * Returns -1 (after a warning) for an unrecognized operator.
 */
static int get_op_prio(char *op)
{
	if (!op[1]) {
		/* single-character operators */
		switch (op[0]) {
		case '~':
		case '!':
			return 4;
		case '*':
		case '/':
		case '%':
			return 6;
		case '+':
		case '-':
			return 7;
		/* '>>' and '<<' are 8 */
		case '<':
		case '>':
			return 9;
		/* '==' and '!=' are 10 */
		case '&':
			return 11;
		case '^':
			return 12;
		case '|':
			return 13;
		case '?':
			return 16;
		default:
			do_warning("unknown op '%c'", op[0]);
			return -1;
		}
	}

	/* multi-character operators */
	if (strcmp(op, "++") == 0 || strcmp(op, "--") == 0)
		return 3;
	if (strcmp(op, ">>") == 0 || strcmp(op, "<<") == 0)
		return 8;
	if (strcmp(op, ">=") == 0 || strcmp(op, "<=") == 0)
		return 9;
	if (strcmp(op, "==") == 0 || strcmp(op, "!=") == 0)
		return 10;
	if (strcmp(op, "&&") == 0)
		return 14;
	if (strcmp(op, "||") == 0)
		return 15;

	do_warning("unknown op '%s'", op);
	return -1;
}
1621
1622static int set_op_prio(struct print_arg *arg)
1623{
1624
1625 /* single ops are the greatest */
1626 if (!arg->op.left || arg->op.left->type == PRINT_NULL)
1627 arg->op.prio = 0;
1628 else
1629 arg->op.prio = get_op_prio(arg->op.op);
1630
1631 return arg->op.prio;
1632}
1633
/* Note, *tok does not get freed, but will most likely be saved */
/*
 * Parse an operator whose token was already read into *tok, folding the
 * previously-parsed @arg into the left side of a new op node.  Handles
 * unary ops, the ternary '?', binary ops (with precedence re-rooting),
 * and array subscripts.
 */
static enum event_type
process_op(struct event_format *event, struct print_arg *arg, char **tok)
{
	struct print_arg *left, *right = NULL;
	enum event_type type;
	char *token;

	/* the op is passed in via tok */
	token = *tok;

	if (arg->type == PRINT_OP && !arg->op.left) {
		/* handle single op */
		if (token[1]) {
			die("bad op token %s", token);
			goto out_free;
		}
		switch (token[0]) {
		case '~':
		case '!':
		case '+':
		case '-':
			break;
		default:
			do_warning("bad op token %s", token);
			goto out_free;

		}

		/* make an empty left */
		left = alloc_arg();
		left->type = PRINT_NULL;
		arg->op.left = left;

		right = alloc_arg();
		arg->op.right = right;

		/* do not free the token, it belongs to an op */
		*tok = NULL;
		type = process_arg(event, right, tok);

	} else if (strcmp(token, "?") == 0) {

		left = alloc_arg();
		/* copy the top arg to the left */
		*left = *arg;

		arg->type = PRINT_OP;
		arg->op.op = token;
		arg->op.left = left;
		arg->op.prio = 0;

		/* process_cond() parses the "b : c" part */
		type = process_cond(event, arg, tok);

	} else if (strcmp(token, ">>") == 0 ||
		   strcmp(token, "<<") == 0 ||
		   strcmp(token, "&") == 0 ||
		   strcmp(token, "|") == 0 ||
		   strcmp(token, "&&") == 0 ||
		   strcmp(token, "||") == 0 ||
		   strcmp(token, "-") == 0 ||
		   strcmp(token, "+") == 0 ||
		   strcmp(token, "*") == 0 ||
		   strcmp(token, "^") == 0 ||
		   strcmp(token, "/") == 0 ||
		   strcmp(token, "<") == 0 ||
		   strcmp(token, ">") == 0 ||
		   strcmp(token, "==") == 0 ||
		   strcmp(token, "!=") == 0) {

		left = alloc_arg();

		/* copy the top arg to the left */
		*left = *arg;

		arg->type = PRINT_OP;
		arg->op.op = token;
		arg->op.left = left;

		if (set_op_prio(arg) == -1) {
			event->flags |= EVENT_FL_FAILED;
			goto out_free;
		}

		type = read_token_item(&token);
		*tok = token;

		/* could just be a type pointer */
		if ((strcmp(arg->op.op, "*") == 0) &&
		    type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
			/* "(type *)" — turn the '*' back into part of the
			 * atom's type name rather than a multiplication */
			if (left->type != PRINT_ATOM)
				die("bad pointer type");
			left->atom.atom = realloc(left->atom.atom,
						  strlen(left->atom.atom) + 3);
			strcat(left->atom.atom, " *");
			free(arg->op.op);
			*arg = *left;
			free(left);

			return type;
		}

		right = alloc_arg();
		type = process_arg_token(event, right, tok, type);
		arg->op.right = right;

	} else if (strcmp(token, "[") == 0) {

		left = alloc_arg();
		*left = *arg;

		arg->type = PRINT_OP;
		arg->op.op = token;
		arg->op.left = left;

		arg->op.prio = 0;

		type = process_array(event, arg, tok);

	} else {
		do_warning("unknown op '%s'", token);
		event->flags |= EVENT_FL_FAILED;
		/* the arg is now the left side */
		goto out_free;
	}

	if (type == EVENT_OP && strcmp(*tok, ":") != 0) {
		int prio;

		/* higher prios need to be closer to the root */
		prio = get_op_prio(*tok);

		if (prio > arg->op.prio)
			return process_op(event, arg, tok);

		return process_op(event, right, tok);
	}

	return type;

 out_free:
	free_token(token);
	*tok = NULL;
	return EVENT_ERROR;
}
1779
1780static enum event_type
1781process_entry(struct event_format *event __unused, struct print_arg *arg,
1782 char **tok)
1783{
1784 enum event_type type;
1785 char *field;
1786 char *token;
1787
1788 if (read_expected(EVENT_OP, "->") < 0)
1789 goto out_err;
1790
1791 if (read_expect_type(EVENT_ITEM, &token) < 0)
1792 goto out_free;
1793 field = token;
1794
1795 arg->type = PRINT_FIELD;
1796 arg->field.name = field;
1797
1798 if (is_flag_field) {
1799 arg->field.field = pevent_find_any_field(event, arg->field.name);
1800 arg->field.field->flags |= FIELD_IS_FLAG;
1801 is_flag_field = 0;
1802 } else if (is_symbolic_field) {
1803 arg->field.field = pevent_find_any_field(event, arg->field.name);
1804 arg->field.field->flags |= FIELD_IS_SYMBOLIC;
1805 is_symbolic_field = 0;
1806 }
1807
1808 type = read_token(&token);
1809 *tok = token;
1810
1811 return type;
1812
1813 out_free:
1814 free_token(token);
1815 out_err:
1816 *tok = NULL;
1817 return EVENT_ERROR;
1818}
1819
1820static char *arg_eval (struct print_arg *arg);
1821
/*
 * Truncate (and for s8/s16/s32, sign-extend-then-mask) @val according to
 * the C type name in @type.
 *
 * @pointer: non-zero when @val was read through a pointer dereference; the
 *           trailing " *" of @type is stripped and the pointed-to type is
 *           evaluated instead.
 *
 * Aggregate ("struct ...") and unrecognized type names leave @val as-is.
 *
 * Fix vs. original: the s64 comparison read `strcmp(type, "s64")` with no
 * `== 0`, so the branch was taken for nearly every type name that was not
 * exactly "s64", returning the value unmasked.
 *
 * NOTE(review): `sign` is never set to 1, so the `if (sign)` branches
 * below are dead; harmless, since the masks make both branches produce
 * identical results — left untouched to preserve behavior.
 */
static unsigned long long
eval_type_str(unsigned long long val, const char *type, int pointer)
{
	int sign = 0;
	char *ref;
	int len;

	len = strlen(type);

	if (pointer) {

		if (type[len-1] != '*') {
			do_warning("pointer expected with non pointer type");
			return val;
		}

		ref = malloc_or_die(len);
		memcpy(ref, type, len);

		/* chop off the " *" */
		ref[len - 2] = 0;

		val = eval_type_str(val, ref, 0);
		free(ref);
		return val;
	}

	/* check if this is a pointer */
	if (type[len - 1] == '*')
		return val;

	/* Try to figure out the arg size */
	if (strncmp(type, "struct", 6) == 0)
		/* all bets off */
		return val;

	if (strcmp(type, "u8") == 0)
		return val & 0xff;

	if (strcmp(type, "u16") == 0)
		return val & 0xffff;

	if (strcmp(type, "u32") == 0)
		return val & 0xffffffff;

	if (strcmp(type, "u64") == 0 ||
	    strcmp(type, "s64") == 0)
		return val;

	if (strcmp(type, "s8") == 0)
		return (unsigned long long)(char)val & 0xff;

	if (strcmp(type, "s16") == 0)
		return (unsigned long long)(short)val & 0xffff;

	if (strcmp(type, "s32") == 0)
		return (unsigned long long)(int)val & 0xffffffff;

	if (strncmp(type, "unsigned ", 9) == 0) {
		sign = 0;
		type += 9;
	}

	if (strcmp(type, "char") == 0) {
		if (sign)
			return (unsigned long long)(char)val & 0xff;
		else
			return val & 0xff;
	}

	if (strcmp(type, "short") == 0) {
		if (sign)
			return (unsigned long long)(short)val & 0xffff;
		else
			return val & 0xffff;
	}

	if (strcmp(type, "int") == 0) {
		if (sign)
			return (unsigned long long)(int)val & 0xffffffff;
		else
			return val & 0xffffffff;
	}

	return val;
}
1908
/*
 * Apply the typecast described by @arg (which must be a PRINT_TYPE node)
 * to @val; @pointer is non-zero when @val was read through a pointer
 * dereference.  Calls die() if @arg is not a typecast node.
 */
static unsigned long long
eval_type(unsigned long long val, struct print_arg *arg, int pointer)
{
	if (arg->type != PRINT_TYPE)
		die("expected type argument");

	return eval_type_str(val, arg->typecast.type, pointer);
}
1920
/*
 * Numerically evaluate a constant print_arg expression at parse time.
 *
 * Used while parsing the { value, "name" } tables of __print_flags() and
 * __print_symbolic(), where the value must reduce to a constant.
 *
 * On success stores the result in *val and returns 1; on failure prints a
 * warning and returns 0 (*val may be left unmodified).  Operators are
 * dispatched on the first character of op.op, with the second character
 * distinguishing e.g. "|" from "||" and "<" from "<<"/"<=".
 */
static int arg_num_eval(struct print_arg *arg, long long *val)
{
	long long left, right;
	int ret = 1;

	switch (arg->type) {
	case PRINT_ATOM:
		/* literal token; base 0 accepts decimal, octal and hex */
		*val = strtoll(arg->atom.atom, NULL, 0);
		break;
	case PRINT_TYPE:
		/* evaluate the casted item, then truncate per the cast */
		ret = arg_num_eval(arg->typecast.item, val);
		if (!ret)
			break;
		*val = eval_type(*val, arg, 0);
		break;
	case PRINT_OP:
		switch (arg->op.op[0]) {
		case '|':
			ret = arg_num_eval(arg->op.left, &left);
			if (!ret)
				break;
			ret = arg_num_eval(arg->op.right, &right);
			if (!ret)
				break;
			/* a second char means "||", otherwise bitwise "|" */
			if (arg->op.op[1])
				*val = left || right;
			else
				*val = left | right;
			break;
		case '&':
			ret = arg_num_eval(arg->op.left, &left);
			if (!ret)
				break;
			ret = arg_num_eval(arg->op.right, &right);
			if (!ret)
				break;
			/* a second char means "&&", otherwise bitwise "&" */
			if (arg->op.op[1])
				*val = left && right;
			else
				*val = left & right;
			break;
		case '<':
			ret = arg_num_eval(arg->op.left, &left);
			if (!ret)
				break;
			ret = arg_num_eval(arg->op.right, &right);
			if (!ret)
				break;
			/* "<", "<<" or "<=" */
			switch (arg->op.op[1]) {
			case 0:
				*val = left < right;
				break;
			case '<':
				*val = left << right;
				break;
			case '=':
				*val = left <= right;
				break;
			default:
				do_warning("unknown op '%s'", arg->op.op);
				ret = 0;
			}
			break;
		case '>':
			ret = arg_num_eval(arg->op.left, &left);
			if (!ret)
				break;
			ret = arg_num_eval(arg->op.right, &right);
			if (!ret)
				break;
			/* ">", ">>" or ">=" */
			switch (arg->op.op[1]) {
			case 0:
				*val = left > right;
				break;
			case '>':
				*val = left >> right;
				break;
			case '=':
				*val = left >= right;
				break;
			default:
				do_warning("unknown op '%s'", arg->op.op);
				ret = 0;
			}
			break;
		case '=':
			ret = arg_num_eval(arg->op.left, &left);
			if (!ret)
				break;
			ret = arg_num_eval(arg->op.right, &right);
			if (!ret)
				break;

			/* only "==" is valid; bare "=" is rejected */
			if (arg->op.op[1] != '=') {
				do_warning("unknown op '%s'", arg->op.op);
				ret = 0;
			} else
				*val = left == right;
			break;
		case '!':
			ret = arg_num_eval(arg->op.left, &left);
			if (!ret)
				break;
			ret = arg_num_eval(arg->op.right, &right);
			if (!ret)
				break;

			/* only "!=" is handled here (unary "!" is not) */
			switch (arg->op.op[1]) {
			case '=':
				*val = left != right;
				break;
			default:
				do_warning("unknown op '%s'", arg->op.op);
				ret = 0;
			}
			break;
		case '-':
			/* check for negative: unary minus has a NULL left */
			if (arg->op.left->type == PRINT_NULL)
				left = 0;
			else
				ret = arg_num_eval(arg->op.left, &left);
			if (!ret)
				break;
			ret = arg_num_eval(arg->op.right, &right);
			if (!ret)
				break;
			*val = left - right;
			break;
		case '+':
			/* unary plus also has a NULL left */
			if (arg->op.left->type == PRINT_NULL)
				left = 0;
			else
				ret = arg_num_eval(arg->op.left, &left);
			if (!ret)
				break;
			ret = arg_num_eval(arg->op.right, &right);
			if (!ret)
				break;
			*val = left + right;
			break;
		default:
			do_warning("unknown op '%s'", arg->op.op);
			ret = 0;
		}
		break;

	case PRINT_NULL:
	case PRINT_FIELD ... PRINT_SYMBOL:
	case PRINT_STRING:
	case PRINT_BSTRING:
	default:
		/* non-constant args cannot be folded at parse time */
		do_warning("invalid eval type %d", arg->type);
		ret = 0;

	}
	return ret;
}
2079
2080static char *arg_eval (struct print_arg *arg)
2081{
2082 long long val;
2083 static char buf[20];
2084
2085 switch (arg->type) {
2086 case PRINT_ATOM:
2087 return arg->atom.atom;
2088 case PRINT_TYPE:
2089 return arg_eval(arg->typecast.item);
2090 case PRINT_OP:
2091 if (!arg_num_eval(arg, &val))
2092 break;
2093 sprintf(buf, "%lld", val);
2094 return buf;
2095
2096 case PRINT_NULL:
2097 case PRINT_FIELD ... PRINT_SYMBOL:
2098 case PRINT_STRING:
2099 case PRINT_BSTRING:
2100 default:
2101 die("invalid eval type %d", arg->type);
2102 break;
2103 }
2104
2105 return NULL;
2106}
2107
/*
 * Parse the { value, "string" } pairs of a __print_flags() or
 * __print_symbolic() call, appending a print_flag_sym to *list for each
 * pair.  Pairs are separated by commas; parsing stops at the first token
 * after the last pair, which is handed back through *tok.
 *
 * Returns the type of that lookahead token, or EVENT_ERROR on failure
 * (with *tok set to NULL).
 *
 * NOTE(review): strdup() results are not checked, and a half-built
 * `field` leaks if the closing "}" or its value fails to parse - both
 * pre-existing; documented only.
 */
static enum event_type
process_fields(struct event_format *event, struct print_flag_sym **list, char **tok)
{
	enum event_type type;
	struct print_arg *arg = NULL;
	struct print_flag_sym *field;
	char *token = *tok;
	char *value;

	do {
		free_token(token);
		type = read_token_item(&token);
		/* each pair starts with "{" */
		if (test_type_token(type, token, EVENT_OP, "{"))
			break;

		arg = alloc_arg();

		free_token(token);
		type = process_arg(event, arg, &token);
		if (test_type_token(type, token, EVENT_DELIM, ","))
			goto out_free;

		field = malloc_or_die(sizeof(*field));
		memset(field, 0, sizeof(*field));

		/* the value must fold to a constant at parse time */
		value = arg_eval(arg);
		if (value == NULL)
			goto out_free;
		field->value = strdup(value);

		free_arg(arg);
		arg = alloc_arg();

		free_token(token);
		type = process_arg(event, arg, &token);
		if (test_type_token(type, token, EVENT_OP, "}"))
			goto out_free;

		/* the display string for this value */
		value = arg_eval(arg);
		if (value == NULL)
			goto out_free;
		field->str = strdup(value);
		free_arg(arg);
		arg = NULL;

		/* append to the tail of *list */
		*list = field;
		list = &field->next;

		free_token(token);
		type = read_token_item(&token);
	} while (type == EVENT_DELIM && strcmp(token, ",") == 0);

	*tok = token;
	return type;

out_free:
	free_arg(arg);
	free_token(token);
	*tok = NULL;

	return EVENT_ERROR;
}
2170
/*
 * Parse the arguments of __print_flags(field, delim, { val, "str" }, ...)
 * after the opening "(" has been consumed.
 *
 * Turns @arg into a PRINT_FLAGS node: flags.field holds the evaluated
 * field expression, flags.delim the (optional) separator string, and
 * flags.flags the value/name table built by process_fields().
 *
 * Returns the lookahead token type placed in *tok, or EVENT_ERROR on
 * failure (with *tok set to NULL).
 */
static enum event_type
process_flags(struct event_format *event, struct print_arg *arg, char **tok)
{
	struct print_arg *field;
	enum event_type type;
	char *token;

	memset(arg, 0, sizeof(*arg));
	arg->type = PRINT_FLAGS;

	field = alloc_arg();

	type = process_arg(event, field, &token);

	/* Handle operations in the first argument */
	while (type == EVENT_OP)
		type = process_op(event, field, &token);

	if (test_type_token(type, token, EVENT_DELIM, ","))
		goto out_free;
	free_token(token);

	arg->flags.field = field;

	type = read_token_item(&token);
	if (event_item_type(type)) {
		/* an item here is the delimiter between flag names */
		arg->flags.delim = token;
		type = read_token_item(&token);
	}

	if (test_type_token(type, token, EVENT_DELIM, ","))
		goto out_free;

	type = process_fields(event, &arg->flags.flags, &token);
	if (test_type_token(type, token, EVENT_DELIM, ")"))
		goto out_free;

	free_token(token);
	type = read_token_item(tok);
	return type;

 out_free:
	/* NOTE(review): field leaks if not yet linked into arg (pre-existing) */
	free_token(token);
	*tok = NULL;
	return EVENT_ERROR;
}
2217
/*
 * Parse the arguments of __print_symbolic(field, { val, "str" }, ...)
 * after the opening "(" has been consumed.
 *
 * Turns @arg into a PRINT_SYMBOL node: symbol.field holds the field
 * expression and symbol.symbols the value/name table from
 * process_fields().  Unlike __print_flags() there is no delimiter
 * argument.
 *
 * Returns the lookahead token type placed in *tok, or EVENT_ERROR on
 * failure (with *tok set to NULL).
 */
static enum event_type
process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
{
	struct print_arg *field;
	enum event_type type;
	char *token;

	memset(arg, 0, sizeof(*arg));
	arg->type = PRINT_SYMBOL;

	field = alloc_arg();

	type = process_arg(event, field, &token);
	if (test_type_token(type, token, EVENT_DELIM, ","))
		goto out_free;

	arg->symbol.field = field;

	type = process_fields(event, &arg->symbol.symbols, &token);
	if (test_type_token(type, token, EVENT_DELIM, ")"))
		goto out_free;

	free_token(token);
	type = read_token_item(tok);
	return type;

 out_free:
	/* NOTE(review): field leaks if the "," check fails (pre-existing) */
	free_token(token);
	*tok = NULL;
	return EVENT_ERROR;
}
2249
2250static enum event_type
2251process_dynamic_array(struct event_format *event, struct print_arg *arg, char **tok)
2252{
2253 struct format_field *field;
2254 enum event_type type;
2255 char *token;
2256
2257 memset(arg, 0, sizeof(*arg));
2258 arg->type = PRINT_DYNAMIC_ARRAY;
2259
2260 /*
2261 * The item within the parenthesis is another field that holds
2262 * the index into where the array starts.
2263 */
2264 type = read_token(&token);
2265 *tok = token;
2266 if (type != EVENT_ITEM)
2267 goto out_free;
2268
2269 /* Find the field */
2270
2271 field = pevent_find_field(event, token);
2272 if (!field)
2273 goto out_free;
2274
2275 arg->dynarray.field = field;
2276 arg->dynarray.index = 0;
2277
2278 if (read_expected(EVENT_DELIM, ")") < 0)
2279 goto out_free;
2280
2281 free_token(token);
2282 type = read_token_item(&token);
2283 *tok = token;
2284 if (type != EVENT_OP || strcmp(token, "[") != 0)
2285 return type;
2286
2287 free_token(token);
2288 arg = alloc_arg();
2289 type = process_arg(event, arg, &token);
2290 if (type == EVENT_ERROR)
2291 goto out_free;
2292
2293 if (!test_type_token(type, token, EVENT_OP, "]"))
2294 goto out_free;
2295
2296 free_token(token);
2297 type = read_token_item(tok);
2298 return type;
2299
2300 out_free:
2301 free(arg);
2302 free_token(token);
2303 *tok = NULL;
2304 return EVENT_ERROR;
2305}
2306
/*
 * Parse the contents of a parenthesized expression after the opening "("
 * has been consumed.  If the token following the closing ")" is an item
 * or another "(", the parenthesized atom is reinterpreted as a typecast
 * and the following expression becomes the casted item.
 *
 * Returns the lookahead token type placed in *tok, or EVENT_ERROR on
 * failure (with *tok set to NULL).
 */
static enum event_type
process_paren(struct event_format *event, struct print_arg *arg, char **tok)
{
	struct print_arg *item_arg;
	enum event_type type;
	char *token;

	type = process_arg(event, arg, &token);

	if (type == EVENT_ERROR)
		goto out_free;

	/* fold any trailing operators into the expression */
	if (type == EVENT_OP)
		type = process_op(event, arg, &token);

	if (type == EVENT_ERROR)
		goto out_free;

	if (test_type_token(type, token, EVENT_DELIM, ")"))
		goto out_free;

	free_token(token);
	type = read_token_item(&token);

	/*
	 * If the next token is an item or another open paren, then
	 * this was a typecast.
	 */
	if (event_item_type(type) ||
	    (type == EVENT_DELIM && strcmp(token, "(") == 0)) {

		/* make this a typecast and continue */

		/* previous must be an atom */
		if (arg->type != PRINT_ATOM)
			die("previous needed to be PRINT_ATOM");

		item_arg = alloc_arg();

		arg->type = PRINT_TYPE;
		/* the atom string becomes the type name of the cast */
		arg->typecast.type = arg->atom.atom;
		arg->typecast.item = item_arg;
		type = process_arg_token(event, item_arg, &token, type);

	}

	*tok = token;
	return type;

 out_free:
	free_token(token);
	*tok = NULL;
	return EVENT_ERROR;
}
2361
2362
/*
 * Parse the argument of __get_str(field) after the opening "(" has been
 * consumed.  @arg becomes a PRINT_STRING node naming the string field;
 * the offset is left as -1 to be resolved when the record is printed.
 *
 * Returns the lookahead token type placed in *tok, or EVENT_ERROR on
 * failure (with *tok set to NULL).
 */
static enum event_type
process_str(struct event_format *event __unused, struct print_arg *arg, char **tok)
{
	enum event_type type;
	char *token;

	if (read_expect_type(EVENT_ITEM, &token) < 0)
		goto out_free;

	arg->type = PRINT_STRING;
	arg->string.string = token;
	arg->string.offset = -1;	/* resolved at print time */

	/* consume the closing ")" of __get_str(...) */
	if (read_expected(EVENT_DELIM, ")") < 0)
		goto out_err;

	type = read_token(&token);
	*tok = token;

	return type;

 out_free:
	free_token(token);
 out_err:
	*tok = NULL;
	return EVENT_ERROR;
}
2390
2391static struct pevent_function_handler *
2392find_func_handler(struct pevent *pevent, char *func_name)
2393{
2394 struct pevent_function_handler *func;
2395
2396 for (func = pevent->func_handlers; func; func = func->next) {
2397 if (strcmp(func->name, func_name) == 0)
2398 break;
2399 }
2400
2401 return func;
2402}
2403
2404static void remove_func_handler(struct pevent *pevent, char *func_name)
2405{
2406 struct pevent_function_handler *func;
2407 struct pevent_function_handler **next;
2408
2409 next = &pevent->func_handlers;
2410 while ((func = *next)) {
2411 if (strcmp(func->name, func_name) == 0) {
2412 *next = func->next;
2413 free_func_handle(func);
2414 break;
2415 }
2416 next = &func->next;
2417 }
2418}
2419
/*
 * Parse the arguments of a user-registered print function handler @func,
 * expecting exactly func->nr_args comma-separated arguments followed by
 * a closing ")".
 *
 * On success @arg becomes a PRINT_FUNC node whose func.args list holds
 * the parsed arguments, and the lookahead token type is returned.  On a
 * delimiter mismatch the argument being parsed is freed, EVENT_ERROR is
 * returned, and arguments already linked into @arg are left for the
 * caller to free.
 */
static enum event_type
process_func_handler(struct event_format *event, struct pevent_function_handler *func,
		     struct print_arg *arg, char **tok)
{
	struct print_arg **next_arg;
	struct print_arg *farg;
	enum event_type type;
	char *token;
	char *test;
	int i;

	arg->type = PRINT_FUNC;
	arg->func.func = func;

	*tok = NULL;

	next_arg = &(arg->func.args);
	for (i = 0; i < func->nr_args; i++) {
		farg = alloc_arg();
		type = process_arg(event, farg, &token);
		/* every argument but the last must be followed by "," */
		if (i < (func->nr_args - 1))
			test = ",";
		else
			test = ")";

		if (test_type_token(type, token, EVENT_DELIM, test)) {
			free_arg(farg);
			free_token(token);
			return EVENT_ERROR;
		}

		/* append to the tail of the args list */
		*next_arg = farg;
		next_arg = &(farg->next);
		free_token(token);
	}

	type = read_token(&token);
	*tok = token;

	return type;
}
2461
2462static enum event_type
2463process_function(struct event_format *event, struct print_arg *arg,
2464 char *token, char **tok)
2465{
2466 struct pevent_function_handler *func;
2467
2468 if (strcmp(token, "__print_flags") == 0) {
2469 free_token(token);
2470 is_flag_field = 1;
2471 return process_flags(event, arg, tok);
2472 }
2473 if (strcmp(token, "__print_symbolic") == 0) {
2474 free_token(token);
2475 is_symbolic_field = 1;
2476 return process_symbols(event, arg, tok);
2477 }
2478 if (strcmp(token, "__get_str") == 0) {
2479 free_token(token);
2480 return process_str(event, arg, tok);
2481 }
2482 if (strcmp(token, "__get_dynamic_array") == 0) {
2483 free_token(token);
2484 return process_dynamic_array(event, arg, tok);
2485 }
2486
2487 func = find_func_handler(event->pevent, token);
2488 if (func) {
2489 free_token(token);
2490 return process_func_handler(event, func, arg, tok);
2491 }
2492
2493 do_warning("function %s not defined", token);
2494 free_token(token);
2495 return EVENT_ERROR;
2496}
2497
/*
 * Build a print_arg node from the token in *tok (of the given @type).
 *
 * Items become fields ("REC"), function calls (followed by "("), or
 * multi-word atoms; quotes become atoms; "(" opens a parenthesized
 * expression; any other delimiter or operator is treated as a (possibly
 * unary) op.  The next lookahead token is returned through *tok.
 */
static enum event_type
process_arg_token(struct event_format *event, struct print_arg *arg,
		  char **tok, enum event_type type)
{
	char *token;
	char *atom;

	token = *tok;

	switch (type) {
	case EVENT_ITEM:
		if (strcmp(token, "REC") == 0) {
			/* record-field reference: REC->name */
			free_token(token);
			type = process_entry(event, arg, &token);
			break;
		}
		atom = token;
		/* test the next token */
		type = read_token_item(&token);

		/*
		 * If the next token is a parenthesis, then this
		 * is a function.
		 */
		if (type == EVENT_DELIM && strcmp(token, "(") == 0) {
			free_token(token);
			token = NULL;
			/* this will free atom. */
			type = process_function(event, arg, atom, &token);
			break;
		}
		/* atoms can be more than one token long */
		while (type == EVENT_ITEM) {
			atom = realloc(atom, strlen(atom) + strlen(token) + 2);
			strcat(atom, " ");
			strcat(atom, token);
			free_token(token);
			type = read_token_item(&token);
		}

		arg->type = PRINT_ATOM;
		arg->atom.atom = atom;
		break;

	case EVENT_DQUOTE:
	case EVENT_SQUOTE:
		/* quoted strings/chars are kept verbatim as atoms */
		arg->type = PRINT_ATOM;
		arg->atom.atom = token;
		type = read_token_item(&token);
		break;
	case EVENT_DELIM:
		if (strcmp(token, "(") == 0) {
			free_token(token);
			type = process_paren(event, arg, &token);
			break;
		}
		/* fall through - other delimiters are handled as ops */
	case EVENT_OP:
		/* handle single ops */
		arg->type = PRINT_OP;
		arg->op.op = token;
		arg->op.left = NULL;
		type = process_op(event, arg, &token);

		/* On error, the op is freed */
		if (type == EVENT_ERROR)
			arg->op.op = NULL;

		/* return error type if errored */
		break;

	case EVENT_ERROR ... EVENT_NEWLINE:
	default:
		die("unexpected type %d", type);
	}
	*tok = token;

	return type;
}
2576
/*
 * Parse the comma-separated argument list that follows the format string
 * in a "print fmt:" line, appending each parsed print_arg to *list.
 *
 * Returns the number of arguments parsed, or -1 on error (any partially
 * built arg is freed and unlinked first).  Newline tokens between
 * arguments are skipped.
 */
static int event_read_print_args(struct event_format *event, struct print_arg **list)
{
	enum event_type type = EVENT_ERROR;
	struct print_arg *arg;
	char *token;
	int args = 0;

	do {
		if (type == EVENT_NEWLINE) {
			/* format strings may span lines; just keep reading */
			type = read_token_item(&token);
			continue;
		}

		arg = alloc_arg();

		type = process_arg(event, arg, &token);

		if (type == EVENT_ERROR) {
			free_token(token);
			free_arg(arg);
			return -1;
		}

		*list = arg;
		args++;

		if (type == EVENT_OP) {
			/* a trailing operator extends the current arg */
			type = process_op(event, arg, &token);
			free_token(token);
			if (type == EVENT_ERROR) {
				*list = NULL;
				free_arg(arg);
				return -1;
			}
			list = &arg->next;
			continue;
		}

		if (type == EVENT_DELIM && strcmp(token, ",") == 0) {
			/* more arguments follow */
			free_token(token);
			*list = arg;
			list = &arg->next;
			continue;
		}
		break;
	} while (type != EVENT_NONE);

	if (type != EVENT_NONE && type != EVENT_ERROR)
		free_token(token);

	return args;
}
2629
/*
 * Parse the 'print fmt: "<format>", args...' section of an event format
 * file, filling event->print_fmt.  Adjacent double-quoted strings are
 * concatenated C-style (via the `concat` loop) before the argument list
 * is parsed.
 *
 * Returns the number of print arguments (>= 0) on success, -1 on error.
 */
static int event_read_print(struct event_format *event)
{
	enum event_type type;
	char *token;
	int ret;

	if (read_expected_item(EVENT_ITEM, "print") < 0)
		return -1;

	if (read_expected(EVENT_ITEM, "fmt") < 0)
		return -1;

	if (read_expected(EVENT_OP, ":") < 0)
		return -1;

	if (read_expect_type(EVENT_DQUOTE, &token) < 0)
		goto fail;

 concat:
	event->print_fmt.format = token;
	event->print_fmt.args = NULL;

	/* ok to have no arg */
	type = read_token_item(&token);

	if (type == EVENT_NONE)
		return 0;

	/* Handle concatenation of print lines */
	if (type == EVENT_DQUOTE) {
		char *cat;

		cat = malloc_or_die(strlen(event->print_fmt.format) +
				    strlen(token) + 1);
		strcpy(cat, event->print_fmt.format);
		strcat(cat, token);
		free_token(token);
		free_token(event->print_fmt.format);
		event->print_fmt.format = NULL;
		token = cat;
		goto concat;
	}

	/* the format string must be followed by "," then the args */
	if (test_type_token(type, token, EVENT_DELIM, ","))
		goto fail;

	free_token(token);

	ret = event_read_print_args(event, &event->print_fmt.args);
	if (ret < 0)
		return -1;

	return ret;

 fail:
	free_token(token);
	return -1;
}
2688
2689/**
2690 * pevent_find_common_field - return a common field by event
2691 * @event: handle for the event
2692 * @name: the name of the common field to return
2693 *
2694 * Returns a common field from the event by the given @name.
2695 * This only searchs the common fields and not all field.
2696 */
2697struct format_field *
2698pevent_find_common_field(struct event_format *event, const char *name)
2699{
2700 struct format_field *format;
2701
2702 for (format = event->format.common_fields;
2703 format; format = format->next) {
2704 if (strcmp(format->name, name) == 0)
2705 break;
2706 }
2707
2708 return format;
2709}
2710
2711/**
2712 * pevent_find_field - find a non-common field
2713 * @event: handle for the event
2714 * @name: the name of the non-common field
2715 *
2716 * Returns a non-common field by the given @name.
2717 * This does not search common fields.
2718 */
2719struct format_field *
2720pevent_find_field(struct event_format *event, const char *name)
2721{
2722 struct format_field *format;
2723
2724 for (format = event->format.fields;
2725 format; format = format->next) {
2726 if (strcmp(format->name, name) == 0)
2727 break;
2728 }
2729
2730 return format;
2731}
2732
/**
 * pevent_find_any_field - find any field by name
 * @event: handle for the event
 * @name: the name of the field
 *
 * Returns a field by the given @name.
 * Common fields are searched first, then the non-common ones.
 */
struct format_field *
pevent_find_any_field(struct event_format *event, const char *name)
{
	struct format_field *field;

	field = pevent_find_common_field(event, name);
	if (!field)
		field = pevent_find_field(event, name);

	return field;
}
2752
/**
 * pevent_read_number - read a number from data
 * @pevent: handle for the pevent
 * @ptr: the raw data
 * @size: the size of the data that holds the number
 *
 * Returns the number (converted to host byte order) from the
 * raw data.  Unsupported sizes return 0.
 */
unsigned long long pevent_read_number(struct pevent *pevent,
				      const void *ptr, int size)
{
	if (size == 1)
		return *(unsigned char *)ptr;
	if (size == 2)
		return data2host2(pevent, ptr);
	if (size == 4)
		return data2host4(pevent, ptr);
	if (size == 8)
		return data2host8(pevent, ptr);

	/* BUG! unsupported field size */
	return 0;
}
2779
2780/**
2781 * pevent_read_number_field - read a number from data
2782 * @field: a handle to the field
2783 * @data: the raw data to read
2784 * @value: the value to place the number in
2785 *
2786 * Reads raw data according to a field offset and size,
2787 * and translates it into @value.
2788 *
2789 * Returns 0 on success, -1 otherwise.
2790 */
2791int pevent_read_number_field(struct format_field *field, const void *data,
2792 unsigned long long *value)
2793{
2794 if (!field)
2795 return -1;
2796 switch (field->size) {
2797 case 1:
2798 case 2:
2799 case 4:
2800 case 8:
2801 *value = pevent_read_number(field->event->pevent,
2802 data + field->offset, field->size);
2803 return 0;
2804 default:
2805 return -1;
2806 }
2807}
2808
2809static int get_common_info(struct pevent *pevent,
2810 const char *type, int *offset, int *size)
2811{
2812 struct event_format *event;
2813 struct format_field *field;
2814
2815 /*
2816 * All events should have the same common elements.
2817 * Pick any event to find where the type is;
2818 */
2819 if (!pevent->events)
2820 die("no event_list!");
2821
2822 event = pevent->events[0];
2823 field = pevent_find_common_field(event, type);
2824 if (!field)
2825 die("field '%s' not found", type);
2826
2827 *offset = field->offset;
2828 *size = field->size;
2829
2830 return 0;
2831}
2832
/*
 * Read the common field @name from a raw record.  The field's offset and
 * size are resolved once and cached through the @offset/@size pointers
 * (a *size of 0 means "not yet resolved").
 */
static int __parse_common(struct pevent *pevent, void *data,
			  int *size, int *offset, const char *name)
{
	if (*size == 0) {
		int ret = get_common_info(pevent, name, offset, size);

		if (ret < 0)
			return ret;
	}

	return pevent_read_number(pevent, data + *offset, *size);
}
2845
2846static int trace_parse_common_type(struct pevent *pevent, void *data)
2847{
2848 return __parse_common(pevent, data,
2849 &pevent->type_size, &pevent->type_offset,
2850 "common_type");
2851}
2852
2853static int parse_common_pid(struct pevent *pevent, void *data)
2854{
2855 return __parse_common(pevent, data,
2856 &pevent->pid_size, &pevent->pid_offset,
2857 "common_pid");
2858}
2859
2860static int parse_common_pc(struct pevent *pevent, void *data)
2861{
2862 return __parse_common(pevent, data,
2863 &pevent->pc_size, &pevent->pc_offset,
2864 "common_preempt_count");
2865}
2866
2867static int parse_common_flags(struct pevent *pevent, void *data)
2868{
2869 return __parse_common(pevent, data,
2870 &pevent->flags_size, &pevent->flags_offset,
2871 "common_flags");
2872}
2873
2874static int parse_common_lock_depth(struct pevent *pevent, void *data)
2875{
2876 int ret;
2877
2878 ret = __parse_common(pevent, data,
2879 &pevent->ld_size, &pevent->ld_offset,
2880 "common_lock_depth");
2881 if (ret < 0)
2882 return -1;
2883
2884 return ret;
2885}
2886
2887static int events_id_cmp(const void *a, const void *b);
2888
2889/**
2890 * pevent_find_event - find an event by given id
2891 * @pevent: a handle to the pevent
2892 * @id: the id of the event
2893 *
2894 * Returns an event that has a given @id.
2895 */
2896struct event_format *pevent_find_event(struct pevent *pevent, int id)
2897{
2898 struct event_format **eventptr;
2899 struct event_format key;
2900 struct event_format *pkey = &key;
2901
2902 /* Check cache first */
2903 if (pevent->last_event && pevent->last_event->id == id)
2904 return pevent->last_event;
2905
2906 key.id = id;
2907
2908 eventptr = bsearch(&pkey, pevent->events, pevent->nr_events,
2909 sizeof(*pevent->events), events_id_cmp);
2910
2911 if (eventptr) {
2912 pevent->last_event = *eventptr;
2913 return *eventptr;
2914 }
2915
2916 return NULL;
2917}
2918
2919/**
2920 * pevent_find_event_by_name - find an event by given name
2921 * @pevent: a handle to the pevent
2922 * @sys: the system name to search for
2923 * @name: the name of the event to search for
2924 *
2925 * This returns an event with a given @name and under the system
2926 * @sys. If @sys is NULL the first event with @name is returned.
2927 */
2928struct event_format *
2929pevent_find_event_by_name(struct pevent *pevent,
2930 const char *sys, const char *name)
2931{
2932 struct event_format *event;
2933 int i;
2934
2935 if (pevent->last_event &&
2936 strcmp(pevent->last_event->name, name) == 0 &&
2937 (!sys || strcmp(pevent->last_event->system, sys) == 0))
2938 return pevent->last_event;
2939
2940 for (i = 0; i < pevent->nr_events; i++) {
2941 event = pevent->events[i];
2942 if (strcmp(event->name, name) == 0) {
2943 if (!sys)
2944 break;
2945 if (strcmp(event->system, sys) == 0)
2946 break;
2947 }
2948 }
2949 if (i == pevent->nr_events)
2950 event = NULL;
2951
2952 pevent->last_event = event;
2953 return event;
2954}
2955
/*
 * Evaluate a print_arg against a raw record, producing its numeric value.
 *
 * @data/@size: the raw event record; field references are read from it
 * via pevent_read_number().  Recurses through typecasts and operators.
 * Array subscripts ("[") are handled specially so that dynamic arrays
 * and array fields are indexed with the correct element size rather
 * than read wholesale.  Non-numeric args (strings, flags tables) yield 0.
 */
static unsigned long long
eval_num_arg(void *data, int size, struct event_format *event, struct print_arg *arg)
{
	struct pevent *pevent = event->pevent;
	unsigned long long val = 0;
	unsigned long long left, right;
	struct print_arg *typearg = NULL;
	struct print_arg *larg;
	unsigned long offset;
	unsigned int field_size;

	switch (arg->type) {
	case PRINT_NULL:
		/* ?? */
		return 0;
	case PRINT_ATOM:
		return strtoull(arg->atom.atom, NULL, 0);
	case PRINT_FIELD:
		/* resolve and cache the format field on first use */
		if (!arg->field.field) {
			arg->field.field = pevent_find_any_field(event, arg->field.name);
			if (!arg->field.field)
				die("field %s not found", arg->field.name);
		}
		/* must be a number */
		val = pevent_read_number(pevent, data + arg->field.field->offset,
				arg->field.field->size);
		break;
	case PRINT_FLAGS:
	case PRINT_SYMBOL:
		break;
	case PRINT_TYPE:
		val = eval_num_arg(data, size, event, arg->typecast.item);
		return eval_type(val, arg, 0);
	case PRINT_STRING:
	case PRINT_BSTRING:
		return 0;
	case PRINT_FUNC: {
		/* run the registered handler; its printed output is discarded */
		struct trace_seq s;
		trace_seq_init(&s);
		val = process_defined_func(&s, data, size, event, arg);
		trace_seq_destroy(&s);
		return val;
	}
	case PRINT_OP:
		if (strcmp(arg->op.op, "[") == 0) {
			/*
			 * Arrays are special, since we don't want
			 * to read the arg as is.
			 */
			right = eval_num_arg(data, size, event, arg->op.right);

			/* handle typecasts */
			larg = arg->op.left;
			while (larg->type == PRINT_TYPE) {
				if (!typearg)
					typearg = larg;
				larg = larg->typecast.item;
			}

			/* Default to long size */
			field_size = pevent->long_size;

			switch (larg->type) {
			case PRINT_DYNAMIC_ARRAY:
				offset = pevent_read_number(pevent,
						   data + larg->dynarray.field->offset,
						   larg->dynarray.field->size);
				if (larg->dynarray.field->elementsize)
					field_size = larg->dynarray.field->elementsize;
				/*
				 * The actual length of the dynamic array is stored
				 * in the top half of the field, and the offset
				 * is in the bottom half of the 32 bit field.
				 */
				offset &= 0xffff;
				offset += right;
				break;
			case PRINT_FIELD:
				if (!larg->field.field) {
					larg->field.field =
						pevent_find_any_field(event, larg->field.name);
					if (!larg->field.field)
						die("field %s not found", larg->field.name);
				}
				field_size = larg->field.field->elementsize;
				offset = larg->field.field->offset +
					right * larg->field.field->elementsize;
				break;
			default:
				goto default_op; /* oops, all bets off */
			}
			val = pevent_read_number(pevent,
						 data + offset, field_size);
			if (typearg)
				val = eval_type(val, typearg, 1);
			break;
		} else if (strcmp(arg->op.op, "?") == 0) {
			/* ternary: the right node is the ":" op holding both arms */
			left = eval_num_arg(data, size, event, arg->op.left);
			arg = arg->op.right;
			if (left)
				val = eval_num_arg(data, size, event, arg->op.left);
			else
				val = eval_num_arg(data, size, event, arg->op.right);
			break;
		}
 default_op:
		left = eval_num_arg(data, size, event, arg->op.left);
		right = eval_num_arg(data, size, event, arg->op.right);
		switch (arg->op.op[0]) {
		case '!':
			switch (arg->op.op[1]) {
			case 0:
				val = !right;
				break;
			case '=':
				val = left != right;
				break;
			default:
				die("unknown op '%s'", arg->op.op);
			}
			break;
		case '~':
			val = ~right;
			break;
		case '|':
			if (arg->op.op[1])
				val = left || right;
			else
				val = left | right;
			break;
		case '&':
			if (arg->op.op[1])
				val = left && right;
			else
				val = left & right;
			break;
		case '<':
			switch (arg->op.op[1]) {
			case 0:
				val = left < right;
				break;
			case '<':
				val = left << right;
				break;
			case '=':
				val = left <= right;
				break;
			default:
				die("unknown op '%s'", arg->op.op);
			}
			break;
		case '>':
			switch (arg->op.op[1]) {
			case 0:
				val = left > right;
				break;
			case '>':
				val = left >> right;
				break;
			case '=':
				val = left >= right;
				break;
			default:
				die("unknown op '%s'", arg->op.op);
			}
			break;
		case '=':
			if (arg->op.op[1] != '=')
				die("unknown op '%s'", arg->op.op);
			val = left == right;
			break;
		case '-':
			val = left - right;
			break;
		case '+':
			val = left + right;
			break;
		case '/':
			val = left / right;
			break;
		case '*':
			val = left * right;
			break;
		default:
			die("unknown op '%s'", arg->op.op);
		}
		break;
	default: /* not sure what to do there */
		return 0;
	}
	return val;
}
3148
struct flag {
	const char *name;
	unsigned long long value;
};

/* Symbolic names that may appear unconverted in event format files. */
static const struct flag flags[] = {
	{ "HI_SOFTIRQ", 0 },
	{ "TIMER_SOFTIRQ", 1 },
	{ "NET_TX_SOFTIRQ", 2 },
	{ "NET_RX_SOFTIRQ", 3 },
	{ "BLOCK_SOFTIRQ", 4 },
	{ "BLOCK_IOPOLL_SOFTIRQ", 5 },
	{ "TASKLET_SOFTIRQ", 6 },
	{ "SCHED_SOFTIRQ", 7 },
	{ "HRTIMER_SOFTIRQ", 8 },
	{ "RCU_SOFTIRQ", 9 },

	{ "HRTIMER_NORESTART", 0 },
	{ "HRTIMER_RESTART", 1 },
};

/*
 * eval_flag - convert a flag token to its numeric value
 *
 * Numeric tokens are parsed directly (strtoull with base 0, so hex and
 * octal prefixes work).  Otherwise the token is looked up in the table
 * of well-known flag names above.  Unknown names evaluate to 0.
 */
static unsigned long long eval_flag(const char *flag)
{
	size_t idx;

	if (isdigit(flag[0]))
		return strtoull(flag, NULL, 0);

	for (idx = 0; idx < sizeof(flags) / sizeof(flags[0]); idx++) {
		if (strcmp(flags[idx].name, flag) == 0)
			return flags[idx].value;
	}

	return 0;
}
3188
/*
 * Print @str into @s using @format.  A non-negative @len_arg is passed
 * as the '*' width/precision argument of the format string.
 */
static void print_str_to_seq(struct trace_seq *s, const char *format,
			     int len_arg, const char *str)
{
	if (len_arg < 0)
		trace_seq_printf(s, format, str);
	else
		trace_seq_printf(s, format, len_arg, str);
}
3197
3198static void print_str_arg(struct trace_seq *s, void *data, int size,
3199 struct event_format *event, const char *format,
3200 int len_arg, struct print_arg *arg)
3201{
3202 struct pevent *pevent = event->pevent;
3203 struct print_flag_sym *flag;
3204 unsigned long long val, fval;
3205 unsigned long addr;
3206 char *str;
3207 int print;
3208 int len;
3209
3210 switch (arg->type) {
3211 case PRINT_NULL:
3212 /* ?? */
3213 return;
3214 case PRINT_ATOM:
3215 print_str_to_seq(s, format, len_arg, arg->atom.atom);
3216 return;
3217 case PRINT_FIELD:
3218 if (!arg->field.field) {
3219 arg->field.field = pevent_find_any_field(event, arg->field.name);
3220 if (!arg->field.field)
3221 die("field %s not found", arg->field.name);
3222 }
3223 /* Zero sized fields, mean the rest of the data */
3224 len = arg->field.field->size ? : size - arg->field.field->offset;
3225
3226 /*
3227 * Some events pass in pointers. If this is not an array
3228 * and the size is the same as long_size, assume that it
3229 * is a pointer.
3230 */
3231 if (!(arg->field.field->flags & FIELD_IS_ARRAY) &&
3232 arg->field.field->size == pevent->long_size) {
3233 addr = *(unsigned long *)(data + arg->field.field->offset);
3234 trace_seq_printf(s, "%lx", addr);
3235 break;
3236 }
3237 str = malloc_or_die(len + 1);
3238 memcpy(str, data + arg->field.field->offset, len);
3239 str[len] = 0;
3240 print_str_to_seq(s, format, len_arg, str);
3241 free(str);
3242 break;
3243 case PRINT_FLAGS:
3244 val = eval_num_arg(data, size, event, arg->flags.field);
3245 print = 0;
3246 for (flag = arg->flags.flags; flag; flag = flag->next) {
3247 fval = eval_flag(flag->value);
3248 if (!val && !fval) {
3249 print_str_to_seq(s, format, len_arg, flag->str);
3250 break;
3251 }
3252 if (fval && (val & fval) == fval) {
3253 if (print && arg->flags.delim)
3254 trace_seq_puts(s, arg->flags.delim);
3255 print_str_to_seq(s, format, len_arg, flag->str);
3256 print = 1;
3257 val &= ~fval;
3258 }
3259 }
3260 break;
3261 case PRINT_SYMBOL:
3262 val = eval_num_arg(data, size, event, arg->symbol.field);
3263 for (flag = arg->symbol.symbols; flag; flag = flag->next) {
3264 fval = eval_flag(flag->value);
3265 if (val == fval) {
3266 print_str_to_seq(s, format, len_arg, flag->str);
3267 break;
3268 }
3269 }
3270 break;
3271
3272 case PRINT_TYPE:
3273 break;
3274 case PRINT_STRING: {
3275 int str_offset;
3276
3277 if (arg->string.offset == -1) {
3278 struct format_field *f;
3279
3280 f = pevent_find_any_field(event, arg->string.string);
3281 arg->string.offset = f->offset;
3282 }
3283 str_offset = data2host4(pevent, data + arg->string.offset);
3284 str_offset &= 0xffff;
3285 print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset);
3286 break;
3287 }
3288 case PRINT_BSTRING:
3289 trace_seq_printf(s, format, arg->string.string);
3290 break;
3291 case PRINT_OP:
3292 /*
3293 * The only op for string should be ? :
3294 */
3295 if (arg->op.op[0] != '?')
3296 return;
3297 val = eval_num_arg(data, size, event, arg->op.left);
3298 if (val)
3299 print_str_arg(s, data, size, event,
3300 format, len_arg, arg->op.right->op.left);
3301 else
3302 print_str_arg(s, data, size, event,
3303 format, len_arg, arg->op.right->op.right);
3304 break;
3305 case PRINT_FUNC:
3306 process_defined_func(s, data, size, event, arg);
3307 break;
3308 default:
3309 /* well... */
3310 break;
3311 }
3312}
3313
3314static unsigned long long
3315process_defined_func(struct trace_seq *s, void *data, int size,
3316 struct event_format *event, struct print_arg *arg)
3317{
3318 struct pevent_function_handler *func_handle = arg->func.func;
3319 struct pevent_func_params *param;
3320 unsigned long long *args;
3321 unsigned long long ret;
3322 struct print_arg *farg;
3323 struct trace_seq str;
3324 struct save_str {
3325 struct save_str *next;
3326 char *str;
3327 } *strings = NULL, *string;
3328 int i;
3329
3330 if (!func_handle->nr_args) {
3331 ret = (*func_handle->func)(s, NULL);
3332 goto out;
3333 }
3334
3335 farg = arg->func.args;
3336 param = func_handle->params;
3337
3338 args = malloc_or_die(sizeof(*args) * func_handle->nr_args);
3339 for (i = 0; i < func_handle->nr_args; i++) {
3340 switch (param->type) {
3341 case PEVENT_FUNC_ARG_INT:
3342 case PEVENT_FUNC_ARG_LONG:
3343 case PEVENT_FUNC_ARG_PTR:
3344 args[i] = eval_num_arg(data, size, event, farg);
3345 break;
3346 case PEVENT_FUNC_ARG_STRING:
3347 trace_seq_init(&str);
3348 print_str_arg(&str, data, size, event, "%s", -1, farg);
3349 trace_seq_terminate(&str);
3350 string = malloc_or_die(sizeof(*string));
3351 string->next = strings;
3352 string->str = strdup(str.buffer);
3353 strings = string;
3354 trace_seq_destroy(&str);
3355 break;
3356 default:
3357 /*
3358 * Something went totally wrong, this is not
3359 * an input error, something in this code broke.
3360 */
3361 die("Unexpected end of arguments\n");
3362 break;
3363 }
3364 farg = farg->next;
3365 }
3366
3367 ret = (*func_handle->func)(s, args);
3368 free(args);
3369 while (strings) {
3370 string = strings;
3371 strings = string->next;
3372 free(string->str);
3373 free(string);
3374 }
3375
3376 out:
3377 /* TBD : handle return type here */
3378 return ret;
3379}
3380
/*
 * make_bprint_args - build a print_arg list from a binary printk record
 * @fmt:   the resolved printk format string
 * @data:  raw event payload
 * @size:  size of @data in bytes
 * @event: event format describing the payload layout
 *
 * A bprint record stores the format's arguments packed in the "buf"
 * field.  This walks @fmt's conversion specifiers and decodes each
 * packed value into a PRINT_ATOM (numbers) or PRINT_BSTRING (strings)
 * argument.  The first argument produced is always the IP pointer.
 *
 * Returns the head of the allocated argument list.
 */
static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event)
{
	struct pevent *pevent = event->pevent;
	struct format_field *field, *ip_field;
	struct print_arg *args, *arg, **next;
	unsigned long long ip, val;
	char *ptr;
	void *bptr;

	/* The buf/ip field descriptors are cached on the pevent handle. */
	field = pevent->bprint_buf_field;
	ip_field = pevent->bprint_ip_field;

	if (!field) {
		field = pevent_find_field(event, "buf");
		if (!field)
			die("can't find buffer field for binary printk");
		ip_field = pevent_find_field(event, "ip");
		if (!ip_field)
			die("can't find ip field for binary printk");
		pevent->bprint_buf_field = field;
		pevent->bprint_ip_field = ip_field;
	}

	ip = pevent_read_number(pevent, data + ip_field->offset, ip_field->size);

	/*
	 * The first arg is the IP pointer.
	 */
	args = alloc_arg();
	arg = args;
	arg->next = NULL;
	next = &arg->next;

	arg->type = PRINT_ATOM;
	arg->atom.atom = malloc_or_die(32);
	sprintf(arg->atom.atom, "%lld", ip);

	/* skip the first "%pf : " */
	/* NOTE(review): assumes @fmt was produced by get_bprint_format(),
	 * which always prefixes "%pf : " — confirm if called elsewhere. */
	for (ptr = fmt + 6, bptr = data + field->offset;
	     bptr < data + size && *ptr; ptr++) {
		/* ls: size class of the next packed value (see switch below) */
		int ls = 0;

		if (*ptr == '%') {
 process_again:
			/* Consume length modifiers/width digits, then the
			 * conversion character itself. */
			ptr++;
			switch (*ptr) {
			case '%':
				break;
			case 'l':
				ls++;
				goto process_again;
			case 'L':
				ls = 2;
				goto process_again;
			case '0' ... '9':
				goto process_again;
			case 'p':
				ls = 1;
				/* fall through */
			case 'd':
			case 'u':
			case 'x':
			case 'i':
				/* the pointers are always 4 bytes aligned */
				bptr = (void *)(((unsigned long)bptr + 3) &
						~3);
				/* Map the modifier count to a byte size. */
				switch (ls) {
				case 0:
					ls = 4;
					break;
				case 1:
					ls = pevent->long_size;
					break;
				case 2:
					ls = 8;
					/* fall through to default (no-op) */
				default:
					break;
				}
				val = pevent_read_number(pevent, bptr, ls);
				bptr += ls;
				arg = alloc_arg();
				arg->next = NULL;
				arg->type = PRINT_ATOM;
				arg->atom.atom = malloc_or_die(32);
				sprintf(arg->atom.atom, "%lld", val);
				*next = arg;
				next = &arg->next;
				break;
			case 's':
				/* Strings are stored inline, NUL terminated. */
				arg = alloc_arg();
				arg->next = NULL;
				arg->type = PRINT_BSTRING;
				arg->string.string = strdup(bptr);
				bptr += strlen(bptr) + 1;
				*next = arg;
				next = &arg->next;
				/* falls through to default (no-op) */
			default:
				break;
			}
		}
	}

	return args;
}
3485
3486static void free_args(struct print_arg *args)
3487{
3488 struct print_arg *next;
3489
3490 while (args) {
3491 next = args->next;
3492
3493 free_arg(args);
3494 args = next;
3495 }
3496}
3497
/*
 * get_bprint_format - look up the format string for a binary printk record
 * @data:  raw event payload
 * @size:  unused
 * @event: event format describing the payload layout
 *
 * Reads the format pointer from the record's "fmt" field and resolves
 * it through the printk address map.  The result is prefixed with
 * "%pf : " so the IP argument (always first, see make_bprint_args())
 * is printed, and any surrounding quotes / trailing "\n" are stripped.
 *
 * Returns a newly allocated format string; caller frees it.
 */
static char *
get_bprint_format(void *data, int size __unused, struct event_format *event)
{
	struct pevent *pevent = event->pevent;
	unsigned long long addr;
	struct format_field *field;
	struct printk_map *printk;
	char *format;
	char *p;

	/* The fmt field descriptor is cached on the pevent handle. */
	field = pevent->bprint_fmt_field;

	if (!field) {
		field = pevent_find_field(event, "fmt");
		if (!field)
			die("can't find format field for binary printk");
		pevent->bprint_fmt_field = field;
	}

	addr = pevent_read_number(pevent, data + field->offset, field->size);

	printk = find_printk(pevent, addr);
	if (!printk) {
		/* 45 bytes covers the message plus a 16-digit address. */
		format = malloc_or_die(45);
		sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n",
			addr);
		return format;
	}

	p = printk->printk;
	/* Remove any quotes. */
	if (*p == '"')
		p++;
	format = malloc_or_die(strlen(p) + 10);
	sprintf(format, "%s : %s", "%pf", p);
	/* remove ending quotes and new line since we will add one too */
	p = format + strlen(format) - 1;
	if (*p == '"')
		*p = 0;

	/* NOTE(review): assumes the format is long enough that p-2 stays
	 * inside the buffer; "%pf : " prefix guarantees >= 6 chars. */
	p -= 2;
	if (strcmp(p, "\\n") == 0)
		*p = 0;

	return format;
}
3544
3545static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
3546 struct event_format *event, struct print_arg *arg)
3547{
3548 unsigned char *buf;
3549 char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x";
3550
3551 if (arg->type == PRINT_FUNC) {
3552 process_defined_func(s, data, size, event, arg);
3553 return;
3554 }
3555
3556 if (arg->type != PRINT_FIELD) {
3557 trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d",
3558 arg->type);
3559 return;
3560 }
3561
3562 if (mac == 'm')
3563 fmt = "%.2x%.2x%.2x%.2x%.2x%.2x";
3564 if (!arg->field.field) {
3565 arg->field.field =
3566 pevent_find_any_field(event, arg->field.name);
3567 if (!arg->field.field)
3568 die("field %s not found", arg->field.name);
3569 }
3570 if (arg->field.field->size != 6) {
3571 trace_seq_printf(s, "INVALIDMAC");
3572 return;
3573 }
3574 buf = data + arg->field.field->offset;
3575 trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
3576}
3577
/*
 * print_event_fields - print every field of @event as " name=value" pairs
 * @s:     output sequence
 * @data:  raw event payload
 * @size:  unused here; lengths come from the field descriptors
 * @event: event format describing the payload layout
 *
 * Used for raw output mode and as the fallback when the print format
 * failed to parse.
 */
static void print_event_fields(struct trace_seq *s, void *data, int size,
			       struct event_format *event)
{
	struct format_field *field;
	unsigned long long val;
	unsigned int offset, len, i;

	field = event->format.fields;
	while (field) {
		trace_seq_printf(s, " %s=", field->name);
		if (field->flags & FIELD_IS_ARRAY) {
			offset = field->offset;
			len = field->size;
			if (field->flags & FIELD_IS_DYNAMIC) {
				/*
				 * A dynamic array field holds a 32 bit
				 * descriptor: length in the top 16 bits,
				 * payload offset in the bottom 16 bits.
				 */
				val = pevent_read_number(event->pevent, data + offset, len);
				offset = val;
				len = offset >> 16;
				offset &= 0xffff;
			}
			if (field->flags & FIELD_IS_STRING) {
				trace_seq_printf(s, "%s", (char *)data + offset);
			} else {
				/* Non-string arrays are dumped as hex bytes. */
				trace_seq_puts(s, "ARRAY[");
				for (i = 0; i < len; i++) {
					if (i)
						trace_seq_puts(s, ", ");
					trace_seq_printf(s, "%02x",
							 *((unsigned char *)data + offset + i));
				}
				trace_seq_putc(s, ']');
			}
		} else {
			val = pevent_read_number(event->pevent, data + field->offset,
						 field->size);
			if (field->flags & FIELD_IS_POINTER) {
				trace_seq_printf(s, "0x%llx", val);
			} else if (field->flags & FIELD_IS_SIGNED) {
				switch (field->size) {
				case 4:
					/*
					 * If field is long then print it in hex.
					 * A long usually stores pointers.
					 */
					if (field->flags & FIELD_IS_LONG)
						trace_seq_printf(s, "0x%x", (int)val);
					else
						trace_seq_printf(s, "%d", (int)val);
					break;
				case 2:
					trace_seq_printf(s, "%2d", (short)val);
					break;
				case 1:
					trace_seq_printf(s, "%1d", (char)val);
					break;
				default:
					trace_seq_printf(s, "%lld", val);
				}
			} else {
				if (field->flags & FIELD_IS_LONG)
					trace_seq_printf(s, "0x%llx", val);
				else
					trace_seq_printf(s, "%llu", val);
			}
		}
		field = field->next;
	}
}
3645
/*
 * pretty_print - render an event record through its print format
 * @s:     output sequence
 * @data:  raw event payload
 * @size:  size of @data in bytes
 * @event: event format holding the parsed print format and args
 *
 * Walks the print format string, interpreting escapes and printf
 * conversion specifiers, and consumes one parsed print_arg per
 * specifier.  bprint events first have their format and args rebuilt
 * from the record itself.  Events flagged as failed-to-parse fall back
 * to a raw field dump.
 */
static void pretty_print(struct trace_seq *s, void *data, int size, struct event_format *event)
{
	struct pevent *pevent = event->pevent;
	struct print_fmt *print_fmt = &event->print_fmt;
	struct print_arg *arg = print_fmt->args;
	struct print_arg *args = NULL;
	const char *ptr = print_fmt->format;
	unsigned long long val;
	struct func_map *func;
	const char *saveptr;
	char *bprint_fmt = NULL;
	char format[32];
	int show_func;
	int len_as_arg;
	int len_arg;
	int len;
	int ls;

	if (event->flags & EVENT_FL_FAILED) {
		trace_seq_printf(s, "[FAILED TO PARSE]");
		print_event_fields(s, data, size, event);
		return;
	}

	/* bprint records carry their own format; rebuild args from it. */
	if (event->flags & EVENT_FL_ISBPRINT) {
		bprint_fmt = get_bprint_format(data, size, event);
		args = make_bprint_args(bprint_fmt, data, size, event);
		arg = args;
		ptr = bprint_fmt;
	}

	for (; *ptr; ptr++) {
		/* ls tracks length modifiers: -2='hh', -1='h', 1='l', 2='ll' */
		ls = 0;
		if (*ptr == '\\') {
			ptr++;
			switch (*ptr) {
			case 'n':
				trace_seq_putc(s, '\n');
				break;
			case 't':
				trace_seq_putc(s, '\t');
				break;
			case 'r':
				trace_seq_putc(s, '\r');
				break;
			case '\\':
				trace_seq_putc(s, '\\');
				break;
			default:
				trace_seq_putc(s, *ptr);
				break;
			}

		} else if (*ptr == '%') {
			/* saveptr marks the start of the conversion spec so
			 * it can be copied verbatim into format[] below. */
			saveptr = ptr;
			show_func = 0;
			len_as_arg = 0;
 cont_process:
			ptr++;
			switch (*ptr) {
			case '%':
				trace_seq_putc(s, '%');
				break;
			case '#':
				/* FIXME: need to handle properly */
				goto cont_process;
			case 'h':
				ls--;
				goto cont_process;
			case 'l':
				ls++;
				goto cont_process;
			case 'L':
				ls = 2;
				goto cont_process;
			case '*':
				/* The argument is the length. */
				if (!arg)
					die("no argument match");
				len_arg = eval_num_arg(data, size, event, arg);
				len_as_arg = 1;
				arg = arg->next;
				goto cont_process;
			case '.':
			case 'z':
			case 'Z':
			case '0' ... '9':
				goto cont_process;
			case 'p':
				/* %p consumes a pointer-sized value. */
				if (pevent->long_size == 4)
					ls = 1;
				else
					ls = 2;

				if (*(ptr+1) == 'F' ||
				    *(ptr+1) == 'f') {
					/* %pF/%pf: resolve to function name. */
					ptr++;
					show_func = *ptr;
				} else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
					/* %pM/%pm: MAC address. */
					print_mac_arg(s, *(ptr+1), data, size, event, arg);
					ptr++;
					break;
				}

				/* fall through */
			case 'd':
			case 'i':
			case 'x':
			case 'X':
			case 'u':
				if (!arg)
					die("no argument match");

				len = ((unsigned long)ptr + 1) -
					(unsigned long)saveptr;

				/* should never happen */
				if (len > 31)
					die("bad format!");

				memcpy(format, saveptr, len);
				format[len] = 0;

				val = eval_num_arg(data, size, event, arg);
				arg = arg->next;

				if (show_func) {
					func = find_func(pevent, val);
					if (func) {
						trace_seq_puts(s, func->func);
						/* 'F' also prints the offset
						 * from the symbol start. */
						if (show_func == 'F')
							trace_seq_printf(s,
							       "+0x%llx",
							       val - func->addr);
						break;
					}
				}
				if (pevent->long_size == 8 && ls) {
					char *p;

					ls = 2;
					/* make %l into %ll */
					p = strchr(format, 'l');
					if (p)
						memmove(p, p+1, strlen(p)+1);
					else if (strcmp(format, "%p") == 0)
						strcpy(format, "0x%llx");
				}
				/* Cast val to the width implied by ls so the
				 * varargs promotion matches the format. */
				switch (ls) {
				case -2:
					if (len_as_arg)
						trace_seq_printf(s, format, len_arg, (char)val);
					else
						trace_seq_printf(s, format, (char)val);
					break;
				case -1:
					if (len_as_arg)
						trace_seq_printf(s, format, len_arg, (short)val);
					else
						trace_seq_printf(s, format, (short)val);
					break;
				case 0:
					if (len_as_arg)
						trace_seq_printf(s, format, len_arg, (int)val);
					else
						trace_seq_printf(s, format, (int)val);
					break;
				case 1:
					if (len_as_arg)
						trace_seq_printf(s, format, len_arg, (long)val);
					else
						trace_seq_printf(s, format, (long)val);
					break;
				case 2:
					if (len_as_arg)
						trace_seq_printf(s, format, len_arg,
								 (long long)val);
					else
						trace_seq_printf(s, format, (long long)val);
					break;
				default:
					die("bad count (%d)", ls);
				}
				break;
			case 's':
				if (!arg)
					die("no matching argument");

				len = ((unsigned long)ptr + 1) -
					(unsigned long)saveptr;

				/* should never happen */
				if (len > 31)
					die("bad format!");

				memcpy(format, saveptr, len);
				format[len] = 0;
				if (!len_as_arg)
					len_arg = -1;
				print_str_arg(s, data, size, event,
					      format, len_arg, arg);
				arg = arg->next;
				break;
			default:
				/* Unknown conversion: make it visible. */
				trace_seq_printf(s, ">%c<", *ptr);

			}
		} else
			trace_seq_putc(s, *ptr);
	}

	/* bprint path allocated its own args and format; release them. */
	if (args) {
		free_args(args);
		free(bprint_fmt);
	}
}
3862
3863/**
3864 * pevent_data_lat_fmt - parse the data for the latency format
3865 * @pevent: a handle to the pevent
3866 * @s: the trace_seq to write to
3867 * @data: the raw data to read from
3868 * @size: currently unused.
3869 *
3870 * This parses out the Latency format (interrupts disabled,
3871 * need rescheduling, in hard/soft interrupt, preempt count
3872 * and lock depth) and places it into the trace_seq.
3873 */
3874void pevent_data_lat_fmt(struct pevent *pevent,
3875 struct trace_seq *s, struct pevent_record *record)
3876{
3877 static int check_lock_depth = 1;
3878 static int lock_depth_exists;
3879 unsigned int lat_flags;
3880 unsigned int pc;
3881 int lock_depth;
3882 int hardirq;
3883 int softirq;
3884 void *data = record->data;
3885
3886 lat_flags = parse_common_flags(pevent, data);
3887 pc = parse_common_pc(pevent, data);
3888 /* lock_depth may not always exist */
3889 if (check_lock_depth) {
3890 struct format_field *field;
3891 struct event_format *event;
3892
3893 check_lock_depth = 0;
3894 event = pevent->events[0];
3895 field = pevent_find_common_field(event, "common_lock_depth");
3896 if (field)
3897 lock_depth_exists = 1;
3898 }
3899 if (lock_depth_exists)
3900 lock_depth = parse_common_lock_depth(pevent, data);
3901
3902 hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
3903 softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
3904
3905 trace_seq_printf(s, "%c%c%c",
3906 (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
3907 (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
3908 'X' : '.',
3909 (lat_flags & TRACE_FLAG_NEED_RESCHED) ?
3910 'N' : '.',
3911 (hardirq && softirq) ? 'H' :
3912 hardirq ? 'h' : softirq ? 's' : '.');
3913
3914 if (pc)
3915 trace_seq_printf(s, "%x", pc);
3916 else
3917 trace_seq_putc(s, '.');
3918
3919 if (lock_depth_exists) {
3920 if (lock_depth < 0)
3921 trace_seq_putc(s, '.');
3922 else
3923 trace_seq_printf(s, "%d", lock_depth);
3924 }
3925
3926 trace_seq_terminate(s);
3927}
3928
3929/**
3930 * pevent_data_type - parse out the given event type
3931 * @pevent: a handle to the pevent
3932 * @rec: the record to read from
3933 *
3934 * This returns the event id from the @rec.
3935 */
3936int pevent_data_type(struct pevent *pevent, struct pevent_record *rec)
3937{
3938 return trace_parse_common_type(pevent, rec->data);
3939}
3940
3941/**
3942 * pevent_data_event_from_type - find the event by a given type
3943 * @pevent: a handle to the pevent
3944 * @type: the type of the event.
3945 *
3946 * This returns the event form a given @type;
3947 */
3948struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type)
3949{
3950 return pevent_find_event(pevent, type);
3951}
3952
3953/**
3954 * pevent_data_pid - parse the PID from raw data
3955 * @pevent: a handle to the pevent
3956 * @rec: the record to parse
3957 *
3958 * This returns the PID from a raw data.
3959 */
3960int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec)
3961{
3962 return parse_common_pid(pevent, rec->data);
3963}
3964
3965/**
3966 * pevent_data_comm_from_pid - return the command line from PID
3967 * @pevent: a handle to the pevent
3968 * @pid: the PID of the task to search for
3969 *
3970 * This returns a pointer to the command line that has the given
3971 * @pid.
3972 */
3973const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid)
3974{
3975 const char *comm;
3976
3977 comm = find_cmdline(pevent, pid);
3978 return comm;
3979}
3980
3981/**
3982 * pevent_data_comm_from_pid - parse the data into the print format
3983 * @s: the trace_seq to write to
3984 * @event: the handle to the event
3985 * @cpu: the cpu the event was recorded on
3986 * @data: the raw data
3987 * @size: the size of the raw data
3988 * @nsecs: the timestamp of the event
3989 *
3990 * This parses the raw @data using the given @event information and
3991 * writes the print format into the trace_seq.
3992 */
3993void pevent_event_info(struct trace_seq *s, struct event_format *event,
3994 struct pevent_record *record)
3995{
3996 int print_pretty = 1;
3997
3998 if (event->pevent->print_raw)
3999 print_event_fields(s, record->data, record->size, event);
4000 else {
4001
4002 if (event->handler)
4003 print_pretty = event->handler(s, record, event,
4004 event->context);
4005
4006 if (print_pretty)
4007 pretty_print(s, record->data, record->size, event);
4008 }
4009
4010 trace_seq_terminate(s);
4011}
4012
/*
 * pevent_print_event - print a full formatted line for one record
 * @pevent: a handle to the pevent
 * @s:      the trace_seq to write to
 * @record: the record to print
 *
 * Emits comm/pid/cpu, latency columns if enabled, the timestamp
 * (nanosecond or rounded microsecond precision), the event name
 * padded to a common column, then the event payload.
 */
void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
			struct pevent_record *record)
{
	static char *spaces = "                    "; /* 20 spaces */
	struct event_format *event;
	unsigned long secs;
	unsigned long usecs;
	unsigned long nsecs;
	const char *comm;
	void *data = record->data;
	int type;
	int pid;
	int len;
	int p;

	/* Split the timestamp into whole seconds and the remainder. */
	secs = record->ts / NSECS_PER_SEC;
	nsecs = record->ts - secs * NSECS_PER_SEC;

	if (record->size < 0) {
		do_warning("ug! negative record size %d", record->size);
		return;
	}

	type = trace_parse_common_type(pevent, data);

	event = pevent_find_event(pevent, type);
	if (!event) {
		do_warning("ug! no event found for type %d", type);
		return;
	}

	pid = parse_common_pid(pevent, data);
	comm = find_cmdline(pevent, pid);

	if (pevent->latency_format) {
		trace_seq_printf(s, "%8.8s-%-5d %3d",
		       comm, pid, record->cpu);
		pevent_data_lat_fmt(pevent, s, record);
	} else
		trace_seq_printf(s, "%16s-%-5d [%03d]", comm, pid, record->cpu);

	/* Choose sub-second precision: 9 digits (ns) or 6 (rounded us). */
	if (pevent->flags & PEVENT_NSEC_OUTPUT) {
		usecs = nsecs;
		p = 9;
	} else {
		usecs = (nsecs + 500) / NSECS_PER_USEC;
		p = 6;
	}

	trace_seq_printf(s, " %5lu.%0*lu: %s: ", secs, p, usecs, event->name);

	/* Space out the event names evenly. */
	len = strlen(event->name);
	if (len < 20)
		trace_seq_printf(s, "%.*s", 20 - len, spaces);

	pevent_event_info(s, event, record);
}
4071
4072static int events_id_cmp(const void *a, const void *b)
4073{
4074 struct event_format * const * ea = a;
4075 struct event_format * const * eb = b;
4076
4077 if ((*ea)->id < (*eb)->id)
4078 return -1;
4079
4080 if ((*ea)->id > (*eb)->id)
4081 return 1;
4082
4083 return 0;
4084}
4085
4086static int events_name_cmp(const void *a, const void *b)
4087{
4088 struct event_format * const * ea = a;
4089 struct event_format * const * eb = b;
4090 int res;
4091
4092 res = strcmp((*ea)->name, (*eb)->name);
4093 if (res)
4094 return res;
4095
4096 res = strcmp((*ea)->system, (*eb)->system);
4097 if (res)
4098 return res;
4099
4100 return events_id_cmp(a, b);
4101}
4102
4103static int events_system_cmp(const void *a, const void *b)
4104{
4105 struct event_format * const * ea = a;
4106 struct event_format * const * eb = b;
4107 int res;
4108
4109 res = strcmp((*ea)->system, (*eb)->system);
4110 if (res)
4111 return res;
4112
4113 res = strcmp((*ea)->name, (*eb)->name);
4114 if (res)
4115 return res;
4116
4117 return events_id_cmp(a, b);
4118}
4119
/*
 * pevent_list_events - return a NULL-terminated, sorted array of events
 * @pevent:    a handle to the pevent
 * @sort_type: the ordering to apply (by id, name, or system)
 *
 * The sorted array is cached on @pevent and reused when the requested
 * order matches the previous call.  Returns NULL on allocation failure.
 * The returned array is owned by @pevent; do not free it.
 */
struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type sort_type)
{
	struct event_format **events;
	int (*sort)(const void *a, const void *b);

	events = pevent->sort_events;

	/* Cache hit: same ordering as last time. */
	if (events && pevent->last_type == sort_type)
		return events;

	if (!events) {
		/* One extra slot for the NULL terminator. */
		events = malloc(sizeof(*events) * (pevent->nr_events + 1));
		if (!events)
			return NULL;

		memcpy(events, pevent->events, sizeof(*events) * pevent->nr_events);
		events[pevent->nr_events] = NULL;

		pevent->sort_events = events;

		/* the internal events are sorted by id */
		if (sort_type == EVENT_SORT_ID) {
			pevent->last_type = sort_type;
			return events;
		}
	}

	switch (sort_type) {
	case EVENT_SORT_ID:
		sort = events_id_cmp;
		break;
	case EVENT_SORT_NAME:
		sort = events_name_cmp;
		break;
	case EVENT_SORT_SYSTEM:
		sort = events_system_cmp;
		break;
	default:
		/* Unknown order: return the array as currently sorted. */
		return events;
	}

	qsort(events, pevent->nr_events, sizeof(*events), sort);
	pevent->last_type = sort_type;

	return events;
}
4166
4167static struct format_field **
4168get_event_fields(const char *type, const char *name,
4169 int count, struct format_field *list)
4170{
4171 struct format_field **fields;
4172 struct format_field *field;
4173 int i = 0;
4174
4175 fields = malloc_or_die(sizeof(*fields) * (count + 1));
4176 for (field = list; field; field = field->next) {
4177 fields[i++] = field;
4178 if (i == count + 1) {
4179 do_warning("event %s has more %s fields than specified",
4180 name, type);
4181 i--;
4182 break;
4183 }
4184 }
4185
4186 if (i != count)
4187 do_warning("event %s has less %s fields than specified",
4188 name, type);
4189
4190 fields[i] = NULL;
4191
4192 return fields;
4193}
4194
4195/**
4196 * pevent_event_common_fields - return a list of common fields for an event
4197 * @event: the event to return the common fields of.
4198 *
4199 * Returns an allocated array of fields. The last item in the array is NULL.
4200 * The array must be freed with free().
4201 */
4202struct format_field **pevent_event_common_fields(struct event_format *event)
4203{
4204 return get_event_fields("common", event->name,
4205 event->format.nr_common,
4206 event->format.common_fields);
4207}
4208
4209/**
4210 * pevent_event_fields - return a list of event specific fields for an event
4211 * @event: the event to return the fields of.
4212 *
4213 * Returns an allocated array of fields. The last item in the array is NULL.
4214 * The array must be freed with free().
4215 */
4216struct format_field **pevent_event_fields(struct event_format *event)
4217{
4218 return get_event_fields("event", event->name,
4219 event->format.nr_fields,
4220 event->format.fields);
4221}
4222
4223static void print_fields(struct trace_seq *s, struct print_flag_sym *field)
4224{
4225 trace_seq_printf(s, "{ %s, %s }", field->value, field->str);
4226 if (field->next) {
4227 trace_seq_puts(s, ", ");
4228 print_fields(s, field->next);
4229 }
4230}
4231
/*
 * print_args - dump a parsed print_arg tree to stdout (for debugging).
 *
 * Recursively renders each argument back into a source-like form:
 * field references as "REC->name", helpers as their macro syntax,
 * operators parenthesized (except the ':' half of a ternary).
 * Sibling args (->next) are printed on separate lines.
 */
static void print_args(struct print_arg *args)
{
	int print_paren = 1;
	struct trace_seq s;

	switch (args->type) {
	case PRINT_NULL:
		printf("null");
		break;
	case PRINT_ATOM:
		printf("%s", args->atom.atom);
		break;
	case PRINT_FIELD:
		printf("REC->%s", args->field.name);
		break;
	case PRINT_FLAGS:
		printf("__print_flags(");
		print_args(args->flags.field);
		printf(", %s, ", args->flags.delim);
		trace_seq_init(&s);
		print_fields(&s, args->flags.flags);
		trace_seq_do_printf(&s);
		trace_seq_destroy(&s);
		printf(")");
		break;
	case PRINT_SYMBOL:
		printf("__print_symbolic(");
		print_args(args->symbol.field);
		printf(", ");
		trace_seq_init(&s);
		print_fields(&s, args->symbol.symbols);
		trace_seq_do_printf(&s);
		trace_seq_destroy(&s);
		printf(")");
		break;
	case PRINT_STRING:
	case PRINT_BSTRING:
		printf("__get_str(%s)", args->string.string);
		break;
	case PRINT_TYPE:
		printf("(%s)", args->typecast.type);
		print_args(args->typecast.item);
		break;
	case PRINT_OP:
		/* The ':' of a ternary is printed by its '?' parent, so
		 * skip the parentheses for it. */
		if (strcmp(args->op.op, ":") == 0)
			print_paren = 0;
		if (print_paren)
			printf("(");
		print_args(args->op.left);
		printf(" %s ", args->op.op);
		print_args(args->op.right);
		if (print_paren)
			printf(")");
		break;
	default:
		/* we should warn... */
		return;
	}
	if (args->next) {
		printf("\n");
		print_args(args->next);
	}
}
4296
/*
 * parse_header_field - parse one "field: type name; offset: N; size: N;"
 * line from the header page description.
 * @field:     the field name expected on this line
 * @offset:    out: the parsed offset (0 if the field is absent)
 * @size:      out: the parsed size (0 if the field is absent)
 * @mandatory: nonzero if the field must be present; when zero, a
 *             non-matching line is pushed back (input restored) and
 *             *offset/*size are zeroed.
 *
 * Consumes tokens from the global input buffer.  Newer kernels append
 * an optional "signed: N;" clause, which is accepted and discarded.
 */
static void parse_header_field(const char *field,
			       int *offset, int *size, int mandatory)
{
	unsigned long long save_input_buf_ptr;
	unsigned long long save_input_buf_siz;
	char *token;
	int type;

	/* Save the input position so a non-matching optional field can
	 * be rewound (see the discard label). */
	save_input_buf_ptr = input_buf_ptr;
	save_input_buf_siz = input_buf_siz;

	if (read_expected(EVENT_ITEM, "field") < 0)
		return;
	if (read_expected(EVENT_OP, ":") < 0)
		return;

	/* type */
	if (read_expect_type(EVENT_ITEM, &token) < 0)
		goto fail;
	free_token(token);

	/*
	 * If this is not a mandatory field, then test it first.
	 */
	if (mandatory) {
		if (read_expected(EVENT_ITEM, field) < 0)
			return;
	} else {
		if (read_expect_type(EVENT_ITEM, &token) < 0)
			goto fail;
		if (strcmp(token, field) != 0)
			goto discard;
		free_token(token);
	}

	if (read_expected(EVENT_OP, ";") < 0)
		return;
	if (read_expected(EVENT_ITEM, "offset") < 0)
		return;
	if (read_expected(EVENT_OP, ":") < 0)
		return;
	if (read_expect_type(EVENT_ITEM, &token) < 0)
		goto fail;
	*offset = atoi(token);
	free_token(token);
	if (read_expected(EVENT_OP, ";") < 0)
		return;
	if (read_expected(EVENT_ITEM, "size") < 0)
		return;
	if (read_expected(EVENT_OP, ":") < 0)
		return;
	if (read_expect_type(EVENT_ITEM, &token) < 0)
		goto fail;
	*size = atoi(token);
	free_token(token);
	if (read_expected(EVENT_OP, ";") < 0)
		return;
	type = read_token(&token);
	if (type != EVENT_NEWLINE) {
		/* newer versions of the kernel have a "signed" type */
		if (type != EVENT_ITEM)
			goto fail;

		if (strcmp(token, "signed") != 0)
			goto fail;

		free_token(token);

		if (read_expected(EVENT_OP, ":") < 0)
			return;

		if (read_expect_type(EVENT_ITEM, &token))
			goto fail;

		free_token(token);
		if (read_expected(EVENT_OP, ";") < 0)
			return;

		if (read_expect_type(EVENT_NEWLINE, &token))
			goto fail;
	}
 fail:
	/* NOTE(review): assumes read_expect_type()/read_token() set
	 * token even on failure, otherwise this frees garbage — confirm. */
	free_token(token);
	return;

 discard:
	/* Optional field did not match: rewind input and report absent. */
	input_buf_ptr = save_input_buf_ptr;
	input_buf_siz = save_input_buf_siz;
	*offset = 0;
	*size = 0;
	free_token(token);
}
4389
4390/**
4391 * pevent_parse_header_page - parse the data stored in the header page
4392 * @pevent: the handle to the pevent
4393 * @buf: the buffer storing the header page format string
4394 * @size: the size of @buf
4395 * @long_size: the long size to use if there is no header
4396 *
4397 * This parses the header page format for information on the
4398 * ring buffer used. The @buf should be copied from
4399 *
4400 * /sys/kernel/debug/tracing/events/header_page
4401 */
int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size,
			     int long_size)
{
	int ignore;	/* the "overwrite" field's size is not recorded */

	if (!size) {
		/*
		 * Old kernels did not have header page info.
		 * Sorry but we just use what we find here in user space.
		 */
		pevent->header_page_ts_size = sizeof(long long);
		pevent->header_page_size_size = long_size;
		pevent->header_page_data_offset = sizeof(long long) + long_size;
		pevent->old_format = 1;
		return -1;
	}
	init_input_buf(buf, size);

	/* "timestamp", "commit" and "data" are mandatory; "overwrite" is optional */
	parse_header_field("timestamp", &pevent->header_page_ts_offset,
			   &pevent->header_page_ts_size, 1);
	parse_header_field("commit", &pevent->header_page_size_offset,
			   &pevent->header_page_size_size, 1);
	parse_header_field("overwrite", &pevent->header_page_overwrite,
			   &ignore, 0);
	parse_header_field("data", &pevent->header_page_data_offset,
			   &pevent->header_page_data_size, 1);

	return 0;
}
4431
4432static int event_matches(struct event_format *event,
4433 int id, const char *sys_name,
4434 const char *event_name)
4435{
4436 if (id >= 0 && id != event->id)
4437 return 0;
4438
4439 if (event_name && (strcmp(event_name, event->name) != 0))
4440 return 0;
4441
4442 if (sys_name && (strcmp(sys_name, event->system) != 0))
4443 return 0;
4444
4445 return 1;
4446}
4447
/*
 * Free an event_handler entry.  The casts drop the qualifier on the
 * stored names, which were allocated with strdup() at registration time
 * (see pevent_register_event_handler).
 */
static void free_handler(struct event_handler *handle)
{
	free((void *)handle->sys_name);
	free((void *)handle->event_name);
	free(handle);
}
4454
/*
 * Look for a handler that was registered (via pevent_register_event_handler)
 * before @event was parsed.  On a match, install the handler's function and
 * context on the event, unlink the handler from the pending list and free it.
 * Returns 1 if an override was installed, 0 otherwise.
 */
static int find_event_handle(struct pevent *pevent, struct event_format *event)
{
	struct event_handler *handle, **next;

	/* Walk with a pointer-to-pointer so the match can be unlinked. */
	for (next = &pevent->handlers; *next;
	     next = &(*next)->next) {
		handle = *next;
		if (event_matches(event, handle->id,
				  handle->sys_name,
				  handle->event_name))
			break;
	}

	if (!(*next))
		return 0;

	pr_stat("overriding event (%d) %s:%s with new print handler",
		event->id, event->system, event->name);

	event->handler = handle->func;
	event->context = handle->context;

	/* The handler entry has been consumed; remove and free it. */
	*next = handle->next;
	free_handler(handle);

	return 1;
}
4482
4483/**
4484 * pevent_parse_event - parse the event format
4485 * @pevent: the handle to the pevent
4486 * @buf: the buffer storing the event format string
4487 * @size: the size of @buf
4488 * @sys: the system the event belongs to
4489 *
4490 * This parses the event format and creates an event structure
4491 * to quickly parse raw data for a given event.
4492 *
4493 * These files currently come from:
4494 *
4495 * /sys/kernel/debug/tracing/events/.../.../format
4496 */
4497int pevent_parse_event(struct pevent *pevent,
4498 const char *buf, unsigned long size,
4499 const char *sys)
4500{
4501 struct event_format *event;
4502 int ret;
4503
4504 init_input_buf(buf, size);
4505
4506 event = alloc_event();
4507 if (!event)
4508 return -ENOMEM;
4509
4510 event->name = event_read_name();
4511 if (!event->name) {
4512 /* Bad event? */
4513 free(event);
4514 return -1;
4515 }
4516
4517 if (strcmp(sys, "ftrace") == 0) {
4518
4519 event->flags |= EVENT_FL_ISFTRACE;
4520
4521 if (strcmp(event->name, "bprint") == 0)
4522 event->flags |= EVENT_FL_ISBPRINT;
4523 }
4524
4525 event->id = event_read_id();
4526 if (event->id < 0)
4527 die("failed to read event id");
4528
4529 event->system = strdup(sys);
4530
4531 /* Add pevent to event so that it can be referenced */
4532 event->pevent = pevent;
4533
4534 ret = event_read_format(event);
4535 if (ret < 0) {
4536 do_warning("failed to read event format for %s", event->name);
4537 goto event_failed;
4538 }
4539
4540 /*
4541 * If the event has an override, don't print warnings if the event
4542 * print format fails to parse.
4543 */
4544 if (find_event_handle(pevent, event))
4545 show_warning = 0;
4546
4547 ret = event_read_print(event);
4548 if (ret < 0) {
4549 do_warning("failed to read event print fmt for %s",
4550 event->name);
4551 show_warning = 1;
4552 goto event_failed;
4553 }
4554 show_warning = 1;
4555
4556 add_event(pevent, event);
4557
4558 if (!ret && (event->flags & EVENT_FL_ISFTRACE)) {
4559 struct format_field *field;
4560 struct print_arg *arg, **list;
4561
4562 /* old ftrace had no args */
4563
4564 list = &event->print_fmt.args;
4565 for (field = event->format.fields; field; field = field->next) {
4566 arg = alloc_arg();
4567 *list = arg;
4568 list = &arg->next;
4569 arg->type = PRINT_FIELD;
4570 arg->field.name = strdup(field->name);
4571 arg->field.field = field;
4572 }
4573 return 0;
4574 }
4575
4576#define PRINT_ARGS 0
4577 if (PRINT_ARGS && event->print_fmt.args)
4578 print_args(event->print_fmt.args);
4579
4580 return 0;
4581
4582 event_failed:
4583 event->flags |= EVENT_FL_FAILED;
4584 /* still add it even if it failed */
4585 add_event(pevent, event);
4586 return -1;
4587}
4588
4589int get_field_val(struct trace_seq *s, struct format_field *field,
4590 const char *name, struct pevent_record *record,
4591 unsigned long long *val, int err)
4592{
4593 if (!field) {
4594 if (err)
4595 trace_seq_printf(s, "<CANT FIND FIELD %s>", name);
4596 return -1;
4597 }
4598
4599 if (pevent_read_number_field(field, record->data, val)) {
4600 if (err)
4601 trace_seq_printf(s, " %s=INVALID", name);
4602 return -1;
4603 }
4604
4605 return 0;
4606}
4607
4608/**
4609 * pevent_get_field_raw - return the raw pointer into the data field
4610 * @s: The seq to print to on error
4611 * @event: the event that the field is for
4612 * @name: The name of the field
4613 * @record: The record with the field name.
4614 * @len: place to store the field length.
4615 * @err: print default error if failed.
4616 *
4617 * Returns a pointer into record->data of the field and places
4618 * the length of the field in @len.
4619 *
4620 * On failure, it returns NULL.
4621 */
void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
			   const char *name, struct pevent_record *record,
			   int *len, int err)
{
	struct format_field *field;
	void *data = record->data;
	unsigned offset;
	int dummy;

	if (!event)
		return NULL;

	field = pevent_find_field(event, name);

	if (!field) {
		if (err)
			trace_seq_printf(s, "<CANT FIND FIELD %s>", name);
		return NULL;
	}

	/* Allow @len to be NULL */
	if (!len)
		len = &dummy;

	offset = field->offset;
	if (field->flags & FIELD_IS_DYNAMIC) {
		/*
		 * A dynamic field stores a descriptor at its offset:
		 * payload length in the high 16 bits, payload offset
		 * (within record->data) in the low 16 bits.
		 */
		offset = pevent_read_number(event->pevent,
					    data + offset, field->size);
		*len = offset >> 16;
		offset &= 0xffff;
	} else
		*len = field->size;

	return data + offset;
}
4657
4658/**
4659 * pevent_get_field_val - find a field and return its value
4660 * @s: The seq to print to on error
4661 * @event: the event that the field is for
4662 * @name: The name of the field
4663 * @record: The record with the field name.
4664 * @val: place to store the value of the field.
4665 * @err: print default error if failed.
4666 *
4667 * Returns 0 on success -1 on field not found.
4668 */
int pevent_get_field_val(struct trace_seq *s, struct event_format *event,
			 const char *name, struct pevent_record *record,
			 unsigned long long *val, int err)
{
	if (!event)
		return -1;

	/* Search only the event-specific fields. */
	return get_field_val(s, pevent_find_field(event, name),
			     name, record, val, err);
}
4682
4683/**
4684 * pevent_get_common_field_val - find a common field and return its value
4685 * @s: The seq to print to on error
4686 * @event: the event that the field is for
4687 * @name: The name of the field
4688 * @record: The record with the field name.
4689 * @val: place to store the value of the field.
4690 * @err: print default error if failed.
4691 *
4692 * Returns 0 on success -1 on field not found.
4693 */
int pevent_get_common_field_val(struct trace_seq *s, struct event_format *event,
				const char *name, struct pevent_record *record,
				unsigned long long *val, int err)
{
	if (!event)
		return -1;

	/* Search only the fields shared by all events. */
	return get_field_val(s, pevent_find_common_field(event, name),
			     name, record, val, err);
}
4707
4708/**
4709 * pevent_get_any_field_val - find a any field and return its value
4710 * @s: The seq to print to on error
4711 * @event: the event that the field is for
4712 * @name: The name of the field
4713 * @record: The record with the field name.
4714 * @val: place to store the value of the field.
4715 * @err: print default error if failed.
4716 *
4717 * Returns 0 on success -1 on field not found.
4718 */
int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event,
			     const char *name, struct pevent_record *record,
			     unsigned long long *val, int err)
{
	if (!event)
		return -1;

	/* Search both common and event-specific fields. */
	return get_field_val(s, pevent_find_any_field(event, name),
			     name, record, val, err);
}
4732
4733/**
4734 * pevent_print_num_field - print a field and a format
4735 * @s: The seq to print to
4736 * @fmt: The printf format to print the field with.
4737 * @event: the event that the field is for
4738 * @name: The name of the field
4739 * @record: The record with the field name.
4740 * @err: print default error if failed.
4741 *
 * Returns: 0 on success, -1 if the field is not found, or 1 if the buffer is full.
4743 */
4744int pevent_print_num_field(struct trace_seq *s, const char *fmt,
4745 struct event_format *event, const char *name,
4746 struct pevent_record *record, int err)
4747{
4748 struct format_field *field = pevent_find_field(event, name);
4749 unsigned long long val;
4750
4751 if (!field)
4752 goto failed;
4753
4754 if (pevent_read_number_field(field, record->data, &val))
4755 goto failed;
4756
4757 return trace_seq_printf(s, fmt, val);
4758
4759 failed:
4760 if (err)
4761 trace_seq_printf(s, "CAN'T FIND FIELD \"%s\"", name);
4762 return -1;
4763}
4764
4765static void free_func_handle(struct pevent_function_handler *func)
4766{
4767 struct pevent_func_params *params;
4768
4769 free(func->name);
4770
4771 while (func->params) {
4772 params = func->params;
4773 func->params = params->next;
4774 free(params);
4775 }
4776
4777 free(func);
4778}
4779
4780/**
4781 * pevent_register_print_function - register a helper function
4782 * @pevent: the handle to the pevent
4783 * @func: the function to process the helper function
4784 * @name: the name of the helper function
4785 * @parameters: A list of enum pevent_func_arg_type
4786 *
4787 * Some events may have helper functions in the print format arguments.
 * This allows a plugin to dynamically create a way to process one
4789 * of these functions.
4790 *
4791 * The @parameters is a variable list of pevent_func_arg_type enums that
4792 * must end with PEVENT_FUNC_ARG_VOID.
4793 */
int pevent_register_print_function(struct pevent *pevent,
				   pevent_func_handler func,
				   enum pevent_func_arg_type ret_type,
				   char *name, ...)
{
	struct pevent_function_handler *func_handle;
	struct pevent_func_params **next_param;
	struct pevent_func_params *param;
	enum pevent_func_arg_type type;
	va_list ap;

	func_handle = find_func_handler(pevent, name);
	if (func_handle) {
		/*
		 * This is most likely caused by the user's own
		 * plugins updating the function. This overrides the
		 * system defaults.
		 */
		pr_stat("override of function helper '%s'", name);
		remove_func_handler(pevent, name);
	}

	func_handle = malloc_or_die(sizeof(*func_handle));
	memset(func_handle, 0, sizeof(*func_handle));

	func_handle->ret_type = ret_type;
	func_handle->name = strdup(name);
	func_handle->func = func;
	if (!func_handle->name)
		die("Failed to allocate function name");

	/* Collect argument types until the PEVENT_FUNC_ARG_VOID sentinel. */
	next_param = &(func_handle->params);
	va_start(ap, name);
	for (;;) {
		type = va_arg(ap, enum pevent_func_arg_type);
		if (type == PEVENT_FUNC_ARG_VOID)
			break;

		if (type < 0 || type >= PEVENT_FUNC_ARG_MAX_TYPES) {
			warning("Invalid argument type %d", type);
			goto out_free;
		}

		param = malloc_or_die(sizeof(*param));
		param->type = type;
		param->next = NULL;

		/* Append at the tail so declaration order is preserved. */
		*next_param = param;
		next_param = &(param->next);

		func_handle->nr_args++;
	}
	va_end(ap);

	/* Publish at the head of the pevent's helper list. */
	func_handle->next = pevent->func_handlers;
	pevent->func_handlers = func_handle;

	return 0;
 out_free:
	va_end(ap);
	free_func_handle(func_handle);
	return -1;
}
4857
4858/**
4859 * pevent_register_event_handle - register a way to parse an event
4860 * @pevent: the handle to the pevent
4861 * @id: the id of the event to register
4862 * @sys_name: the system name the event belongs to
4863 * @event_name: the name of the event
4864 * @func: the function to call to parse the event information
4865 *
4866 * This function allows a developer to override the parsing of
4867 * a given event. If for some reason the default print format
4868 * is not sufficient, this function will register a function
4869 * for an event to be used to parse the data instead.
4870 *
4871 * If @id is >= 0, then it is used to find the event.
4872 * else @sys_name and @event_name are used.
4873 */
4874int pevent_register_event_handler(struct pevent *pevent,
4875 int id, char *sys_name, char *event_name,
4876 pevent_event_handler_func func,
4877 void *context)
4878{
4879 struct event_format *event;
4880 struct event_handler *handle;
4881
4882 if (id >= 0) {
4883 /* search by id */
4884 event = pevent_find_event(pevent, id);
4885 if (!event)
4886 goto not_found;
4887 if (event_name && (strcmp(event_name, event->name) != 0))
4888 goto not_found;
4889 if (sys_name && (strcmp(sys_name, event->system) != 0))
4890 goto not_found;
4891 } else {
4892 event = pevent_find_event_by_name(pevent, sys_name, event_name);
4893 if (!event)
4894 goto not_found;
4895 }
4896
4897 pr_stat("overriding event (%d) %s:%s with new print handler",
4898 event->id, event->system, event->name);
4899
4900 event->handler = func;
4901 event->context = context;
4902 return 0;
4903
4904 not_found:
4905 /* Save for later use. */
4906 handle = malloc_or_die(sizeof(*handle));
4907 memset(handle, 0, sizeof(*handle));
4908 handle->id = id;
4909 if (event_name)
4910 handle->event_name = strdup(event_name);
4911 if (sys_name)
4912 handle->sys_name = strdup(sys_name);
4913
4914 handle->func = func;
4915 handle->next = pevent->handlers;
4916 pevent->handlers = handle;
4917 handle->context = context;
4918
4919 return -1;
4920}
4921
4922/**
4923 * pevent_alloc - create a pevent handle
4924 */
4925struct pevent *pevent_alloc(void)
4926{
4927 struct pevent *pevent;
4928
4929 pevent = malloc(sizeof(*pevent));
4930 if (!pevent)
4931 return NULL;
4932 memset(pevent, 0, sizeof(*pevent));
4933 pevent->ref_count = 1;
4934
4935 return pevent;
4936}
4937
/* Take an additional reference on @pevent; drop it with pevent_unref(). */
void pevent_ref(struct pevent *pevent)
{
	pevent->ref_count++;
}
4942
4943static void free_format_fields(struct format_field *field)
4944{
4945 struct format_field *next;
4946
4947 while (field) {
4948 next = field->next;
4949 free(field->type);
4950 free(field->name);
4951 free(field);
4952 field = next;
4953 }
4954}
4955
/* Free both the common and the event-specific field lists of @format. */
static void free_formats(struct format *format)
{
	free_format_fields(format->common_fields);
	free_format_fields(format->fields);
}
4961
/* Release an event_format and everything hanging off it. */
static void free_event(struct event_format *event)
{
	free(event->name);
	free(event->system);

	free_formats(&event->format);

	free(event->print_fmt.format);
	free_args(event->print_fmt.args);

	free(event);
}
4974
4975/**
4976 * pevent_free - free a pevent handle
4977 * @pevent: the pevent handle to free
4978 */
void pevent_free(struct pevent *pevent)
{
	struct cmdline_list *cmdlist, *cmdnext;
	struct func_list *funclist, *funcnext;
	struct printk_list *printklist, *printknext;
	struct pevent_function_handler *func_handler;
	struct event_handler *handle;
	int i;

	if (!pevent)
		return;

	/* Snapshot the pending (not yet cached) lists up front. */
	cmdlist = pevent->cmdlist;
	funclist = pevent->funclist;
	printklist = pevent->printklist;

	/* Only tear down once the last reference is dropped. */
	pevent->ref_count--;
	if (pevent->ref_count)
		return;

	/* Cached pid->comm map. */
	if (pevent->cmdlines) {
		for (i = 0; i < pevent->cmdline_count; i++)
			free(pevent->cmdlines[i].comm);
		free(pevent->cmdlines);
	}

	/* Pending comm entries not yet folded into the cache. */
	while (cmdlist) {
		cmdnext = cmdlist->next;
		free(cmdlist->comm);
		free(cmdlist);
		cmdlist = cmdnext;
	}

	/* Cached function (kallsyms) map. */
	if (pevent->func_map) {
		for (i = 0; i < pevent->func_count; i++) {
			free(pevent->func_map[i].func);
			free(pevent->func_map[i].mod);
		}
		free(pevent->func_map);
	}

	/* Pending function entries. */
	while (funclist) {
		funcnext = funclist->next;
		free(funclist->func);
		free(funclist->mod);
		free(funclist);
		funclist = funcnext;
	}

	/* Registered print-helper functions. */
	while (pevent->func_handlers) {
		func_handler = pevent->func_handlers;
		pevent->func_handlers = func_handler->next;
		free_func_handle(func_handler);
	}

	/* Cached printk format map. */
	if (pevent->printk_map) {
		for (i = 0; i < pevent->printk_count; i++)
			free(pevent->printk_map[i].printk);
		free(pevent->printk_map);
	}

	/* Pending printk entries. */
	while (printklist) {
		printknext = printklist->next;
		free(printklist->printk);
		free(printklist);
		printklist = printknext;
	}

	/* Parsed events themselves. */
	for (i = 0; i < pevent->nr_events; i++)
		free_event(pevent->events[i]);

	/* Event handlers that never matched an event. */
	while (pevent->handlers) {
		handle = pevent->handlers;
		pevent->handlers = handle->next;
		free_handler(handle);
	}

	free(pevent->events);
	free(pevent->sort_events);

	free(pevent);
}
5061
/* Drop a reference taken with pevent_ref(); frees the handle at zero. */
void pevent_unref(struct pevent *pevent)
{
	pevent_free(pevent);
}
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
new file mode 100644
index 000000000000..ac997bc7b592
--- /dev/null
+++ b/tools/lib/traceevent/event-parse.h
@@ -0,0 +1,804 @@
1/*
2 * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation;
8 * version 2.1 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21#ifndef _PARSE_EVENTS_H
22#define _PARSE_EVENTS_H
23
24#include <stdarg.h>
25#include <regex.h>
26
27#ifndef __unused
28#define __unused __attribute__ ((unused))
29#endif
30
31/* ----------------------- trace_seq ----------------------- */
32
33
34#ifndef TRACE_SEQ_BUF_SIZE
35#define TRACE_SEQ_BUF_SIZE 4096
36#endif
37
38#ifndef DEBUG_RECORD
39#define DEBUG_RECORD 0
40#endif
41
42struct pevent_record {
43 unsigned long long ts;
44 unsigned long long offset;
45 long long missed_events; /* buffer dropped events before */
46 int record_size; /* size of binary record */
47 int size; /* size of data */
48 void *data;
49 int cpu;
50 int ref_count;
51 int locked; /* Do not free, even if ref_count is zero */
52 void *private;
53#if DEBUG_RECORD
54 struct pevent_record *prev;
55 struct pevent_record *next;
56 long alloc_addr;
57#endif
58};
59
60/*
61 * Trace sequences are used to allow a function to call several other functions
62 * to create a string of data to use (up to a max of PAGE_SIZE).
63 */
64
65struct trace_seq {
66 char *buffer;
67 unsigned int buffer_size;
68 unsigned int len;
69 unsigned int readpos;
70};
71
72void trace_seq_init(struct trace_seq *s);
73void trace_seq_destroy(struct trace_seq *s);
74
75extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
76 __attribute__ ((format (printf, 2, 3)));
77extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
78 __attribute__ ((format (printf, 2, 0)));
79
80extern int trace_seq_puts(struct trace_seq *s, const char *str);
81extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
82
83extern void trace_seq_terminate(struct trace_seq *s);
84
85extern int trace_seq_do_printf(struct trace_seq *s);
86
87
88/* ----------------------- pevent ----------------------- */
89
90struct pevent;
91struct event_format;
92
93typedef int (*pevent_event_handler_func)(struct trace_seq *s,
94 struct pevent_record *record,
95 struct event_format *event,
96 void *context);
97
98typedef int (*pevent_plugin_load_func)(struct pevent *pevent);
99typedef int (*pevent_plugin_unload_func)(void);
100
101struct plugin_option {
102 struct plugin_option *next;
103 void *handle;
104 char *file;
105 char *name;
106 char *plugin_alias;
107 char *description;
108 char *value;
109 void *private;
110 int set;
111};
112
113/*
114 * Plugin hooks that can be called:
115 *
116 * PEVENT_PLUGIN_LOADER: (required)
 * The function used to initialize the plugin.
118 *
119 * int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
120 *
121 * PEVENT_PLUGIN_UNLOADER: (optional)
122 * The function called just before unloading
123 *
124 * int PEVENT_PLUGIN_UNLOADER(void)
125 *
126 * PEVENT_PLUGIN_OPTIONS: (optional)
127 * Plugin options that can be set before loading
128 *
129 * struct plugin_option PEVENT_PLUGIN_OPTIONS[] = {
130 * {
131 * .name = "option-name",
 * .plugin_alias = "override-file-name", (optional)
133 * .description = "description of option to show users",
134 * },
135 * {
136 * .name = NULL,
137 * },
138 * };
139 *
140 * Array must end with .name = NULL;
141 *
142 *
143 * .plugin_alias is used to give a shorter name to access
 * the variable. Useful if a plugin handles more than one event.
145 *
146 * PEVENT_PLUGIN_ALIAS: (optional)
147 * The name to use for finding options (uses filename if not defined)
148 */
149#define PEVENT_PLUGIN_LOADER pevent_plugin_loader
150#define PEVENT_PLUGIN_UNLOADER pevent_plugin_unloader
151#define PEVENT_PLUGIN_OPTIONS pevent_plugin_options
152#define PEVENT_PLUGIN_ALIAS pevent_plugin_alias
153#define _MAKE_STR(x) #x
154#define MAKE_STR(x) _MAKE_STR(x)
155#define PEVENT_PLUGIN_LOADER_NAME MAKE_STR(PEVENT_PLUGIN_LOADER)
156#define PEVENT_PLUGIN_UNLOADER_NAME MAKE_STR(PEVENT_PLUGIN_UNLOADER)
157#define PEVENT_PLUGIN_OPTIONS_NAME MAKE_STR(PEVENT_PLUGIN_OPTIONS)
158#define PEVENT_PLUGIN_ALIAS_NAME MAKE_STR(PEVENT_PLUGIN_ALIAS)
159
160#define NSECS_PER_SEC 1000000000ULL
161#define NSECS_PER_USEC 1000ULL
162
163enum format_flags {
164 FIELD_IS_ARRAY = 1,
165 FIELD_IS_POINTER = 2,
166 FIELD_IS_SIGNED = 4,
167 FIELD_IS_STRING = 8,
168 FIELD_IS_DYNAMIC = 16,
169 FIELD_IS_LONG = 32,
170 FIELD_IS_FLAG = 64,
171 FIELD_IS_SYMBOLIC = 128,
172};
173
174struct format_field {
175 struct format_field *next;
176 struct event_format *event;
177 char *type;
178 char *name;
179 int offset;
180 int size;
181 unsigned int arraylen;
182 unsigned int elementsize;
183 unsigned long flags;
184};
185
186struct format {
187 int nr_common;
188 int nr_fields;
189 struct format_field *common_fields;
190 struct format_field *fields;
191};
192
193struct print_arg_atom {
194 char *atom;
195};
196
197struct print_arg_string {
198 char *string;
199 int offset;
200};
201
202struct print_arg_field {
203 char *name;
204 struct format_field *field;
205};
206
207struct print_flag_sym {
208 struct print_flag_sym *next;
209 char *value;
210 char *str;
211};
212
213struct print_arg_typecast {
214 char *type;
215 struct print_arg *item;
216};
217
218struct print_arg_flags {
219 struct print_arg *field;
220 char *delim;
221 struct print_flag_sym *flags;
222};
223
224struct print_arg_symbol {
225 struct print_arg *field;
226 struct print_flag_sym *symbols;
227};
228
229struct print_arg_dynarray {
230 struct format_field *field;
231 struct print_arg *index;
232};
233
234struct print_arg;
235
236struct print_arg_op {
237 char *op;
238 int prio;
239 struct print_arg *left;
240 struct print_arg *right;
241};
242
243struct pevent_function_handler;
244
245struct print_arg_func {
246 struct pevent_function_handler *func;
247 struct print_arg *args;
248};
249
250enum print_arg_type {
251 PRINT_NULL,
252 PRINT_ATOM,
253 PRINT_FIELD,
254 PRINT_FLAGS,
255 PRINT_SYMBOL,
256 PRINT_TYPE,
257 PRINT_STRING,
258 PRINT_BSTRING,
259 PRINT_DYNAMIC_ARRAY,
260 PRINT_OP,
261 PRINT_FUNC,
262};
263
264struct print_arg {
265 struct print_arg *next;
266 enum print_arg_type type;
267 union {
268 struct print_arg_atom atom;
269 struct print_arg_field field;
270 struct print_arg_typecast typecast;
271 struct print_arg_flags flags;
272 struct print_arg_symbol symbol;
273 struct print_arg_func func;
274 struct print_arg_string string;
275 struct print_arg_op op;
276 struct print_arg_dynarray dynarray;
277 };
278};
279
280struct print_fmt {
281 char *format;
282 struct print_arg *args;
283};
284
285struct event_format {
286 struct pevent *pevent;
287 char *name;
288 int id;
289 int flags;
290 struct format format;
291 struct print_fmt print_fmt;
292 char *system;
293 pevent_event_handler_func handler;
294 void *context;
295};
296
297enum {
298 EVENT_FL_ISFTRACE = 0x01,
299 EVENT_FL_ISPRINT = 0x02,
300 EVENT_FL_ISBPRINT = 0x04,
301 EVENT_FL_ISFUNCENT = 0x10,
302 EVENT_FL_ISFUNCRET = 0x20,
303
304 EVENT_FL_FAILED = 0x80000000
305};
306
307enum event_sort_type {
308 EVENT_SORT_ID,
309 EVENT_SORT_NAME,
310 EVENT_SORT_SYSTEM,
311};
312
313enum event_type {
314 EVENT_ERROR,
315 EVENT_NONE,
316 EVENT_SPACE,
317 EVENT_NEWLINE,
318 EVENT_OP,
319 EVENT_DELIM,
320 EVENT_ITEM,
321 EVENT_DQUOTE,
322 EVENT_SQUOTE,
323};
324
325typedef unsigned long long (*pevent_func_handler)(struct trace_seq *s,
326 unsigned long long *args);
327
328enum pevent_func_arg_type {
329 PEVENT_FUNC_ARG_VOID,
330 PEVENT_FUNC_ARG_INT,
331 PEVENT_FUNC_ARG_LONG,
332 PEVENT_FUNC_ARG_STRING,
333 PEVENT_FUNC_ARG_PTR,
334 PEVENT_FUNC_ARG_MAX_TYPES
335};
336
337enum pevent_flag {
338 PEVENT_NSEC_OUTPUT = 1, /* output in NSECS */
339};
340
341struct cmdline;
342struct cmdline_list;
343struct func_map;
344struct func_list;
345struct event_handler;
346
347struct pevent {
348 int ref_count;
349
350 int header_page_ts_offset;
351 int header_page_ts_size;
352 int header_page_size_offset;
353 int header_page_size_size;
354 int header_page_data_offset;
355 int header_page_data_size;
356 int header_page_overwrite;
357
358 int file_bigendian;
359 int host_bigendian;
360
361 int latency_format;
362
363 int old_format;
364
365 int cpus;
366 int long_size;
367
368 struct cmdline *cmdlines;
369 struct cmdline_list *cmdlist;
370 int cmdline_count;
371
372 struct func_map *func_map;
373 struct func_list *funclist;
374 unsigned int func_count;
375
376 struct printk_map *printk_map;
377 struct printk_list *printklist;
378 unsigned int printk_count;
379
380
381 struct event_format **events;
382 int nr_events;
383 struct event_format **sort_events;
384 enum event_sort_type last_type;
385
386 int type_offset;
387 int type_size;
388
389 int pid_offset;
390 int pid_size;
391
392 int pc_offset;
393 int pc_size;
394
395 int flags_offset;
396 int flags_size;
397
398 int ld_offset;
399 int ld_size;
400
401 int print_raw;
402
403 int test_filters;
404
405 int flags;
406
407 struct format_field *bprint_ip_field;
408 struct format_field *bprint_fmt_field;
409 struct format_field *bprint_buf_field;
410
411 struct event_handler *handlers;
412 struct pevent_function_handler *func_handlers;
413
414 /* cache */
415 struct event_format *last_event;
416};
417
/* Set a behaviour flag (see enum pevent_flag) on @pevent. */
static inline void pevent_set_flag(struct pevent *pevent, int flag)
{
	pevent->flags |= flag;
}
422
423static inline unsigned short
424__data2host2(struct pevent *pevent, unsigned short data)
425{
426 unsigned short swap;
427
428 if (pevent->host_bigendian == pevent->file_bigendian)
429 return data;
430
431 swap = ((data & 0xffULL) << 8) |
432 ((data & (0xffULL << 8)) >> 8);
433
434 return swap;
435}
436
437static inline unsigned int
438__data2host4(struct pevent *pevent, unsigned int data)
439{
440 unsigned int swap;
441
442 if (pevent->host_bigendian == pevent->file_bigendian)
443 return data;
444
445 swap = ((data & 0xffULL) << 24) |
446 ((data & (0xffULL << 8)) << 8) |
447 ((data & (0xffULL << 16)) >> 8) |
448 ((data & (0xffULL << 24)) >> 24);
449
450 return swap;
451}
452
453static inline unsigned long long
454__data2host8(struct pevent *pevent, unsigned long long data)
455{
456 unsigned long long swap;
457
458 if (pevent->host_bigendian == pevent->file_bigendian)
459 return data;
460
461 swap = ((data & 0xffULL) << 56) |
462 ((data & (0xffULL << 8)) << 40) |
463 ((data & (0xffULL << 16)) << 24) |
464 ((data & (0xffULL << 24)) << 8) |
465 ((data & (0xffULL << 32)) >> 8) |
466 ((data & (0xffULL << 40)) >> 24) |
467 ((data & (0xffULL << 48)) >> 40) |
468 ((data & (0xffULL << 56)) >> 56);
469
470 return swap;
471}
472
/*
 * Read a 2/4/8-byte value at @ptr (which may be unaligned inside the raw
 * record buffer) and convert it to host byte order.  All three go through
 * memcpy into a local so strict-alignment architectures don't fault on a
 * misaligned load; previously only data2host8 did, while data2host2/4
 * dereferenced the pointer directly.
 */
#define data2host2(pevent, ptr)					\
({								\
	unsigned short __val;					\
								\
	memcpy(&__val, (ptr), sizeof(unsigned short));		\
	__data2host2(pevent, __val);				\
})
#define data2host4(pevent, ptr)					\
({								\
	unsigned int __val;					\
								\
	memcpy(&__val, (ptr), sizeof(unsigned int));		\
	__data2host4(pevent, __val);				\
})
#define data2host8(pevent, ptr)					\
({								\
	unsigned long long __val;				\
								\
	memcpy(&__val, (ptr), sizeof(unsigned long long));	\
	__data2host8(pevent, __val);				\
})
482
/* taken from kernel/trace/trace.h */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,		/* interrupts were disabled */
	TRACE_FLAG_IRQS_NOSUPPORT = 0x02,	/* irq state not recorded */
	TRACE_FLAG_NEED_RESCHED = 0x04,		/* reschedule was pending */
	TRACE_FLAG_HARDIRQ = 0x08,		/* event taken in hard irq */
	TRACE_FLAG_SOFTIRQ = 0x10,		/* event taken in soft irq */
};
491
492int pevent_register_comm(struct pevent *pevent, const char *comm, int pid);
493int pevent_register_function(struct pevent *pevent, char *name,
494 unsigned long long addr, char *mod);
495int pevent_register_print_string(struct pevent *pevent, char *fmt,
496 unsigned long long addr);
497int pevent_pid_is_registered(struct pevent *pevent, int pid);
498
499void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
500 struct pevent_record *record);
501
502int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size,
503 int long_size);
504
505int pevent_parse_event(struct pevent *pevent, const char *buf,
506 unsigned long size, const char *sys);
507
508void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
509 const char *name, struct pevent_record *record,
510 int *len, int err);
511
512int pevent_get_field_val(struct trace_seq *s, struct event_format *event,
513 const char *name, struct pevent_record *record,
514 unsigned long long *val, int err);
515int pevent_get_common_field_val(struct trace_seq *s, struct event_format *event,
516 const char *name, struct pevent_record *record,
517 unsigned long long *val, int err);
518int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event,
519 const char *name, struct pevent_record *record,
520 unsigned long long *val, int err);
521
522int pevent_print_num_field(struct trace_seq *s, const char *fmt,
523 struct event_format *event, const char *name,
524 struct pevent_record *record, int err);
525
526int pevent_register_event_handler(struct pevent *pevent, int id, char *sys_name, char *event_name,
527 pevent_event_handler_func func, void *context);
528int pevent_register_print_function(struct pevent *pevent,
529 pevent_func_handler func,
530 enum pevent_func_arg_type ret_type,
531 char *name, ...);
532
533struct format_field *pevent_find_common_field(struct event_format *event, const char *name);
534struct format_field *pevent_find_field(struct event_format *event, const char *name);
535struct format_field *pevent_find_any_field(struct event_format *event, const char *name);
536
537const char *pevent_find_function(struct pevent *pevent, unsigned long long addr);
538unsigned long long
539pevent_find_function_address(struct pevent *pevent, unsigned long long addr);
540unsigned long long pevent_read_number(struct pevent *pevent, const void *ptr, int size);
541int pevent_read_number_field(struct format_field *field, const void *data,
542 unsigned long long *value);
543
544struct event_format *pevent_find_event(struct pevent *pevent, int id);
545
546struct event_format *
547pevent_find_event_by_name(struct pevent *pevent, const char *sys, const char *name);
548
549void pevent_data_lat_fmt(struct pevent *pevent,
550 struct trace_seq *s, struct pevent_record *record);
551int pevent_data_type(struct pevent *pevent, struct pevent_record *rec);
552struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type);
553int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec);
554const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid);
555void pevent_event_info(struct trace_seq *s, struct event_format *event,
556 struct pevent_record *record);
557
558struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type);
559struct format_field **pevent_event_common_fields(struct event_format *event);
560struct format_field **pevent_event_fields(struct event_format *event);
561
/* Number of CPUs recorded in the trace. */
static inline int pevent_get_cpus(struct pevent *pevent)
{
	return pevent->cpus;
}

static inline void pevent_set_cpus(struct pevent *pevent, int cpus)
{
	pevent->cpus = cpus;
}

/* Size in bytes of a 'long' as used by the trace file (may differ from host). */
static inline int pevent_get_long_size(struct pevent *pevent)
{
	return pevent->long_size;
}

static inline void pevent_set_long_size(struct pevent *pevent, int long_size)
{
	pevent->long_size = long_size;
}

/* Endianness of the trace file (non-zero means big endian). */
static inline int pevent_is_file_bigendian(struct pevent *pevent)
{
	return pevent->file_bigendian;
}

static inline void pevent_set_file_bigendian(struct pevent *pevent, int endian)
{
	pevent->file_bigendian = endian;
}

/* Endianness of the host running this library (non-zero means big endian). */
static inline int pevent_is_host_bigendian(struct pevent *pevent)
{
	return pevent->host_bigendian;
}

static inline void pevent_set_host_bigendian(struct pevent *pevent, int endian)
{
	pevent->host_bigendian = endian;
}

/* When set, output presumably uses the ftrace latency format — see callers. */
static inline int pevent_is_latency_format(struct pevent *pevent)
{
	return pevent->latency_format;
}

static inline void pevent_set_latency_format(struct pevent *pevent, int lat)
{
	pevent->latency_format = lat;
}
611
612struct pevent *pevent_alloc(void);
613void pevent_free(struct pevent *pevent);
614void pevent_ref(struct pevent *pevent);
615void pevent_unref(struct pevent *pevent);
616
617/* access to the internal parser */
618void pevent_buffer_init(const char *buf, unsigned long long size);
619enum event_type pevent_read_token(char **tok);
620void pevent_free_token(char *token);
621int pevent_peek_char(void);
622const char *pevent_get_input_buf(void);
623unsigned long long pevent_get_input_buf_ptr(void);
624
625/* for debugging */
626void pevent_print_funcs(struct pevent *pevent);
627void pevent_print_printk(struct pevent *pevent);
628
629/* ----------------------- filtering ----------------------- */
630
/* Truth value of a filter argument that reduced to a constant. */
enum filter_boolean_type {
	FILTER_FALSE,
	FILTER_TRUE,
};

/* Boolean connectives joining filter sub-expressions. */
enum filter_op_type {
	FILTER_OP_AND = 1,
	FILTER_OP_OR,
	FILTER_OP_NOT,
};

/* Comparison operators (numeric compares, string match, regex match). */
enum filter_cmp_type {
	FILTER_CMP_NONE,
	FILTER_CMP_EQ,
	FILTER_CMP_NE,
	FILTER_CMP_GT,
	FILTER_CMP_LT,
	FILTER_CMP_GE,
	FILTER_CMP_LE,
	FILTER_CMP_MATCH,
	FILTER_CMP_NOT_MATCH,
	FILTER_CMP_REGEX,
	FILTER_CMP_NOT_REGEX,
};

/* Arithmetic and bitwise operators usable inside a filter expression. */
enum filter_exp_type {
	FILTER_EXP_NONE,
	FILTER_EXP_ADD,
	FILTER_EXP_SUB,
	FILTER_EXP_MUL,
	FILTER_EXP_DIV,
	FILTER_EXP_MOD,
	FILTER_EXP_RSHIFT,
	FILTER_EXP_LSHIFT,
	FILTER_EXP_AND,
	FILTER_EXP_OR,
	FILTER_EXP_XOR,
	FILTER_EXP_NOT,
};

/* Discriminator for the union inside struct filter_arg. */
enum filter_arg_type {
	FILTER_ARG_NONE,
	FILTER_ARG_BOOLEAN,
	FILTER_ARG_VALUE,
	FILTER_ARG_FIELD,
	FILTER_ARG_EXP,
	FILTER_ARG_OP,
	FILTER_ARG_NUM,
	FILTER_ARG_STR,
};

/* Type of a literal value argument: number, "string" or 'char'. */
enum filter_value_type {
	FILTER_NUMBER,
	FILTER_STRING,
	FILTER_CHAR
};
687
struct filter_arg;	/* forward declaration (fixes typo "fliter_arg", which named nothing) */
689
/* Constant boolean argument (a filter that reduced to TRUE/FALSE). */
struct filter_arg_boolean {
	enum filter_boolean_type value;
};

/* Reference to an event record field. */
struct filter_arg_field {
	struct format_field *field;
};

/* Literal value: a number, or an owned (strdup'd) string. */
struct filter_arg_value {
	enum filter_value_type type;
	union {
		char *str;
		unsigned long long val;
	};
};

/* Boolean operation (&&, ||, !) over sub-arguments. */
struct filter_arg_op {
	enum filter_op_type type;
	struct filter_arg *left;
	struct filter_arg *right;
};

/* Arithmetic/bitwise expression over sub-arguments. */
struct filter_arg_exp {
	enum filter_exp_type type;
	struct filter_arg *left;
	struct filter_arg *right;
};

/* Numeric comparison between two sub-arguments. */
struct filter_arg_num {
	enum filter_cmp_type type;
	struct filter_arg *left;
	struct filter_arg *right;
};

/* String comparison against a field: compiled regex plus a scratch
 * buffer sized to the field (one extra byte for NUL termination). */
struct filter_arg_str {
	enum filter_cmp_type type;
	struct format_field *field;
	char *val;
	char *buffer;
	regex_t reg;
};

/* One node of a parsed filter tree; 'type' selects the union member. */
struct filter_arg {
	enum filter_arg_type type;
	union {
		struct filter_arg_boolean boolean;
		struct filter_arg_field field;
		struct filter_arg_value value;
		struct filter_arg_op op;
		struct filter_arg_exp exp;
		struct filter_arg_num num;
		struct filter_arg_str str;
	};
};

/* Filter tree attached to a single event id. */
struct filter_type {
	int event_id;
	struct event_format *event;
	struct filter_arg *filter;
};

/* Set of per-event filters; event_filters is kept sorted by event_id
 * (see add_filter_type()/find_filter_type() in parse-filter.c). */
struct event_filter {
	struct pevent *pevent;
	int filters;
	struct filter_type *event_filters;
};
756
757struct event_filter *pevent_filter_alloc(struct pevent *pevent);
758
/* Filter evaluation results — presumably returned by pevent_filter_match();
 * TODO confirm against the implementation. */
#define FILTER_NONE -2
#define FILTER_NOEXIST -1
#define FILTER_MISS 0
#define FILTER_MATCH 1

/* Classes of "trivial" (constant TRUE/FALSE) filters for bulk operations. */
enum filter_trivial_type {
	FILTER_TRIVIAL_FALSE,
	FILTER_TRIVIAL_TRUE,
	FILTER_TRIVIAL_BOTH,
};
769
770int pevent_filter_add_filter_str(struct event_filter *filter,
771 const char *filter_str,
772 char **error_str);
773
774
775int pevent_filter_match(struct event_filter *filter,
776 struct pevent_record *record);
777
778int pevent_event_filtered(struct event_filter *filter,
779 int event_id);
780
781void pevent_filter_reset(struct event_filter *filter);
782
783void pevent_filter_clear_trivial(struct event_filter *filter,
784 enum filter_trivial_type type);
785
786void pevent_filter_free(struct event_filter *filter);
787
788char *pevent_filter_make_string(struct event_filter *filter, int event_id);
789
790int pevent_filter_remove_event(struct event_filter *filter,
791 int event_id);
792
793int pevent_filter_event_has_trivial(struct event_filter *filter,
794 int event_id,
795 enum filter_trivial_type type);
796
797int pevent_filter_copy(struct event_filter *dest, struct event_filter *source);
798
799int pevent_update_trivial(struct event_filter *dest, struct event_filter *source,
800 enum filter_trivial_type type);
801
802int pevent_filter_compare(struct event_filter *filter1, struct event_filter *filter2);
803
804#endif /* _PARSE_EVENTS_H */
diff --git a/tools/lib/traceevent/event-utils.h b/tools/lib/traceevent/event-utils.h
new file mode 100644
index 000000000000..08296383d1e6
--- /dev/null
+++ b/tools/lib/traceevent/event-utils.h
@@ -0,0 +1,80 @@
1/*
2 * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation;
8 * version 2.1 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21#ifndef __UTIL_H
22#define __UTIL_H
23
24#include <ctype.h>
25
26/* Can be overridden */
27void die(const char *fmt, ...);
28void *malloc_or_die(unsigned int size);
29void warning(const char *fmt, ...);
30void pr_stat(const char *fmt, ...);
31void vpr_stat(const char *fmt, va_list ap);
32
33/* Always available */
34void __die(const char *fmt, ...);
35void __warning(const char *fmt, ...);
36void __pr_stat(const char *fmt, ...);
37
38void __vdie(const char *fmt, ...);
39void __vwarning(const char *fmt, ...);
40void __vpr_stat(const char *fmt, ...);
41
/*
 * strim - trim leading and trailing whitespace in place.
 * @string: NUL-terminated string to trim; may be NULL.
 *
 * Returns a pointer to the first non-space character of @string (the
 * buffer is modified: trailing whitespace is overwritten with NUL), or
 * NULL when @string is NULL.
 *
 * Fix: isspace() has undefined behavior for values not representable
 * as unsigned char, so plain (possibly signed) char must be cast.
 */
static inline char *strim(char *string)
{
	char *ret;

	if (!string)
		return NULL;

	/* Skip leading whitespace. */
	while (*string) {
		if (!isspace((unsigned char)*string))
			break;
		string++;
	}
	ret = string;

	/* Walk back from the end past trailing whitespace. */
	string = ret + strlen(ret) - 1;
	while (string > ret) {
		if (!isspace((unsigned char)*string))
			break;
		string--;
	}
	/* Terminate just after the last non-space character. */
	string[1] = 0;

	return ret;
}
65
/*
 * has_text - test whether @text contains any non-whitespace character.
 * @text: NUL-terminated string; may be NULL.
 *
 * Returns 1 if a non-space character exists, 0 otherwise (including
 * for NULL or empty input).
 *
 * Fix: cast to unsigned char before isspace() — passing a negative
 * plain char is undefined behavior per ISO C.
 */
static inline int has_text(const char *text)
{
	if (!text)
		return 0;

	while (*text) {
		if (!isspace((unsigned char)*text))
			return 1;
		text++;
	}

	return 0;
}
79
80#endif
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
new file mode 100644
index 000000000000..2d40c5ed81d6
--- /dev/null
+++ b/tools/lib/traceevent/parse-filter.c
@@ -0,0 +1,2262 @@
1/*
2 * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation;
8 * version 2.1 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21#include <stdio.h>
22#include <stdlib.h>
23#include <string.h>
24#include <stdarg.h>
25#include <errno.h>
26#include <sys/types.h>
27
28#include "event-parse.h"
29#include "event-utils.h"
30
#define COMM "COMM"

/* Dummy field handed out when a filter names "COMM" (see create_arg_item()). */
static struct format_field comm = {
	.name = "COMM",
};

/* Singly linked list of events a filter string applies to. */
struct event_list {
	struct event_list *next;
	struct event_format *event;
};

/* Maximum length of the formatted message part of an error string. */
#define MAX_ERR_STR_SIZE 256
43
/*
 * show_error - build a malloc'd error message for the caller.
 * @error_str: where to store the message; NULL means the caller does
 *             not want errors and the call is a no-op.
 * @fmt: printf-style description of the failure.
 *
 * When the tokenizer has an input buffer, the message is prefixed with
 * that input line plus a second line carrying a '^' marker at the
 * position where parsing stopped (pevent_get_input_buf_ptr()).
 */
static void show_error(char **error_str, const char *fmt, ...)
{
	unsigned long long index;
	const char *input;
	char *error;
	va_list ap;
	int len;
	int i;

	if (!error_str)
		return;

	input = pevent_get_input_buf();
	index = pevent_get_input_buf_ptr();
	len = input ? strlen(input) : 0;

	/* Room for: input copy + caret line (len each) + message + newlines. */
	error = malloc_or_die(MAX_ERR_STR_SIZE + (len*2) + 3);

	if (len) {
		strcpy(error, input);
		error[len] = '\n';
		/* Pad with spaces up to the failure position, then mark it. */
		for (i = 1; i < len && i < index; i++)
			error[len+i] = ' ';
		error[len + i] = '^';
		error[len + i + 1] = '\n';
		len += i+2;
	}

	va_start(ap, fmt);
	vsnprintf(error + len, MAX_ERR_STR_SIZE, fmt, ap);
	va_end(ap);

	*error_str = error;
}
78
/* Release a token handed out by the pevent tokenizer (NULL-safe wrapper). */
static void free_token(char *token)
{
	pevent_free_token(token);
}
83
84static enum event_type read_token(char **tok)
85{
86 enum event_type type;
87 char *token = NULL;
88
89 do {
90 free_token(token);
91 type = pevent_read_token(&token);
92 } while (type == EVENT_NEWLINE || type == EVENT_SPACE);
93
94 /* If token is = or ! check to see if the next char is ~ */
95 if (token &&
96 (strcmp(token, "=") == 0 || strcmp(token, "!") == 0) &&
97 pevent_peek_char() == '~') {
98 /* append it */
99 *tok = malloc(3);
100 sprintf(*tok, "%c%c", *token, '~');
101 free_token(token);
102 /* Now remove the '~' from the buffer */
103 pevent_read_token(&token);
104 free_token(token);
105 } else
106 *tok = token;
107
108 return type;
109}
110
111static int filter_cmp(const void *a, const void *b)
112{
113 const struct filter_type *ea = a;
114 const struct filter_type *eb = b;
115
116 if (ea->event_id < eb->event_id)
117 return -1;
118
119 if (ea->event_id > eb->event_id)
120 return 1;
121
122 return 0;
123}
124
125static struct filter_type *
126find_filter_type(struct event_filter *filter, int id)
127{
128 struct filter_type *filter_type;
129 struct filter_type key;
130
131 key.event_id = id;
132
133 filter_type = bsearch(&key, filter->event_filters,
134 filter->filters,
135 sizeof(*filter->event_filters),
136 filter_cmp);
137
138 return filter_type;
139}
140
/*
 * add_filter_type - find or create the filter slot for event @id.
 *
 * Returns the existing struct filter_type for @id when present;
 * otherwise grows the event_filters array by one and inserts a new
 * empty slot, keeping the array sorted by event_id so that
 * find_filter_type() can bsearch it.
 */
static struct filter_type *
add_filter_type(struct event_filter *filter, int id)
{
	struct filter_type *filter_type;
	int i;

	filter_type = find_filter_type(filter, id);
	if (filter_type)
		return filter_type;

	if (!filter->filters)
		filter->event_filters =
			malloc_or_die(sizeof(*filter->event_filters));
	else {
		filter->event_filters =
			realloc(filter->event_filters,
				sizeof(*filter->event_filters) *
				(filter->filters + 1));
		if (!filter->event_filters)
			die("Could not allocate filter");
	}

	/* Find the sorted insertion point for @id. */
	for (i = 0; i < filter->filters; i++) {
		if (filter->event_filters[i].event_id > id)
			break;
	}

	/* Shift larger ids up one slot to make room. */
	if (i < filter->filters)
		memmove(&filter->event_filters[i+1],
			&filter->event_filters[i],
			sizeof(*filter->event_filters) *
			(filter->filters - i));

	filter_type = &filter->event_filters[i];
	filter_type->event_id = id;
	filter_type->event = pevent_find_event(filter->pevent, id);
	filter_type->filter = NULL;

	filter->filters++;

	return filter_type;
}
183
184/**
185 * pevent_filter_alloc - create a new event filter
186 * @pevent: The pevent that this filter is associated with
187 */
188struct event_filter *pevent_filter_alloc(struct pevent *pevent)
189{
190 struct event_filter *filter;
191
192 filter = malloc_or_die(sizeof(*filter));
193 memset(filter, 0, sizeof(*filter));
194 filter->pevent = pevent;
195 pevent_ref(pevent);
196
197 return filter;
198}
199
200static struct filter_arg *allocate_arg(void)
201{
202 struct filter_arg *arg;
203
204 arg = malloc_or_die(sizeof(*arg));
205 memset(arg, 0, sizeof(*arg));
206
207 return arg;
208}
209
210static void free_arg(struct filter_arg *arg)
211{
212 if (!arg)
213 return;
214
215 switch (arg->type) {
216 case FILTER_ARG_NONE:
217 case FILTER_ARG_BOOLEAN:
218 case FILTER_ARG_NUM:
219 break;
220
221 case FILTER_ARG_STR:
222 free(arg->str.val);
223 regfree(&arg->str.reg);
224 free(arg->str.buffer);
225 break;
226
227 case FILTER_ARG_OP:
228 free_arg(arg->op.left);
229 free_arg(arg->op.right);
230 default:
231 break;
232 }
233
234 free(arg);
235}
236
237static void add_event(struct event_list **events,
238 struct event_format *event)
239{
240 struct event_list *list;
241
242 list = malloc_or_die(sizeof(*list));
243 list->next = *events;
244 *events = list;
245 list->event = event;
246}
247
248static int event_match(struct event_format *event,
249 regex_t *sreg, regex_t *ereg)
250{
251 if (sreg) {
252 return !regexec(sreg, event->system, 0, NULL, 0) &&
253 !regexec(ereg, event->name, 0, NULL, 0);
254 }
255
256 return !regexec(ereg, event->system, 0, NULL, 0) ||
257 !regexec(ereg, event->name, 0, NULL, 0);
258}
259
/*
 * find_event - add every event matching sys/name patterns to @events.
 *
 * @sys_name and @event_name are turned into anchored ("^...$"),
 * case-insensitive regexes.  When @event_name is NULL the single
 * pattern in @sys_name is matched against both the system and the
 * event name of each event.
 *
 * Returns 0 when at least one event matched, -1 on regex compile
 * failure or when nothing matched.
 */
static int
find_event(struct pevent *pevent, struct event_list **events,
	   char *sys_name, char *event_name)
{
	struct event_format *event;
	regex_t ereg;
	regex_t sreg;
	int match = 0;
	char *reg;
	int ret;
	int i;

	if (!event_name) {
		/* if no name is given, then swap sys and name */
		event_name = sys_name;
		sys_name = NULL;
	}

	/* "^" + pattern + "$" + NUL */
	reg = malloc_or_die(strlen(event_name) + 3);
	sprintf(reg, "^%s$", event_name);

	ret = regcomp(&ereg, reg, REG_ICASE|REG_NOSUB);
	free(reg);

	if (ret)
		return -1;

	if (sys_name) {
		reg = malloc_or_die(strlen(sys_name) + 3);
		sprintf(reg, "^%s$", sys_name);
		ret = regcomp(&sreg, reg, REG_ICASE|REG_NOSUB);
		free(reg);
		if (ret) {
			regfree(&ereg);
			return -1;
		}
	}

	/* Collect every event the pattern(s) select. */
	for (i = 0; i < pevent->nr_events; i++) {
		event = pevent->events[i];
		if (event_match(event, sys_name ? &sreg : NULL, &ereg)) {
			match = 1;
			add_event(events, event);
		}
	}

	regfree(&ereg);
	if (sys_name)
		regfree(&sreg);

	if (!match)
		return -1;

	return 0;
}
315
316static void free_events(struct event_list *events)
317{
318 struct event_list *event;
319
320 while (events) {
321 event = events;
322 events = events->next;
323 free(event);
324 }
325}
326
327static struct filter_arg *
328create_arg_item(struct event_format *event,
329 const char *token, enum filter_arg_type type,
330 char **error_str)
331{
332 struct format_field *field;
333 struct filter_arg *arg;
334
335 arg = allocate_arg();
336
337 switch (type) {
338
339 case EVENT_SQUOTE:
340 case EVENT_DQUOTE:
341 arg->type = FILTER_ARG_VALUE;
342 arg->value.type =
343 type == EVENT_DQUOTE ? FILTER_STRING : FILTER_CHAR;
344 arg->value.str = strdup(token);
345 if (!arg->value.str)
346 die("malloc string");
347 break;
348 case EVENT_ITEM:
349 /* if it is a number, then convert it */
350 if (isdigit(token[0])) {
351 arg->type = FILTER_ARG_VALUE;
352 arg->value.type = FILTER_NUMBER;
353 arg->value.val = strtoull(token, NULL, 0);
354 break;
355 }
356 /* Consider this a field */
357 field = pevent_find_any_field(event, token);
358 if (!field) {
359 if (strcmp(token, COMM) != 0) {
360 /* not a field, Make it false */
361 arg->type = FILTER_ARG_BOOLEAN;
362 arg->boolean.value = FILTER_FALSE;
363 break;
364 }
365 /* If token is 'COMM' then it is special */
366 field = &comm;
367 }
368 arg->type = FILTER_ARG_FIELD;
369 arg->field.field = field;
370 break;
371 default:
372 free_arg(arg);
373 show_error(error_str, "expected a value but found %s",
374 token);
375 return NULL;
376 }
377 return arg;
378}
379
380static struct filter_arg *
381create_arg_op(enum filter_op_type btype)
382{
383 struct filter_arg *arg;
384
385 arg = allocate_arg();
386 arg->type = FILTER_ARG_OP;
387 arg->op.type = btype;
388
389 return arg;
390}
391
392static struct filter_arg *
393create_arg_exp(enum filter_exp_type etype)
394{
395 struct filter_arg *arg;
396
397 arg = allocate_arg();
398 arg->type = FILTER_ARG_EXP;
399 arg->op.type = etype;
400
401 return arg;
402}
403
404static struct filter_arg *
405create_arg_cmp(enum filter_exp_type etype)
406{
407 struct filter_arg *arg;
408
409 arg = allocate_arg();
410 /* Use NUM and change if necessary */
411 arg->type = FILTER_ARG_NUM;
412 arg->op.type = etype;
413
414 return arg;
415}
416
/*
 * add_right - attach @arg as the right operand of operator @op.
 *
 * For EXP and boolean OP nodes this is a simple assignment.  For a NUM
 * (comparison) node the right side must be a value or field; when the
 * value is a string, the whole @op node is rewritten in place into a
 * FILTER_ARG_STR node (string match or compiled regex) and the
 * original left/right arguments are freed.  A one-character 'char'
 * literal compared with a non-regex operator is treated as a number.
 *
 * Returns 0 on success, -1 with *error_str set on failure.
 *
 * NOTE(review): when @arg is a FIELD, the switch below still reads
 * arg->value.type, which aliases the field pointer in the union —
 * field-vs-field compares appear to fall into the default "Syntax
 * error" path by luck.  Confirm before relying on that.
 */
static int add_right(struct filter_arg *op, struct filter_arg *arg,
		     char **error_str)
{
	struct filter_arg *left;
	char *str;
	int op_type;
	int ret;

	switch (op->type) {
	case FILTER_ARG_EXP:
		if (op->exp.right)
			goto out_fail;
		op->exp.right = arg;
		break;

	case FILTER_ARG_OP:
		if (op->op.right)
			goto out_fail;
		op->op.right = arg;
		break;

	case FILTER_ARG_NUM:
		if (op->op.right)
			goto out_fail;
		/*
		 * The arg must be num, str, or field
		 */
		switch (arg->type) {
		case FILTER_ARG_VALUE:
		case FILTER_ARG_FIELD:
			break;
		default:
			show_error(error_str,
				   "Illegal rvalue");
			return -1;
		}

		/*
		 * Depending on the type, we may need to
		 * convert this to a string or regex.
		 */
		switch (arg->value.type) {
		case FILTER_CHAR:
			/*
			 * A char should be converted to number if
			 * the string is 1 byte, and the compare
			 * is not a REGEX.
			 */
			if (strlen(arg->value.str) == 1 &&
			    op->num.type != FILTER_CMP_REGEX &&
			    op->num.type != FILTER_CMP_NOT_REGEX) {
				arg->value.type = FILTER_NUMBER;
				goto do_int;
			}
			/* fall through */
		case FILTER_STRING:

			/* convert op to a string arg */
			op_type = op->num.type;
			left = op->num.left;
			str = arg->value.str;

			/* reset the op for the new field */
			memset(op, 0, sizeof(*op));

			/*
			 * If left arg was a field not found then
			 * NULL the entire op.
			 */
			if (left->type == FILTER_ARG_BOOLEAN) {
				free_arg(left);
				free_arg(arg);
				op->type = FILTER_ARG_BOOLEAN;
				op->boolean.value = FILTER_FALSE;
				break;
			}

			/* Left arg must be a field */
			if (left->type != FILTER_ARG_FIELD) {
				show_error(error_str,
					   "Illegal lvalue for string comparison");
				return -1;
			}

			/* Make sure this is a valid string compare */
			switch (op_type) {
			case FILTER_CMP_EQ:
				op_type = FILTER_CMP_MATCH;
				break;
			case FILTER_CMP_NE:
				op_type = FILTER_CMP_NOT_MATCH;
				break;

			case FILTER_CMP_REGEX:
			case FILTER_CMP_NOT_REGEX:
				ret = regcomp(&op->str.reg, str, REG_ICASE|REG_NOSUB);
				if (ret) {
					show_error(error_str,
						   "RegEx '%s' did not compute",
						   str);
					return -1;
				}
				break;
			default:
				show_error(error_str,
					   "Illegal comparison for string");
				return -1;
			}

			op->type = FILTER_ARG_STR;
			op->str.type = op_type;
			op->str.field = left->field.field;
			op->str.val = strdup(str);
			if (!op->str.val)
				die("malloc string");
			/*
			 * Need a buffer to copy data for tests
			 */
			op->str.buffer = malloc_or_die(op->str.field->size + 1);
			/* Null terminate this buffer */
			op->str.buffer[op->str.field->size] = 0;

			/* We no longer have left or right args */
			free_arg(arg);
			free_arg(left);

			break;

		case FILTER_NUMBER:

 do_int:
			switch (op->num.type) {
			case FILTER_CMP_REGEX:
			case FILTER_CMP_NOT_REGEX:
				show_error(error_str,
					   "Op not allowed with integers");
				return -1;

			default:
				break;
			}

			/* numeric compare */
			op->num.right = arg;
			break;
		default:
			goto out_fail;
		}
		break;
	default:
		goto out_fail;
	}

	return 0;

 out_fail:
	show_error(error_str,
		   "Syntax error");
	return -1;
}
577
578static struct filter_arg *
579rotate_op_right(struct filter_arg *a, struct filter_arg *b)
580{
581 struct filter_arg *arg;
582
583 arg = a->op.right;
584 a->op.right = b;
585 return arg;
586}
587
/*
 * add_left - attach @arg as the left operand of operator @op.
 *
 * When @arg is itself a boolean OP, precedence is restored by rotation:
 * the OP's right child becomes @op's left operand and @op takes its
 * place as the OP's right child (EXP/compare bind tighter than &&/||).
 *
 * Returns 0 on success, -1 when @arg is not a legal left operand
 * (compares require a field, or a boolean standing in for an unknown
 * field).
 */
static int add_left(struct filter_arg *op, struct filter_arg *arg)
{
	switch (op->type) {
	case FILTER_ARG_EXP:
		if (arg->type == FILTER_ARG_OP)
			arg = rotate_op_right(arg, op);
		op->exp.left = arg;
		break;

	case FILTER_ARG_OP:
		op->op.left = arg;
		break;
	case FILTER_ARG_NUM:
		if (arg->type == FILTER_ARG_OP)
			arg = rotate_op_right(arg, op);

		/* left arg of compares must be a field */
		if (arg->type != FILTER_ARG_FIELD &&
		    arg->type != FILTER_ARG_BOOLEAN)
			return -1;
		op->num.left = arg;
		break;
	default:
		return -1;
	}
	return 0;
}
615
/* Classification of an operator token, as returned by process_op(). */
enum op_type {
	OP_NONE,	/* token is not an operator */
	OP_BOOL,	/* && or || */
	OP_NOT,		/* ! */
	OP_EXP,		/* arithmetic/bitwise operator */
	OP_CMP,		/* comparison operator */
};
623
/*
 * process_op - classify an operator token.
 *
 * Sets exactly one of @btype/@etype/@ctype to the operator the token
 * names and returns its class (OP_NONE when the token is no operator).
 * @btype is preset to FILTER_OP_NOT as a "no boolean op" sentinel;
 * a real "!" is reported via the OP_NOT return value instead.
 */
static enum op_type process_op(const char *token,
			       enum filter_op_type *btype,
			       enum filter_cmp_type *ctype,
			       enum filter_exp_type *etype)
{
	*btype = FILTER_OP_NOT;
	*etype = FILTER_EXP_NONE;
	*ctype = FILTER_CMP_NONE;

	if (strcmp(token, "&&") == 0)
		*btype = FILTER_OP_AND;
	else if (strcmp(token, "||") == 0)
		*btype = FILTER_OP_OR;
	else if (strcmp(token, "!") == 0)
		return OP_NOT;

	if (*btype != FILTER_OP_NOT)
		return OP_BOOL;

	/* Check for value expressions */
	if (strcmp(token, "+") == 0) {
		*etype = FILTER_EXP_ADD;
	} else if (strcmp(token, "-") == 0) {
		*etype = FILTER_EXP_SUB;
	} else if (strcmp(token, "*") == 0) {
		*etype = FILTER_EXP_MUL;
	} else if (strcmp(token, "/") == 0) {
		*etype = FILTER_EXP_DIV;
	} else if (strcmp(token, "%") == 0) {
		*etype = FILTER_EXP_MOD;
	} else if (strcmp(token, ">>") == 0) {
		*etype = FILTER_EXP_RSHIFT;
	} else if (strcmp(token, "<<") == 0) {
		*etype = FILTER_EXP_LSHIFT;
	} else if (strcmp(token, "&") == 0) {
		*etype = FILTER_EXP_AND;
	} else if (strcmp(token, "|") == 0) {
		*etype = FILTER_EXP_OR;
	} else if (strcmp(token, "^") == 0) {
		*etype = FILTER_EXP_XOR;
	} else if (strcmp(token, "~") == 0)
		*etype = FILTER_EXP_NOT;

	if (*etype != FILTER_EXP_NONE)
		return OP_EXP;

	/* Check for compares */
	if (strcmp(token, "==") == 0)
		*ctype = FILTER_CMP_EQ;
	else if (strcmp(token, "!=") == 0)
		*ctype = FILTER_CMP_NE;
	else if (strcmp(token, "<") == 0)
		*ctype = FILTER_CMP_LT;
	else if (strcmp(token, ">") == 0)
		*ctype = FILTER_CMP_GT;
	else if (strcmp(token, "<=") == 0)
		*ctype = FILTER_CMP_LE;
	else if (strcmp(token, ">=") == 0)
		*ctype = FILTER_CMP_GE;
	else if (strcmp(token, "=~") == 0)
		*ctype = FILTER_CMP_REGEX;
	else if (strcmp(token, "!~") == 0)
		*ctype = FILTER_CMP_NOT_REGEX;
	else
		return OP_NONE;

	return OP_CMP;
}
692
693static int check_op_done(struct filter_arg *arg)
694{
695 switch (arg->type) {
696 case FILTER_ARG_EXP:
697 return arg->exp.right != NULL;
698
699 case FILTER_ARG_OP:
700 return arg->op.right != NULL;
701
702 case FILTER_ARG_NUM:
703 return arg->num.right != NULL;
704
705 case FILTER_ARG_STR:
706 /* A string conversion is always done */
707 return 1;
708
709 case FILTER_ARG_BOOLEAN:
710 /* field not found, is ok */
711 return 1;
712
713 default:
714 return 0;
715 }
716}
717
/* Result of constant-folding a filter subtree (see test_arg()). */
enum filter_vals {
	FILTER_VAL_NORM,	/* must be evaluated against records */
	FILTER_VAL_FALSE,	/* constant false */
	FILTER_VAL_TRUE,	/* constant true */
};
723
/*
 * reparent_op_arg - splice @arg into @parent in place of @old_child.
 *
 * @arg must currently be a direct child of @old_child; its sibling and
 * @old_child itself are freed.  When @old_child is the tree root
 * (@parent == @old_child), @arg's contents are copied over the root
 * instead.  Used by test_arg() to drop constant-folded subtrees.
 *
 * NOTE(review): not declared static although it appears to be used
 * only within this file — confirm before changing linkage.
 */
void reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
		     struct filter_arg *arg)
{
	struct filter_arg *other_child;
	struct filter_arg **ptr;

	if (parent->type != FILTER_ARG_OP &&
	    arg->type != FILTER_ARG_OP)
		die("can not reparent other than OP");

	/* Get the sibling */
	if (old_child->op.right == arg) {
		ptr = &old_child->op.right;
		other_child = old_child->op.left;
	} else if (old_child->op.left == arg) {
		ptr = &old_child->op.left;
		other_child = old_child->op.right;
	} else
		die("Error in reparent op, find other child");

	/* Detach arg from old_child */
	*ptr = NULL;

	/* Check for root */
	if (parent == old_child) {
		free_arg(other_child);
		*parent = *arg;
		/* Free arg without recursion */
		free(arg);
		return;
	}

	if (parent->op.right == old_child)
		ptr = &parent->op.right;
	else if (parent->op.left == old_child)
		ptr = &parent->op.left;
	else
		die("Error in reparent op");
	*ptr = arg;

	free_arg(old_child);
}
766
/*
 * test_arg - constant-fold a filter subtree.
 *
 * Returns FILTER_VAL_NORM when @arg needs run-time evaluation, or
 * FILTER_VAL_TRUE/FALSE when it reduces to a constant (which happens
 * when an unknown field was replaced by a constant boolean).  While
 * walking, boolean AND/OR nodes with one constant side are simplified
 * in place via reparent_op_arg(), so the tree may be mutated.
 *
 * NOTE(review): like reparent_op_arg(), not declared static despite
 * apparent file-local use.
 */
enum filter_vals test_arg(struct filter_arg *parent, struct filter_arg *arg)
{
	enum filter_vals lval, rval;

	switch (arg->type) {

	/* bad case */
	case FILTER_ARG_BOOLEAN:
		return FILTER_VAL_FALSE + arg->boolean.value;

	/* good cases: */
	case FILTER_ARG_STR:
	case FILTER_ARG_VALUE:
	case FILTER_ARG_FIELD:
		return FILTER_VAL_NORM;

	case FILTER_ARG_EXP:
		lval = test_arg(arg, arg->exp.left);
		if (lval != FILTER_VAL_NORM)
			return lval;
		rval = test_arg(arg, arg->exp.right);
		if (rval != FILTER_VAL_NORM)
			return rval;
		return FILTER_VAL_NORM;

	case FILTER_ARG_NUM:
		lval = test_arg(arg, arg->num.left);
		if (lval != FILTER_VAL_NORM)
			return lval;
		rval = test_arg(arg, arg->num.right);
		if (rval != FILTER_VAL_NORM)
			return rval;
		return FILTER_VAL_NORM;

	case FILTER_ARG_OP:
		/* NOT has no left operand; only check left for AND/OR. */
		if (arg->op.type != FILTER_OP_NOT) {
			lval = test_arg(arg, arg->op.left);
			switch (lval) {
			case FILTER_VAL_NORM:
				break;
			case FILTER_VAL_TRUE:
				if (arg->op.type == FILTER_OP_OR)
					return FILTER_VAL_TRUE;
				rval = test_arg(arg, arg->op.right);
				if (rval != FILTER_VAL_NORM)
					return rval;

				/* TRUE && x  ==>  x */
				reparent_op_arg(parent, arg, arg->op.right);
				return FILTER_VAL_NORM;

			case FILTER_VAL_FALSE:
				if (arg->op.type == FILTER_OP_AND)
					return FILTER_VAL_FALSE;
				rval = test_arg(arg, arg->op.right);
				if (rval != FILTER_VAL_NORM)
					return rval;

				/* FALSE || x  ==>  x */
				reparent_op_arg(parent, arg, arg->op.right);
				return FILTER_VAL_NORM;
			}
		}

		rval = test_arg(arg, arg->op.right);
		switch (rval) {
		case FILTER_VAL_NORM:
			break;
		case FILTER_VAL_TRUE:
			if (arg->op.type == FILTER_OP_OR)
				return FILTER_VAL_TRUE;
			if (arg->op.type == FILTER_OP_NOT)
				return FILTER_VAL_FALSE;

			/* x && TRUE  ==>  x */
			reparent_op_arg(parent, arg, arg->op.left);
			return FILTER_VAL_NORM;

		case FILTER_VAL_FALSE:
			if (arg->op.type == FILTER_OP_AND)
				return FILTER_VAL_FALSE;
			if (arg->op.type == FILTER_OP_NOT)
				return FILTER_VAL_TRUE;

			/* x || FALSE  ==>  x */
			reparent_op_arg(parent, arg, arg->op.left);
			return FILTER_VAL_NORM;
		}

		return FILTER_VAL_NORM;
	default:
		die("bad arg in filter tree");
	}
	return FILTER_VAL_NORM;
}
858
859/* Remove any unknown event fields */
860static struct filter_arg *collapse_tree(struct filter_arg *arg)
861{
862 enum filter_vals ret;
863
864 ret = test_arg(arg, arg);
865 switch (ret) {
866 case FILTER_VAL_NORM:
867 return arg;
868
869 case FILTER_VAL_TRUE:
870 case FILTER_VAL_FALSE:
871 free_arg(arg);
872 arg = allocate_arg();
873 arg->type = FILTER_ARG_BOOLEAN;
874 arg->boolean.value = ret == FILTER_VAL_TRUE;
875 }
876
877 return arg;
878}
879
/*
 * Parse one parenthesis level of a filter string into a filter_arg tree.
 *
 * @event: event whose fields the filter may reference
 * @parg: output; set to the root of the parsed (and collapsed) tree
 * @error_str: output; set to an error message on failure
 * @not: non-zero when parsing the single operand of a '!' operator
 *
 * Tokens are pulled from the buffer set up by pevent_buffer_init().
 * Returns 0 on success, 1 when a ')' closed this level (only valid for
 * recursive calls), and -1 on error.
 */
static int
process_filter(struct event_format *event, struct filter_arg **parg,
	       char **error_str, int not)
{
	enum event_type type;
	char *token = NULL;
	struct filter_arg *current_op = NULL;	/* pending boolean (&&/||) op */
	struct filter_arg *current_exp = NULL;	/* pending expression/comparison */
	struct filter_arg *left_item = NULL;	/* item waiting for an operator */
	struct filter_arg *arg = NULL;
	enum op_type op_type;
	enum filter_op_type btype;
	enum filter_exp_type etype;
	enum filter_cmp_type ctype;
	int ret;

	*parg = NULL;

	do {
		free(token);
		type = read_token(&token);
		switch (type) {
		case EVENT_SQUOTE:
		case EVENT_DQUOTE:
		case EVENT_ITEM:
			/* A field name or literal value. */
			arg = create_arg_item(event, token, type, error_str);
			if (!arg)
				goto fail;
			if (!left_item)
				left_item = arg;
			else if (current_exp) {
				/* This item is the right side of an exp/cmp. */
				ret = add_right(current_exp, arg, error_str);
				if (ret < 0)
					goto fail;
				left_item = NULL;
				/* A NOT takes only a single expression */
				if (not) {
					arg = NULL;
					if (current_op)
						goto fail_print;
					free(token);
					*parg = current_exp;
					return 0;
				}
			} else
				goto fail_print;
			arg = NULL;
			break;

		case EVENT_DELIM:
			if (*token == ',') {
				show_error(error_str,
					   "Illegal token ','");
				goto fail;
			}

			if (*token == '(') {
				if (left_item) {
					show_error(error_str,
						   "Open paren can not come after item");
					goto fail;
				}
				if (current_exp) {
					show_error(error_str,
						   "Open paren can not come after expression");
					goto fail;
				}

				/* Recurse for the parenthesized sub-filter. */
				ret = process_filter(event, &arg, error_str, 0);
				if (ret != 1) {
					if (ret == 0)
						show_error(error_str,
							   "Unbalanced number of '('");
					goto fail;
				}
				ret = 0;

				/* A not wants just one expression */
				if (not) {
					if (current_op)
						goto fail_print;
					*parg = arg;
					return 0;
				}

				if (current_op)
					ret = add_right(current_op, arg, error_str);
				else
					current_exp = arg;

				if (ret < 0)
					goto fail;

			} else { /* ')' */
				if (!current_op && !current_exp)
					goto fail_print;

				/* Make sure everything is finished at this level */
				if (current_exp && !check_op_done(current_exp))
					goto fail_print;
				if (current_op && !check_op_done(current_op))
					goto fail_print;

				/* Hand the finished sub-tree back to the caller. */
				if (current_op)
					*parg = current_op;
				else
					*parg = current_exp;
				return 1;
			}
			break;

		case EVENT_OP:
			op_type = process_op(token, &btype, &ctype, &etype);

			/* All expect a left arg except for NOT */
			switch (op_type) {
			case OP_BOOL:
				/* Logic ops need a left expression */
				if (!current_exp && !current_op)
					goto fail_print;
				/* fall through */
			case OP_NOT:
				/* logic only processes ops and exp */
				if (left_item)
					goto fail_print;
				break;
			case OP_EXP:
			case OP_CMP:
				if (!left_item)
					goto fail_print;
				break;
			case OP_NONE:
				show_error(error_str,
					   "Unknown op token %s", token);
				goto fail;
			}

			ret = 0;
			switch (op_type) {
			case OP_BOOL:
				/* && or ||: what we built so far becomes the left side. */
				arg = create_arg_op(btype);
				if (current_op)
					ret = add_left(arg, current_op);
				else
					ret = add_left(arg, current_exp);
				current_op = arg;
				current_exp = NULL;
				break;

			case OP_NOT:
				/* '!': recurse (not=1) to parse exactly one operand. */
				arg = create_arg_op(btype);
				if (current_op)
					ret = add_right(current_op, arg, error_str);
				if (ret < 0)
					goto fail;
				current_exp = arg;
				ret = process_filter(event, &arg, error_str, 1);
				if (ret < 0)
					goto fail;
				ret = add_right(current_exp, arg, error_str);
				if (ret < 0)
					goto fail;
				break;

			case OP_EXP:
			case OP_CMP:
				/* Arithmetic or comparison: consumes left_item. */
				if (op_type == OP_EXP)
					arg = create_arg_exp(etype);
				else
					arg = create_arg_cmp(ctype);

				if (current_op)
					ret = add_right(current_op, arg, error_str);
				if (ret < 0)
					goto fail;
				ret = add_left(arg, left_item);
				if (ret < 0) {
					/* arg was consumed by add_left; don't double free. */
					arg = NULL;
					goto fail_print;
				}
				current_exp = arg;
				break;
			default:
				break;
			}
			arg = NULL;
			if (ret < 0)
				goto fail_print;
			break;
		case EVENT_NONE:
			break;
		default:
			goto fail_print;
		}
	} while (type != EVENT_NONE);

	/* End of input: this must be the top-level (non-recursive) call. */
	if (!current_op && !current_exp)
		goto fail_print;

	if (!current_op)
		current_op = current_exp;

	/* Fold constant sub-trees into a single boolean if possible. */
	current_op = collapse_tree(current_op);

	*parg = current_op;

	return 0;

 fail_print:
	show_error(error_str, "Syntax error");
 fail:
	free_arg(current_op);
	free_arg(current_exp);
	free_arg(arg);
	free(token);
	return -1;
}
1097
1098static int
1099process_event(struct event_format *event, const char *filter_str,
1100 struct filter_arg **parg, char **error_str)
1101{
1102 int ret;
1103
1104 pevent_buffer_init(filter_str, strlen(filter_str));
1105
1106 ret = process_filter(event, parg, error_str, 0);
1107 if (ret == 1) {
1108 show_error(error_str,
1109 "Unbalanced number of ')'");
1110 return -1;
1111 }
1112 if (ret < 0)
1113 return ret;
1114
1115 /* If parg is NULL, then make it into FALSE */
1116 if (!*parg) {
1117 *parg = allocate_arg();
1118 (*parg)->type = FILTER_ARG_BOOLEAN;
1119 (*parg)->boolean.value = FILTER_FALSE;
1120 }
1121
1122 return 0;
1123}
1124
1125static int filter_event(struct event_filter *filter,
1126 struct event_format *event,
1127 const char *filter_str, char **error_str)
1128{
1129 struct filter_type *filter_type;
1130 struct filter_arg *arg;
1131 int ret;
1132
1133 if (filter_str) {
1134 ret = process_event(event, filter_str, &arg, error_str);
1135 if (ret < 0)
1136 return ret;
1137
1138 } else {
1139 /* just add a TRUE arg */
1140 arg = allocate_arg();
1141 arg->type = FILTER_ARG_BOOLEAN;
1142 arg->boolean.value = FILTER_TRUE;
1143 }
1144
1145 filter_type = add_filter_type(filter, event->id);
1146 if (filter_type->filter)
1147 free_arg(filter_type->filter);
1148 filter_type->filter = arg;
1149
1150 return 0;
1151}
1152
/**
 * pevent_filter_add_filter_str - add a new filter
 * @filter: the event filter to add to
 * @filter_str: the filter string that contains the filter
 * @error_str: string containing reason for failed filter
 *
 * The string has the form "sys/event[,sys/event...][:filter]":
 * a comma-separated list of system/event selectors, optionally
 * followed by ':' and the filter expression applied to each of them.
 *
 * Returns 0 if the filter was successfully added
 * -1 if there was an error.
 *
 * On error, if @error_str points to a string pointer,
 * it is set to the reason that the filter failed.
 * This string must be freed with "free".
 */
int pevent_filter_add_filter_str(struct event_filter *filter,
				 const char *filter_str,
				 char **error_str)
{
	struct pevent *pevent = filter->pevent;
	struct event_list *event;
	struct event_list *events = NULL;
	const char *filter_start;
	const char *next_event;
	char *this_event;
	char *event_name = NULL;
	char *sys_name = NULL;
	char *sp;
	int rtn = 0;
	int len;
	int ret;

	/* clear buffer to reset show error */
	pevent_buffer_init("", 0);

	if (error_str)
		*error_str = NULL;

	/* Everything after the first ':' is the filter expression. */
	filter_start = strchr(filter_str, ':');
	if (filter_start)
		len = filter_start - filter_str;
	else
		len = strlen(filter_str);


	/* Walk the comma-separated event selectors before the ':'. */
	do {
		next_event = strchr(filter_str, ',');
		if (next_event &&
		    (!filter_start || next_event < filter_start))
			len = next_event - filter_str;
		else if (filter_start)
			len = filter_start - filter_str;
		else
			len = strlen(filter_str);

		/* Copy out one "sys/event" selector. */
		this_event = malloc_or_die(len + 1);
		memcpy(this_event, filter_str, len);
		this_event[len] = 0;

		if (next_event)
			next_event++;

		filter_str = next_event;

		/* Split "sys/event"; event_name stays NULL for "sys" alone. */
		sys_name = strtok_r(this_event, "/", &sp);
		event_name = strtok_r(NULL, "/", &sp);

		if (!sys_name) {
			show_error(error_str, "No filter found");
			/* This can only happen when events is NULL, but still */
			free_events(events);
			free(this_event);
			return -1;
		}

		/* Find this event */
		ret = find_event(pevent, &events, strim(sys_name), strim(event_name));
		if (ret < 0) {
			if (event_name)
				show_error(error_str,
					   "No event found under '%s.%s'",
					   sys_name, event_name);
			else
				show_error(error_str,
					   "No event found under '%s'",
					   sys_name);
			free_events(events);
			free(this_event);
			return -1;
		}
		free(this_event);
	} while (filter_str);

	/* Skip the ':' */
	if (filter_start)
		filter_start++;

	/* filter starts here; apply it to every matched event. */
	for (event = events; event; event = event->next) {
		ret = filter_event(filter, event->event, filter_start,
				   error_str);
		/* Failures are returned if a parse error happened */
		if (ret < 0)
			rtn = ret;

		if (ret >= 0 && pevent->test_filters) {
			/* Debug mode: print each filter back as a string. */
			char *test;
			test = pevent_filter_make_string(filter, event->event->id);
			printf(" '%s: %s'\n", event->event->name, test);
			free(test);
		}
	}

	free_events(events);

	/* test_filters mode exits after printing all filters. */
	if (rtn >= 0 && pevent->test_filters)
		exit(0);

	return rtn;
}
1271
/* Release the argument tree owned by a filter_type entry (not the entry itself). */
static void free_filter_type(struct filter_type *filter_type)
{
	free_arg(filter_type->filter);
}
1276
/**
 * pevent_filter_remove_event - remove a filter for an event
 * @filter: the event filter to remove from
 * @event_id: the event to remove a filter for
 *
 * Removes the filter saved for an event defined by @event_id
 * from the @filter.
 *
 * Returns 1: if an event was removed
 *         0: if the event was not found
 */
int pevent_filter_remove_event(struct event_filter *filter,
			       int event_id)
{
	struct filter_type *filter_type;
	unsigned long len;

	if (!filter->filters)
		return 0;

	filter_type = find_filter_type(filter, event_id);

	if (!filter_type)
		return 0;

	free_filter_type(filter_type);

	/*
	 * The filter_type points into the event_filters array.
	 * Compute how many bytes follow the removed entry so the
	 * tail of the array can be shifted down over it.
	 */
	len = (unsigned long)(filter->event_filters + filter->filters) -
		(unsigned long)(filter_type + 1);

	memmove(filter_type, filter_type + 1, len);
	filter->filters--;

	/* Zero the now-unused last slot so it holds no stale pointers. */
	memset(&filter->event_filters[filter->filters], 0,
	       sizeof(*filter_type));

	return 1;
}
1316
1317/**
1318 * pevent_filter_reset - clear all filters in a filter
1319 * @filter: the event filter to reset
1320 *
1321 * Removes all filters from a filter and resets it.
1322 */
1323void pevent_filter_reset(struct event_filter *filter)
1324{
1325 int i;
1326
1327 for (i = 0; i < filter->filters; i++)
1328 free_filter_type(&filter->event_filters[i]);
1329
1330 free(filter->event_filters);
1331 filter->filters = 0;
1332 filter->event_filters = NULL;
1333}
1334
/* Free an event_filter: drop the pevent reference, free all per-event
 * filter trees, then free the filter struct itself. */
void pevent_filter_free(struct event_filter *filter)
{
	pevent_unref(filter->pevent);

	pevent_filter_reset(filter);

	free(filter);
}
1343
1344static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg);
1345
/*
 * Copy one filter_type entry from @source into @filter.  The two
 * filters may be built on different pevent handles, so the event is
 * re-resolved by system/name and the filter tree is round-tripped
 * through its string form.  Returns 0 on success, -1 on failure.
 */
static int copy_filter_type(struct event_filter *filter,
			    struct event_filter *source,
			    struct filter_type *filter_type)
{
	struct filter_arg *arg;
	struct event_format *event;
	const char *sys;
	const char *name;
	char *str;

	/* Can't assume that the pevent's are the same */
	sys = filter_type->event->system;
	name = filter_type->event->name;
	event = pevent_find_event_by_name(filter->pevent, sys, name);
	if (!event)
		return -1;

	str = arg_to_str(source, filter_type->filter);
	if (!str)
		return -1;

	if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) {
		/* Trivial filter: rebuild the boolean node directly
		 * instead of re-parsing the string. */
		arg = allocate_arg();
		arg->type = FILTER_ARG_BOOLEAN;
		if (strcmp(str, "TRUE") == 0)
			arg->boolean.value = 1;
		else
			arg->boolean.value = 0;

		filter_type = add_filter_type(filter, event->id);
		filter_type->filter = arg;

		free(str);
		return 0;
	}

	/* Non-trivial: re-parse the stringified filter for the new event. */
	filter_event(filter, event, str, NULL);
	free(str);

	return 0;
}
1388
1389/**
1390 * pevent_filter_copy - copy a filter using another filter
1391 * @dest - the filter to copy to
1392 * @source - the filter to copy from
1393 *
1394 * Returns 0 on success and -1 if not all filters were copied
1395 */
1396int pevent_filter_copy(struct event_filter *dest, struct event_filter *source)
1397{
1398 int ret = 0;
1399 int i;
1400
1401 pevent_filter_reset(dest);
1402
1403 for (i = 0; i < source->filters; i++) {
1404 if (copy_filter_type(dest, source, &source->event_filters[i]))
1405 ret = -1;
1406 }
1407 return ret;
1408}
1409
1410
/**
 * pevent_update_trivial - update the trivial filters with the given filter
 * @dest - the filter to update
 * @source - the filter as the source of the update
 * @type - the type of trivial filter to update.
 *
 * Scan dest for trivial events matching @type to replace with the source.
 *
 * Returns 0 on success and -1 if there was a problem updating, but
 * events may have still been updated on error.
 */
int pevent_update_trivial(struct event_filter *dest, struct event_filter *source,
			  enum filter_trivial_type type)
{
	struct pevent *src_pevent;
	struct pevent *dest_pevent;
	struct event_format *event;
	struct filter_type *filter_type;
	struct filter_arg *arg;
	char *str;
	int i;

	src_pevent = source->pevent;
	dest_pevent = dest->pevent;

	/* Do nothing if either of the filters has nothing to filter */
	if (!dest->filters || !source->filters)
		return 0;

	for (i = 0; i < dest->filters; i++) {
		filter_type = &dest->event_filters[i];
		arg = filter_type->filter;
		/* Only trivial (plain boolean) filters are candidates. */
		if (arg->type != FILTER_ARG_BOOLEAN)
			continue;
		/* Skip booleans that don't match the requested kind. */
		if ((arg->boolean.value && type == FILTER_TRIVIAL_FALSE) ||
		    (!arg->boolean.value && type == FILTER_TRIVIAL_TRUE))
			continue;

		event = filter_type->event;

		if (src_pevent != dest_pevent) {
			/* do a look up: the event ids may differ between pevents */
			event = pevent_find_event_by_name(src_pevent,
							  event->system,
							  event->name);
			if (!event)
				return -1;
		}

		/* Pull the source's filter for this event as a string. */
		str = pevent_filter_make_string(source, event->id);
		if (!str)
			continue;

		/* Don't bother if the filter is trivial too */
		if (strcmp(str, "TRUE") != 0 && strcmp(str, "FALSE") != 0)
			filter_event(dest, event, str, NULL);
		free(str);
	}
	return 0;
}
1471
1472/**
1473 * pevent_filter_clear_trivial - clear TRUE and FALSE filters
1474 * @filter: the filter to remove trivial filters from
1475 * @type: remove only true, false, or both
1476 *
1477 * Removes filters that only contain a TRUE or FALES boolean arg.
1478 */
1479void pevent_filter_clear_trivial(struct event_filter *filter,
1480 enum filter_trivial_type type)
1481{
1482 struct filter_type *filter_type;
1483 int count = 0;
1484 int *ids;
1485 int i;
1486
1487 if (!filter->filters)
1488 return;
1489
1490 /*
1491 * Two steps, first get all ids with trivial filters.
1492 * then remove those ids.
1493 */
1494 for (i = 0; i < filter->filters; i++) {
1495 filter_type = &filter->event_filters[i];
1496 if (filter_type->filter->type != FILTER_ARG_BOOLEAN)
1497 continue;
1498 switch (type) {
1499 case FILTER_TRIVIAL_FALSE:
1500 if (filter_type->filter->boolean.value)
1501 continue;
1502 case FILTER_TRIVIAL_TRUE:
1503 if (!filter_type->filter->boolean.value)
1504 continue;
1505 default:
1506 break;
1507 }
1508 if (count)
1509 ids = realloc(ids, sizeof(*ids) * (count + 1));
1510 else
1511 ids = malloc(sizeof(*ids));
1512 if (!ids)
1513 die("Can't allocate ids");
1514 ids[count++] = filter_type->event_id;
1515 }
1516
1517 if (!count)
1518 return;
1519
1520 for (i = 0; i < count; i++)
1521 pevent_filter_remove_event(filter, ids[i]);
1522
1523 free(ids);
1524}
1525
1526/**
1527 * pevent_filter_event_has_trivial - return true event contains trivial filter
1528 * @filter: the filter with the information
1529 * @event_id: the id of the event to test
1530 * @type: trivial type to test for (TRUE, FALSE, EITHER)
1531 *
1532 * Returns 1 if the event contains a matching trivial type
1533 * otherwise 0.
1534 */
1535int pevent_filter_event_has_trivial(struct event_filter *filter,
1536 int event_id,
1537 enum filter_trivial_type type)
1538{
1539 struct filter_type *filter_type;
1540
1541 if (!filter->filters)
1542 return 0;
1543
1544 filter_type = find_filter_type(filter, event_id);
1545
1546 if (!filter_type)
1547 return 0;
1548
1549 if (filter_type->filter->type != FILTER_ARG_BOOLEAN)
1550 return 0;
1551
1552 switch (type) {
1553 case FILTER_TRIVIAL_FALSE:
1554 return !filter_type->filter->boolean.value;
1555
1556 case FILTER_TRIVIAL_TRUE:
1557 return filter_type->filter->boolean.value;
1558 default:
1559 return 1;
1560 }
1561}
1562
1563static int test_filter(struct event_format *event,
1564 struct filter_arg *arg, struct pevent_record *record);
1565
1566static const char *
1567get_comm(struct event_format *event, struct pevent_record *record)
1568{
1569 const char *comm;
1570 int pid;
1571
1572 pid = pevent_data_pid(event->pevent, record);
1573 comm = pevent_data_comm_from_pid(event->pevent, pid);
1574 return comm;
1575}
1576
1577static unsigned long long
1578get_value(struct event_format *event,
1579 struct format_field *field, struct pevent_record *record)
1580{
1581 unsigned long long val;
1582
1583 /* Handle our dummy "comm" field */
1584 if (field == &comm) {
1585 const char *name;
1586
1587 name = get_comm(event, record);
1588 return (unsigned long long)name;
1589 }
1590
1591 pevent_read_number_field(field, record->data, &val);
1592
1593 if (!(field->flags & FIELD_IS_SIGNED))
1594 return val;
1595
1596 switch (field->size) {
1597 case 1:
1598 return (char)val;
1599 case 2:
1600 return (short)val;
1601 case 4:
1602 return (int)val;
1603 case 8:
1604 return (long long)val;
1605 }
1606 return val;
1607}
1608
1609static unsigned long long
1610get_arg_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record);
1611
1612static unsigned long long
1613get_exp_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record)
1614{
1615 unsigned long long lval, rval;
1616
1617 lval = get_arg_value(event, arg->exp.left, record);
1618 rval = get_arg_value(event, arg->exp.right, record);
1619
1620 switch (arg->exp.type) {
1621 case FILTER_EXP_ADD:
1622 return lval + rval;
1623
1624 case FILTER_EXP_SUB:
1625 return lval - rval;
1626
1627 case FILTER_EXP_MUL:
1628 return lval * rval;
1629
1630 case FILTER_EXP_DIV:
1631 return lval / rval;
1632
1633 case FILTER_EXP_MOD:
1634 return lval % rval;
1635
1636 case FILTER_EXP_RSHIFT:
1637 return lval >> rval;
1638
1639 case FILTER_EXP_LSHIFT:
1640 return lval << rval;
1641
1642 case FILTER_EXP_AND:
1643 return lval & rval;
1644
1645 case FILTER_EXP_OR:
1646 return lval | rval;
1647
1648 case FILTER_EXP_XOR:
1649 return lval ^ rval;
1650
1651 case FILTER_EXP_NOT:
1652 default:
1653 die("error in exp");
1654 }
1655 return 0;
1656}
1657
1658static unsigned long long
1659get_arg_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record)
1660{
1661 switch (arg->type) {
1662 case FILTER_ARG_FIELD:
1663 return get_value(event, arg->field.field, record);
1664
1665 case FILTER_ARG_VALUE:
1666 if (arg->value.type != FILTER_NUMBER)
1667 die("must have number field!");
1668 return arg->value.val;
1669
1670 case FILTER_ARG_EXP:
1671 return get_exp_value(event, arg, record);
1672
1673 default:
1674 die("oops in filter");
1675 }
1676 return 0;
1677}
1678
1679static int test_num(struct event_format *event,
1680 struct filter_arg *arg, struct pevent_record *record)
1681{
1682 unsigned long long lval, rval;
1683
1684 lval = get_arg_value(event, arg->num.left, record);
1685 rval = get_arg_value(event, arg->num.right, record);
1686
1687 switch (arg->num.type) {
1688 case FILTER_CMP_EQ:
1689 return lval == rval;
1690
1691 case FILTER_CMP_NE:
1692 return lval != rval;
1693
1694 case FILTER_CMP_GT:
1695 return lval > rval;
1696
1697 case FILTER_CMP_LT:
1698 return lval < rval;
1699
1700 case FILTER_CMP_GE:
1701 return lval >= rval;
1702
1703 case FILTER_CMP_LE:
1704 return lval <= rval;
1705
1706 default:
1707 /* ?? */
1708 return 0;
1709 }
1710}
1711
1712static const char *get_field_str(struct filter_arg *arg, struct pevent_record *record)
1713{
1714 const char *val = record->data + arg->str.field->offset;
1715
1716 /*
1717 * We need to copy the data since we can't be sure the field
1718 * is null terminated.
1719 */
1720 if (*(val + arg->str.field->size - 1)) {
1721 /* copy it */
1722 memcpy(arg->str.buffer, val, arg->str.field->size);
1723 /* the buffer is already NULL terminated */
1724 val = arg->str.buffer;
1725 }
1726 return val;
1727}
1728
1729static int test_str(struct event_format *event,
1730 struct filter_arg *arg, struct pevent_record *record)
1731{
1732 const char *val;
1733
1734 if (arg->str.field == &comm)
1735 val = get_comm(event, record);
1736 else
1737 val = get_field_str(arg, record);
1738
1739 switch (arg->str.type) {
1740 case FILTER_CMP_MATCH:
1741 return strcmp(val, arg->str.val) == 0;
1742
1743 case FILTER_CMP_NOT_MATCH:
1744 return strcmp(val, arg->str.val) != 0;
1745
1746 case FILTER_CMP_REGEX:
1747 /* Returns zero on match */
1748 return !regexec(&arg->str.reg, val, 0, NULL, 0);
1749
1750 case FILTER_CMP_NOT_REGEX:
1751 return regexec(&arg->str.reg, val, 0, NULL, 0);
1752
1753 default:
1754 /* ?? */
1755 return 0;
1756 }
1757}
1758
1759static int test_op(struct event_format *event,
1760 struct filter_arg *arg, struct pevent_record *record)
1761{
1762 switch (arg->op.type) {
1763 case FILTER_OP_AND:
1764 return test_filter(event, arg->op.left, record) &&
1765 test_filter(event, arg->op.right, record);
1766
1767 case FILTER_OP_OR:
1768 return test_filter(event, arg->op.left, record) ||
1769 test_filter(event, arg->op.right, record);
1770
1771 case FILTER_OP_NOT:
1772 return !test_filter(event, arg->op.right, record);
1773
1774 default:
1775 /* ?? */
1776 return 0;
1777 }
1778}
1779
1780static int test_filter(struct event_format *event,
1781 struct filter_arg *arg, struct pevent_record *record)
1782{
1783 switch (arg->type) {
1784 case FILTER_ARG_BOOLEAN:
1785 /* easy case */
1786 return arg->boolean.value;
1787
1788 case FILTER_ARG_OP:
1789 return test_op(event, arg, record);
1790
1791 case FILTER_ARG_NUM:
1792 return test_num(event, arg, record);
1793
1794 case FILTER_ARG_STR:
1795 return test_str(event, arg, record);
1796
1797 case FILTER_ARG_EXP:
1798 case FILTER_ARG_VALUE:
1799 case FILTER_ARG_FIELD:
1800 /*
1801 * Expressions, fields and values evaluate
1802 * to true if they return non zero
1803 */
1804 return !!get_arg_value(event, arg, record);
1805
1806 default:
1807 die("oops!");
1808 /* ?? */
1809 return 0;
1810 }
1811}
1812
1813/**
1814 * pevent_event_filtered - return true if event has filter
1815 * @filter: filter struct with filter information
1816 * @event_id: event id to test if filter exists
1817 *
1818 * Returns 1 if filter found for @event_id
1819 * otherwise 0;
1820 */
1821int pevent_event_filtered(struct event_filter *filter,
1822 int event_id)
1823{
1824 struct filter_type *filter_type;
1825
1826 if (!filter->filters)
1827 return 0;
1828
1829 filter_type = find_filter_type(filter, event_id);
1830
1831 return filter_type ? 1 : 0;
1832}
1833
1834/**
1835 * pevent_filter_match - test if a record matches a filter
1836 * @filter: filter struct with filter information
1837 * @record: the record to test against the filter
1838 *
1839 * Returns:
1840 * 1 - filter found for event and @record matches
1841 * 0 - filter found for event and @record does not match
1842 * -1 - no filter found for @record's event
1843 * -2 - if no filters exist
1844 */
1845int pevent_filter_match(struct event_filter *filter,
1846 struct pevent_record *record)
1847{
1848 struct pevent *pevent = filter->pevent;
1849 struct filter_type *filter_type;
1850 int event_id;
1851
1852 if (!filter->filters)
1853 return FILTER_NONE;
1854
1855 event_id = pevent_data_type(pevent, record);
1856
1857 filter_type = find_filter_type(filter, event_id);
1858
1859 if (!filter_type)
1860 return FILTER_NOEXIST;
1861
1862 return test_filter(filter_type->event, filter_type->filter, record) ?
1863 FILTER_MATCH : FILTER_MISS;
1864}
1865
/*
 * Stringify a boolean operator node (&&, ||, !), folding constant
 * TRUE/FALSE operands away where possible.  Ownership note: when an
 * operand string is returned directly, its local pointer is set to
 * NULL so the frees at the end don't release it.
 * Returns a malloc'd string, or NULL on failure.
 */
static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
{
	char *str = NULL;
	char *left = NULL;
	char *right = NULL;
	char *op = NULL;
	int left_val = -1;	/* -1 = not a constant, else 0/1 */
	int right_val = -1;
	int val;
	int len;

	switch (arg->op.type) {
	case FILTER_OP_AND:
		op = "&&";
		/* fall through */
	case FILTER_OP_OR:
		if (!op)
			op = "||";

		left = arg_to_str(filter, arg->op.left);
		right = arg_to_str(filter, arg->op.right);
		if (!left || !right)
			break;

		/* Try to consolidate boolean values */
		if (strcmp(left, "TRUE") == 0)
			left_val = 1;
		else if (strcmp(left, "FALSE") == 0)
			left_val = 0;

		if (strcmp(right, "TRUE") == 0)
			right_val = 1;
		else if (strcmp(right, "FALSE") == 0)
			right_val = 0;

		if (left_val >= 0) {
			/* left constant dominates (FALSE&&x, TRUE||x) */
			if ((arg->op.type == FILTER_OP_AND && !left_val) ||
			    (arg->op.type == FILTER_OP_OR && left_val)) {
				/* Just return left value */
				str = left;
				left = NULL;
				break;
			}
			if (right_val >= 0) {
				/* both constant: just evaluate this. */
				val = 0;
				switch (arg->op.type) {
				case FILTER_OP_AND:
					val = left_val && right_val;
					break;
				case FILTER_OP_OR:
					val = left_val || right_val;
					break;
				default:
					break;
				}
				str = malloc_or_die(6);
				if (val)
					strcpy(str, "TRUE");
				else
					strcpy(str, "FALSE");
				break;
			}
		}
		if (right_val >= 0) {
			/* right constant dominates (x&&FALSE, x||TRUE) */
			if ((arg->op.type == FILTER_OP_AND && !right_val) ||
			    (arg->op.type == FILTER_OP_OR && right_val)) {
				/* Just return right value */
				str = right;
				right = NULL;
				break;
			}
			/* The right value is meaningless (x&&TRUE == x) */
			str = left;
			left = NULL;
			break;
		}

		/* No folding possible: "(left) op (right)" */
		len = strlen(left) + strlen(right) + strlen(op) + 10;
		str = malloc_or_die(len);
		snprintf(str, len, "(%s) %s (%s)",
			 left, op, right);
		break;

	case FILTER_OP_NOT:
		op = "!";
		right = arg_to_str(filter, arg->op.right);
		if (!right)
			break;

		/* See if we can consolidate */
		if (strcmp(right, "TRUE") == 0)
			right_val = 1;
		else if (strcmp(right, "FALSE") == 0)
			right_val = 0;
		if (right_val >= 0) {
			/* just return the opposite */
			str = malloc_or_die(6);
			if (right_val)
				strcpy(str, "FALSE");
			else
				strcpy(str, "TRUE");
			break;
		}
		len = strlen(right) + strlen(op) + 3;
		str = malloc_or_die(len);
		snprintf(str, len, "%s(%s)", op, right);
		break;

	default:
		/* unknown op type: return NULL */
		break;
	}
	free(left);
	free(right);
	return str;
}
1983
1984static char *val_to_str(struct event_filter *filter, struct filter_arg *arg)
1985{
1986 char *str;
1987
1988 str = malloc_or_die(30);
1989
1990 snprintf(str, 30, "%lld", arg->value.val);
1991
1992 return str;
1993}
1994
/* Stringify a field node as the field's name.
 * NOTE: strdup() may return NULL; callers of arg_to_str() handle NULL. */
static char *field_to_str(struct event_filter *filter, struct filter_arg *arg)
{
	return strdup(arg->field.field->name);
}
1999
2000static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
2001{
2002 char *lstr;
2003 char *rstr;
2004 char *op;
2005 char *str;
2006 int len;
2007
2008 lstr = arg_to_str(filter, arg->exp.left);
2009 rstr = arg_to_str(filter, arg->exp.right);
2010
2011 switch (arg->exp.type) {
2012 case FILTER_EXP_ADD:
2013 op = "+";
2014 break;
2015 case FILTER_EXP_SUB:
2016 op = "-";
2017 break;
2018 case FILTER_EXP_MUL:
2019 op = "*";
2020 break;
2021 case FILTER_EXP_DIV:
2022 op = "/";
2023 break;
2024 case FILTER_EXP_MOD:
2025 op = "%";
2026 break;
2027 case FILTER_EXP_RSHIFT:
2028 op = ">>";
2029 break;
2030 case FILTER_EXP_LSHIFT:
2031 op = "<<";
2032 break;
2033 case FILTER_EXP_AND:
2034 op = "&";
2035 break;
2036 case FILTER_EXP_OR:
2037 op = "|";
2038 break;
2039 case FILTER_EXP_XOR:
2040 op = "^";
2041 break;
2042 default:
2043 die("oops in exp");
2044 }
2045
2046 len = strlen(op) + strlen(lstr) + strlen(rstr) + 4;
2047 str = malloc_or_die(len);
2048 snprintf(str, len, "%s %s %s", lstr, op, rstr);
2049 free(lstr);
2050 free(rstr);
2051
2052 return str;
2053}
2054
2055static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
2056{
2057 char *lstr;
2058 char *rstr;
2059 char *str = NULL;
2060 char *op = NULL;
2061 int len;
2062
2063 lstr = arg_to_str(filter, arg->num.left);
2064 rstr = arg_to_str(filter, arg->num.right);
2065
2066 switch (arg->num.type) {
2067 case FILTER_CMP_EQ:
2068 op = "==";
2069 /* fall through */
2070 case FILTER_CMP_NE:
2071 if (!op)
2072 op = "!=";
2073 /* fall through */
2074 case FILTER_CMP_GT:
2075 if (!op)
2076 op = ">";
2077 /* fall through */
2078 case FILTER_CMP_LT:
2079 if (!op)
2080 op = "<";
2081 /* fall through */
2082 case FILTER_CMP_GE:
2083 if (!op)
2084 op = ">=";
2085 /* fall through */
2086 case FILTER_CMP_LE:
2087 if (!op)
2088 op = "<=";
2089
2090 len = strlen(lstr) + strlen(op) + strlen(rstr) + 4;
2091 str = malloc_or_die(len);
2092 sprintf(str, "%s %s %s", lstr, op, rstr);
2093
2094 break;
2095
2096 default:
2097 /* ?? */
2098 break;
2099 }
2100
2101 free(lstr);
2102 free(rstr);
2103 return str;
2104}
2105
2106static char *str_to_str(struct event_filter *filter, struct filter_arg *arg)
2107{
2108 char *str = NULL;
2109 char *op = NULL;
2110 int len;
2111
2112 switch (arg->str.type) {
2113 case FILTER_CMP_MATCH:
2114 op = "==";
2115 /* fall through */
2116 case FILTER_CMP_NOT_MATCH:
2117 if (!op)
2118 op = "!=";
2119 /* fall through */
2120 case FILTER_CMP_REGEX:
2121 if (!op)
2122 op = "=~";
2123 /* fall through */
2124 case FILTER_CMP_NOT_REGEX:
2125 if (!op)
2126 op = "!~";
2127
2128 len = strlen(arg->str.field->name) + strlen(op) +
2129 strlen(arg->str.val) + 6;
2130 str = malloc_or_die(len);
2131 snprintf(str, len, "%s %s \"%s\"",
2132 arg->str.field->name,
2133 op, arg->str.val);
2134 break;
2135
2136 default:
2137 /* ?? */
2138 break;
2139 }
2140 return str;
2141}
2142
2143static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg)
2144{
2145 char *str;
2146
2147 switch (arg->type) {
2148 case FILTER_ARG_BOOLEAN:
2149 str = malloc_or_die(6);
2150 if (arg->boolean.value)
2151 strcpy(str, "TRUE");
2152 else
2153 strcpy(str, "FALSE");
2154 return str;
2155
2156 case FILTER_ARG_OP:
2157 return op_to_str(filter, arg);
2158
2159 case FILTER_ARG_NUM:
2160 return num_to_str(filter, arg);
2161
2162 case FILTER_ARG_STR:
2163 return str_to_str(filter, arg);
2164
2165 case FILTER_ARG_VALUE:
2166 return val_to_str(filter, arg);
2167
2168 case FILTER_ARG_FIELD:
2169 return field_to_str(filter, arg);
2170
2171 case FILTER_ARG_EXP:
2172 return exp_to_str(filter, arg);
2173
2174 default:
2175 /* ?? */
2176 return NULL;
2177 }
2178
2179}
2180
2181/**
2182 * pevent_filter_make_string - return a string showing the filter
2183 * @filter: filter struct with filter information
2184 * @event_id: the event id to return the filter string with
2185 *
2186 * Returns a string that displays the filter contents.
2187 * This string must be freed with free(str).
2188 * NULL is returned if no filter is found.
2189 */
2190char *
2191pevent_filter_make_string(struct event_filter *filter, int event_id)
2192{
2193 struct filter_type *filter_type;
2194
2195 if (!filter->filters)
2196 return NULL;
2197
2198 filter_type = find_filter_type(filter, event_id);
2199
2200 if (!filter_type)
2201 return NULL;
2202
2203 return arg_to_str(filter, filter_type->filter);
2204}
2205
2206/**
2207 * pevent_filter_compare - compare two filters and return if they are the same
2208 * @filter1: Filter to compare with @filter2
2209 * @filter2: Filter to compare with @filter1
2210 *
2211 * Returns:
2212 * 1 if the two filters hold the same content.
2213 * 0 if they do not.
2214 */
2215int pevent_filter_compare(struct event_filter *filter1, struct event_filter *filter2)
2216{
2217 struct filter_type *filter_type1;
2218 struct filter_type *filter_type2;
2219 char *str1, *str2;
2220 int result;
2221 int i;
2222
2223 /* Do the easy checks first */
2224 if (filter1->filters != filter2->filters)
2225 return 0;
2226 if (!filter1->filters && !filter2->filters)
2227 return 1;
2228
2229 /*
2230 * Now take a look at each of the events to see if they have the same
2231 * filters to them.
2232 */
2233 for (i = 0; i < filter1->filters; i++) {
2234 filter_type1 = &filter1->event_filters[i];
2235 filter_type2 = find_filter_type(filter2, filter_type1->event_id);
2236 if (!filter_type2)
2237 break;
2238 if (filter_type1->filter->type != filter_type2->filter->type)
2239 break;
2240 switch (filter_type1->filter->type) {
2241 case FILTER_TRIVIAL_FALSE:
2242 case FILTER_TRIVIAL_TRUE:
2243 /* trivial types just need the type compared */
2244 continue;
2245 default:
2246 break;
2247 }
2248 /* The best way to compare complex filters is with strings */
2249 str1 = arg_to_str(filter1, filter_type1->filter);
2250 str2 = arg_to_str(filter2, filter_type2->filter);
2251 result = strcmp(str1, str2) != 0;
2252 free(str1);
2253 free(str2);
2254 if (result)
2255 break;
2256 }
2257
2258 if (i < filter1->filters)
2259 return 0;
2260 return 1;
2261}
2262
diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c
new file mode 100644
index 000000000000..f023a133abb6
--- /dev/null
+++ b/tools/lib/traceevent/parse-utils.c
@@ -0,0 +1,110 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <string.h>
4#include <stdarg.h>
5#include <errno.h>
6
7#define __weak __attribute__((weak))
8
/*
 * __vdie - print a fatal error message and terminate the process.
 * @fmt: printf format string for the message
 * @ap: variadic arguments for @fmt
 *
 * If errno is set, a perror("trace-cmd") line is printed first and
 * errno becomes the exit status; otherwise the process exits with -1
 * (reported as 255 on POSIX systems).  Never returns.
 */
void __vdie(const char *fmt, va_list ap)
{
	int ret = errno;

	if (errno)
		perror("trace-cmd");
	else
		ret = -1;

	fprintf(stderr, " ");
	vfprintf(stderr, fmt, ap);

	fprintf(stderr, "\n");
	exit(ret);
}
24
/* Fatal-error entry point: forwards its arguments to __vdie(). */
void __die(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__vdie(fmt, args);
	va_end(args);
}
33
34void __weak die(const char *fmt, ...)
35{
36 va_list ap;
37
38 va_start(ap, fmt);
39 __vdie(fmt, ap);
40 va_end(ap);
41}
42
/*
 * __vwarning - print a warning message to stderr without terminating.
 * @fmt: printf format string for the message
 * @ap: variadic arguments for @fmt
 *
 * If errno is set, a perror("trace-cmd") line is emitted first; errno
 * is then cleared so later warnings do not repeat the same strerror.
 */
void __vwarning(const char *fmt, va_list ap)
{
	if (errno)
		perror("trace-cmd");
	errno = 0;

	fprintf(stderr, " ");
	vfprintf(stderr, fmt, ap);

	fprintf(stderr, "\n");
}
54
/* Warning entry point: forwards its arguments to __vwarning(). */
void __warning(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__vwarning(fmt, args);
	va_end(args);
}
63
64void __weak warning(const char *fmt, ...)
65{
66 va_list ap;
67
68 va_start(ap, fmt);
69 __vwarning(fmt, ap);
70 va_end(ap);
71}
72
/*
 * __vpr_stat - print a status line to stdout, followed by a newline.
 * @fmt: printf format string
 * @ap: variadic arguments for @fmt
 */
void __vpr_stat(const char *fmt, va_list ap)
{
	vprintf(fmt, ap);
	printf("\n");
}
78
/* Status-print entry point: forwards its arguments to __vpr_stat(). */
void __pr_stat(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__vpr_stat(fmt, args);
	va_end(args);
}
87
/*
 * Default vpr_stat() implementation; weak, so applications may
 * redirect status output elsewhere (defaults to stdout).
 */
void __weak vpr_stat(const char *fmt, va_list ap)
{
	__vpr_stat(fmt, ap);
}
92
93void __weak pr_stat(const char *fmt, ...)
94{
95 va_list ap;
96
97 va_start(ap, fmt);
98 __vpr_stat(fmt, ap);
99 va_end(ap);
100}
101
/*
 * malloc_or_die - allocate memory or terminate on failure.
 * @size: number of bytes to allocate
 *
 * Returns a pointer to @size bytes of uninitialized memory.  On
 * allocation failure die() is called, so this never returns NULL.
 * Weak, so applications can substitute their own allocator.
 *
 * NOTE(review): malloc(0) may legitimately return NULL on some libcs,
 * which would be treated as a fatal error here — confirm callers never
 * request a zero-byte allocation.
 */
void __weak *malloc_or_die(unsigned int size)
{
	void *data;

	data = malloc(size);
	if (!data)
		die("malloc");
	return data;
}
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
new file mode 100644
index 000000000000..b1ccc923e8a5
--- /dev/null
+++ b/tools/lib/traceevent/trace-seq.c
@@ -0,0 +1,200 @@
1/*
2 * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation;
8 * version 2.1 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21#include <stdio.h>
22#include <stdlib.h>
23#include <string.h>
24#include <stdarg.h>
25
26#include "event-parse.h"
27#include "event-utils.h"
28
29/*
30 * The TRACE_SEQ_POISON is to catch the use of using
31 * a trace_seq structure after it was destroyed.
32 */
33#define TRACE_SEQ_POISON ((void *)0xdeadbeef)
34#define TRACE_SEQ_CHECK(s) \
35do { \
36 if ((s)->buffer == TRACE_SEQ_POISON) \
37 die("Usage of trace_seq after it was destroyed"); \
38} while (0)
39
/**
 * trace_seq_init - initialize the trace_seq structure
 * @s: a pointer to the trace_seq structure to initialize
 *
 * Resets the write (len) and read (readpos) offsets and allocates the
 * initial TRACE_SEQ_BUF_SIZE buffer.  Allocation goes through
 * malloc_or_die(), which exits the process on failure, so on return
 * @s->buffer is always valid.
 */
void trace_seq_init(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
	s->buffer_size = TRACE_SEQ_BUF_SIZE;
	s->buffer = malloc_or_die(s->buffer_size);
}
51
/**
 * trace_seq_destroy - free up memory of a trace_seq
 * @s: a pointer to the trace_seq to free the buffer
 *
 * Only frees the buffer, not the trace_seq struct itself.
 * The buffer pointer is replaced with TRACE_SEQ_POISON so a later use
 * of @s is caught by TRACE_SEQ_CHECK().  Passing NULL is a no-op.
 */
void trace_seq_destroy(struct trace_seq *s)
{
	if (!s)
		return;
	TRACE_SEQ_CHECK(s);
	free(s->buffer);
	s->buffer = TRACE_SEQ_POISON;
}
66
/*
 * Grow the sequence buffer by one more TRACE_SEQ_BUF_SIZE chunk.
 * On realloc failure die() terminates the process, so overwriting
 * s->buffer with the realloc result cannot actually leak the old
 * allocation.
 */
static void expand_buffer(struct trace_seq *s)
{
	s->buffer_size += TRACE_SEQ_BUF_SIZE;
	s->buffer = realloc(s->buffer, s->buffer_size);
	if (!s->buffer)
		die("Can't allocate trace_seq buffer memory");
}
74
75/**
76 * trace_seq_printf - sequence printing of trace information
77 * @s: trace sequence descriptor
78 * @fmt: printf format string
79 *
80 * It returns 0 if the trace oversizes the buffer's free
81 * space, 1 otherwise.
82 *
83 * The tracer may use either sequence operations or its own
84 * copy to user routines. To simplify formating of a trace
85 * trace_seq_printf is used to store strings into a special
86 * buffer (@s). Then the output may be either used by
87 * the sequencer or pulled into another buffer.
88 */
89int
90trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
91{
92 va_list ap;
93 int len;
94 int ret;
95
96 TRACE_SEQ_CHECK(s);
97
98 try_again:
99 len = (s->buffer_size - 1) - s->len;
100
101 va_start(ap, fmt);
102 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
103 va_end(ap);
104
105 if (ret >= len) {
106 expand_buffer(s);
107 goto try_again;
108 }
109
110 s->len += ret;
111
112 return 1;
113}
114
115/**
116 * trace_seq_vprintf - sequence printing of trace information
117 * @s: trace sequence descriptor
118 * @fmt: printf format string
119 *
120 * The tracer may use either sequence operations or its own
121 * copy to user routines. To simplify formating of a trace
122 * trace_seq_printf is used to store strings into a special
123 * buffer (@s). Then the output may be either used by
124 * the sequencer or pulled into another buffer.
125 */
126int
127trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
128{
129 int len;
130 int ret;
131
132 TRACE_SEQ_CHECK(s);
133
134 try_again:
135 len = (s->buffer_size - 1) - s->len;
136
137 ret = vsnprintf(s->buffer + s->len, len, fmt, args);
138
139 if (ret >= len) {
140 expand_buffer(s);
141 goto try_again;
142 }
143
144 s->len += ret;
145
146 return len;
147}
148
149/**
150 * trace_seq_puts - trace sequence printing of simple string
151 * @s: trace sequence descriptor
152 * @str: simple string to record
153 *
154 * The tracer may use either the sequence operations or its own
155 * copy to user routines. This function records a simple string
156 * into a special buffer (@s) for later retrieval by a sequencer
157 * or other mechanism.
158 */
159int trace_seq_puts(struct trace_seq *s, const char *str)
160{
161 int len;
162
163 TRACE_SEQ_CHECK(s);
164
165 len = strlen(str);
166
167 while (len > ((s->buffer_size - 1) - s->len))
168 expand_buffer(s);
169
170 memcpy(s->buffer + s->len, str, len);
171 s->len += len;
172
173 return len;
174}
175
176int trace_seq_putc(struct trace_seq *s, unsigned char c)
177{
178 TRACE_SEQ_CHECK(s);
179
180 while (s->len >= (s->buffer_size - 1))
181 expand_buffer(s);
182
183 s->buffer[s->len++] = c;
184
185 return 1;
186}
187
/*
 * trace_seq_terminate - NUL-terminate the buffered contents.
 *
 * Safe without a bounds check: every append path above reserves at
 * least one spare byte (all space checks use buffer_size - 1).
 */
void trace_seq_terminate(struct trace_seq *s)
{
	TRACE_SEQ_CHECK(s);

	/* There's always one character left on the buffer */
	s->buffer[s->len] = 0;
}
195
/*
 * trace_seq_do_printf - write the buffered contents to stdout.
 *
 * Uses a "%.*s" precision of s->len, so the buffer does not need to
 * be NUL-terminated.  Returns printf()'s result: the number of bytes
 * written, or a negative value on output error.
 */
int trace_seq_do_printf(struct trace_seq *s)
{
	TRACE_SEQ_CHECK(s);
	return printf("%.*s", s->len, s->buffer);
}
diff --git a/tools/perf/Documentation/perfconfig.example b/tools/perf/Documentation/perfconfig.example
index d1448668f4d4..42c6fd2ae85d 100644
--- a/tools/perf/Documentation/perfconfig.example
+++ b/tools/perf/Documentation/perfconfig.example
@@ -6,6 +6,7 @@
6 normal = black, lightgray 6 normal = black, lightgray
7 selected = lightgray, magenta 7 selected = lightgray, magenta
8 code = blue, lightgray 8 code = blue, lightgray
9 addr = magenta, lightgray
9 10
10[tui] 11[tui]
11 12
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 92271d32bc30..fa37cd53e9b9 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -1,18 +1,10 @@
1ifeq ("$(origin O)", "command line") 1include ../scripts/Makefile.include
2 OUTPUT := $(O)/
3endif
4 2
5# The default target of this Makefile is... 3# The default target of this Makefile is...
6all: 4all:
7 5
8include config/utilities.mak 6include config/utilities.mak
9 7
10ifneq ($(OUTPUT),)
11# check that the output directory actually exists
12OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
13$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
14endif
15
16# Define V to have a more verbose compile. 8# Define V to have a more verbose compile.
17# 9#
18# Define O to save output files in a separate directory. 10# Define O to save output files in a separate directory.
@@ -84,31 +76,6 @@ ifneq ($(WERROR),0)
84 CFLAGS_WERROR := -Werror 76 CFLAGS_WERROR := -Werror
85endif 77endif
86 78
87#
88# Include saner warnings here, which can catch bugs:
89#
90
91EXTRA_WARNINGS := -Wformat
92EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-security
93EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-y2k
94EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow
95EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
96EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
97EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
98EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
99EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
100EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
101EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
102EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
103EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
104EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
105EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
106EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-prototypes
107EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wnested-externs
108EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
109EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
110EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
111
112ifeq ("$(origin DEBUG)", "command line") 79ifeq ("$(origin DEBUG)", "command line")
113 PERF_DEBUG = $(DEBUG) 80 PERF_DEBUG = $(DEBUG)
114endif 81endif
@@ -182,7 +149,7 @@ endif
182 149
183### --- END CONFIGURATION SECTION --- 150### --- END CONFIGURATION SECTION ---
184 151
185BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE 152BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -I$(EVENT_PARSE_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
186BASIC_LDFLAGS = 153BASIC_LDFLAGS =
187 154
188# Guard against environment variables 155# Guard against environment variables
@@ -211,6 +178,17 @@ $(OUTPUT)python/perf.so: $(PYRF_OBJS) $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
211 178
212SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) 179SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
213 180
181EVENT_PARSE_DIR = ../lib/traceevent/
182
183ifeq ("$(origin O)", "command line")
184 EP_PATH=$(OUTPUT)/
185else
186 EP_PATH=$(EVENT_PARSE_DIR)/
187endif
188
189LIBPARSEVENT = $(EP_PATH)libtraceevent.a
190EP_LIB := -L$(EP_PATH) -ltraceevent
191
214# 192#
215# Single 'perf' binary right now: 193# Single 'perf' binary right now:
216# 194#
@@ -333,6 +311,8 @@ LIB_H += util/cpumap.h
333LIB_H += util/top.h 311LIB_H += util/top.h
334LIB_H += $(ARCH_INCLUDE) 312LIB_H += $(ARCH_INCLUDE)
335LIB_H += util/cgroup.h 313LIB_H += util/cgroup.h
314LIB_H += $(EVENT_PARSE_DIR)event-parse.h
315LIB_H += util/target.h
336 316
337LIB_OBJS += $(OUTPUT)util/abspath.o 317LIB_OBJS += $(OUTPUT)util/abspath.o
338LIB_OBJS += $(OUTPUT)util/alias.o 318LIB_OBJS += $(OUTPUT)util/alias.o
@@ -394,6 +374,7 @@ LIB_OBJS += $(OUTPUT)util/util.o
394LIB_OBJS += $(OUTPUT)util/xyarray.o 374LIB_OBJS += $(OUTPUT)util/xyarray.o
395LIB_OBJS += $(OUTPUT)util/cpumap.o 375LIB_OBJS += $(OUTPUT)util/cpumap.o
396LIB_OBJS += $(OUTPUT)util/cgroup.o 376LIB_OBJS += $(OUTPUT)util/cgroup.o
377LIB_OBJS += $(OUTPUT)util/target.o
397 378
398BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o 379BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
399 380
@@ -429,7 +410,7 @@ BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
429BUILTIN_OBJS += $(OUTPUT)builtin-test.o 410BUILTIN_OBJS += $(OUTPUT)builtin-test.o
430BUILTIN_OBJS += $(OUTPUT)builtin-inject.o 411BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
431 412
432PERFLIBS = $(LIB_FILE) 413PERFLIBS = $(LIB_FILE) $(LIBPARSEVENT)
433 414
434# Files needed for the python binding, perf.so 415# Files needed for the python binding, perf.so
435# pyrf is just an internal name needed for all those wrappers. 416# pyrf is just an internal name needed for all those wrappers.
@@ -506,22 +487,23 @@ else
506 # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h 487 # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
507 BASIC_CFLAGS += -I/usr/include/slang 488 BASIC_CFLAGS += -I/usr/include/slang
508 EXTLIBS += -lnewt -lslang 489 EXTLIBS += -lnewt -lslang
509 LIB_OBJS += $(OUTPUT)util/ui/setup.o 490 LIB_OBJS += $(OUTPUT)ui/setup.o
510 LIB_OBJS += $(OUTPUT)util/ui/browser.o 491 LIB_OBJS += $(OUTPUT)ui/browser.o
511 LIB_OBJS += $(OUTPUT)util/ui/browsers/annotate.o 492 LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
512 LIB_OBJS += $(OUTPUT)util/ui/browsers/hists.o 493 LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
513 LIB_OBJS += $(OUTPUT)util/ui/browsers/map.o 494 LIB_OBJS += $(OUTPUT)ui/browsers/map.o
514 LIB_OBJS += $(OUTPUT)util/ui/helpline.o 495 LIB_OBJS += $(OUTPUT)ui/helpline.o
515 LIB_OBJS += $(OUTPUT)util/ui/progress.o 496 LIB_OBJS += $(OUTPUT)ui/progress.o
516 LIB_OBJS += $(OUTPUT)util/ui/util.o 497 LIB_OBJS += $(OUTPUT)ui/util.o
517 LIB_H += util/ui/browser.h 498 LIB_OBJS += $(OUTPUT)ui/tui/setup.o
518 LIB_H += util/ui/browsers/map.h 499 LIB_H += ui/browser.h
519 LIB_H += util/ui/helpline.h 500 LIB_H += ui/browsers/map.h
520 LIB_H += util/ui/keysyms.h 501 LIB_H += ui/helpline.h
521 LIB_H += util/ui/libslang.h 502 LIB_H += ui/keysyms.h
522 LIB_H += util/ui/progress.h 503 LIB_H += ui/libslang.h
523 LIB_H += util/ui/util.h 504 LIB_H += ui/progress.h
524 LIB_H += util/ui/ui.h 505 LIB_H += ui/util.h
506 LIB_H += ui/ui.h
525 endif 507 endif
526endif 508endif
527 509
@@ -535,7 +517,12 @@ else
535 else 517 else
536 BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0) 518 BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
537 EXTLIBS += $(shell pkg-config --libs gtk+-2.0) 519 EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
538 LIB_OBJS += $(OUTPUT)util/gtk/browser.o 520 LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
521 LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
522 # Make sure that it'd be included only once.
523 ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),)
524 LIB_OBJS += $(OUTPUT)ui/setup.o
525 endif
539 endif 526 endif
540endif 527endif
541 528
@@ -678,18 +665,6 @@ else
678 endif 665 endif
679endif 666endif
680 667
681ifneq ($(findstring $(MAKEFLAGS),s),s)
682ifndef V
683 QUIET_CC = @echo ' ' CC $@;
684 QUIET_AR = @echo ' ' AR $@;
685 QUIET_LINK = @echo ' ' LINK $@;
686 QUIET_MKDIR = @echo ' ' MKDIR $@;
687 QUIET_GEN = @echo ' ' GEN $@;
688 QUIET_FLEX = @echo ' ' FLEX $@;
689 QUIET_BISON = @echo ' ' BISON $@;
690endif
691endif
692
693ifdef ASCIIDOC8 668ifdef ASCIIDOC8
694 export ASCIIDOC8 669 export ASCIIDOC8
695endif 670endif
@@ -800,16 +775,16 @@ $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
800$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS 775$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
801 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 776 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
802 777
803$(OUTPUT)util/ui/browser.o: util/ui/browser.c $(OUTPUT)PERF-CFLAGS 778$(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS
804 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< 779 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
805 780
806$(OUTPUT)util/ui/browsers/annotate.o: util/ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS 781$(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
807 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< 782 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
808 783
809$(OUTPUT)util/ui/browsers/hists.o: util/ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS 784$(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
810 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< 785 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
811 786
812$(OUTPUT)util/ui/browsers/map.o: util/ui/browsers/map.c $(OUTPUT)PERF-CFLAGS 787$(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
813 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< 788 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
814 789
815$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS 790$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
@@ -844,6 +819,10 @@ $(sort $(dir $(DIRECTORY_DEPS))):
844$(LIB_FILE): $(LIB_OBJS) 819$(LIB_FILE): $(LIB_OBJS)
845 $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) 820 $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
846 821
822# libparsevent.a
823$(LIBPARSEVENT):
824 make -C $(EVENT_PARSE_DIR) $(COMMAND_O) libtraceevent.a
825
847help: 826help:
848 @echo 'Perf make targets:' 827 @echo 'Perf make targets:'
849 @echo ' doc - make *all* documentation (see below)' 828 @echo ' doc - make *all* documentation (see below)'
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 39104c0beea3..547af48deb4f 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -192,7 +192,7 @@ static void insert_caller_stat(unsigned long call_site,
192} 192}
193 193
194static void process_alloc_event(void *data, 194static void process_alloc_event(void *data,
195 struct event *event, 195 struct event_format *event,
196 int cpu, 196 int cpu,
197 u64 timestamp __used, 197 u64 timestamp __used,
198 struct thread *thread __used, 198 struct thread *thread __used,
@@ -253,7 +253,7 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
253} 253}
254 254
255static void process_free_event(void *data, 255static void process_free_event(void *data,
256 struct event *event, 256 struct event_format *event,
257 int cpu, 257 int cpu,
258 u64 timestamp __used, 258 u64 timestamp __used,
259 struct thread *thread __used) 259 struct thread *thread __used)
@@ -281,7 +281,7 @@ static void process_free_event(void *data,
281static void process_raw_event(union perf_event *raw_event __used, void *data, 281static void process_raw_event(union perf_event *raw_event __used, void *data,
282 int cpu, u64 timestamp, struct thread *thread) 282 int cpu, u64 timestamp, struct thread *thread)
283{ 283{
284 struct event *event; 284 struct event_format *event;
285 int type; 285 int type;
286 286
287 type = trace_parse_common_type(data); 287 type = trace_parse_common_type(data);
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 12c814838993..fd53319de20d 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -356,25 +356,25 @@ struct trace_release_event {
356 356
357struct trace_lock_handler { 357struct trace_lock_handler {
358 void (*acquire_event)(struct trace_acquire_event *, 358 void (*acquire_event)(struct trace_acquire_event *,
359 struct event *, 359 struct event_format *,
360 int cpu, 360 int cpu,
361 u64 timestamp, 361 u64 timestamp,
362 struct thread *thread); 362 struct thread *thread);
363 363
364 void (*acquired_event)(struct trace_acquired_event *, 364 void (*acquired_event)(struct trace_acquired_event *,
365 struct event *, 365 struct event_format *,
366 int cpu, 366 int cpu,
367 u64 timestamp, 367 u64 timestamp,
368 struct thread *thread); 368 struct thread *thread);
369 369
370 void (*contended_event)(struct trace_contended_event *, 370 void (*contended_event)(struct trace_contended_event *,
371 struct event *, 371 struct event_format *,
372 int cpu, 372 int cpu,
373 u64 timestamp, 373 u64 timestamp,
374 struct thread *thread); 374 struct thread *thread);
375 375
376 void (*release_event)(struct trace_release_event *, 376 void (*release_event)(struct trace_release_event *,
377 struct event *, 377 struct event_format *,
378 int cpu, 378 int cpu,
379 u64 timestamp, 379 u64 timestamp,
380 struct thread *thread); 380 struct thread *thread);
@@ -416,7 +416,7 @@ enum acquire_flags {
416 416
417static void 417static void
418report_lock_acquire_event(struct trace_acquire_event *acquire_event, 418report_lock_acquire_event(struct trace_acquire_event *acquire_event,
419 struct event *__event __used, 419 struct event_format *__event __used,
420 int cpu __used, 420 int cpu __used,
421 u64 timestamp __used, 421 u64 timestamp __used,
422 struct thread *thread __used) 422 struct thread *thread __used)
@@ -480,7 +480,7 @@ end:
480 480
481static void 481static void
482report_lock_acquired_event(struct trace_acquired_event *acquired_event, 482report_lock_acquired_event(struct trace_acquired_event *acquired_event,
483 struct event *__event __used, 483 struct event_format *__event __used,
484 int cpu __used, 484 int cpu __used,
485 u64 timestamp __used, 485 u64 timestamp __used,
486 struct thread *thread __used) 486 struct thread *thread __used)
@@ -536,7 +536,7 @@ end:
536 536
537static void 537static void
538report_lock_contended_event(struct trace_contended_event *contended_event, 538report_lock_contended_event(struct trace_contended_event *contended_event,
539 struct event *__event __used, 539 struct event_format *__event __used,
540 int cpu __used, 540 int cpu __used,
541 u64 timestamp __used, 541 u64 timestamp __used,
542 struct thread *thread __used) 542 struct thread *thread __used)
@@ -583,7 +583,7 @@ end:
583 583
584static void 584static void
585report_lock_release_event(struct trace_release_event *release_event, 585report_lock_release_event(struct trace_release_event *release_event,
586 struct event *__event __used, 586 struct event_format *__event __used,
587 int cpu __used, 587 int cpu __used,
588 u64 timestamp __used, 588 u64 timestamp __used,
589 struct thread *thread __used) 589 struct thread *thread __used)
@@ -647,7 +647,7 @@ static struct trace_lock_handler *trace_handler;
647 647
648static void 648static void
649process_lock_acquire_event(void *data, 649process_lock_acquire_event(void *data,
650 struct event *event __used, 650 struct event_format *event __used,
651 int cpu __used, 651 int cpu __used,
652 u64 timestamp __used, 652 u64 timestamp __used,
653 struct thread *thread __used) 653 struct thread *thread __used)
@@ -666,7 +666,7 @@ process_lock_acquire_event(void *data,
666 666
667static void 667static void
668process_lock_acquired_event(void *data, 668process_lock_acquired_event(void *data,
669 struct event *event __used, 669 struct event_format *event __used,
670 int cpu __used, 670 int cpu __used,
671 u64 timestamp __used, 671 u64 timestamp __used,
672 struct thread *thread __used) 672 struct thread *thread __used)
@@ -684,7 +684,7 @@ process_lock_acquired_event(void *data,
684 684
685static void 685static void
686process_lock_contended_event(void *data, 686process_lock_contended_event(void *data,
687 struct event *event __used, 687 struct event_format *event __used,
688 int cpu __used, 688 int cpu __used,
689 u64 timestamp __used, 689 u64 timestamp __used,
690 struct thread *thread __used) 690 struct thread *thread __used)
@@ -702,7 +702,7 @@ process_lock_contended_event(void *data,
702 702
703static void 703static void
704process_lock_release_event(void *data, 704process_lock_release_event(void *data,
705 struct event *event __used, 705 struct event_format *event __used,
706 int cpu __used, 706 int cpu __used,
707 u64 timestamp __used, 707 u64 timestamp __used,
708 struct thread *thread __used) 708 struct thread *thread __used)
@@ -721,7 +721,7 @@ process_lock_release_event(void *data,
721static void 721static void
722process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread) 722process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
723{ 723{
724 struct event *event; 724 struct event_format *event;
725 int type; 725 int type;
726 726
727 type = trace_parse_common_type(data); 727 type = trace_parse_common_type(data);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index be4e1eee782e..8a3dfac161e2 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -44,7 +44,6 @@ struct perf_record {
44 struct perf_evlist *evlist; 44 struct perf_evlist *evlist;
45 struct perf_session *session; 45 struct perf_session *session;
46 const char *progname; 46 const char *progname;
47 const char *uid_str;
48 int output; 47 int output;
49 unsigned int page_size; 48 unsigned int page_size;
50 int realtime_prio; 49 int realtime_prio;
@@ -218,7 +217,7 @@ try_again:
218 if (err == EPERM || err == EACCES) { 217 if (err == EPERM || err == EACCES) {
219 ui__error_paranoid(); 218 ui__error_paranoid();
220 exit(EXIT_FAILURE); 219 exit(EXIT_FAILURE);
221 } else if (err == ENODEV && opts->cpu_list) { 220 } else if (err == ENODEV && opts->target.cpu_list) {
222 die("No such device - did you specify" 221 die("No such device - did you specify"
223 " an out-of-range profile CPU?\n"); 222 " an out-of-range profile CPU?\n");
224 } else if (err == EINVAL) { 223 } else if (err == EINVAL) {
@@ -243,9 +242,13 @@ try_again:
243 /* 242 /*
244 * If it's cycles then fall back to hrtimer 243 * If it's cycles then fall back to hrtimer
245 * based cpu-clock-tick sw counter, which 244 * based cpu-clock-tick sw counter, which
246 * is always available even if no PMU support: 245 * is always available even if no PMU support.
246 *
247 * PPC returns ENXIO until 2.6.37 (behavior changed
248 * with commit b0a873e).
247 */ 249 */
248 if (attr->type == PERF_TYPE_HARDWARE 250 if ((err == ENOENT || err == ENXIO)
251 && attr->type == PERF_TYPE_HARDWARE
249 && attr->config == PERF_COUNT_HW_CPU_CYCLES) { 252 && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
250 253
251 if (verbose) 254 if (verbose)
@@ -253,6 +256,10 @@ try_again:
253 "trying to fall back to cpu-clock-ticks\n"); 256 "trying to fall back to cpu-clock-ticks\n");
254 attr->type = PERF_TYPE_SOFTWARE; 257 attr->type = PERF_TYPE_SOFTWARE;
255 attr->config = PERF_COUNT_SW_CPU_CLOCK; 258 attr->config = PERF_COUNT_SW_CPU_CLOCK;
259 if (pos->name) {
260 free(pos->name);
261 pos->name = NULL;
262 }
256 goto try_again; 263 goto try_again;
257 } 264 }
258 265
@@ -578,7 +585,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
578 perf_session__process_machines(session, tool, 585 perf_session__process_machines(session, tool,
579 perf_event__synthesize_guest_os); 586 perf_event__synthesize_guest_os);
580 587
581 if (!opts->system_wide) 588 if (!opts->target.system_wide)
582 perf_event__synthesize_thread_map(tool, evsel_list->threads, 589 perf_event__synthesize_thread_map(tool, evsel_list->threads,
583 process_synthesized_event, 590 process_synthesized_event,
584 machine); 591 machine);
@@ -747,6 +754,9 @@ static struct perf_record record = {
747 .user_freq = UINT_MAX, 754 .user_freq = UINT_MAX,
748 .user_interval = ULLONG_MAX, 755 .user_interval = ULLONG_MAX,
749 .freq = 1000, 756 .freq = 1000,
757 .target = {
758 .uses_mmap = true,
759 },
750 }, 760 },
751 .write_mode = WRITE_FORCE, 761 .write_mode = WRITE_FORCE,
752 .file_new = true, 762 .file_new = true,
@@ -765,9 +775,9 @@ const struct option record_options[] = {
765 parse_events_option), 775 parse_events_option),
766 OPT_CALLBACK(0, "filter", &record.evlist, "filter", 776 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
767 "event filter", parse_filter), 777 "event filter", parse_filter),
768 OPT_STRING('p', "pid", &record.opts.target_pid, "pid", 778 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
769 "record events on existing process id"), 779 "record events on existing process id"),
770 OPT_STRING('t', "tid", &record.opts.target_tid, "tid", 780 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
771 "record events on existing thread id"), 781 "record events on existing thread id"),
772 OPT_INTEGER('r', "realtime", &record.realtime_prio, 782 OPT_INTEGER('r', "realtime", &record.realtime_prio,
773 "collect data with this RT SCHED_FIFO priority"), 783 "collect data with this RT SCHED_FIFO priority"),
@@ -775,11 +785,11 @@ const struct option record_options[] = {
775 "collect data without buffering"), 785 "collect data without buffering"),
776 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples, 786 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
777 "collect raw sample records from all opened counters"), 787 "collect raw sample records from all opened counters"),
778 OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide, 788 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
779 "system-wide collection from all CPUs"), 789 "system-wide collection from all CPUs"),
780 OPT_BOOLEAN('A', "append", &record.append_file, 790 OPT_BOOLEAN('A', "append", &record.append_file,
781 "append to the output file to do incremental profiling"), 791 "append to the output file to do incremental profiling"),
782 OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu", 792 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
783 "list of cpus to monitor"), 793 "list of cpus to monitor"),
784 OPT_BOOLEAN('f', "force", &record.force, 794 OPT_BOOLEAN('f', "force", &record.force,
785 "overwrite existing data file (deprecated)"), 795 "overwrite existing data file (deprecated)"),
@@ -813,7 +823,8 @@ const struct option record_options[] = {
813 OPT_CALLBACK('G', "cgroup", &record.evlist, "name", 823 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
814 "monitor event in cgroup name only", 824 "monitor event in cgroup name only",
815 parse_cgroups), 825 parse_cgroups),
816 OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"), 826 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
827 "user to profile"),
817 828
818 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack, 829 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
819 "branch any", "sample any taken branches", 830 "branch any", "sample any taken branches",
@@ -831,6 +842,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
831 struct perf_evsel *pos; 842 struct perf_evsel *pos;
832 struct perf_evlist *evsel_list; 843 struct perf_evlist *evsel_list;
833 struct perf_record *rec = &record; 844 struct perf_record *rec = &record;
845 char errbuf[BUFSIZ];
834 846
835 perf_header__set_cmdline(argc, argv); 847 perf_header__set_cmdline(argc, argv);
836 848
@@ -842,8 +854,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
842 854
843 argc = parse_options(argc, argv, record_options, record_usage, 855 argc = parse_options(argc, argv, record_options, record_usage,
844 PARSE_OPT_STOP_AT_NON_OPTION); 856 PARSE_OPT_STOP_AT_NON_OPTION);
845 if (!argc && !rec->opts.target_pid && !rec->opts.target_tid && 857 if (!argc && perf_target__none(&rec->opts.target))
846 !rec->opts.system_wide && !rec->opts.cpu_list && !rec->uid_str)
847 usage_with_options(record_usage, record_options); 858 usage_with_options(record_usage, record_options);
848 859
849 if (rec->force && rec->append_file) { 860 if (rec->force && rec->append_file) {
@@ -856,7 +867,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
856 rec->write_mode = WRITE_FORCE; 867 rec->write_mode = WRITE_FORCE;
857 } 868 }
858 869
859 if (nr_cgroups && !rec->opts.system_wide) { 870 if (nr_cgroups && !rec->opts.target.system_wide) {
860 fprintf(stderr, "cgroup monitoring only available in" 871 fprintf(stderr, "cgroup monitoring only available in"
861 " system-wide mode\n"); 872 " system-wide mode\n");
862 usage_with_options(record_usage, record_options); 873 usage_with_options(record_usage, record_options);
@@ -883,17 +894,25 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
883 goto out_symbol_exit; 894 goto out_symbol_exit;
884 } 895 }
885 896
886 rec->opts.uid = parse_target_uid(rec->uid_str, rec->opts.target_tid, 897 err = perf_target__validate(&rec->opts.target);
887 rec->opts.target_pid); 898 if (err) {
888 if (rec->uid_str != NULL && rec->opts.uid == UINT_MAX - 1) 899 perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
889 goto out_free_fd; 900 ui__warning("%s", errbuf);
901 }
902
903 err = perf_target__parse_uid(&rec->opts.target);
904 if (err) {
905 int saved_errno = errno;
890 906
891 if (rec->opts.target_pid) 907 perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
892 rec->opts.target_tid = rec->opts.target_pid; 908 ui__warning("%s", errbuf);
909
910 err = -saved_errno;
911 goto out_free_fd;
912 }
893 913
894 if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid, 914 err = -ENOMEM;
895 rec->opts.target_tid, rec->opts.uid, 915 if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
896 rec->opts.cpu_list) < 0)
897 usage_with_options(record_usage, record_options); 916 usage_with_options(record_usage, record_options);
898 917
899 list_for_each_entry(pos, &evsel_list->entries, node) { 918 list_for_each_entry(pos, &evsel_list->entries, node) {
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index cdae9b2db1cc..d58e41445d0d 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -296,12 +296,15 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
296{ 296{
297 size_t ret; 297 size_t ret;
298 char unit; 298 char unit;
299 unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; 299 unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
300 u64 nr_events = self->stats.total_period;
300 301
301 nr_events = convert_unit(nr_events, &unit); 302 nr_samples = convert_unit(nr_samples, &unit);
302 ret = fprintf(fp, "# Events: %lu%c", nr_events, unit); 303 ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
303 if (evname != NULL) 304 if (evname != NULL)
304 ret += fprintf(fp, " %s", evname); 305 ret += fprintf(fp, " of event '%s'", evname);
306
307 ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);
305 return ret + fprintf(fp, "\n#\n"); 308 return ret + fprintf(fp, "\n#\n");
306} 309}
307 310
@@ -680,14 +683,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
680 683
681 } 684 }
682 685
683 if (strcmp(report.input_name, "-") != 0) { 686 if (strcmp(report.input_name, "-") != 0)
684 if (report.use_gtk) 687 setup_browser(true);
685 perf_gtk_setup_browser(argc, argv, true); 688 else
686 else
687 setup_browser(true);
688 } else {
689 use_browser = 0; 689 use_browser = 0;
690 }
691 690
692 /* 691 /*
693 * Only in the newt browser we are doing integrated annotation, 692 * Only in the newt browser we are doing integrated annotation,
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 1cad3af4bf4c..b125e07eb399 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -728,34 +728,34 @@ struct trace_migrate_task_event {
728struct trace_sched_handler { 728struct trace_sched_handler {
729 void (*switch_event)(struct trace_switch_event *, 729 void (*switch_event)(struct trace_switch_event *,
730 struct machine *, 730 struct machine *,
731 struct event *, 731 struct event_format *,
732 int cpu, 732 int cpu,
733 u64 timestamp, 733 u64 timestamp,
734 struct thread *thread); 734 struct thread *thread);
735 735
736 void (*runtime_event)(struct trace_runtime_event *, 736 void (*runtime_event)(struct trace_runtime_event *,
737 struct machine *, 737 struct machine *,
738 struct event *, 738 struct event_format *,
739 int cpu, 739 int cpu,
740 u64 timestamp, 740 u64 timestamp,
741 struct thread *thread); 741 struct thread *thread);
742 742
743 void (*wakeup_event)(struct trace_wakeup_event *, 743 void (*wakeup_event)(struct trace_wakeup_event *,
744 struct machine *, 744 struct machine *,
745 struct event *, 745 struct event_format *,
746 int cpu, 746 int cpu,
747 u64 timestamp, 747 u64 timestamp,
748 struct thread *thread); 748 struct thread *thread);
749 749
750 void (*fork_event)(struct trace_fork_event *, 750 void (*fork_event)(struct trace_fork_event *,
751 struct event *, 751 struct event_format *,
752 int cpu, 752 int cpu,
753 u64 timestamp, 753 u64 timestamp,
754 struct thread *thread); 754 struct thread *thread);
755 755
756 void (*migrate_task_event)(struct trace_migrate_task_event *, 756 void (*migrate_task_event)(struct trace_migrate_task_event *,
757 struct machine *machine, 757 struct machine *machine,
758 struct event *, 758 struct event_format *,
759 int cpu, 759 int cpu,
760 u64 timestamp, 760 u64 timestamp,
761 struct thread *thread); 761 struct thread *thread);
@@ -765,7 +765,7 @@ struct trace_sched_handler {
765static void 765static void
766replay_wakeup_event(struct trace_wakeup_event *wakeup_event, 766replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
767 struct machine *machine __used, 767 struct machine *machine __used,
768 struct event *event, 768 struct event_format *event,
769 int cpu __used, 769 int cpu __used,
770 u64 timestamp __used, 770 u64 timestamp __used,
771 struct thread *thread __used) 771 struct thread *thread __used)
@@ -792,7 +792,7 @@ static u64 cpu_last_switched[MAX_CPUS];
792static void 792static void
793replay_switch_event(struct trace_switch_event *switch_event, 793replay_switch_event(struct trace_switch_event *switch_event,
794 struct machine *machine __used, 794 struct machine *machine __used,
795 struct event *event, 795 struct event_format *event,
796 int cpu, 796 int cpu,
797 u64 timestamp, 797 u64 timestamp,
798 struct thread *thread __used) 798 struct thread *thread __used)
@@ -835,7 +835,7 @@ replay_switch_event(struct trace_switch_event *switch_event,
835 835
836static void 836static void
837replay_fork_event(struct trace_fork_event *fork_event, 837replay_fork_event(struct trace_fork_event *fork_event,
838 struct event *event, 838 struct event_format *event,
839 int cpu __used, 839 int cpu __used,
840 u64 timestamp __used, 840 u64 timestamp __used,
841 struct thread *thread __used) 841 struct thread *thread __used)
@@ -944,7 +944,7 @@ static void thread_atoms_insert(struct thread *thread)
944 944
945static void 945static void
946latency_fork_event(struct trace_fork_event *fork_event __used, 946latency_fork_event(struct trace_fork_event *fork_event __used,
947 struct event *event __used, 947 struct event_format *event __used,
948 int cpu __used, 948 int cpu __used,
949 u64 timestamp __used, 949 u64 timestamp __used,
950 struct thread *thread __used) 950 struct thread *thread __used)
@@ -1026,7 +1026,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
1026static void 1026static void
1027latency_switch_event(struct trace_switch_event *switch_event, 1027latency_switch_event(struct trace_switch_event *switch_event,
1028 struct machine *machine, 1028 struct machine *machine,
1029 struct event *event __used, 1029 struct event_format *event __used,
1030 int cpu, 1030 int cpu,
1031 u64 timestamp, 1031 u64 timestamp,
1032 struct thread *thread __used) 1032 struct thread *thread __used)
@@ -1079,7 +1079,7 @@ latency_switch_event(struct trace_switch_event *switch_event,
1079static void 1079static void
1080latency_runtime_event(struct trace_runtime_event *runtime_event, 1080latency_runtime_event(struct trace_runtime_event *runtime_event,
1081 struct machine *machine, 1081 struct machine *machine,
1082 struct event *event __used, 1082 struct event_format *event __used,
1083 int cpu, 1083 int cpu,
1084 u64 timestamp, 1084 u64 timestamp,
1085 struct thread *this_thread __used) 1085 struct thread *this_thread __used)
@@ -1102,7 +1102,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
1102static void 1102static void
1103latency_wakeup_event(struct trace_wakeup_event *wakeup_event, 1103latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
1104 struct machine *machine, 1104 struct machine *machine,
1105 struct event *__event __used, 1105 struct event_format *__event __used,
1106 int cpu __used, 1106 int cpu __used,
1107 u64 timestamp, 1107 u64 timestamp,
1108 struct thread *thread __used) 1108 struct thread *thread __used)
@@ -1150,7 +1150,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
1150static void 1150static void
1151latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, 1151latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
1152 struct machine *machine, 1152 struct machine *machine,
1153 struct event *__event __used, 1153 struct event_format *__event __used,
1154 int cpu __used, 1154 int cpu __used,
1155 u64 timestamp, 1155 u64 timestamp,
1156 struct thread *thread __used) 1156 struct thread *thread __used)
@@ -1361,7 +1361,7 @@ static struct trace_sched_handler *trace_handler;
1361 1361
1362static void 1362static void
1363process_sched_wakeup_event(struct perf_tool *tool __used, 1363process_sched_wakeup_event(struct perf_tool *tool __used,
1364 struct event *event, 1364 struct event_format *event,
1365 struct perf_sample *sample, 1365 struct perf_sample *sample,
1366 struct machine *machine, 1366 struct machine *machine,
1367 struct thread *thread) 1367 struct thread *thread)
@@ -1398,7 +1398,7 @@ static char next_shortname2 = '0';
1398static void 1398static void
1399map_switch_event(struct trace_switch_event *switch_event, 1399map_switch_event(struct trace_switch_event *switch_event,
1400 struct machine *machine, 1400 struct machine *machine,
1401 struct event *event __used, 1401 struct event_format *event __used,
1402 int this_cpu, 1402 int this_cpu,
1403 u64 timestamp, 1403 u64 timestamp,
1404 struct thread *thread __used) 1404 struct thread *thread __used)
@@ -1476,7 +1476,7 @@ map_switch_event(struct trace_switch_event *switch_event,
1476 1476
1477static void 1477static void
1478process_sched_switch_event(struct perf_tool *tool __used, 1478process_sched_switch_event(struct perf_tool *tool __used,
1479 struct event *event, 1479 struct event_format *event,
1480 struct perf_sample *sample, 1480 struct perf_sample *sample,
1481 struct machine *machine, 1481 struct machine *machine,
1482 struct thread *thread) 1482 struct thread *thread)
@@ -1512,7 +1512,7 @@ process_sched_switch_event(struct perf_tool *tool __used,
1512 1512
1513static void 1513static void
1514process_sched_runtime_event(struct perf_tool *tool __used, 1514process_sched_runtime_event(struct perf_tool *tool __used,
1515 struct event *event, 1515 struct event_format *event,
1516 struct perf_sample *sample, 1516 struct perf_sample *sample,
1517 struct machine *machine, 1517 struct machine *machine,
1518 struct thread *thread) 1518 struct thread *thread)
@@ -1532,7 +1532,7 @@ process_sched_runtime_event(struct perf_tool *tool __used,
1532 1532
1533static void 1533static void
1534process_sched_fork_event(struct perf_tool *tool __used, 1534process_sched_fork_event(struct perf_tool *tool __used,
1535 struct event *event, 1535 struct event_format *event,
1536 struct perf_sample *sample, 1536 struct perf_sample *sample,
1537 struct machine *machine __used, 1537 struct machine *machine __used,
1538 struct thread *thread) 1538 struct thread *thread)
@@ -1554,7 +1554,7 @@ process_sched_fork_event(struct perf_tool *tool __used,
1554 1554
1555static void 1555static void
1556process_sched_exit_event(struct perf_tool *tool __used, 1556process_sched_exit_event(struct perf_tool *tool __used,
1557 struct event *event, 1557 struct event_format *event,
1558 struct perf_sample *sample __used, 1558 struct perf_sample *sample __used,
1559 struct machine *machine __used, 1559 struct machine *machine __used,
1560 struct thread *thread __used) 1560 struct thread *thread __used)
@@ -1565,7 +1565,7 @@ process_sched_exit_event(struct perf_tool *tool __used,
1565 1565
1566static void 1566static void
1567process_sched_migrate_task_event(struct perf_tool *tool __used, 1567process_sched_migrate_task_event(struct perf_tool *tool __used,
1568 struct event *event, 1568 struct event_format *event,
1569 struct perf_sample *sample, 1569 struct perf_sample *sample,
1570 struct machine *machine, 1570 struct machine *machine,
1571 struct thread *thread) 1571 struct thread *thread)
@@ -1586,7 +1586,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used,
1586 sample->time, thread); 1586 sample->time, thread);
1587} 1587}
1588 1588
1589typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event *event, 1589typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event,
1590 struct perf_sample *sample, 1590 struct perf_sample *sample,
1591 struct machine *machine, 1591 struct machine *machine,
1592 struct thread *thread); 1592 struct thread *thread);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d4ce733b9eba..8e395a538eb9 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -261,7 +261,7 @@ static void print_sample_start(struct perf_sample *sample,
261 struct perf_event_attr *attr) 261 struct perf_event_attr *attr)
262{ 262{
263 int type; 263 int type;
264 struct event *event; 264 struct event_format *event;
265 const char *evname = NULL; 265 const char *evname = NULL;
266 unsigned long secs; 266 unsigned long secs;
267 unsigned long usecs; 267 unsigned long usecs;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 1e5e9b270f5e..62ae30d34fa6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -173,24 +173,23 @@ static struct perf_event_attr very_very_detailed_attrs[] = {
173 173
174 174
175 175
176struct perf_evlist *evsel_list; 176static struct perf_evlist *evsel_list;
177 177
178static bool system_wide = false; 178static struct perf_target target = {
179static int run_idx = 0; 179 .uid = UINT_MAX,
180};
180 181
182static int run_idx = 0;
181static int run_count = 1; 183static int run_count = 1;
182static bool no_inherit = false; 184static bool no_inherit = false;
183static bool scale = true; 185static bool scale = true;
184static bool no_aggr = false; 186static bool no_aggr = false;
185static const char *target_pid;
186static const char *target_tid;
187static pid_t child_pid = -1; 187static pid_t child_pid = -1;
188static bool null_run = false; 188static bool null_run = false;
189static int detailed_run = 0; 189static int detailed_run = 0;
190static bool sync_run = false; 190static bool sync_run = false;
191static bool big_num = true; 191static bool big_num = true;
192static int big_num_opt = -1; 192static int big_num_opt = -1;
193static const char *cpu_list;
194static const char *csv_sep = NULL; 193static const char *csv_sep = NULL;
195static bool csv_output = false; 194static bool csv_output = false;
196static bool group = false; 195static bool group = false;
@@ -265,18 +264,18 @@ static double stddev_stats(struct stats *stats)
265 return sqrt(variance_mean); 264 return sqrt(variance_mean);
266} 265}
267 266
268struct stats runtime_nsecs_stats[MAX_NR_CPUS]; 267static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
269struct stats runtime_cycles_stats[MAX_NR_CPUS]; 268static struct stats runtime_cycles_stats[MAX_NR_CPUS];
270struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS]; 269static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
271struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; 270static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
272struct stats runtime_branches_stats[MAX_NR_CPUS]; 271static struct stats runtime_branches_stats[MAX_NR_CPUS];
273struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; 272static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
274struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; 273static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
275struct stats runtime_l1_icache_stats[MAX_NR_CPUS]; 274static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
276struct stats runtime_ll_cache_stats[MAX_NR_CPUS]; 275static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
277struct stats runtime_itlb_cache_stats[MAX_NR_CPUS]; 276static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
278struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; 277static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
279struct stats walltime_nsecs_stats; 278static struct stats walltime_nsecs_stats;
280 279
281static int create_perf_stat_counter(struct perf_evsel *evsel, 280static int create_perf_stat_counter(struct perf_evsel *evsel,
282 struct perf_evsel *first) 281 struct perf_evsel *first)
@@ -299,15 +298,15 @@ retry:
299 if (exclude_guest_missing) 298 if (exclude_guest_missing)
300 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; 299 evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
301 300
302 if (system_wide) { 301 if (perf_target__has_cpu(&target)) {
303 ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus, 302 ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
304 group, group_fd); 303 group, group_fd);
305 if (ret) 304 if (ret)
306 goto check_ret; 305 goto check_ret;
307 return 0; 306 return 0;
308 } 307 }
309 308
310 if (!target_pid && !target_tid && (!group || evsel == first)) { 309 if (!perf_target__has_task(&target) && (!group || evsel == first)) {
311 attr->disabled = 1; 310 attr->disabled = 1;
312 attr->enable_on_exec = 1; 311 attr->enable_on_exec = 1;
313 } 312 }
@@ -471,7 +470,7 @@ static int run_perf_stat(int argc __used, const char **argv)
471 exit(-1); 470 exit(-1);
472 } 471 }
473 472
474 if (!target_tid && !target_pid && !system_wide) 473 if (perf_target__none(&target))
475 evsel_list->threads->map[0] = child_pid; 474 evsel_list->threads->map[0] = child_pid;
476 475
477 /* 476 /*
@@ -506,7 +505,7 @@ static int run_perf_stat(int argc __used, const char **argv)
506 error("You may not have permission to collect %sstats.\n" 505 error("You may not have permission to collect %sstats.\n"
507 "\t Consider tweaking" 506 "\t Consider tweaking"
508 " /proc/sys/kernel/perf_event_paranoid or running as root.", 507 " /proc/sys/kernel/perf_event_paranoid or running as root.",
509 system_wide ? "system-wide " : ""); 508 target.system_wide ? "system-wide " : "");
510 } else { 509 } else {
511 error("open_counter returned with %d (%s). " 510 error("open_counter returned with %d (%s). "
512 "/bin/dmesg may provide additional information.\n", 511 "/bin/dmesg may provide additional information.\n",
@@ -998,14 +997,14 @@ static void print_stat(int argc, const char **argv)
998 if (!csv_output) { 997 if (!csv_output) {
999 fprintf(output, "\n"); 998 fprintf(output, "\n");
1000 fprintf(output, " Performance counter stats for "); 999 fprintf(output, " Performance counter stats for ");
1001 if (!target_pid && !target_tid) { 1000 if (!perf_target__has_task(&target)) {
1002 fprintf(output, "\'%s", argv[0]); 1001 fprintf(output, "\'%s", argv[0]);
1003 for (i = 1; i < argc; i++) 1002 for (i = 1; i < argc; i++)
1004 fprintf(output, " %s", argv[i]); 1003 fprintf(output, " %s", argv[i]);
1005 } else if (target_pid) 1004 } else if (target.pid)
1006 fprintf(output, "process id \'%s", target_pid); 1005 fprintf(output, "process id \'%s", target.pid);
1007 else 1006 else
1008 fprintf(output, "thread id \'%s", target_tid); 1007 fprintf(output, "thread id \'%s", target.tid);
1009 1008
1010 fprintf(output, "\'"); 1009 fprintf(output, "\'");
1011 if (run_count > 1) 1010 if (run_count > 1)
@@ -1079,11 +1078,11 @@ static const struct option options[] = {
1079 "event filter", parse_filter), 1078 "event filter", parse_filter),
1080 OPT_BOOLEAN('i', "no-inherit", &no_inherit, 1079 OPT_BOOLEAN('i', "no-inherit", &no_inherit,
1081 "child tasks do not inherit counters"), 1080 "child tasks do not inherit counters"),
1082 OPT_STRING('p', "pid", &target_pid, "pid", 1081 OPT_STRING('p', "pid", &target.pid, "pid",
1083 "stat events on existing process id"), 1082 "stat events on existing process id"),
1084 OPT_STRING('t', "tid", &target_tid, "tid", 1083 OPT_STRING('t', "tid", &target.tid, "tid",
1085 "stat events on existing thread id"), 1084 "stat events on existing thread id"),
1086 OPT_BOOLEAN('a', "all-cpus", &system_wide, 1085 OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
1087 "system-wide collection from all CPUs"), 1086 "system-wide collection from all CPUs"),
1088 OPT_BOOLEAN('g', "group", &group, 1087 OPT_BOOLEAN('g', "group", &group,
1089 "put the counters into a counter group"), 1088 "put the counters into a counter group"),
@@ -1102,7 +1101,7 @@ static const struct option options[] = {
1102 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1101 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1103 "print large numbers with thousands\' separators", 1102 "print large numbers with thousands\' separators",
1104 stat__set_big_num), 1103 stat__set_big_num),
1105 OPT_STRING('C', "cpu", &cpu_list, "cpu", 1104 OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
1106 "list of cpus to monitor in system-wide"), 1105 "list of cpus to monitor in system-wide"),
1107 OPT_BOOLEAN('A', "no-aggr", &no_aggr, 1106 OPT_BOOLEAN('A', "no-aggr", &no_aggr,
1108 "disable CPU count aggregation"), 1107 "disable CPU count aggregation"),
@@ -1220,13 +1219,13 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
1220 } else if (big_num_opt == 0) /* User passed --no-big-num */ 1219 } else if (big_num_opt == 0) /* User passed --no-big-num */
1221 big_num = false; 1220 big_num = false;
1222 1221
1223 if (!argc && !target_pid && !target_tid) 1222 if (!argc && !perf_target__has_task(&target))
1224 usage_with_options(stat_usage, options); 1223 usage_with_options(stat_usage, options);
1225 if (run_count <= 0) 1224 if (run_count <= 0)
1226 usage_with_options(stat_usage, options); 1225 usage_with_options(stat_usage, options);
1227 1226
1228 /* no_aggr, cgroup are for system-wide only */ 1227 /* no_aggr, cgroup are for system-wide only */
1229 if ((no_aggr || nr_cgroups) && !system_wide) { 1228 if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) {
1230 fprintf(stderr, "both cgroup and no-aggregation " 1229 fprintf(stderr, "both cgroup and no-aggregation "
1231 "modes only available in system-wide mode\n"); 1230 "modes only available in system-wide mode\n");
1232 1231
@@ -1236,23 +1235,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
1236 if (add_default_attributes()) 1235 if (add_default_attributes())
1237 goto out; 1236 goto out;
1238 1237
1239 if (target_pid) 1238 perf_target__validate(&target);
1240 target_tid = target_pid;
1241 1239
1242 evsel_list->threads = thread_map__new_str(target_pid, 1240 if (perf_evlist__create_maps(evsel_list, &target) < 0) {
1243 target_tid, UINT_MAX); 1241 if (perf_target__has_task(&target))
1244 if (evsel_list->threads == NULL) { 1242 pr_err("Problems finding threads of monitor\n");
1245 pr_err("Problems finding threads of monitor\n"); 1243 if (perf_target__has_cpu(&target))
1246 usage_with_options(stat_usage, options); 1244 perror("failed to parse CPUs map");
1247 }
1248
1249 if (system_wide)
1250 evsel_list->cpus = cpu_map__new(cpu_list);
1251 else
1252 evsel_list->cpus = cpu_map__dummy_new();
1253 1245
1254 if (evsel_list->cpus == NULL) {
1255 perror("failed to parse CPUs map");
1256 usage_with_options(stat_usage, options); 1246 usage_with_options(stat_usage, options);
1257 return -1; 1247 return -1;
1258 } 1248 }
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 223ffdcc0fd8..6c47376e29d8 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -1195,6 +1195,10 @@ realloc:
1195static int test__PERF_RECORD(void) 1195static int test__PERF_RECORD(void)
1196{ 1196{
1197 struct perf_record_opts opts = { 1197 struct perf_record_opts opts = {
1198 .target = {
1199 .uid = UINT_MAX,
1200 .uses_mmap = true,
1201 },
1198 .no_delay = true, 1202 .no_delay = true,
1199 .freq = 10, 1203 .freq = 10,
1200 .mmap_pages = 256, 1204 .mmap_pages = 256,
@@ -1237,8 +1241,7 @@ static int test__PERF_RECORD(void)
1237 * perf_evlist__prepare_workload we'll fill in the only thread 1241 * perf_evlist__prepare_workload we'll fill in the only thread
1238 * we're monitoring, the one forked there. 1242 * we're monitoring, the one forked there.
1239 */ 1243 */
1240 err = perf_evlist__create_maps(evlist, opts.target_pid, 1244 err = perf_evlist__create_maps(evlist, &opts.target);
1241 opts.target_tid, UINT_MAX, opts.cpu_list);
1242 if (err < 0) { 1245 if (err < 0) {
1243 pr_debug("Not enough memory to create thread/cpu maps\n"); 1246 pr_debug("Not enough memory to create thread/cpu maps\n");
1244 goto out_delete_evlist; 1247 goto out_delete_evlist;
@@ -1579,8 +1582,6 @@ static int __test__rdpmc(void)
1579 sa.sa_sigaction = segfault_handler; 1582 sa.sa_sigaction = segfault_handler;
1580 sigaction(SIGSEGV, &sa, NULL); 1583 sigaction(SIGSEGV, &sa, NULL);
1581 1584
1582 fprintf(stderr, "\n\n");
1583
1584 fd = sys_perf_event_open(&attr, 0, -1, -1, 0); 1585 fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
1585 if (fd < 0) { 1586 if (fd < 0) {
1586 die("Error: sys_perf_event_open() syscall returned " 1587 die("Error: sys_perf_event_open() syscall returned "
@@ -1605,7 +1606,7 @@ static int __test__rdpmc(void)
1605 loops *= 10; 1606 loops *= 10;
1606 1607
1607 delta = now - stamp; 1608 delta = now - stamp;
1608 fprintf(stderr, "%14d: %14Lu\n", n, (long long)delta); 1609 pr_debug("%14d: %14Lu\n", n, (long long)delta);
1609 1610
1610 delta_sum += delta; 1611 delta_sum += delta;
1611 } 1612 }
@@ -1613,7 +1614,7 @@ static int __test__rdpmc(void)
1613 munmap(addr, page_size); 1614 munmap(addr, page_size);
1614 close(fd); 1615 close(fd);
1615 1616
1616 fprintf(stderr, " "); 1617 pr_debug(" ");
1617 1618
1618 if (!delta_sum) 1619 if (!delta_sum)
1619 return -1; 1620 return -1;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 8ef59f8262bb..3e981a710c4d 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -588,7 +588,7 @@ static void *display_thread_tui(void *arg)
588 * via --uid. 588 * via --uid.
589 */ 589 */
590 list_for_each_entry(pos, &top->evlist->entries, node) 590 list_for_each_entry(pos, &top->evlist->entries, node)
591 pos->hists.uid_filter_str = top->uid_str; 591 pos->hists.uid_filter_str = top->target.uid_str;
592 592
593 perf_evlist__tui_browse_hists(top->evlist, help, 593 perf_evlist__tui_browse_hists(top->evlist, help,
594 perf_top__sort_new_samples, 594 perf_top__sort_new_samples,
@@ -948,6 +948,10 @@ try_again:
948 948
949 attr->type = PERF_TYPE_SOFTWARE; 949 attr->type = PERF_TYPE_SOFTWARE;
950 attr->config = PERF_COUNT_SW_CPU_CLOCK; 950 attr->config = PERF_COUNT_SW_CPU_CLOCK;
951 if (counter->name) {
952 free(counter->name);
953 counter->name = strdup(event_name(counter));
954 }
951 goto try_again; 955 goto try_again;
952 } 956 }
953 957
@@ -1016,7 +1020,7 @@ static int __cmd_top(struct perf_top *top)
1016 if (ret) 1020 if (ret)
1017 goto out_delete; 1021 goto out_delete;
1018 1022
1019 if (top->target_tid || top->uid != UINT_MAX) 1023 if (perf_target__has_task(&top->target))
1020 perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, 1024 perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
1021 perf_event__process, 1025 perf_event__process,
1022 &top->session->host_machine); 1026 &top->session->host_machine);
@@ -1150,14 +1154,17 @@ static const char * const top_usage[] = {
1150int cmd_top(int argc, const char **argv, const char *prefix __used) 1154int cmd_top(int argc, const char **argv, const char *prefix __used)
1151{ 1155{
1152 struct perf_evsel *pos; 1156 struct perf_evsel *pos;
1153 int status = -ENOMEM; 1157 int status;
1158 char errbuf[BUFSIZ];
1154 struct perf_top top = { 1159 struct perf_top top = {
1155 .count_filter = 5, 1160 .count_filter = 5,
1156 .delay_secs = 2, 1161 .delay_secs = 2,
1157 .uid = UINT_MAX,
1158 .freq = 1000, /* 1 KHz */ 1162 .freq = 1000, /* 1 KHz */
1159 .mmap_pages = 128, 1163 .mmap_pages = 128,
1160 .sym_pcnt_filter = 5, 1164 .sym_pcnt_filter = 5,
1165 .target = {
1166 .uses_mmap = true,
1167 },
1161 }; 1168 };
1162 char callchain_default_opt[] = "fractal,0.5,callee"; 1169 char callchain_default_opt[] = "fractal,0.5,callee";
1163 const struct option options[] = { 1170 const struct option options[] = {
@@ -1166,13 +1173,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1166 parse_events_option), 1173 parse_events_option),
1167 OPT_INTEGER('c', "count", &top.default_interval, 1174 OPT_INTEGER('c', "count", &top.default_interval,
1168 "event period to sample"), 1175 "event period to sample"),
1169 OPT_STRING('p', "pid", &top.target_pid, "pid", 1176 OPT_STRING('p', "pid", &top.target.pid, "pid",
1170 "profile events on existing process id"), 1177 "profile events on existing process id"),
1171 OPT_STRING('t', "tid", &top.target_tid, "tid", 1178 OPT_STRING('t', "tid", &top.target.tid, "tid",
1172 "profile events on existing thread id"), 1179 "profile events on existing thread id"),
1173 OPT_BOOLEAN('a', "all-cpus", &top.system_wide, 1180 OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide,
1174 "system-wide collection from all CPUs"), 1181 "system-wide collection from all CPUs"),
1175 OPT_STRING('C', "cpu", &top.cpu_list, "cpu", 1182 OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu",
1176 "list of cpus to monitor"), 1183 "list of cpus to monitor"),
1177 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 1184 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1178 "file", "vmlinux pathname"), 1185 "file", "vmlinux pathname"),
@@ -1227,7 +1234,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1227 "Display raw encoding of assembly instructions (default)"), 1234 "Display raw encoding of assembly instructions (default)"),
1228 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", 1235 OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
1229 "Specify disassembler style (e.g. -M intel for intel syntax)"), 1236 "Specify disassembler style (e.g. -M intel for intel syntax)"),
1230 OPT_STRING('u', "uid", &top.uid_str, "user", "user to profile"), 1237 OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"),
1231 OPT_END() 1238 OPT_END()
1232 }; 1239 };
1233 1240
@@ -1253,22 +1260,27 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1253 1260
1254 setup_browser(false); 1261 setup_browser(false);
1255 1262
1256 top.uid = parse_target_uid(top.uid_str, top.target_tid, top.target_pid); 1263 status = perf_target__validate(&top.target);
1257 if (top.uid_str != NULL && top.uid == UINT_MAX - 1) 1264 if (status) {
1258 goto out_delete_evlist; 1265 perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
1266 ui__warning("%s", errbuf);
1267 }
1259 1268
1260 /* CPU and PID are mutually exclusive */ 1269 status = perf_target__parse_uid(&top.target);
1261 if (top.target_tid && top.cpu_list) { 1270 if (status) {
1262 printf("WARNING: PID switch overriding CPU\n"); 1271 int saved_errno = errno;
1263 sleep(1); 1272
1264 top.cpu_list = NULL; 1273 perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
1274 ui__warning("%s", errbuf);
1275
1276 status = -saved_errno;
1277 goto out_delete_evlist;
1265 } 1278 }
1266 1279
1267 if (top.target_pid) 1280 if (perf_target__none(&top.target))
1268 top.target_tid = top.target_pid; 1281 top.target.system_wide = true;
1269 1282
1270 if (perf_evlist__create_maps(top.evlist, top.target_pid, 1283 if (perf_evlist__create_maps(top.evlist, &top.target) < 0)
1271 top.target_tid, top.uid, top.cpu_list) < 0)
1272 usage_with_options(top_usage, options); 1284 usage_with_options(top_usage, options);
1273 1285
1274 if (!top.evlist->nr_entries && 1286 if (!top.evlist->nr_entries &&
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 89e3355ab173..14f1034f14f9 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -207,10 +207,10 @@ extern const char perf_version_string[];
207 207
208void pthread__unblock_sigwinch(void); 208void pthread__unblock_sigwinch(void);
209 209
210#include "util/target.h"
211
210struct perf_record_opts { 212struct perf_record_opts {
211 const char *target_pid; 213 struct perf_target target;
212 const char *target_tid;
213 uid_t uid;
214 bool call_graph; 214 bool call_graph;
215 bool group; 215 bool group;
216 bool inherit_stat; 216 bool inherit_stat;
@@ -223,7 +223,6 @@ struct perf_record_opts {
223 bool sample_time; 223 bool sample_time;
224 bool sample_id_all_missing; 224 bool sample_id_all_missing;
225 bool exclude_guest_missing; 225 bool exclude_guest_missing;
226 bool system_wide;
227 bool period; 226 bool period;
228 unsigned int freq; 227 unsigned int freq;
229 unsigned int mmap_pages; 228 unsigned int mmap_pages;
@@ -231,7 +230,6 @@ struct perf_record_opts {
231 int branch_stack; 230 int branch_stack;
232 u64 default_interval; 231 u64 default_interval;
233 u64 user_interval; 232 u64 user_interval;
234 const char *cpu_list;
235}; 233};
236 234
237#endif 235#endif
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/ui/browser.c
index 556829124b02..cde4d0f0ddb9 100644
--- a/tools/perf/util/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -27,9 +27,12 @@ static int ui_browser__percent_color(struct ui_browser *browser,
27 return HE_COLORSET_NORMAL; 27 return HE_COLORSET_NORMAL;
28} 28}
29 29
30void ui_browser__set_color(struct ui_browser *self __used, int color) 30int ui_browser__set_color(struct ui_browser *browser, int color)
31{ 31{
32 int ret = browser->current_color;
33 browser->current_color = color;
32 SLsmg_set_color(color); 34 SLsmg_set_color(color);
35 return ret;
33} 36}
34 37
35void ui_browser__set_percent_color(struct ui_browser *self, 38void ui_browser__set_percent_color(struct ui_browser *self,
@@ -503,6 +506,12 @@ static struct ui_browser__colorset {
503 .bg = "default", 506 .bg = "default",
504 }, 507 },
505 { 508 {
509 .colorset = HE_COLORSET_ADDR,
510 .name = "addr",
511 .fg = "magenta",
512 .bg = "default",
513 },
514 {
506 .name = NULL, 515 .name = NULL,
507 } 516 }
508}; 517};
@@ -584,6 +593,111 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
584 return row; 593 return row;
585} 594}
586 595
596void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
597 u16 start, u16 end)
598{
599 SLsmg_set_char_set(1);
600 ui_browser__gotorc(browser, start, column);
601 SLsmg_draw_vline(end - start + 1);
602 SLsmg_set_char_set(0);
603}
604
605void ui_browser__write_graph(struct ui_browser *browser __used, int graph)
606{
607 SLsmg_set_char_set(1);
608 SLsmg_write_char(graph);
609 SLsmg_set_char_set(0);
610}
611
612static void __ui_browser__line_arrow_up(struct ui_browser *browser,
613 unsigned int column,
614 u64 start, u64 end)
615{
616 unsigned int row, end_row;
617
618 SLsmg_set_char_set(1);
619
620 if (start < browser->top_idx + browser->height) {
621 row = start - browser->top_idx;
622 ui_browser__gotorc(browser, row, column);
623 SLsmg_write_char(SLSMG_LLCORN_CHAR);
624 ui_browser__gotorc(browser, row, column + 1);
625 SLsmg_draw_hline(2);
626
627 if (row-- == 0)
628 goto out;
629 } else
630 row = browser->height - 1;
631
632 if (end > browser->top_idx)
633 end_row = end - browser->top_idx;
634 else
635 end_row = 0;
636
637 ui_browser__gotorc(browser, end_row, column);
638 SLsmg_draw_vline(row - end_row + 1);
639
640 ui_browser__gotorc(browser, end_row, column);
641 if (end >= browser->top_idx) {
642 SLsmg_write_char(SLSMG_ULCORN_CHAR);
643 ui_browser__gotorc(browser, end_row, column + 1);
644 SLsmg_write_char(SLSMG_HLINE_CHAR);
645 ui_browser__gotorc(browser, end_row, column + 2);
646 SLsmg_write_char(SLSMG_RARROW_CHAR);
647 }
648out:
649 SLsmg_set_char_set(0);
650}
651
652static void __ui_browser__line_arrow_down(struct ui_browser *browser,
653 unsigned int column,
654 u64 start, u64 end)
655{
656 unsigned int row, end_row;
657
658 SLsmg_set_char_set(1);
659
660 if (start >= browser->top_idx) {
661 row = start - browser->top_idx;
662 ui_browser__gotorc(browser, row, column);
663 SLsmg_write_char(SLSMG_ULCORN_CHAR);
664 ui_browser__gotorc(browser, row, column + 1);
665 SLsmg_draw_hline(2);
666
667 if (row++ == 0)
668 goto out;
669 } else
670 row = 0;
671
672 if (end >= browser->top_idx + browser->height)
673 end_row = browser->height - 1;
674 else
675 end_row = end - browser->top_idx;;
676
677 ui_browser__gotorc(browser, row, column);
678 SLsmg_draw_vline(end_row - row + 1);
679
680 ui_browser__gotorc(browser, end_row, column);
681 if (end < browser->top_idx + browser->height) {
682 SLsmg_write_char(SLSMG_LLCORN_CHAR);
683 ui_browser__gotorc(browser, end_row, column + 1);
684 SLsmg_write_char(SLSMG_HLINE_CHAR);
685 ui_browser__gotorc(browser, end_row, column + 2);
686 SLsmg_write_char(SLSMG_RARROW_CHAR);
687 }
688out:
689 SLsmg_set_char_set(0);
690}
691
692void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
693 u64 start, u64 end)
694{
695 if (start > end)
696 __ui_browser__line_arrow_up(browser, column, start, end);
697 else
698 __ui_browser__line_arrow_down(browser, column, start, end);
699}
700
587void ui_browser__init(void) 701void ui_browser__init(void)
588{ 702{
589 int i = 0; 703 int i = 0;
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/ui/browser.h
index 6ee82f60feaf..dd96d8229902 100644
--- a/tools/perf/util/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -10,11 +10,13 @@
10#define HE_COLORSET_NORMAL 52 10#define HE_COLORSET_NORMAL 52
11#define HE_COLORSET_SELECTED 53 11#define HE_COLORSET_SELECTED 53
12#define HE_COLORSET_CODE 54 12#define HE_COLORSET_CODE 54
13#define HE_COLORSET_ADDR 55
13 14
14struct ui_browser { 15struct ui_browser {
15 u64 index, top_idx; 16 u64 index, top_idx;
16 void *top, *entries; 17 void *top, *entries;
17 u16 y, x, width, height; 18 u16 y, x, width, height;
19 int current_color;
18 void *priv; 20 void *priv;
19 const char *title; 21 const char *title;
20 char *helpline; 22 char *helpline;
@@ -27,7 +29,7 @@ struct ui_browser {
27 bool use_navkeypressed; 29 bool use_navkeypressed;
28}; 30};
29 31
30void ui_browser__set_color(struct ui_browser *self, int color); 32int ui_browser__set_color(struct ui_browser *browser, int color);
31void ui_browser__set_percent_color(struct ui_browser *self, 33void ui_browser__set_percent_color(struct ui_browser *self,
32 double percent, bool current); 34 double percent, bool current);
33bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row); 35bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
@@ -35,6 +37,9 @@ void ui_browser__refresh_dimensions(struct ui_browser *self);
35void ui_browser__reset_index(struct ui_browser *self); 37void ui_browser__reset_index(struct ui_browser *self);
36 38
37void ui_browser__gotorc(struct ui_browser *self, int y, int x); 39void ui_browser__gotorc(struct ui_browser *self, int y, int x);
40void ui_browser__write_graph(struct ui_browser *browser, int graph);
41void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
42 u64 start, u64 end);
38void __ui_browser__show_title(struct ui_browser *browser, const char *title); 43void __ui_browser__show_title(struct ui_browser *browser, const char *title);
39void ui_browser__show_title(struct ui_browser *browser, const char *title); 44void ui_browser__show_title(struct ui_browser *browser, const char *title);
40int ui_browser__show(struct ui_browser *self, const char *title, 45int ui_browser__show(struct ui_browser *self, const char *title,
@@ -44,6 +49,8 @@ int ui_browser__refresh(struct ui_browser *self);
44int ui_browser__run(struct ui_browser *browser, int delay_secs); 49int ui_browser__run(struct ui_browser *browser, int delay_secs);
45void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries); 50void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries);
46void ui_browser__handle_resize(struct ui_browser *browser); 51void ui_browser__handle_resize(struct ui_browser *browser);
52void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
53 u16 start, u16 end);
47 54
48int ui_browser__warning(struct ui_browser *browser, int timeout, 55int ui_browser__warning(struct ui_browser *browser, int timeout,
49 const char *format, ...); 56 const char *format, ...);
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
new file mode 100644
index 000000000000..6e0ef79be169
--- /dev/null
+++ b/tools/perf/ui/browsers/annotate.c
@@ -0,0 +1,867 @@
1#include "../../util/util.h"
2#include "../browser.h"
3#include "../helpline.h"
4#include "../libslang.h"
5#include "../ui.h"
6#include "../util.h"
7#include "../../util/annotate.h"
8#include "../../util/hist.h"
9#include "../../util/sort.h"
10#include "../../util/symbol.h"
11#include <pthread.h>
12#include <newt.h>
13
14struct browser_disasm_line {
15 struct rb_node rb_node;
16 double percent;
17 u32 idx;
18 int idx_asm;
19 int jump_sources;
20};
21
22struct annotate_browser {
23 struct ui_browser b;
24 struct rb_root entries;
25 struct rb_node *curr_hot;
26 struct disasm_line *selection;
27 struct disasm_line **offsets;
28 u64 start;
29 int nr_asm_entries;
30 int nr_entries;
31 int max_jump_sources;
32 int nr_jumps;
33 bool hide_src_code;
34 bool use_offset;
35 bool jump_arrows;
36 bool show_nr_jumps;
37 bool searching_backwards;
38 u8 addr_width;
39 u8 jumps_width;
40 u8 target_width;
41 u8 min_addr_width;
42 u8 max_addr_width;
43 char search_bf[128];
44};
45
46static inline struct browser_disasm_line *disasm_line__browser(struct disasm_line *dl)
47{
48 return (struct browser_disasm_line *)(dl + 1);
49}
50
51static bool disasm_line__filter(struct ui_browser *browser, void *entry)
52{
53 struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
54
55 if (ab->hide_src_code) {
56 struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
57 return dl->offset == -1;
58 }
59
60 return false;
61}
62
63static int annotate_browser__jumps_percent_color(struct annotate_browser *browser,
64 int nr, bool current)
65{
66 if (current && (!browser->b.use_navkeypressed || browser->b.navkeypressed))
67 return HE_COLORSET_SELECTED;
68 if (nr == browser->max_jump_sources)
69 return HE_COLORSET_TOP;
70 if (nr > 1)
71 return HE_COLORSET_MEDIUM;
72 return HE_COLORSET_NORMAL;
73}
74
75static int annotate_browser__set_jumps_percent_color(struct annotate_browser *browser,
76 int nr, bool current)
77{
78 int color = annotate_browser__jumps_percent_color(browser, nr, current);
79 return ui_browser__set_color(&browser->b, color);
80}
81
82static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
83{
84 struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
85 struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
86 struct browser_disasm_line *bdl = disasm_line__browser(dl);
87 bool current_entry = ui_browser__is_current_entry(self, row);
88 bool change_color = (!ab->hide_src_code &&
89 (!current_entry || (self->use_navkeypressed &&
90 !self->navkeypressed)));
91 int width = self->width, printed;
92 char bf[256];
93
94 if (dl->offset != -1 && bdl->percent != 0.0) {
95 ui_browser__set_percent_color(self, bdl->percent, current_entry);
96 slsmg_printf("%6.2f ", bdl->percent);
97 } else {
98 ui_browser__set_percent_color(self, 0, current_entry);
99 slsmg_write_nstring(" ", 7);
100 }
101
102 SLsmg_write_char(' ');
103
104 /* The scroll bar isn't being used */
105 if (!self->navkeypressed)
106 width += 1;
107
108 if (!*dl->line)
109 slsmg_write_nstring(" ", width - 7);
110 else if (dl->offset == -1) {
111 printed = scnprintf(bf, sizeof(bf), "%*s ",
112 ab->addr_width, " ");
113 slsmg_write_nstring(bf, printed);
114 slsmg_write_nstring(dl->line, width - printed - 6);
115 } else {
116 u64 addr = dl->offset;
117 int color = -1;
118
119 if (!ab->use_offset)
120 addr += ab->start;
121
122 if (!ab->use_offset) {
123 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
124 } else {
125 if (bdl->jump_sources) {
126 if (ab->show_nr_jumps) {
127 int prev;
128 printed = scnprintf(bf, sizeof(bf), "%*d ",
129 ab->jumps_width,
130 bdl->jump_sources);
131 prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
132 current_entry);
133 slsmg_write_nstring(bf, printed);
134 ui_browser__set_color(self, prev);
135 }
136
137 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
138 ab->target_width, addr);
139 } else {
140 printed = scnprintf(bf, sizeof(bf), "%*s ",
141 ab->addr_width, " ");
142 }
143 }
144
145 if (change_color)
146 color = ui_browser__set_color(self, HE_COLORSET_ADDR);
147 slsmg_write_nstring(bf, printed);
148 if (change_color)
149 ui_browser__set_color(self, color);
150 if (dl->ins && dl->ins->ops->scnprintf) {
151 if (ins__is_jump(dl->ins)) {
152 bool fwd = dl->ops.target.offset > (u64)dl->offset;
153
154 ui_browser__write_graph(self, fwd ? SLSMG_DARROW_CHAR :
155 SLSMG_UARROW_CHAR);
156 SLsmg_write_char(' ');
157 } else if (ins__is_call(dl->ins)) {
158 ui_browser__write_graph(self, SLSMG_RARROW_CHAR);
159 SLsmg_write_char(' ');
160 } else {
161 slsmg_write_nstring(" ", 2);
162 }
163 } else {
164 if (strcmp(dl->name, "retq")) {
165 slsmg_write_nstring(" ", 2);
166 } else {
167 ui_browser__write_graph(self, SLSMG_LARROW_CHAR);
168 SLsmg_write_char(' ');
169 }
170 }
171
172 disasm_line__scnprintf(dl, bf, sizeof(bf), !ab->use_offset);
173 slsmg_write_nstring(bf, width - 10 - printed);
174 }
175
176 if (current_entry)
177 ab->selection = dl;
178}
179
180static void annotate_browser__draw_current_jump(struct ui_browser *browser)
181{
182 struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
183 struct disasm_line *cursor = ab->selection, *target;
184 struct browser_disasm_line *btarget, *bcursor;
185 unsigned int from, to;
186
187 if (!cursor->ins || !ins__is_jump(cursor->ins) ||
188 !disasm_line__has_offset(cursor))
189 return;
190
191 target = ab->offsets[cursor->ops.target.offset];
192 if (!target)
193 return;
194
195 bcursor = disasm_line__browser(cursor);
196 btarget = disasm_line__browser(target);
197
198 if (ab->hide_src_code) {
199 from = bcursor->idx_asm;
200 to = btarget->idx_asm;
201 } else {
202 from = (u64)bcursor->idx;
203 to = (u64)btarget->idx;
204 }
205
206 ui_browser__set_color(browser, HE_COLORSET_CODE);
207 __ui_browser__line_arrow(browser, 9 + ab->addr_width, from, to);
208}
209
210static unsigned int annotate_browser__refresh(struct ui_browser *browser)
211{
212 struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
213 int ret = ui_browser__list_head_refresh(browser);
214
215 if (ab->jump_arrows)
216 annotate_browser__draw_current_jump(browser);
217
218 ui_browser__set_color(browser, HE_COLORSET_NORMAL);
219 __ui_browser__vline(browser, 7, 0, browser->height - 1);
220 return ret;
221}
222
223static double disasm_line__calc_percent(struct disasm_line *dl, struct symbol *sym, int evidx)
224{
225 double percent = 0.0;
226
227 if (dl->offset != -1) {
228 int len = sym->end - sym->start;
229 unsigned int hits = 0;
230 struct annotation *notes = symbol__annotation(sym);
231 struct source_line *src_line = notes->src->lines;
232 struct sym_hist *h = annotation__histogram(notes, evidx);
233 s64 offset = dl->offset;
234 struct disasm_line *next;
235
236 next = disasm__get_next_ip_line(&notes->src->source, dl);
237 while (offset < (s64)len &&
238 (next == NULL || offset < next->offset)) {
239 if (src_line) {
240 percent += src_line[offset].percent;
241 } else
242 hits += h->addr[offset];
243
244 ++offset;
245 }
246 /*
247 * If the percentage wasn't already calculated in
248 * symbol__get_source_line, do it now:
249 */
250 if (src_line == NULL && h->sum)
251 percent = 100.0 * hits / h->sum;
252 }
253
254 return percent;
255}
256
257static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl)
258{
259 struct rb_node **p = &root->rb_node;
260 struct rb_node *parent = NULL;
261 struct browser_disasm_line *l;
262
263 while (*p != NULL) {
264 parent = *p;
265 l = rb_entry(parent, struct browser_disasm_line, rb_node);
266 if (bdl->percent < l->percent)
267 p = &(*p)->rb_left;
268 else
269 p = &(*p)->rb_right;
270 }
271 rb_link_node(&bdl->rb_node, parent, p);
272 rb_insert_color(&bdl->rb_node, root);
273}
274
275static void annotate_browser__set_top(struct annotate_browser *self,
276 struct disasm_line *pos, u32 idx)
277{
278 unsigned back;
279
280 ui_browser__refresh_dimensions(&self->b);
281 back = self->b.height / 2;
282 self->b.top_idx = self->b.index = idx;
283
284 while (self->b.top_idx != 0 && back != 0) {
285 pos = list_entry(pos->node.prev, struct disasm_line, node);
286
287 if (disasm_line__filter(&self->b, &pos->node))
288 continue;
289
290 --self->b.top_idx;
291 --back;
292 }
293
294 self->b.top = pos;
295 self->b.navkeypressed = true;
296}
297
298static void annotate_browser__set_rb_top(struct annotate_browser *browser,
299 struct rb_node *nd)
300{
301 struct browser_disasm_line *bpos;
302 struct disasm_line *pos;
303
304 bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
305 pos = ((struct disasm_line *)bpos) - 1;
306 annotate_browser__set_top(browser, pos, bpos->idx);
307 browser->curr_hot = nd;
308}
309
310static void annotate_browser__calc_percent(struct annotate_browser *browser,
311 int evidx)
312{
313 struct map_symbol *ms = browser->b.priv;
314 struct symbol *sym = ms->sym;
315 struct annotation *notes = symbol__annotation(sym);
316 struct disasm_line *pos;
317
318 browser->entries = RB_ROOT;
319
320 pthread_mutex_lock(&notes->lock);
321
322 list_for_each_entry(pos, &notes->src->source, node) {
323 struct browser_disasm_line *bpos = disasm_line__browser(pos);
324 bpos->percent = disasm_line__calc_percent(pos, sym, evidx);
325 if (bpos->percent < 0.01) {
326 RB_CLEAR_NODE(&bpos->rb_node);
327 continue;
328 }
329 disasm_rb_tree__insert(&browser->entries, bpos);
330 }
331 pthread_mutex_unlock(&notes->lock);
332
333 browser->curr_hot = rb_last(&browser->entries);
334}
335
336static bool annotate_browser__toggle_source(struct annotate_browser *browser)
337{
338 struct disasm_line *dl;
339 struct browser_disasm_line *bdl;
340 off_t offset = browser->b.index - browser->b.top_idx;
341
342 browser->b.seek(&browser->b, offset, SEEK_CUR);
343 dl = list_entry(browser->b.top, struct disasm_line, node);
344 bdl = disasm_line__browser(dl);
345
346 if (browser->hide_src_code) {
347 if (bdl->idx_asm < offset)
348 offset = bdl->idx;
349
350 browser->b.nr_entries = browser->nr_entries;
351 browser->hide_src_code = false;
352 browser->b.seek(&browser->b, -offset, SEEK_CUR);
353 browser->b.top_idx = bdl->idx - offset;
354 browser->b.index = bdl->idx;
355 } else {
356 if (bdl->idx_asm < 0) {
357 ui_helpline__puts("Only available for assembly lines.");
358 browser->b.seek(&browser->b, -offset, SEEK_CUR);
359 return false;
360 }
361
362 if (bdl->idx_asm < offset)
363 offset = bdl->idx_asm;
364
365 browser->b.nr_entries = browser->nr_asm_entries;
366 browser->hide_src_code = true;
367 browser->b.seek(&browser->b, -offset, SEEK_CUR);
368 browser->b.top_idx = bdl->idx_asm - offset;
369 browser->b.index = bdl->idx_asm;
370 }
371
372 return true;
373}
374
375static bool annotate_browser__callq(struct annotate_browser *browser,
376 int evidx, void (*timer)(void *arg),
377 void *arg, int delay_secs)
378{
379 struct map_symbol *ms = browser->b.priv;
380 struct disasm_line *dl = browser->selection;
381 struct symbol *sym = ms->sym;
382 struct annotation *notes;
383 struct symbol *target;
384 u64 ip;
385
386 if (!ins__is_call(dl->ins))
387 return false;
388
389 ip = ms->map->map_ip(ms->map, dl->ops.target.addr);
390 target = map__find_symbol(ms->map, ip, NULL);
391 if (target == NULL) {
392 ui_helpline__puts("The called function was not found.");
393 return true;
394 }
395
396 notes = symbol__annotation(target);
397 pthread_mutex_lock(&notes->lock);
398
399 if (notes->src == NULL && symbol__alloc_hist(target) < 0) {
400 pthread_mutex_unlock(&notes->lock);
401 ui__warning("Not enough memory for annotating '%s' symbol!\n",
402 target->name);
403 return true;
404 }
405
406 pthread_mutex_unlock(&notes->lock);
407 symbol__tui_annotate(target, ms->map, evidx, timer, arg, delay_secs);
408 ui_browser__show_title(&browser->b, sym->name);
409 return true;
410}
411
412static
413struct disasm_line *annotate_browser__find_offset(struct annotate_browser *browser,
414 s64 offset, s64 *idx)
415{
416 struct map_symbol *ms = browser->b.priv;
417 struct symbol *sym = ms->sym;
418 struct annotation *notes = symbol__annotation(sym);
419 struct disasm_line *pos;
420
421 *idx = 0;
422 list_for_each_entry(pos, &notes->src->source, node) {
423 if (pos->offset == offset)
424 return pos;
425 if (!disasm_line__filter(&browser->b, &pos->node))
426 ++*idx;
427 }
428
429 return NULL;
430}
431
432static bool annotate_browser__jump(struct annotate_browser *browser)
433{
434 struct disasm_line *dl = browser->selection;
435 s64 idx;
436
437 if (!ins__is_jump(dl->ins))
438 return false;
439
440 dl = annotate_browser__find_offset(browser, dl->ops.target.offset, &idx);
441 if (dl == NULL) {
442 ui_helpline__puts("Invallid jump offset");
443 return true;
444 }
445
446 annotate_browser__set_top(browser, dl, idx);
447
448 return true;
449}
450
451static
452struct disasm_line *annotate_browser__find_string(struct annotate_browser *browser,
453 char *s, s64 *idx)
454{
455 struct map_symbol *ms = browser->b.priv;
456 struct symbol *sym = ms->sym;
457 struct annotation *notes = symbol__annotation(sym);
458 struct disasm_line *pos = browser->selection;
459
460 *idx = browser->b.index;
461 list_for_each_entry_continue(pos, &notes->src->source, node) {
462 if (disasm_line__filter(&browser->b, &pos->node))
463 continue;
464
465 ++*idx;
466
467 if (pos->line && strstr(pos->line, s) != NULL)
468 return pos;
469 }
470
471 return NULL;
472}
473
474static bool __annotate_browser__search(struct annotate_browser *browser)
475{
476 struct disasm_line *dl;
477 s64 idx;
478
479 dl = annotate_browser__find_string(browser, browser->search_bf, &idx);
480 if (dl == NULL) {
481 ui_helpline__puts("String not found!");
482 return false;
483 }
484
485 annotate_browser__set_top(browser, dl, idx);
486 browser->searching_backwards = false;
487 return true;
488}
489
490static
491struct disasm_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
492 char *s, s64 *idx)
493{
494 struct map_symbol *ms = browser->b.priv;
495 struct symbol *sym = ms->sym;
496 struct annotation *notes = symbol__annotation(sym);
497 struct disasm_line *pos = browser->selection;
498
499 *idx = browser->b.index;
500 list_for_each_entry_continue_reverse(pos, &notes->src->source, node) {
501 if (disasm_line__filter(&browser->b, &pos->node))
502 continue;
503
504 --*idx;
505
506 if (pos->line && strstr(pos->line, s) != NULL)
507 return pos;
508 }
509
510 return NULL;
511}
512
513static bool __annotate_browser__search_reverse(struct annotate_browser *browser)
514{
515 struct disasm_line *dl;
516 s64 idx;
517
518 dl = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
519 if (dl == NULL) {
520 ui_helpline__puts("String not found!");
521 return false;
522 }
523
524 annotate_browser__set_top(browser, dl, idx);
525 browser->searching_backwards = true;
526 return true;
527}
528
529static bool annotate_browser__search_window(struct annotate_browser *browser,
530 int delay_secs)
531{
532 if (ui_browser__input_window("Search", "String: ", browser->search_bf,
533 "ENTER: OK, ESC: Cancel",
534 delay_secs * 2) != K_ENTER ||
535 !*browser->search_bf)
536 return false;
537
538 return true;
539}
540
541static bool annotate_browser__search(struct annotate_browser *browser, int delay_secs)
542{
543 if (annotate_browser__search_window(browser, delay_secs))
544 return __annotate_browser__search(browser);
545
546 return false;
547}
548
549static bool annotate_browser__continue_search(struct annotate_browser *browser,
550 int delay_secs)
551{
552 if (!*browser->search_bf)
553 return annotate_browser__search(browser, delay_secs);
554
555 return __annotate_browser__search(browser);
556}
557
558static bool annotate_browser__search_reverse(struct annotate_browser *browser,
559 int delay_secs)
560{
561 if (annotate_browser__search_window(browser, delay_secs))
562 return __annotate_browser__search_reverse(browser);
563
564 return false;
565}
566
567static
568bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
569 int delay_secs)
570{
571 if (!*browser->search_bf)
572 return annotate_browser__search_reverse(browser, delay_secs);
573
574 return __annotate_browser__search_reverse(browser);
575}
576
577static int annotate_browser__run(struct annotate_browser *self, int evidx,
578 void(*timer)(void *arg),
579 void *arg, int delay_secs)
580{
581 struct rb_node *nd = NULL;
582 struct map_symbol *ms = self->b.priv;
583 struct symbol *sym = ms->sym;
584 const char *help = "Press 'h' for help on key bindings";
585 int key;
586
587 if (ui_browser__show(&self->b, sym->name, help) < 0)
588 return -1;
589
590 annotate_browser__calc_percent(self, evidx);
591
592 if (self->curr_hot) {
593 annotate_browser__set_rb_top(self, self->curr_hot);
594 self->b.navkeypressed = false;
595 }
596
597 nd = self->curr_hot;
598
599 while (1) {
600 key = ui_browser__run(&self->b, delay_secs);
601
602 if (delay_secs != 0) {
603 annotate_browser__calc_percent(self, evidx);
604 /*
605 * Current line focus got out of the list of most active
606 * lines, NULL it so that if TAB|UNTAB is pressed, we
607 * move to curr_hot (current hottest line).
608 */
609 if (nd != NULL && RB_EMPTY_NODE(nd))
610 nd = NULL;
611 }
612
613 switch (key) {
614 case K_TIMER:
615 if (timer != NULL)
616 timer(arg);
617
618 if (delay_secs != 0)
619 symbol__annotate_decay_histogram(sym, evidx);
620 continue;
621 case K_TAB:
622 if (nd != NULL) {
623 nd = rb_prev(nd);
624 if (nd == NULL)
625 nd = rb_last(&self->entries);
626 } else
627 nd = self->curr_hot;
628 break;
629 case K_UNTAB:
630 if (nd != NULL)
631 nd = rb_next(nd);
632 if (nd == NULL)
633 nd = rb_first(&self->entries);
634 else
635 nd = self->curr_hot;
636 break;
637 case K_F1:
638 case 'h':
639 ui_browser__help_window(&self->b,
640 "UP/DOWN/PGUP\n"
641 "PGDN/SPACE Navigate\n"
642 "q/ESC/CTRL+C Exit\n\n"
643 "-> Go to target\n"
644 "<- Exit\n"
645 "h Cycle thru hottest instructions\n"
646 "j Toggle showing jump to target arrows\n"
647 "J Toggle showing number of jump sources on targets\n"
648 "n Search next string\n"
649 "o Toggle disassembler output/simplified view\n"
650 "s Toggle source code view\n"
651 "/ Search string\n"
652 "? Search previous string\n");
653 continue;
654 case 'H':
655 nd = self->curr_hot;
656 break;
657 case 's':
658 if (annotate_browser__toggle_source(self))
659 ui_helpline__puts(help);
660 continue;
661 case 'o':
662 self->use_offset = !self->use_offset;
663 if (self->use_offset)
664 self->target_width = self->min_addr_width;
665 else
666 self->target_width = self->max_addr_width;
667update_addr_width:
668 self->addr_width = self->target_width;
669 if (self->show_nr_jumps)
670 self->addr_width += self->jumps_width + 1;
671 continue;
672 case 'j':
673 self->jump_arrows = !self->jump_arrows;
674 continue;
675 case 'J':
676 self->show_nr_jumps = !self->show_nr_jumps;
677 goto update_addr_width;
678 case '/':
679 if (annotate_browser__search(self, delay_secs)) {
680show_help:
681 ui_helpline__puts(help);
682 }
683 continue;
684 case 'n':
685 if (self->searching_backwards ?
686 annotate_browser__continue_search_reverse(self, delay_secs) :
687 annotate_browser__continue_search(self, delay_secs))
688 goto show_help;
689 continue;
690 case '?':
691 if (annotate_browser__search_reverse(self, delay_secs))
692 goto show_help;
693 continue;
694 case K_ENTER:
695 case K_RIGHT:
696 if (self->selection == NULL)
697 ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
698 else if (self->selection->offset == -1)
699 ui_helpline__puts("Actions are only available for assembly lines.");
700 else if (!self->selection->ins) {
701 if (strcmp(self->selection->name, "retq"))
702 goto show_sup_ins;
703 goto out;
704 } else if (!(annotate_browser__jump(self) ||
705 annotate_browser__callq(self, evidx, timer, arg, delay_secs))) {
706show_sup_ins:
707 ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
708 }
709 continue;
710 case K_LEFT:
711 case K_ESC:
712 case 'q':
713 case CTRL('c'):
714 goto out;
715 default:
716 continue;
717 }
718
719 if (nd != NULL)
720 annotate_browser__set_rb_top(self, nd);
721 }
722out:
723 ui_browser__hide(&self->b);
724 return key;
725}
726
727int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
728 void(*timer)(void *arg), void *arg, int delay_secs)
729{
730 return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx,
731 timer, arg, delay_secs);
732}
733
734static void annotate_browser__mark_jump_targets(struct annotate_browser *browser,
735 size_t size)
736{
737 u64 offset;
738
739 for (offset = 0; offset < size; ++offset) {
740 struct disasm_line *dl = browser->offsets[offset], *dlt;
741 struct browser_disasm_line *bdlt;
742
743 if (!dl || !dl->ins || !ins__is_jump(dl->ins) ||
744 !disasm_line__has_offset(dl))
745 continue;
746
747 if (dl->ops.target.offset >= size) {
748 ui__error("jump to after symbol!\n"
749 "size: %zx, jump target: %" PRIx64,
750 size, dl->ops.target.offset);
751 continue;
752 }
753
754 dlt = browser->offsets[dl->ops.target.offset];
755 /*
756 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
757 * have to adjust to the previous offset?
758 */
759 if (dlt == NULL)
760 continue;
761
762 bdlt = disasm_line__browser(dlt);
763 if (++bdlt->jump_sources > browser->max_jump_sources)
764 browser->max_jump_sources = bdlt->jump_sources;
765
766 ++browser->nr_jumps;
767 }
768
769}
770
771static inline int width_jumps(int n)
772{
773 if (n >= 100)
774 return 5;
775 if (n / 10)
776 return 2;
777 return 1;
778}
779
780int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
781 void(*timer)(void *arg), void *arg,
782 int delay_secs)
783{
784 struct disasm_line *pos, *n;
785 struct annotation *notes;
786 const size_t size = symbol__size(sym);
787 struct map_symbol ms = {
788 .map = map,
789 .sym = sym,
790 };
791 struct annotate_browser browser = {
792 .b = {
793 .refresh = annotate_browser__refresh,
794 .seek = ui_browser__list_head_seek,
795 .write = annotate_browser__write,
796 .filter = disasm_line__filter,
797 .priv = &ms,
798 .use_navkeypressed = true,
799 },
800 .use_offset = true,
801 .jump_arrows = true,
802 };
803 int ret = -1;
804
805 if (sym == NULL)
806 return -1;
807
808 if (map->dso->annotate_warned)
809 return -1;
810
811 browser.offsets = zalloc(size * sizeof(struct disasm_line *));
812 if (browser.offsets == NULL) {
813 ui__error("Not enough memory!");
814 return -1;
815 }
816
817 if (symbol__annotate(sym, map, sizeof(struct browser_disasm_line)) < 0) {
818 ui__error("%s", ui_helpline__last_msg);
819 goto out_free_offsets;
820 }
821
822 ui_helpline__push("Press <- or ESC to exit");
823
824 notes = symbol__annotation(sym);
825 browser.start = map__rip_2objdump(map, sym->start);
826
827 list_for_each_entry(pos, &notes->src->source, node) {
828 struct browser_disasm_line *bpos;
829 size_t line_len = strlen(pos->line);
830
831 if (browser.b.width < line_len)
832 browser.b.width = line_len;
833 bpos = disasm_line__browser(pos);
834 bpos->idx = browser.nr_entries++;
835 if (pos->offset != -1) {
836 bpos->idx_asm = browser.nr_asm_entries++;
837 /*
838 * FIXME: short term bandaid to cope with assembly
839 * routines that comes with labels in the same column
840 * as the address in objdump, sigh.
841 *
842 * E.g. copy_user_generic_unrolled
843 */
844 if (pos->offset < (s64)size)
845 browser.offsets[pos->offset] = pos;
846 } else
847 bpos->idx_asm = -1;
848 }
849
850 annotate_browser__mark_jump_targets(&browser, size);
851
852 browser.addr_width = browser.target_width = browser.min_addr_width = hex_width(size);
853 browser.max_addr_width = hex_width(sym->end);
854 browser.jumps_width = width_jumps(browser.max_jump_sources);
855 browser.b.nr_entries = browser.nr_entries;
856 browser.b.entries = &notes->src->source,
857 browser.b.width += 18; /* Percentage */
858 ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
859 list_for_each_entry_safe(pos, n, &notes->src->source, node) {
860 list_del(&pos->node);
861 disasm_line__free(pos);
862 }
863
864out_free_offsets:
865 free(browser.offsets);
866 return ret;
867}
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 2f83e5dc9967..a372a4b02635 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -5,12 +5,12 @@
5#include <newt.h> 5#include <newt.h>
6#include <linux/rbtree.h> 6#include <linux/rbtree.h>
7 7
8#include "../../evsel.h" 8#include "../../util/evsel.h"
9#include "../../evlist.h" 9#include "../../util/evlist.h"
10#include "../../hist.h" 10#include "../../util/hist.h"
11#include "../../pstack.h" 11#include "../../util/pstack.h"
12#include "../../sort.h" 12#include "../../util/sort.h"
13#include "../../util.h" 13#include "../../util/util.h"
14 14
15#include "../browser.h" 15#include "../browser.h"
16#include "../helpline.h" 16#include "../helpline.h"
@@ -840,10 +840,14 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
840 int printed; 840 int printed;
841 const struct dso *dso = self->dso_filter; 841 const struct dso *dso = self->dso_filter;
842 const struct thread *thread = self->thread_filter; 842 const struct thread *thread = self->thread_filter;
843 unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; 843 unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
844 u64 nr_events = self->stats.total_period;
845
846 nr_samples = convert_unit(nr_samples, &unit);
847 printed = scnprintf(bf, size,
848 "Samples: %lu%c of event '%s', Event count (approx.): %lu",
849 nr_samples, unit, ev_name, nr_events);
844 850
845 nr_events = convert_unit(nr_events, &unit);
846 printed = scnprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
847 851
848 if (self->uid_filter_str) 852 if (self->uid_filter_str)
849 printed += snprintf(bf + printed, size - printed, 853 printed += snprintf(bf + printed, size - printed,
@@ -937,7 +941,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
937 goto zoom_dso; 941 goto zoom_dso;
938 case 't': 942 case 't':
939 goto zoom_thread; 943 goto zoom_thread;
940 case 's': 944 case '/':
941 if (ui_browser__input_window("Symbol to show", 945 if (ui_browser__input_window("Symbol to show",
942 "Please enter the name of symbol you want to see", 946 "Please enter the name of symbol you want to see",
943 buf, "ENTER: OK, ESC: Cancel", 947 buf, "ENTER: OK, ESC: Cancel",
@@ -965,7 +969,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
965 "E Expand all callchains\n" 969 "E Expand all callchains\n"
966 "d Zoom into current DSO\n" 970 "d Zoom into current DSO\n"
967 "t Zoom into current Thread\n" 971 "t Zoom into current Thread\n"
968 "s Filter symbol by name"); 972 "/ Filter symbol by name");
969 continue; 973 continue;
970 case K_ENTER: 974 case K_ENTER:
971 case K_RIGHT: 975 case K_RIGHT:
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index eca6575abfd0..98851d55a53e 100644
--- a/tools/perf/util/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -5,9 +5,9 @@
5#include <sys/ttydefaults.h> 5#include <sys/ttydefaults.h>
6#include <string.h> 6#include <string.h>
7#include <linux/bitops.h> 7#include <linux/bitops.h>
8#include "../../util.h" 8#include "../../util/util.h"
9#include "../../debug.h" 9#include "../../util/debug.h"
10#include "../../symbol.h" 10#include "../../util/symbol.h"
11#include "../browser.h" 11#include "../browser.h"
12#include "../helpline.h" 12#include "../helpline.h"
13#include "map.h" 13#include "map.h"
diff --git a/tools/perf/util/ui/browsers/map.h b/tools/perf/ui/browsers/map.h
index df8581a43e17..df8581a43e17 100644
--- a/tools/perf/util/ui/browsers/map.h
+++ b/tools/perf/ui/browsers/map.h
diff --git a/tools/perf/util/gtk/browser.c b/tools/perf/ui/gtk/browser.c
index 258352a2356c..0656c381a89c 100644
--- a/tools/perf/util/gtk/browser.c
+++ b/tools/perf/ui/gtk/browser.c
@@ -9,24 +9,13 @@
9 9
10#define MAX_COLUMNS 32 10#define MAX_COLUMNS 32
11 11
12void perf_gtk_setup_browser(int argc, const char *argv[], 12static void perf_gtk__signal(int sig)
13 bool fallback_to_pager __used)
14{
15 gtk_init(&argc, (char ***)&argv);
16}
17
18void perf_gtk_exit_browser(bool wait_for_ok __used)
19{
20 gtk_main_quit();
21}
22
23static void perf_gtk_signal(int sig)
24{ 13{
25 psignal(sig, "perf"); 14 psignal(sig, "perf");
26 gtk_main_quit(); 15 gtk_main_quit();
27} 16}
28 17
29static void perf_gtk_resize_window(GtkWidget *window) 18static void perf_gtk__resize_window(GtkWidget *window)
30{ 19{
31 GdkRectangle rect; 20 GdkRectangle rect;
32 GdkScreen *screen; 21 GdkScreen *screen;
@@ -46,7 +35,7 @@ static void perf_gtk_resize_window(GtkWidget *window)
46 gtk_window_resize(GTK_WINDOW(window), width, height); 35 gtk_window_resize(GTK_WINDOW(window), width, height);
47} 36}
48 37
49static void perf_gtk_show_hists(GtkWidget *window, struct hists *hists) 38static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
50{ 39{
51 GType col_types[MAX_COLUMNS]; 40 GType col_types[MAX_COLUMNS];
52 GtkCellRenderer *renderer; 41 GtkCellRenderer *renderer;
@@ -142,11 +131,11 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
142 GtkWidget *notebook; 131 GtkWidget *notebook;
143 GtkWidget *window; 132 GtkWidget *window;
144 133
145 signal(SIGSEGV, perf_gtk_signal); 134 signal(SIGSEGV, perf_gtk__signal);
146 signal(SIGFPE, perf_gtk_signal); 135 signal(SIGFPE, perf_gtk__signal);
147 signal(SIGINT, perf_gtk_signal); 136 signal(SIGINT, perf_gtk__signal);
148 signal(SIGQUIT, perf_gtk_signal); 137 signal(SIGQUIT, perf_gtk__signal);
149 signal(SIGTERM, perf_gtk_signal); 138 signal(SIGTERM, perf_gtk__signal);
150 139
151 window = gtk_window_new(GTK_WINDOW_TOPLEVEL); 140 window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
152 141
@@ -168,7 +157,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
168 GTK_POLICY_AUTOMATIC, 157 GTK_POLICY_AUTOMATIC,
169 GTK_POLICY_AUTOMATIC); 158 GTK_POLICY_AUTOMATIC);
170 159
171 perf_gtk_show_hists(scrolled_window, hists); 160 perf_gtk__show_hists(scrolled_window, hists);
172 161
173 tab_label = gtk_label_new(evname); 162 tab_label = gtk_label_new(evname);
174 163
@@ -179,7 +168,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
179 168
180 gtk_widget_show_all(window); 169 gtk_widget_show_all(window);
181 170
182 perf_gtk_resize_window(window); 171 perf_gtk__resize_window(window);
183 172
184 gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); 173 gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
185 174
diff --git a/tools/perf/util/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h
index 75177ee04032..75177ee04032 100644
--- a/tools/perf/util/gtk/gtk.h
+++ b/tools/perf/ui/gtk/gtk.h
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c
new file mode 100644
index 000000000000..829529957766
--- /dev/null
+++ b/tools/perf/ui/gtk/setup.c
@@ -0,0 +1,12 @@
1#include "gtk.h"
2#include "../../util/cache.h"
3
4int perf_gtk__init(void)
5{
6 return gtk_init_check(NULL, NULL) ? 0 : -1;
7}
8
9void perf_gtk__exit(bool wait_for_ok __used)
10{
11 gtk_main_quit();
12}
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/ui/helpline.c
index 2f950c2641c8..2f950c2641c8 100644
--- a/tools/perf/util/ui/helpline.c
+++ b/tools/perf/ui/helpline.c
diff --git a/tools/perf/util/ui/helpline.h b/tools/perf/ui/helpline.h
index 7bab6b34e35e..7bab6b34e35e 100644
--- a/tools/perf/util/ui/helpline.h
+++ b/tools/perf/ui/helpline.h
diff --git a/tools/perf/util/ui/keysyms.h b/tools/perf/ui/keysyms.h
index 809eca5707fa..809eca5707fa 100644
--- a/tools/perf/util/ui/keysyms.h
+++ b/tools/perf/ui/keysyms.h
diff --git a/tools/perf/util/ui/libslang.h b/tools/perf/ui/libslang.h
index 4d54b6450f5b..4d54b6450f5b 100644
--- a/tools/perf/util/ui/libslang.h
+++ b/tools/perf/ui/libslang.h
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/ui/progress.c
index 13aa64e50e11..13aa64e50e11 100644
--- a/tools/perf/util/ui/progress.c
+++ b/tools/perf/ui/progress.c
diff --git a/tools/perf/util/ui/progress.h b/tools/perf/ui/progress.h
index d9c205b59aa1..d9c205b59aa1 100644
--- a/tools/perf/util/ui/progress.h
+++ b/tools/perf/ui/progress.h
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
new file mode 100644
index 000000000000..9f5f888f73e3
--- /dev/null
+++ b/tools/perf/ui/setup.c
@@ -0,0 +1,45 @@
1#include "../cache.h"
2#include "../debug.h"
3
4
5void setup_browser(bool fallback_to_pager)
6{
7 if (!isatty(1) || dump_trace)
8 use_browser = 0;
9
10 /* default to TUI */
11 if (use_browser < 0)
12 use_browser = 1;
13
14 switch (use_browser) {
15 case 2:
16 if (perf_gtk__init() == 0)
17 break;
18 /* fall through */
19 case 1:
20 use_browser = 1;
21 if (ui__init() == 0)
22 break;
23 /* fall through */
24 default:
25 if (fallback_to_pager)
26 setup_pager();
27 break;
28 }
29}
30
31void exit_browser(bool wait_for_ok)
32{
33 switch (use_browser) {
34 case 2:
35 perf_gtk__exit(wait_for_ok);
36 break;
37
38 case 1:
39 ui__exit(wait_for_ok);
40 break;
41
42 default:
43 break;
44 }
45}
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/ui/tui/setup.c
index 85a69faa09aa..d33e943ac434 100644
--- a/tools/perf/util/ui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -2,14 +2,14 @@
2#include <signal.h> 2#include <signal.h>
3#include <stdbool.h> 3#include <stdbool.h>
4 4
5#include "../cache.h" 5#include "../../util/cache.h"
6#include "../debug.h" 6#include "../../util/debug.h"
7#include "browser.h" 7#include "../browser.h"
8#include "helpline.h" 8#include "../helpline.h"
9#include "ui.h" 9#include "../ui.h"
10#include "util.h" 10#include "../util.h"
11#include "libslang.h" 11#include "../libslang.h"
12#include "keysyms.h" 12#include "../keysyms.h"
13 13
14pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; 14pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
15 15
@@ -93,45 +93,26 @@ static void newt_suspend(void *d __used)
93 newtResume(); 93 newtResume();
94} 94}
95 95
96static int ui__init(void)
97{
98 int err = SLkp_init();
99
100 if (err < 0)
101 goto out;
102
103 SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);
104out:
105 return err;
106}
107
108static void ui__exit(void)
109{
110 SLtt_set_cursor_visibility(1);
111 SLsmg_refresh();
112 SLsmg_reset_smg();
113 SLang_reset_tty();
114}
115
116static void ui__signal(int sig) 96static void ui__signal(int sig)
117{ 97{
118 ui__exit(); 98 ui__exit(false);
119 psignal(sig, "perf"); 99 psignal(sig, "perf");
120 exit(0); 100 exit(0);
121} 101}
122 102
123void setup_browser(bool fallback_to_pager) 103int ui__init(void)
124{ 104{
125 if (!isatty(1) || !use_browser || dump_trace) { 105 int err;
126 use_browser = 0;
127 if (fallback_to_pager)
128 setup_pager();
129 return;
130 }
131 106
132 use_browser = 1;
133 newtInit(); 107 newtInit();
134 ui__init(); 108 err = SLkp_init();
109 if (err < 0) {
110 pr_err("TUI initialization failed.\n");
111 goto out;
112 }
113
114 SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB);
115
135 newtSetSuspendCallback(newt_suspend, NULL); 116 newtSetSuspendCallback(newt_suspend, NULL);
136 ui_helpline__init(); 117 ui_helpline__init();
137 ui_browser__init(); 118 ui_browser__init();
@@ -141,15 +122,19 @@ void setup_browser(bool fallback_to_pager)
141 signal(SIGINT, ui__signal); 122 signal(SIGINT, ui__signal);
142 signal(SIGQUIT, ui__signal); 123 signal(SIGQUIT, ui__signal);
143 signal(SIGTERM, ui__signal); 124 signal(SIGTERM, ui__signal);
125out:
126 return err;
144} 127}
145 128
146void exit_browser(bool wait_for_ok) 129void ui__exit(bool wait_for_ok)
147{ 130{
148 if (use_browser > 0) { 131 if (wait_for_ok)
149 if (wait_for_ok) 132 ui__question_window("Fatal Error",
150 ui__question_window("Fatal Error", 133 ui_helpline__last_msg,
151 ui_helpline__last_msg, 134 "Press any key...", 0);
152 "Press any key...", 0); 135
153 ui__exit(); 136 SLtt_set_cursor_visibility(1);
154 } 137 SLsmg_refresh();
138 SLsmg_reset_smg();
139 SLang_reset_tty();
155} 140}
diff --git a/tools/perf/util/ui/ui.h b/tools/perf/ui/ui.h
index 7b67045479f6..7b67045479f6 100644
--- a/tools/perf/util/ui/ui.h
+++ b/tools/perf/ui/ui.h
diff --git a/tools/perf/util/ui/util.c b/tools/perf/ui/util.c
index ad4374a16bb0..ad4374a16bb0 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/ui/util.c
diff --git a/tools/perf/util/ui/util.h b/tools/perf/ui/util.h
index 2d1738bd71c8..2d1738bd71c8 100644
--- a/tools/perf/util/ui/util.h
+++ b/tools/perf/ui/util.h
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 08c6d138a655..8069dfb5ba77 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -18,6 +18,403 @@
18 18
19const char *disassembler_style; 19const char *disassembler_style;
20 20
21static struct ins *ins__find(const char *name);
22static int disasm_line__parse(char *line, char **namep, char **rawp);
23
24static void ins__delete(struct ins_operands *ops)
25{
26 free(ops->source.raw);
27 free(ops->source.name);
28 free(ops->target.raw);
29 free(ops->target.name);
30}
31
32static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
33 struct ins_operands *ops)
34{
35 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
36}
37
38int ins__scnprintf(struct ins *ins, char *bf, size_t size,
39 struct ins_operands *ops)
40{
41 if (ins->ops->scnprintf)
42 return ins->ops->scnprintf(ins, bf, size, ops);
43
44 return ins__raw_scnprintf(ins, bf, size, ops);
45}
46
47static int call__parse(struct ins_operands *ops)
48{
49 char *endptr, *tok, *name;
50
51 ops->target.addr = strtoull(ops->raw, &endptr, 16);
52
53 name = strchr(endptr, '<');
54 if (name == NULL)
55 goto indirect_call;
56
57 name++;
58
59 tok = strchr(name, '>');
60 if (tok == NULL)
61 return -1;
62
63 *tok = '\0';
64 ops->target.name = strdup(name);
65 *tok = '>';
66
67 return ops->target.name == NULL ? -1 : 0;
68
69indirect_call:
70 tok = strchr(endptr, '(');
71 if (tok != NULL) {
72 ops->target.addr = 0;
73 return 0;
74 }
75
76 tok = strchr(endptr, '*');
77 if (tok == NULL)
78 return -1;
79
80 ops->target.addr = strtoull(tok + 1, NULL, 16);
81 return 0;
82}
83
84static int call__scnprintf(struct ins *ins, char *bf, size_t size,
85 struct ins_operands *ops)
86{
87 if (ops->target.name)
88 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
89
90 if (ops->target.addr == 0)
91 return ins__raw_scnprintf(ins, bf, size, ops);
92
93 return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
94}
95
96static struct ins_ops call_ops = {
97 .parse = call__parse,
98 .scnprintf = call__scnprintf,
99};
100
101bool ins__is_call(const struct ins *ins)
102{
103 return ins->ops == &call_ops;
104}
105
106static int jump__parse(struct ins_operands *ops)
107{
108 const char *s = strchr(ops->raw, '+');
109
110 ops->target.addr = strtoll(ops->raw, NULL, 16);
111
112 if (s++ != NULL)
113 ops->target.offset = strtoll(s, NULL, 16);
114 else
115 ops->target.offset = UINT64_MAX;
116
117 return 0;
118}
119
120static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
121 struct ins_operands *ops)
122{
123 return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
124}
125
126static struct ins_ops jump_ops = {
127 .parse = jump__parse,
128 .scnprintf = jump__scnprintf,
129};
130
131bool ins__is_jump(const struct ins *ins)
132{
133 return ins->ops == &jump_ops;
134}
135
136static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
137{
138 char *endptr, *name, *t;
139
140 if (strstr(raw, "(%rip)") == NULL)
141 return 0;
142
143 *addrp = strtoull(comment, &endptr, 16);
144 name = strchr(endptr, '<');
145 if (name == NULL)
146 return -1;
147
148 name++;
149
150 t = strchr(name, '>');
151 if (t == NULL)
152 return 0;
153
154 *t = '\0';
155 *namep = strdup(name);
156 *t = '>';
157
158 return 0;
159}
160
161static int lock__parse(struct ins_operands *ops)
162{
163 char *name;
164
165 ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
166 if (ops->locked.ops == NULL)
167 return 0;
168
169 if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0)
170 goto out_free_ops;
171
172 ops->locked.ins = ins__find(name);
173 if (ops->locked.ins == NULL)
174 goto out_free_ops;
175
176 if (!ops->locked.ins->ops)
177 return 0;
178
179 if (ops->locked.ins->ops->parse)
180 ops->locked.ins->ops->parse(ops->locked.ops);
181
182 return 0;
183
184out_free_ops:
185 free(ops->locked.ops);
186 ops->locked.ops = NULL;
187 return 0;
188}
189
190static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
191 struct ins_operands *ops)
192{
193 int printed;
194
195 if (ops->locked.ins == NULL)
196 return ins__raw_scnprintf(ins, bf, size, ops);
197
198 printed = scnprintf(bf, size, "%-6.6s ", ins->name);
199 return printed + ins__scnprintf(ops->locked.ins, bf + printed,
200 size - printed, ops->locked.ops);
201}
202
203static void lock__delete(struct ins_operands *ops)
204{
205 free(ops->locked.ops);
206 free(ops->target.raw);
207 free(ops->target.name);
208}
209
210static struct ins_ops lock_ops = {
211 .free = lock__delete,
212 .parse = lock__parse,
213 .scnprintf = lock__scnprintf,
214};
215
216static int mov__parse(struct ins_operands *ops)
217{
218 char *s = strchr(ops->raw, ','), *target, *comment, prev;
219
220 if (s == NULL)
221 return -1;
222
223 *s = '\0';
224 ops->source.raw = strdup(ops->raw);
225 *s = ',';
226
227 if (ops->source.raw == NULL)
228 return -1;
229
230 target = ++s;
231
232 while (s[0] != '\0' && !isspace(s[0]))
233 ++s;
234 prev = *s;
235 *s = '\0';
236
237 ops->target.raw = strdup(target);
238 *s = prev;
239
240 if (ops->target.raw == NULL)
241 goto out_free_source;
242
243 comment = strchr(s, '#');
244 if (comment == NULL)
245 return 0;
246
247 while (comment[0] != '\0' && isspace(comment[0]))
248 ++comment;
249
250 comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
251 comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
252
253 return 0;
254
255out_free_source:
256 free(ops->source.raw);
257 ops->source.raw = NULL;
258 return -1;
259}
260
261static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
262 struct ins_operands *ops)
263{
264 return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
265 ops->source.name ?: ops->source.raw,
266 ops->target.name ?: ops->target.raw);
267}
268
269static struct ins_ops mov_ops = {
270 .parse = mov__parse,
271 .scnprintf = mov__scnprintf,
272};
273
274static int dec__parse(struct ins_operands *ops)
275{
276 char *target, *comment, *s, prev;
277
278 target = s = ops->raw;
279
280 while (s[0] != '\0' && !isspace(s[0]))
281 ++s;
282 prev = *s;
283 *s = '\0';
284
285 ops->target.raw = strdup(target);
286 *s = prev;
287
288 if (ops->target.raw == NULL)
289 return -1;
290
291 comment = strchr(s, '#');
292 if (comment == NULL)
293 return 0;
294
295 while (comment[0] != '\0' && isspace(comment[0]))
296 ++comment;
297
298 comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
299
300 return 0;
301}
302
303static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
304 struct ins_operands *ops)
305{
306 return scnprintf(bf, size, "%-6.6s %s", ins->name,
307 ops->target.name ?: ops->target.raw);
308}
309
310static struct ins_ops dec_ops = {
311 .parse = dec__parse,
312 .scnprintf = dec__scnprintf,
313};
314
315static int nop__scnprintf(struct ins *ins __used, char *bf, size_t size,
316 struct ins_operands *ops __used)
317{
318 return scnprintf(bf, size, "%-6.6s", "nop");
319}
320
321static struct ins_ops nop_ops = {
322 .scnprintf = nop__scnprintf,
323};
324
325/*
326 * Must be sorted by name!
327 */
328static struct ins instructions[] = {
329 { .name = "add", .ops = &mov_ops, },
330 { .name = "addl", .ops = &mov_ops, },
331 { .name = "addq", .ops = &mov_ops, },
332 { .name = "addw", .ops = &mov_ops, },
333 { .name = "and", .ops = &mov_ops, },
334 { .name = "bts", .ops = &mov_ops, },
335 { .name = "call", .ops = &call_ops, },
336 { .name = "callq", .ops = &call_ops, },
337 { .name = "cmp", .ops = &mov_ops, },
338 { .name = "cmpb", .ops = &mov_ops, },
339 { .name = "cmpl", .ops = &mov_ops, },
340 { .name = "cmpq", .ops = &mov_ops, },
341 { .name = "cmpw", .ops = &mov_ops, },
342 { .name = "cmpxch", .ops = &mov_ops, },
343 { .name = "dec", .ops = &dec_ops, },
344 { .name = "decl", .ops = &dec_ops, },
345 { .name = "imul", .ops = &mov_ops, },
346 { .name = "inc", .ops = &dec_ops, },
347 { .name = "incl", .ops = &dec_ops, },
348 { .name = "ja", .ops = &jump_ops, },
349 { .name = "jae", .ops = &jump_ops, },
350 { .name = "jb", .ops = &jump_ops, },
351 { .name = "jbe", .ops = &jump_ops, },
352 { .name = "jc", .ops = &jump_ops, },
353 { .name = "jcxz", .ops = &jump_ops, },
354 { .name = "je", .ops = &jump_ops, },
355 { .name = "jecxz", .ops = &jump_ops, },
356 { .name = "jg", .ops = &jump_ops, },
357 { .name = "jge", .ops = &jump_ops, },
358 { .name = "jl", .ops = &jump_ops, },
359 { .name = "jle", .ops = &jump_ops, },
360 { .name = "jmp", .ops = &jump_ops, },
361 { .name = "jmpq", .ops = &jump_ops, },
362 { .name = "jna", .ops = &jump_ops, },
363 { .name = "jnae", .ops = &jump_ops, },
364 { .name = "jnb", .ops = &jump_ops, },
365 { .name = "jnbe", .ops = &jump_ops, },
366 { .name = "jnc", .ops = &jump_ops, },
367 { .name = "jne", .ops = &jump_ops, },
368 { .name = "jng", .ops = &jump_ops, },
369 { .name = "jnge", .ops = &jump_ops, },
370 { .name = "jnl", .ops = &jump_ops, },
371 { .name = "jnle", .ops = &jump_ops, },
372 { .name = "jno", .ops = &jump_ops, },
373 { .name = "jnp", .ops = &jump_ops, },
374 { .name = "jns", .ops = &jump_ops, },
375 { .name = "jnz", .ops = &jump_ops, },
376 { .name = "jo", .ops = &jump_ops, },
377 { .name = "jp", .ops = &jump_ops, },
378 { .name = "jpe", .ops = &jump_ops, },
379 { .name = "jpo", .ops = &jump_ops, },
380 { .name = "jrcxz", .ops = &jump_ops, },
381 { .name = "js", .ops = &jump_ops, },
382 { .name = "jz", .ops = &jump_ops, },
383 { .name = "lea", .ops = &mov_ops, },
384 { .name = "lock", .ops = &lock_ops, },
385 { .name = "mov", .ops = &mov_ops, },
386 { .name = "movb", .ops = &mov_ops, },
387 { .name = "movdqa",.ops = &mov_ops, },
388 { .name = "movl", .ops = &mov_ops, },
389 { .name = "movq", .ops = &mov_ops, },
390 { .name = "movslq", .ops = &mov_ops, },
391 { .name = "movzbl", .ops = &mov_ops, },
392 { .name = "movzwl", .ops = &mov_ops, },
393 { .name = "nop", .ops = &nop_ops, },
394 { .name = "nopl", .ops = &nop_ops, },
395 { .name = "nopw", .ops = &nop_ops, },
396 { .name = "or", .ops = &mov_ops, },
397 { .name = "orl", .ops = &mov_ops, },
398 { .name = "test", .ops = &mov_ops, },
399 { .name = "testb", .ops = &mov_ops, },
400 { .name = "testl", .ops = &mov_ops, },
401 { .name = "xadd", .ops = &mov_ops, },
402};
403
404static int ins__cmp(const void *name, const void *insp)
405{
406 const struct ins *ins = insp;
407
408 return strcmp(name, ins->name);
409}
410
411static struct ins *ins__find(const char *name)
412{
413 const int nmemb = ARRAY_SIZE(instructions);
414
415 return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
416}
417
21int symbol__annotate_init(struct map *map __used, struct symbol *sym) 418int symbol__annotate_init(struct map *map __used, struct symbol *sym)
22{ 419{
23 struct annotation *notes = symbol__annotation(sym); 420 struct annotation *notes = symbol__annotation(sym);
@@ -28,7 +425,7 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym)
28int symbol__alloc_hist(struct symbol *sym) 425int symbol__alloc_hist(struct symbol *sym)
29{ 426{
30 struct annotation *notes = symbol__annotation(sym); 427 struct annotation *notes = symbol__annotation(sym);
31 const size_t size = sym->end - sym->start + 1; 428 const size_t size = symbol__size(sym);
32 size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64)); 429 size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
33 430
34 notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); 431 notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
@@ -78,31 +475,110 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
78 return 0; 475 return 0;
79} 476}
80 477
81static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize) 478static void disasm_line__init_ins(struct disasm_line *dl)
479{
480 dl->ins = ins__find(dl->name);
481
482 if (dl->ins == NULL)
483 return;
484
485 if (!dl->ins->ops)
486 return;
487
488 if (dl->ins->ops->parse)
489 dl->ins->ops->parse(&dl->ops);
490}
491
492static int disasm_line__parse(char *line, char **namep, char **rawp)
493{
494 char *name = line, tmp;
495
496 while (isspace(name[0]))
497 ++name;
498
499 if (name[0] == '\0')
500 return -1;
501
502 *rawp = name + 1;
503
504 while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
505 ++*rawp;
506
507 tmp = (*rawp)[0];
508 (*rawp)[0] = '\0';
509 *namep = strdup(name);
510
511 if (*namep == NULL)
512 goto out_free_name;
513
514 (*rawp)[0] = tmp;
515
516 if ((*rawp)[0] != '\0') {
517 (*rawp)++;
518 while (isspace((*rawp)[0]))
519 ++(*rawp);
520 }
521
522 return 0;
523
524out_free_name:
525 free(*namep);
526 *namep = NULL;
527 return -1;
528}
529
530static struct disasm_line *disasm_line__new(s64 offset, char *line, size_t privsize)
82{ 531{
83 struct objdump_line *self = malloc(sizeof(*self) + privsize); 532 struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
84 533
85 if (self != NULL) { 534 if (dl != NULL) {
86 self->offset = offset; 535 dl->offset = offset;
87 self->line = line; 536 dl->line = strdup(line);
537 if (dl->line == NULL)
538 goto out_delete;
539
540 if (offset != -1) {
541 if (disasm_line__parse(dl->line, &dl->name, &dl->ops.raw) < 0)
542 goto out_free_line;
543
544 disasm_line__init_ins(dl);
545 }
88 } 546 }
89 547
90 return self; 548 return dl;
549
550out_free_line:
551 free(dl->line);
552out_delete:
553 free(dl);
554 return NULL;
555}
556
557void disasm_line__free(struct disasm_line *dl)
558{
559 free(dl->line);
560 free(dl->name);
561 if (dl->ins && dl->ins->ops->free)
562 dl->ins->ops->free(&dl->ops);
563 else
564 ins__delete(&dl->ops);
565 free(dl);
91} 566}
92 567
93void objdump_line__free(struct objdump_line *self) 568int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
94{ 569{
95 free(self->line); 570 if (raw || !dl->ins)
96 free(self); 571 return scnprintf(bf, size, "%-6.6s %s", dl->name, dl->ops.raw);
572
573 return ins__scnprintf(dl->ins, bf, size, &dl->ops);
97} 574}
98 575
99static void objdump__add_line(struct list_head *head, struct objdump_line *line) 576static void disasm__add(struct list_head *head, struct disasm_line *line)
100{ 577{
101 list_add_tail(&line->node, head); 578 list_add_tail(&line->node, head);
102} 579}
103 580
104struct objdump_line *objdump__get_next_ip_line(struct list_head *head, 581struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
105 struct objdump_line *pos)
106{ 582{
107 list_for_each_entry_continue(pos, head, node) 583 list_for_each_entry_continue(pos, head, node)
108 if (pos->offset >= 0) 584 if (pos->offset >= 0)
@@ -111,15 +587,14 @@ struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
111 return NULL; 587 return NULL;
112} 588}
113 589
114static int objdump_line__print(struct objdump_line *oline, struct symbol *sym, 590static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
115 int evidx, u64 len, int min_pcnt, 591 int evidx, u64 len, int min_pcnt, int printed,
116 int printed, int max_lines, 592 int max_lines, struct disasm_line *queue)
117 struct objdump_line *queue)
118{ 593{
119 static const char *prev_line; 594 static const char *prev_line;
120 static const char *prev_color; 595 static const char *prev_color;
121 596
122 if (oline->offset != -1) { 597 if (dl->offset != -1) {
123 const char *path = NULL; 598 const char *path = NULL;
124 unsigned int hits = 0; 599 unsigned int hits = 0;
125 double percent = 0.0; 600 double percent = 0.0;
@@ -127,10 +602,11 @@ static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
127 struct annotation *notes = symbol__annotation(sym); 602 struct annotation *notes = symbol__annotation(sym);
128 struct source_line *src_line = notes->src->lines; 603 struct source_line *src_line = notes->src->lines;
129 struct sym_hist *h = annotation__histogram(notes, evidx); 604 struct sym_hist *h = annotation__histogram(notes, evidx);
130 s64 offset = oline->offset; 605 s64 offset = dl->offset;
131 struct objdump_line *next; 606 const u64 addr = start + offset;
607 struct disasm_line *next;
132 608
133 next = objdump__get_next_ip_line(&notes->src->source, oline); 609 next = disasm__get_next_ip_line(&notes->src->source, dl);
134 610
135 while (offset < (s64)len && 611 while (offset < (s64)len &&
136 (next == NULL || offset < next->offset)) { 612 (next == NULL || offset < next->offset)) {
@@ -155,9 +631,9 @@ static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
155 631
156 if (queue != NULL) { 632 if (queue != NULL) {
157 list_for_each_entry_from(queue, &notes->src->source, node) { 633 list_for_each_entry_from(queue, &notes->src->source, node) {
158 if (queue == oline) 634 if (queue == dl)
159 break; 635 break;
160 objdump_line__print(queue, sym, evidx, len, 636 disasm_line__print(queue, sym, start, evidx, len,
161 0, 0, 1, NULL); 637 0, 0, 1, NULL);
162 } 638 }
163 } 639 }
@@ -180,17 +656,18 @@ static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
180 656
181 color_fprintf(stdout, color, " %7.2f", percent); 657 color_fprintf(stdout, color, " %7.2f", percent);
182 printf(" : "); 658 printf(" : ");
183 color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line); 659 color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr);
660 color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line);
184 } else if (max_lines && printed >= max_lines) 661 } else if (max_lines && printed >= max_lines)
185 return 1; 662 return 1;
186 else { 663 else {
187 if (queue) 664 if (queue)
188 return -1; 665 return -1;
189 666
190 if (!*oline->line) 667 if (!*dl->line)
191 printf(" :\n"); 668 printf(" :\n");
192 else 669 else
193 printf(" : %s\n", oline->line); 670 printf(" : %s\n", dl->line);
194 } 671 }
195 672
196 return 0; 673 return 0;
@@ -200,8 +677,8 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
200 FILE *file, size_t privsize) 677 FILE *file, size_t privsize)
201{ 678{
202 struct annotation *notes = symbol__annotation(sym); 679 struct annotation *notes = symbol__annotation(sym);
203 struct objdump_line *objdump_line; 680 struct disasm_line *dl;
204 char *line = NULL, *tmp, *tmp2, *c; 681 char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
205 size_t line_len; 682 size_t line_len;
206 s64 line_ip, offset = -1; 683 s64 line_ip, offset = -1;
207 684
@@ -219,6 +696,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
219 *c = 0; 696 *c = 0;
220 697
221 line_ip = -1; 698 line_ip = -1;
699 parsed_line = line;
222 700
223 /* 701 /*
224 * Strip leading spaces: 702 * Strip leading spaces:
@@ -246,14 +724,17 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
246 offset = line_ip - start; 724 offset = line_ip - start;
247 if (offset < 0 || (u64)line_ip > end) 725 if (offset < 0 || (u64)line_ip > end)
248 offset = -1; 726 offset = -1;
727 else
728 parsed_line = tmp2 + 1;
249 } 729 }
250 730
251 objdump_line = objdump_line__new(offset, line, privsize); 731 dl = disasm_line__new(offset, parsed_line, privsize);
252 if (objdump_line == NULL) { 732 free(line);
253 free(line); 733
734 if (dl == NULL)
254 return -1; 735 return -1;
255 } 736
256 objdump__add_line(&notes->src->source, objdump_line); 737 disasm__add(&notes->src->source, dl);
257 738
258 return 0; 739 return 0;
259} 740}
@@ -476,7 +957,7 @@ static void symbol__annotate_hits(struct symbol *sym, int evidx)
476{ 957{
477 struct annotation *notes = symbol__annotation(sym); 958 struct annotation *notes = symbol__annotation(sym);
478 struct sym_hist *h = annotation__histogram(notes, evidx); 959 struct sym_hist *h = annotation__histogram(notes, evidx);
479 u64 len = sym->end - sym->start, offset; 960 u64 len = symbol__size(sym), offset;
480 961
481 for (offset = 0; offset < len; ++offset) 962 for (offset = 0; offset < len; ++offset)
482 if (h->addr[offset] != 0) 963 if (h->addr[offset] != 0)
@@ -492,7 +973,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
492 struct dso *dso = map->dso; 973 struct dso *dso = map->dso;
493 const char *filename = dso->long_name, *d_filename; 974 const char *filename = dso->long_name, *d_filename;
494 struct annotation *notes = symbol__annotation(sym); 975 struct annotation *notes = symbol__annotation(sym);
495 struct objdump_line *pos, *queue = NULL; 976 struct disasm_line *pos, *queue = NULL;
977 u64 start = map__rip_2objdump(map, sym->start);
496 int printed = 2, queue_len = 0; 978 int printed = 2, queue_len = 0;
497 int more = 0; 979 int more = 0;
498 u64 len; 980 u64 len;
@@ -502,7 +984,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
502 else 984 else
503 d_filename = basename(filename); 985 d_filename = basename(filename);
504 986
505 len = sym->end - sym->start; 987 len = symbol__size(sym);
506 988
507 printf(" Percent | Source code & Disassembly of %s\n", d_filename); 989 printf(" Percent | Source code & Disassembly of %s\n", d_filename);
508 printf("------------------------------------------------\n"); 990 printf("------------------------------------------------\n");
@@ -516,8 +998,9 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
516 queue_len = 0; 998 queue_len = 0;
517 } 999 }
518 1000
519 switch (objdump_line__print(pos, sym, evidx, len, min_pcnt, 1001 switch (disasm_line__print(pos, sym, start, evidx, len,
520 printed, max_lines, queue)) { 1002 min_pcnt, printed, max_lines,
1003 queue)) {
521 case 0: 1004 case 0:
522 ++printed; 1005 ++printed;
523 if (context) { 1006 if (context) {
@@ -561,7 +1044,7 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
561{ 1044{
562 struct annotation *notes = symbol__annotation(sym); 1045 struct annotation *notes = symbol__annotation(sym);
563 struct sym_hist *h = annotation__histogram(notes, evidx); 1046 struct sym_hist *h = annotation__histogram(notes, evidx);
564 int len = sym->end - sym->start, offset; 1047 int len = symbol__size(sym), offset;
565 1048
566 h->sum = 0; 1049 h->sum = 0;
567 for (offset = 0; offset < len; ++offset) { 1050 for (offset = 0; offset < len; ++offset) {
@@ -570,14 +1053,42 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
570 } 1053 }
571} 1054}
572 1055
573void objdump_line_list__purge(struct list_head *head) 1056void disasm__purge(struct list_head *head)
574{ 1057{
575 struct objdump_line *pos, *n; 1058 struct disasm_line *pos, *n;
576 1059
577 list_for_each_entry_safe(pos, n, head, node) { 1060 list_for_each_entry_safe(pos, n, head, node) {
578 list_del(&pos->node); 1061 list_del(&pos->node);
579 objdump_line__free(pos); 1062 disasm_line__free(pos);
1063 }
1064}
1065
1066static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
1067{
1068 size_t printed;
1069
1070 if (dl->offset == -1)
1071 return fprintf(fp, "%s\n", dl->line);
1072
1073 printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->name);
1074
1075 if (dl->ops.raw[0] != '\0') {
1076 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
1077 dl->ops.raw);
580 } 1078 }
1079
1080 return printed + fprintf(fp, "\n");
1081}
1082
1083size_t disasm__fprintf(struct list_head *head, FILE *fp)
1084{
1085 struct disasm_line *pos;
1086 size_t printed = 0;
1087
1088 list_for_each_entry(pos, head, node)
1089 printed += disasm_line__fprintf(pos, fp);
1090
1091 return printed;
581} 1092}
582 1093
583int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, 1094int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
@@ -592,7 +1103,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
592 if (symbol__annotate(sym, map, 0) < 0) 1103 if (symbol__annotate(sym, map, 0) < 0)
593 return -1; 1104 return -1;
594 1105
595 len = sym->end - sym->start; 1106 len = symbol__size(sym);
596 1107
597 if (print_lines) { 1108 if (print_lines) {
598 symbol__get_source_line(sym, map, evidx, &source_line, 1109 symbol__get_source_line(sym, map, evidx, &source_line,
@@ -605,7 +1116,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
605 if (print_lines) 1116 if (print_lines)
606 symbol__free_source_line(sym, len); 1117 symbol__free_source_line(sym, len);
607 1118
608 objdump_line_list__purge(&symbol__annotation(sym)->src->source); 1119 disasm__purge(&symbol__annotation(sym)->src->source);
609 1120
610 return 0; 1121 return 0;
611} 1122}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index efa5dc82bfae..78a5692dd718 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -2,20 +2,69 @@
2#define __PERF_ANNOTATE_H 2#define __PERF_ANNOTATE_H
3 3
4#include <stdbool.h> 4#include <stdbool.h>
5#include <stdint.h>
5#include "types.h" 6#include "types.h"
6#include "symbol.h" 7#include "symbol.h"
7#include <linux/list.h> 8#include <linux/list.h>
8#include <linux/rbtree.h> 9#include <linux/rbtree.h>
9 10
10struct objdump_line { 11struct ins;
11 struct list_head node; 12
12 s64 offset; 13struct ins_operands {
13 char *line; 14 char *raw;
15 struct {
16 char *raw;
17 char *name;
18 u64 addr;
19 u64 offset;
20 } target;
21 union {
22 struct {
23 char *raw;
24 char *name;
25 u64 addr;
26 } source;
27 struct {
28 struct ins *ins;
29 struct ins_operands *ops;
30 } locked;
31 };
14}; 32};
15 33
16void objdump_line__free(struct objdump_line *self); 34struct ins_ops {
17struct objdump_line *objdump__get_next_ip_line(struct list_head *head, 35 void (*free)(struct ins_operands *ops);
18 struct objdump_line *pos); 36 int (*parse)(struct ins_operands *ops);
37 int (*scnprintf)(struct ins *ins, char *bf, size_t size,
38 struct ins_operands *ops);
39};
40
41struct ins {
42 const char *name;
43 struct ins_ops *ops;
44};
45
46bool ins__is_jump(const struct ins *ins);
47bool ins__is_call(const struct ins *ins);
48int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
49
50struct disasm_line {
51 struct list_head node;
52 s64 offset;
53 char *line;
54 char *name;
55 struct ins *ins;
56 struct ins_operands ops;
57};
58
59static inline bool disasm_line__has_offset(const struct disasm_line *dl)
60{
61 return dl->ops.target.offset != UINT64_MAX;
62}
63
64void disasm_line__free(struct disasm_line *dl);
65struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
66int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
67size_t disasm__fprintf(struct list_head *head, FILE *fp);
19 68
20struct sym_hist { 69struct sym_hist {
21 u64 sum; 70 u64 sum;
@@ -32,7 +81,7 @@ struct source_line {
32 * 81 *
33 * @histogram: Array of addr hit histograms per event being monitored 82 * @histogram: Array of addr hit histograms per event being monitored
34 * @lines: If 'print_lines' is specified, per source code line percentages 83 * @lines: If 'print_lines' is specified, per source code line percentages
35 * @source: source parsed from objdump -dS 84 * @source: source parsed from a disassembler like objdump -dS
36 * 85 *
37 * lines is allocated, percentages calculated and all sorted by percentage 86 * lines is allocated, percentages calculated and all sorted by percentage
38 * when the annotation is about to be presented, so the percentages are for 87 * when the annotation is about to be presented, so the percentages are for
@@ -82,7 +131,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
82 int context); 131 int context);
83void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); 132void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
84void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); 133void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
85void objdump_line_list__purge(struct list_head *head); 134void disasm__purge(struct list_head *head);
86 135
87int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, 136int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
88 bool print_lines, bool full_paths, int min_pcnt, 137 bool print_lines, bool full_paths, int min_pcnt,
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 8dd224df3e54..cff18c617d13 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -33,7 +33,7 @@ extern int pager_use_color;
33 33
34extern int use_browser; 34extern int use_browser;
35 35
36#ifdef NO_NEWT_SUPPORT 36#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
37static inline void setup_browser(bool fallback_to_pager) 37static inline void setup_browser(bool fallback_to_pager)
38{ 38{
39 if (fallback_to_pager) 39 if (fallback_to_pager)
@@ -43,19 +43,29 @@ static inline void exit_browser(bool wait_for_ok __used) {}
43#else 43#else
44void setup_browser(bool fallback_to_pager); 44void setup_browser(bool fallback_to_pager);
45void exit_browser(bool wait_for_ok); 45void exit_browser(bool wait_for_ok);
46
47#ifdef NO_NEWT_SUPPORT
48static inline int ui__init(void)
49{
50 return -1;
51}
52static inline void ui__exit(bool wait_for_ok __used) {}
53#else
54int ui__init(void);
55void ui__exit(bool wait_for_ok);
46#endif 56#endif
47 57
48#ifdef NO_GTK2_SUPPORT 58#ifdef NO_GTK2_SUPPORT
49static inline void perf_gtk_setup_browser(int argc __used, const char *argv[] __used, bool fallback_to_pager) 59static inline int perf_gtk__init(void)
50{ 60{
51 if (fallback_to_pager) 61 return -1;
52 setup_pager();
53} 62}
54static inline void perf_gtk_exit_browser(bool wait_for_ok __used) {} 63static inline void perf_gtk__exit(bool wait_for_ok __used) {}
55#else 64#else
56void perf_gtk_setup_browser(int argc, const char *argv[], bool fallback_to_pager); 65int perf_gtk__init(void);
57void perf_gtk_exit_browser(bool wait_for_ok); 66void perf_gtk__exit(bool wait_for_ok);
58#endif 67#endif
68#endif /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
59 69
60char *alias_lookup(const char *alias); 70char *alias_lookup(const char *alias);
61int split_cmdline(char *cmdline, const char ***argv); 71int split_cmdline(char *cmdline, const char ***argv);
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 26817daa2961..efb1fce259a4 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -11,6 +11,7 @@
11#include "event.h" 11#include "event.h"
12#include "debug.h" 12#include "debug.h"
13#include "util.h" 13#include "util.h"
14#include "target.h"
14 15
15int verbose; 16int verbose;
16bool dump_trace = false, quiet = false; 17bool dump_trace = false, quiet = false;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index f2ce88d04f54..6bebe7f0a20c 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -26,7 +26,7 @@ static inline void ui_progress__update(u64 curr __used, u64 total __used,
26#else 26#else
27extern char ui_helpline__last_msg[]; 27extern char ui_helpline__last_msg[];
28int ui_helpline__show_help(const char *format, va_list ap); 28int ui_helpline__show_help(const char *format, va_list ap);
29#include "ui/progress.h" 29#include "../ui/progress.h"
30int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); 30int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
31#endif 31#endif
32 32
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 1986d8051bd1..4ac5f5ae4ce9 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -11,6 +11,7 @@
11#include <poll.h> 11#include <poll.h>
12#include "cpumap.h" 12#include "cpumap.h"
13#include "thread_map.h" 13#include "thread_map.h"
14#include "target.h"
14#include "evlist.h" 15#include "evlist.h"
15#include "evsel.h" 16#include "evsel.h"
16#include <unistd.h> 17#include <unistd.h>
@@ -599,18 +600,21 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
599 return perf_evlist__mmap_per_cpu(evlist, prot, mask); 600 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
600} 601}
601 602
602int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid, 603int perf_evlist__create_maps(struct perf_evlist *evlist,
603 const char *target_tid, uid_t uid, const char *cpu_list) 604 struct perf_target *target)
604{ 605{
605 evlist->threads = thread_map__new_str(target_pid, target_tid, uid); 606 evlist->threads = thread_map__new_str(target->pid, target->tid,
607 target->uid);
606 608
607 if (evlist->threads == NULL) 609 if (evlist->threads == NULL)
608 return -1; 610 return -1;
609 611
610 if (uid != UINT_MAX || (cpu_list == NULL && target_tid)) 612 if (perf_target__has_task(target))
613 evlist->cpus = cpu_map__dummy_new();
614 else if (!perf_target__has_cpu(target) && !target->uses_mmap)
611 evlist->cpus = cpu_map__dummy_new(); 615 evlist->cpus = cpu_map__dummy_new();
612 else 616 else
613 evlist->cpus = cpu_map__new(cpu_list); 617 evlist->cpus = cpu_map__new(target->cpu_list);
614 618
615 if (evlist->cpus == NULL) 619 if (evlist->cpus == NULL)
616 goto out_delete_threads; 620 goto out_delete_threads;
@@ -827,7 +831,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
827 exit(-1); 831 exit(-1);
828 } 832 }
829 833
830 if (!opts->system_wide && !opts->target_tid && !opts->target_pid) 834 if (perf_target__none(&opts->target))
831 evlist->threads->map[0] = evlist->workload.pid; 835 evlist->threads->map[0] = evlist->workload.pid;
832 836
833 close(child_ready_pipe[1]); 837 close(child_ready_pipe[1]);
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 21f1c9e57f13..58abb63ac13a 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -106,8 +106,8 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
106 evlist->threads = threads; 106 evlist->threads = threads;
107} 107}
108 108
109int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid, 109int perf_evlist__create_maps(struct perf_evlist *evlist,
110 const char *tid, uid_t uid, const char *cpu_list); 110 struct perf_target *target);
111void perf_evlist__delete_maps(struct perf_evlist *evlist); 111void perf_evlist__delete_maps(struct perf_evlist *evlist);
112int perf_evlist__set_filters(struct perf_evlist *evlist); 112int perf_evlist__set_filters(struct perf_evlist *evlist);
113 113
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 8c13dbcb84b9..f4f427ce4d64 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -14,6 +14,7 @@
14#include "util.h" 14#include "util.h"
15#include "cpumap.h" 15#include "cpumap.h"
16#include "thread_map.h" 16#include "thread_map.h"
17#include "target.h"
17 18
18#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 19#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
19#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0)) 20#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -69,6 +70,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
69 struct perf_event_attr *attr = &evsel->attr; 70 struct perf_event_attr *attr = &evsel->attr;
70 int track = !evsel->idx; /* only the first counter needs these */ 71 int track = !evsel->idx; /* only the first counter needs these */
71 72
73 attr->disabled = 1;
72 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; 74 attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
73 attr->inherit = !opts->no_inherit; 75 attr->inherit = !opts->no_inherit;
74 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 76 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -106,15 +108,15 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
106 if (opts->call_graph) 108 if (opts->call_graph)
107 attr->sample_type |= PERF_SAMPLE_CALLCHAIN; 109 attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
108 110
109 if (opts->system_wide) 111 if (opts->target.system_wide)
110 attr->sample_type |= PERF_SAMPLE_CPU; 112 attr->sample_type |= PERF_SAMPLE_CPU;
111 113
112 if (opts->period) 114 if (opts->period)
113 attr->sample_type |= PERF_SAMPLE_PERIOD; 115 attr->sample_type |= PERF_SAMPLE_PERIOD;
114 116
115 if (!opts->sample_id_all_missing && 117 if (!opts->sample_id_all_missing &&
116 (opts->sample_time || opts->system_wide || 118 (opts->sample_time || !opts->no_inherit ||
117 !opts->no_inherit || opts->cpu_list)) 119 perf_target__has_cpu(&opts->target)))
118 attr->sample_type |= PERF_SAMPLE_TIME; 120 attr->sample_type |= PERF_SAMPLE_TIME;
119 121
120 if (opts->raw_samples) { 122 if (opts->raw_samples) {
@@ -135,9 +137,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
135 attr->mmap = track; 137 attr->mmap = track;
136 attr->comm = track; 138 attr->comm = track;
137 139
138 if (!opts->target_pid && !opts->target_tid && !opts->system_wide && 140 if (perf_target__none(&opts->target) &&
139 (!opts->group || evsel == first)) { 141 (!opts->group || evsel == first)) {
140 attr->disabled = 1;
141 attr->enable_on_exec = 1; 142 attr->enable_on_exec = 1;
142 } 143 }
143} 144}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index c0b70c697a36..538598012139 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -31,21 +31,16 @@ static const char **header_argv;
31 31
32int perf_header__push_event(u64 id, const char *name) 32int perf_header__push_event(u64 id, const char *name)
33{ 33{
34 struct perf_trace_event_type *nevents;
35
34 if (strlen(name) > MAX_EVENT_NAME) 36 if (strlen(name) > MAX_EVENT_NAME)
35 pr_warning("Event %s will be truncated\n", name); 37 pr_warning("Event %s will be truncated\n", name);
36 38
37 if (!events) { 39 nevents = realloc(events, (event_count + 1) * sizeof(*events));
38 events = malloc(sizeof(struct perf_trace_event_type)); 40 if (nevents == NULL)
39 if (events == NULL) 41 return -ENOMEM;
40 return -ENOMEM; 42 events = nevents;
41 } else {
42 struct perf_trace_event_type *nevents;
43 43
44 nevents = realloc(events, (event_count + 1) * sizeof(*events));
45 if (nevents == NULL)
46 return -ENOMEM;
47 events = nevents;
48 }
49 memset(&events[event_count], 0, sizeof(struct perf_trace_event_type)); 44 memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
50 events[event_count].event_id = id; 45 events[event_count].event_id = id;
51 strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1); 46 strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 9f6d630d5316..1293b5ebea4d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -599,7 +599,7 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
599 if (chain->ms.sym) 599 if (chain->ms.sym)
600 ret += fprintf(fp, "%s\n", chain->ms.sym->name); 600 ret += fprintf(fp, "%s\n", chain->ms.sym->name);
601 else 601 else
602 ret += fprintf(fp, "%p\n", (void *)(long)chain->ip); 602 ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);
603 603
604 return ret; 604 return ret;
605} 605}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 2cae9df40e04..cfc64e293f90 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -138,7 +138,7 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
138#define K_LEFT -1 138#define K_LEFT -1
139#define K_RIGHT -2 139#define K_RIGHT -2
140#else 140#else
141#include "ui/keysyms.h" 141#include "../ui/keysyms.h"
142int hist_entry__tui_annotate(struct hist_entry *he, int evidx, 142int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
143 void(*timer)(void *arg), void *arg, int delay_secs); 143 void(*timer)(void *arg), void *arg, int delay_secs);
144 144
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5b3a0ef4e232..c7fc18a33d54 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -593,17 +593,27 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
593static int config_term(struct perf_event_attr *attr, 593static int config_term(struct perf_event_attr *attr,
594 struct parse_events__term *term) 594 struct parse_events__term *term)
595{ 595{
596 switch (term->type) { 596#define CHECK_TYPE_VAL(type) \
597do { \
598 if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \
599 return -EINVAL; \
600} while (0)
601
602 switch (term->type_term) {
597 case PARSE_EVENTS__TERM_TYPE_CONFIG: 603 case PARSE_EVENTS__TERM_TYPE_CONFIG:
604 CHECK_TYPE_VAL(NUM);
598 attr->config = term->val.num; 605 attr->config = term->val.num;
599 break; 606 break;
600 case PARSE_EVENTS__TERM_TYPE_CONFIG1: 607 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
608 CHECK_TYPE_VAL(NUM);
601 attr->config1 = term->val.num; 609 attr->config1 = term->val.num;
602 break; 610 break;
603 case PARSE_EVENTS__TERM_TYPE_CONFIG2: 611 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
612 CHECK_TYPE_VAL(NUM);
604 attr->config2 = term->val.num; 613 attr->config2 = term->val.num;
605 break; 614 break;
606 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: 615 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
616 CHECK_TYPE_VAL(NUM);
607 attr->sample_period = term->val.num; 617 attr->sample_period = term->val.num;
608 break; 618 break;
609 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: 619 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
@@ -615,7 +625,9 @@ static int config_term(struct perf_event_attr *attr,
615 default: 625 default:
616 return -EINVAL; 626 return -EINVAL;
617 } 627 }
628
618 return 0; 629 return 0;
630#undef CHECK_TYPE_VAL
619} 631}
620 632
621static int config_attr(struct perf_event_attr *attr, 633static int config_attr(struct perf_event_attr *attr,
@@ -1015,11 +1027,12 @@ void print_events(const char *event_glob)
1015 1027
1016int parse_events__is_hardcoded_term(struct parse_events__term *term) 1028int parse_events__is_hardcoded_term(struct parse_events__term *term)
1017{ 1029{
1018 return term->type <= PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX; 1030 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
1019} 1031}
1020 1032
1021int parse_events__new_term(struct parse_events__term **_term, int type, 1033static int new_term(struct parse_events__term **_term, int type_val,
1022 char *config, char *str, long num) 1034 int type_term, char *config,
1035 char *str, long num)
1023{ 1036{
1024 struct parse_events__term *term; 1037 struct parse_events__term *term;
1025 1038
@@ -1028,15 +1041,11 @@ int parse_events__new_term(struct parse_events__term **_term, int type,
1028 return -ENOMEM; 1041 return -ENOMEM;
1029 1042
1030 INIT_LIST_HEAD(&term->list); 1043 INIT_LIST_HEAD(&term->list);
1031 term->type = type; 1044 term->type_val = type_val;
1045 term->type_term = type_term;
1032 term->config = config; 1046 term->config = config;
1033 1047
1034 switch (type) { 1048 switch (type_val) {
1035 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1036 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1037 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1038 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1039 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1040 case PARSE_EVENTS__TERM_TYPE_NUM: 1049 case PARSE_EVENTS__TERM_TYPE_NUM:
1041 term->val.num = num; 1050 term->val.num = num;
1042 break; 1051 break;
@@ -1051,6 +1060,20 @@ int parse_events__new_term(struct parse_events__term **_term, int type,
1051 return 0; 1060 return 0;
1052} 1061}
1053 1062
1063int parse_events__term_num(struct parse_events__term **term,
1064 int type_term, char *config, long num)
1065{
1066 return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
1067 config, NULL, num);
1068}
1069
1070int parse_events__term_str(struct parse_events__term **term,
1071 int type_term, char *config, char *str)
1072{
1073 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
1074 config, str, 0);
1075}
1076
1054void parse_events__free_terms(struct list_head *terms) 1077void parse_events__free_terms(struct list_head *terms)
1055{ 1078{
1056 struct parse_events__term *term, *h; 1079 struct parse_events__term *term, *h;
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index ca069f893381..3fddd610d350 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -4,7 +4,9 @@
4 * Parse symbolic events/counts passed in as options: 4 * Parse symbolic events/counts passed in as options:
5 */ 5 */
6 6
7#include <stdbool.h>
7#include "../../../include/linux/perf_event.h" 8#include "../../../include/linux/perf_event.h"
9#include "types.h"
8 10
9struct list_head; 11struct list_head;
10struct perf_evsel; 12struct perf_evsel;
@@ -34,16 +36,17 @@ extern int parse_filter(const struct option *opt, const char *str, int unset);
34#define EVENTS_HELP_MAX (128*1024) 36#define EVENTS_HELP_MAX (128*1024)
35 37
36enum { 38enum {
39 PARSE_EVENTS__TERM_TYPE_NUM,
40 PARSE_EVENTS__TERM_TYPE_STR,
41};
42
43enum {
44 PARSE_EVENTS__TERM_TYPE_USER,
37 PARSE_EVENTS__TERM_TYPE_CONFIG, 45 PARSE_EVENTS__TERM_TYPE_CONFIG,
38 PARSE_EVENTS__TERM_TYPE_CONFIG1, 46 PARSE_EVENTS__TERM_TYPE_CONFIG1,
39 PARSE_EVENTS__TERM_TYPE_CONFIG2, 47 PARSE_EVENTS__TERM_TYPE_CONFIG2,
40 PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD, 48 PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
41 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE, 49 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
42 PARSE_EVENTS__TERM_TYPE_NUM,
43 PARSE_EVENTS__TERM_TYPE_STR,
44
45 PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX =
46 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
47}; 50};
48 51
49struct parse_events__term { 52struct parse_events__term {
@@ -52,14 +55,16 @@ struct parse_events__term {
52 char *str; 55 char *str;
53 long num; 56 long num;
54 } val; 57 } val;
55 int type; 58 int type_val;
56 59 int type_term;
57 struct list_head list; 60 struct list_head list;
58}; 61};
59 62
60int parse_events__is_hardcoded_term(struct parse_events__term *term); 63int parse_events__is_hardcoded_term(struct parse_events__term *term);
61int parse_events__new_term(struct parse_events__term **term, int type, 64int parse_events__term_num(struct parse_events__term **_term,
62 char *config, char *str, long num); 65 int type_term, char *config, long num);
66int parse_events__term_str(struct parse_events__term **_term,
67 int type_term, char *config, char *str);
63void parse_events__free_terms(struct list_head *terms); 68void parse_events__free_terms(struct list_head *terms);
64int parse_events_modifier(struct list_head *list __used, char *str __used); 69int parse_events_modifier(struct list_head *list __used, char *str __used);
65int parse_events_add_tracepoint(struct list_head *list, int *idx, 70int parse_events_add_tracepoint(struct list_head *list, int *idx,
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index d9637da7333c..936913ea0ab6 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -176,8 +176,8 @@ PE_NAME '=' PE_NAME
176{ 176{
177 struct parse_events__term *term; 177 struct parse_events__term *term;
178 178
179 ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_STR, 179 ABORT_ON(parse_events__term_str(&term, PARSE_EVENTS__TERM_TYPE_USER,
180 $1, $3, 0)); 180 $1, $3));
181 $$ = term; 181 $$ = term;
182} 182}
183| 183|
@@ -185,8 +185,8 @@ PE_NAME '=' PE_VALUE
185{ 185{
186 struct parse_events__term *term; 186 struct parse_events__term *term;
187 187
188 ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM, 188 ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER,
189 $1, NULL, $3)); 189 $1, $3));
190 $$ = term; 190 $$ = term;
191} 191}
192| 192|
@@ -194,8 +194,8 @@ PE_NAME
194{ 194{
195 struct parse_events__term *term; 195 struct parse_events__term *term;
196 196
197 ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM, 197 ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER,
198 $1, NULL, 1)); 198 $1, 1));
199 $$ = term; 199 $$ = term;
200} 200}
201| 201|
@@ -203,7 +203,7 @@ PE_TERM '=' PE_VALUE
203{ 203{
204 struct parse_events__term *term; 204 struct parse_events__term *term;
205 205
206 ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, $3)); 206 ABORT_ON(parse_events__term_num(&term, $1, NULL, $3));
207 $$ = term; 207 $$ = term;
208} 208}
209| 209|
@@ -211,7 +211,7 @@ PE_TERM
211{ 211{
212 struct parse_events__term *term; 212 struct parse_events__term *term;
213 213
214 ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, 1)); 214 ABORT_ON(parse_events__term_num(&term, $1, NULL, 1));
215 $$ = term; 215 $$ = term;
216} 216}
217 217
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index cb08a118e811..8ee219b7285b 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -225,7 +225,7 @@ static int pmu_config_term(struct list_head *formats,
225 if (parse_events__is_hardcoded_term(term)) 225 if (parse_events__is_hardcoded_term(term))
226 return 0; 226 return 0;
227 227
228 if (term->type != PARSE_EVENTS__TERM_TYPE_NUM) 228 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
229 return -EINVAL; 229 return -EINVAL;
230 230
231 format = pmu_find_format(formats, term->config); 231 format = pmu_find_format(formats, term->config);
@@ -246,6 +246,11 @@ static int pmu_config_term(struct list_head *formats,
246 return -EINVAL; 246 return -EINVAL;
247 } 247 }
248 248
249 /*
250 * XXX If we ever decide to go with string values for
251 * non-hardcoded terms, here's the place to translate
252 * them into value.
253 */
249 *vp |= pmu_format_value(format->bits, term->val.num); 254 *vp |= pmu_format_value(format->bits, term->val.num);
250 return 0; 255 return 0;
251} 256}
@@ -324,49 +329,58 @@ static struct test_format {
324/* Simulated users input. */ 329/* Simulated users input. */
325static struct parse_events__term test_terms[] = { 330static struct parse_events__term test_terms[] = {
326 { 331 {
327 .config = (char *) "krava01", 332 .config = (char *) "krava01",
328 .val.num = 15, 333 .val.num = 15,
329 .type = PARSE_EVENTS__TERM_TYPE_NUM, 334 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
335 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
330 }, 336 },
331 { 337 {
332 .config = (char *) "krava02", 338 .config = (char *) "krava02",
333 .val.num = 170, 339 .val.num = 170,
334 .type = PARSE_EVENTS__TERM_TYPE_NUM, 340 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
341 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
335 }, 342 },
336 { 343 {
337 .config = (char *) "krava03", 344 .config = (char *) "krava03",
338 .val.num = 1, 345 .val.num = 1,
339 .type = PARSE_EVENTS__TERM_TYPE_NUM, 346 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
347 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
340 }, 348 },
341 { 349 {
342 .config = (char *) "krava11", 350 .config = (char *) "krava11",
343 .val.num = 27, 351 .val.num = 27,
344 .type = PARSE_EVENTS__TERM_TYPE_NUM, 352 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
353 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
345 }, 354 },
346 { 355 {
347 .config = (char *) "krava12", 356 .config = (char *) "krava12",
348 .val.num = 1, 357 .val.num = 1,
349 .type = PARSE_EVENTS__TERM_TYPE_NUM, 358 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
359 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
350 }, 360 },
351 { 361 {
352 .config = (char *) "krava13", 362 .config = (char *) "krava13",
353 .val.num = 2, 363 .val.num = 2,
354 .type = PARSE_EVENTS__TERM_TYPE_NUM, 364 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
365 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
355 }, 366 },
356 { 367 {
357 .config = (char *) "krava21", 368 .config = (char *) "krava21",
358 .val.num = 119, 369 .val.num = 119,
359 .type = PARSE_EVENTS__TERM_TYPE_NUM, 370 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
371 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
360 }, 372 },
361 { 373 {
362 .config = (char *) "krava22", 374 .config = (char *) "krava22",
363 .val.num = 11, 375 .val.num = 11,
364 .type = PARSE_EVENTS__TERM_TYPE_NUM, 376 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
377 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
365 }, 378 },
366 { 379 {
367 .config = (char *) "krava23", 380 .config = (char *) "krava23",
368 .val.num = 2, 381 .val.num = 2,
369 .type = PARSE_EVENTS__TERM_TYPE_NUM, 382 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
383 .type_term = PARSE_EVENTS__TERM_TYPE_USER,
370 }, 384 },
371}; 385};
372#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term)) 386#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index c2623c6f9b51..acb9795286c4 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -37,7 +37,7 @@ PyMODINIT_FUNC initperf_trace_context(void);
37#define FTRACE_MAX_EVENT \ 37#define FTRACE_MAX_EVENT \
38 ((1 << (sizeof(unsigned short) * 8)) - 1) 38 ((1 << (sizeof(unsigned short) * 8)) - 1)
39 39
40struct event *events[FTRACE_MAX_EVENT]; 40struct event_format *events[FTRACE_MAX_EVENT];
41 41
42#define MAX_FIELDS 64 42#define MAX_FIELDS 64
43#define N_COMMON_FIELDS 7 43#define N_COMMON_FIELDS 7
@@ -136,7 +136,7 @@ static void define_field(enum print_arg_type field_type,
136 Py_DECREF(t); 136 Py_DECREF(t);
137} 137}
138 138
139static void define_event_symbols(struct event *event, 139static void define_event_symbols(struct event_format *event,
140 const char *ev_name, 140 const char *ev_name,
141 struct print_arg *args) 141 struct print_arg *args)
142{ 142{
@@ -178,6 +178,10 @@ static void define_event_symbols(struct event *event,
178 define_event_symbols(event, ev_name, args->op.right); 178 define_event_symbols(event, ev_name, args->op.right);
179 break; 179 break;
180 default: 180 default:
181 /* gcc warns for these? */
182 case PRINT_BSTRING:
183 case PRINT_DYNAMIC_ARRAY:
184 case PRINT_FUNC:
181 /* we should warn... */ 185 /* we should warn... */
182 return; 186 return;
183 } 187 }
@@ -186,10 +190,10 @@ static void define_event_symbols(struct event *event,
186 define_event_symbols(event, ev_name, args->next); 190 define_event_symbols(event, ev_name, args->next);
187} 191}
188 192
189static inline struct event *find_cache_event(int type) 193static inline struct event_format *find_cache_event(int type)
190{ 194{
191 static char ev_name[256]; 195 static char ev_name[256];
192 struct event *event; 196 struct event_format *event;
193 197
194 if (events[type]) 198 if (events[type])
195 return events[type]; 199 return events[type];
@@ -216,7 +220,7 @@ static void python_process_event(union perf_event *pevent __unused,
216 struct format_field *field; 220 struct format_field *field;
217 unsigned long long val; 221 unsigned long long val;
218 unsigned long s, ns; 222 unsigned long s, ns;
219 struct event *event; 223 struct event_format *event;
220 unsigned n = 0; 224 unsigned n = 0;
221 int type; 225 int type;
222 int pid; 226 int pid;
@@ -436,7 +440,7 @@ out:
436 440
437static int python_generate_script(const char *outfile) 441static int python_generate_script(const char *outfile)
438{ 442{
439 struct event *event = NULL; 443 struct event_format *event = NULL;
440 struct format_field *f; 444 struct format_field *f;
441 char fname[PATH_MAX]; 445 char fname[PATH_MAX];
442 int not_first, count; 446 int not_first, count;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 1efd3bee6336..4dcc8f3190cf 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1108,16 +1108,10 @@ more:
1108 } 1108 }
1109 1109
1110 if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) { 1110 if ((skip = perf_session__process_event(self, &event, tool, head)) < 0) {
1111 dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", 1111 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1112 head, event.header.size, event.header.type); 1112 head, event.header.size, event.header.type);
1113 /* 1113 err = -EINVAL;
1114 * assume we lost track of the stream, check alignment, and 1114 goto out_err;
1115 * increment a single u64 in the hope to catch on again 'soon'.
1116 */
1117 if (unlikely(head & 7))
1118 head &= ~7ULL;
1119
1120 size = 8;
1121 } 1115 }
1122 1116
1123 head += size; 1117 head += size;
@@ -1226,17 +1220,11 @@ more:
1226 1220
1227 if (size == 0 || 1221 if (size == 0 ||
1228 perf_session__process_event(session, event, tool, file_pos) < 0) { 1222 perf_session__process_event(session, event, tool, file_pos) < 0) {
1229 dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", 1223 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1230 file_offset + head, event->header.size, 1224 file_offset + head, event->header.size,
1231 event->header.type); 1225 event->header.type);
1232 /* 1226 err = -EINVAL;
1233 * assume we lost track of the stream, check alignment, and 1227 goto out_err;
1234 * increment a single u64 in the hope to catch on again 'soon'.
1235 */
1236 if (unlikely(head & 7))
1237 head &= ~7ULL;
1238
1239 size = 8;
1240 } 1228 }
1241 1229
1242 head += size; 1230 head += size;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index ac49ef208a5f..1f003884f1ab 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -65,6 +65,11 @@ struct symbol {
65 65
66void symbol__delete(struct symbol *sym); 66void symbol__delete(struct symbol *sym);
67 67
68static inline size_t symbol__size(const struct symbol *sym)
69{
70 return sym->end - sym->start + 1;
71}
72
68struct strlist; 73struct strlist;
69 74
70struct symbol_conf { 75struct symbol_conf {
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
new file mode 100644
index 000000000000..1064d5b148ad
--- /dev/null
+++ b/tools/perf/util/target.c
@@ -0,0 +1,142 @@
1/*
2 * Helper functions for handling target threads/cpus
3 *
4 * Copyright (C) 2012, LG Electronics, Namhyung Kim <namhyung.kim@lge.com>
5 *
6 * Released under the GPL v2.
7 */
8
9#include "target.h"
10#include "debug.h"
11
12#include <pwd.h>
13#include <string.h>
14
15
16enum perf_target_errno perf_target__validate(struct perf_target *target)
17{
18 enum perf_target_errno ret = PERF_ERRNO_TARGET__SUCCESS;
19
20 if (target->pid)
21 target->tid = target->pid;
22
23 /* CPU and PID are mutually exclusive */
24 if (target->tid && target->cpu_list) {
25 target->cpu_list = NULL;
26 if (ret == PERF_ERRNO_TARGET__SUCCESS)
27 ret = PERF_ERRNO_TARGET__PID_OVERRIDE_CPU;
28 }
29
30 /* UID and PID are mutually exclusive */
31 if (target->tid && target->uid_str) {
32 target->uid_str = NULL;
33 if (ret == PERF_ERRNO_TARGET__SUCCESS)
34 ret = PERF_ERRNO_TARGET__PID_OVERRIDE_UID;
35 }
36
37 /* UID and CPU are mutually exclusive */
38 if (target->uid_str && target->cpu_list) {
39 target->cpu_list = NULL;
40 if (ret == PERF_ERRNO_TARGET__SUCCESS)
41 ret = PERF_ERRNO_TARGET__UID_OVERRIDE_CPU;
42 }
43
44 /* PID and SYSTEM are mutually exclusive */
45 if (target->tid && target->system_wide) {
46 target->system_wide = false;
47 if (ret == PERF_ERRNO_TARGET__SUCCESS)
48 ret = PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM;
49 }
50
51 /* UID and SYSTEM are mutually exclusive */
52 if (target->uid_str && target->system_wide) {
53 target->system_wide = false;
54 if (ret == PERF_ERRNO_TARGET__SUCCESS)
55 ret = PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM;
56 }
57
58 return ret;
59}
60
61enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
62{
63 struct passwd pwd, *result;
64 char buf[1024];
65 const char *str = target->uid_str;
66
67 target->uid = UINT_MAX;
68 if (str == NULL)
69 return PERF_ERRNO_TARGET__SUCCESS;
70
71 /* Try user name first */
72 getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
73
74 if (result == NULL) {
75 /*
76 * The user name not found. Maybe it's a UID number.
77 */
78 char *endptr;
79 int uid = strtol(str, &endptr, 10);
80
81 if (*endptr != '\0')
82 return PERF_ERRNO_TARGET__INVALID_UID;
83
84 getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
85
86 if (result == NULL)
87 return PERF_ERRNO_TARGET__USER_NOT_FOUND;
88 }
89
90 target->uid = result->pw_uid;
91 return PERF_ERRNO_TARGET__SUCCESS;
92}
93
94/*
95 * This must have a same ordering as the enum perf_target_errno.
96 */
97static const char *perf_target__error_str[] = {
98 "PID/TID switch overriding CPU",
99 "PID/TID switch overriding UID",
100 "UID switch overriding CPU",
101 "PID/TID switch overriding SYSTEM",
102 "UID switch overriding SYSTEM",
103 "Invalid User: %s",
104 "Problems obtaining information for user %s",
105};
106
107int perf_target__strerror(struct perf_target *target, int errnum,
108 char *buf, size_t buflen)
109{
110 int idx;
111 const char *msg;
112
113 if (errnum >= 0) {
114 strerror_r(errnum, buf, buflen);
115 return 0;
116 }
117
118 if (errnum < __PERF_ERRNO_TARGET__START ||
119 errnum >= __PERF_ERRNO_TARGET__END)
120 return -1;
121
122 idx = errnum - __PERF_ERRNO_TARGET__START;
123 msg = perf_target__error_str[idx];
124
125 switch (errnum) {
126 case PERF_ERRNO_TARGET__PID_OVERRIDE_CPU
127 ... PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM:
128 snprintf(buf, buflen, "%s", msg);
129 break;
130
131 case PERF_ERRNO_TARGET__INVALID_UID:
132 case PERF_ERRNO_TARGET__USER_NOT_FOUND:
133 snprintf(buf, buflen, msg, target->uid_str);
134 break;
135
136 default:
137 /* cannot reach here */
138 break;
139 }
140
141 return 0;
142}
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
new file mode 100644
index 000000000000..a4be8575fda5
--- /dev/null
+++ b/tools/perf/util/target.h
@@ -0,0 +1,65 @@
1#ifndef _PERF_TARGET_H
2#define _PERF_TARGET_H
3
4#include <stdbool.h>
5#include <sys/types.h>
6
7struct perf_target {
8 const char *pid;
9 const char *tid;
10 const char *cpu_list;
11 const char *uid_str;
12 uid_t uid;
13 bool system_wide;
14 bool uses_mmap;
15};
16
17enum perf_target_errno {
18 PERF_ERRNO_TARGET__SUCCESS = 0,
19
20 /*
21 * Choose an arbitrary negative big number not to clash with standard
22 * errno since SUS requires the errno has distinct positive values.
23 * See 'Issue 6' in the link below.
24 *
25 * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
26 */
27 __PERF_ERRNO_TARGET__START = -10000,
28
29
30 /* for perf_target__validate() */
31 PERF_ERRNO_TARGET__PID_OVERRIDE_CPU = __PERF_ERRNO_TARGET__START,
32 PERF_ERRNO_TARGET__PID_OVERRIDE_UID,
33 PERF_ERRNO_TARGET__UID_OVERRIDE_CPU,
34 PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM,
35 PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM,
36
37 /* for perf_target__parse_uid() */
38 PERF_ERRNO_TARGET__INVALID_UID,
39 PERF_ERRNO_TARGET__USER_NOT_FOUND,
40
41 __PERF_ERRNO_TARGET__END,
42};
43
44enum perf_target_errno perf_target__validate(struct perf_target *target);
45enum perf_target_errno perf_target__parse_uid(struct perf_target *target);
46
47int perf_target__strerror(struct perf_target *target, int errnum, char *buf,
48 size_t buflen);
49
50static inline bool perf_target__has_task(struct perf_target *target)
51{
52 return target->tid || target->pid || target->uid_str;
53}
54
55static inline bool perf_target__has_cpu(struct perf_target *target)
56{
57 return target->system_wide || target->cpu_list;
58}
59
60static inline bool perf_target__none(struct perf_target *target)
61{
62 return !perf_target__has_task(target) && !perf_target__has_cpu(target);
63}
64
65#endif /* _PERF_TARGET_H */
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index 7da80f14418b..f718df8a3c59 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -6,7 +6,7 @@
6 6
7struct thread_map { 7struct thread_map {
8 int nr; 8 int nr;
9 int map[]; 9 pid_t map[];
10}; 10};
11 11
12struct thread_map *thread_map__new_by_pid(pid_t pid); 12struct thread_map *thread_map__new_by_pid(pid_t pid);
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 09fe579ccafb..abe0e8e95068 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -69,23 +69,24 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
69 69
70 ret += SNPRINTF(bf + ret, size - ret, "], "); 70 ret += SNPRINTF(bf + ret, size - ret, "], ");
71 71
72 if (top->target_pid) 72 if (top->target.pid)
73 ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s", 73 ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
74 top->target_pid); 74 top->target.pid);
75 else if (top->target_tid) 75 else if (top->target.tid)
76 ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s", 76 ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
77 top->target_tid); 77 top->target.tid);
78 else if (top->uid_str != NULL) 78 else if (top->target.uid_str != NULL)
79 ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", 79 ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
80 top->uid_str); 80 top->target.uid_str);
81 else 81 else
82 ret += SNPRINTF(bf + ret, size - ret, " (all"); 82 ret += SNPRINTF(bf + ret, size - ret, " (all");
83 83
84 if (top->cpu_list) 84 if (top->target.cpu_list)
85 ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", 85 ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
86 top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list); 86 top->evlist->cpus->nr > 1 ? "s" : "",
87 top->target.cpu_list);
87 else { 88 else {
88 if (top->target_tid) 89 if (top->target.tid)
89 ret += SNPRINTF(bf + ret, size - ret, ")"); 90 ret += SNPRINTF(bf + ret, size - ret, ")");
90 else 91 else
91 ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", 92 ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index ce61cb2d1acf..33347ca89ee4 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -13,6 +13,7 @@ struct perf_session;
13struct perf_top { 13struct perf_top {
14 struct perf_tool tool; 14 struct perf_tool tool;
15 struct perf_evlist *evlist; 15 struct perf_evlist *evlist;
16 struct perf_target target;
16 /* 17 /*
17 * Symbols will be added here in perf_event__process_sample and will 18 * Symbols will be added here in perf_event__process_sample and will
18 * get out after decayed. 19 * get out after decayed.
@@ -23,10 +24,7 @@ struct perf_top {
23 u64 guest_us_samples, guest_kernel_samples; 24 u64 guest_us_samples, guest_kernel_samples;
24 int print_entries, count_filter, delay_secs; 25 int print_entries, count_filter, delay_secs;
25 int freq; 26 int freq;
26 const char *target_pid, *target_tid;
27 uid_t uid;
28 bool hide_kernel_symbols, hide_user_symbols, zero; 27 bool hide_kernel_symbols, hide_user_symbols, zero;
29 bool system_wide;
30 bool use_tui, use_stdio; 28 bool use_tui, use_stdio;
31 bool sort_has_symbols; 29 bool sort_has_symbols;
32 bool dont_use_callchains; 30 bool dont_use_callchains;
@@ -37,7 +35,6 @@ struct perf_top {
37 bool sample_id_all_missing; 35 bool sample_id_all_missing;
38 bool exclude_guest_missing; 36 bool exclude_guest_missing;
39 bool dump_symtab; 37 bool dump_symtab;
40 const char *cpu_list;
41 struct hist_entry *sym_filter_entry; 38 struct hist_entry *sym_filter_entry;
42 struct perf_evsel *sym_evsel; 39 struct perf_evsel *sym_evsel;
43 struct perf_session *session; 40 struct perf_session *session;
@@ -47,7 +44,6 @@ struct perf_top {
47 int realtime_prio; 44 int realtime_prio;
48 int sym_pcnt_filter; 45 int sym_pcnt_filter;
49 const char *sym_filter; 46 const char *sym_filter;
50 const char *uid_str;
51}; 47};
52 48
53size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); 49size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index fc22cf5c605f..a8d81c35ef66 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -68,7 +68,7 @@ struct events {
68}; 68};
69 69
70 70
71void *malloc_or_die(unsigned int size) 71static void *malloc_or_die(unsigned int size)
72{ 72{
73 void *data; 73 void *data;
74 74
@@ -448,6 +448,8 @@ static void tracing_data_header(void)
448 else 448 else
449 buf[0] = 0; 449 buf[0] = 0;
450 450
451 read_trace_init(buf[0], buf[0]);
452
451 write_or_die(buf, 1); 453 write_or_die(buf, 1);
452 454
453 /* save size of long */ 455 /* save size of long */
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index dfd1bd8371a4..df2fddbf0cd2 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -17,2169 +17,305 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 * 18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 *
21 * The parts for function graph printing was taken and modified from the
22 * Linux Kernel that were written by Frederic Weisbecker.
23 */ 20 */
24
25#include <stdio.h> 21#include <stdio.h>
26#include <stdlib.h> 22#include <stdlib.h>
27#include <string.h> 23#include <string.h>
24#include <ctype.h>
28#include <errno.h> 25#include <errno.h>
29 26
30#include "../perf.h" 27#include "../perf.h"
31#include "util.h" 28#include "util.h"
32#include "trace-event.h" 29#include "trace-event.h"
33 30
34int header_page_ts_offset;
35int header_page_ts_size;
36int header_page_size_offset;
37int header_page_size_size; 31int header_page_size_size;
38int header_page_overwrite_offset; 32int header_page_ts_size;
39int header_page_overwrite_size;
40int header_page_data_offset; 33int header_page_data_offset;
41int header_page_data_size;
42
43bool latency_format;
44
45static char *input_buf;
46static unsigned long long input_buf_ptr;
47static unsigned long long input_buf_siz;
48
49static int cpus;
50static int long_size;
51static int is_flag_field;
52static int is_symbolic_field;
53
54static struct format_field *
55find_any_field(struct event *event, const char *name);
56
57static void init_input_buf(char *buf, unsigned long long size)
58{
59 input_buf = buf;
60 input_buf_siz = size;
61 input_buf_ptr = 0;
62}
63
64struct cmdline {
65 char *comm;
66 int pid;
67};
68
69static struct cmdline *cmdlines;
70static int cmdline_count;
71
72static int cmdline_cmp(const void *a, const void *b)
73{
74 const struct cmdline *ca = a;
75 const struct cmdline *cb = b;
76
77 if (ca->pid < cb->pid)
78 return -1;
79 if (ca->pid > cb->pid)
80 return 1;
81
82 return 0;
83}
84 34
85void parse_cmdlines(char *file, int size __unused) 35struct pevent *perf_pevent;
86{ 36static struct pevent *pevent;
87 struct cmdline_list {
88 struct cmdline_list *next;
89 char *comm;
90 int pid;
91 } *list = NULL, *item;
92 char *line;
93 char *next = NULL;
94 int i;
95 37
96 line = strtok_r(file, "\n", &next); 38bool latency_format;
97 while (line) {
98 item = malloc_or_die(sizeof(*item));
99 sscanf(line, "%d %as", &item->pid,
100 (float *)(void *)&item->comm); /* workaround gcc warning */
101 item->next = list;
102 list = item;
103 line = strtok_r(NULL, "\n", &next);
104 cmdline_count++;
105 }
106
107 cmdlines = malloc_or_die(sizeof(*cmdlines) * cmdline_count);
108
109 i = 0;
110 while (list) {
111 cmdlines[i].pid = list->pid;
112 cmdlines[i].comm = list->comm;
113 i++;
114 item = list;
115 list = list->next;
116 free(item);
117 }
118
119 qsort(cmdlines, cmdline_count, sizeof(*cmdlines), cmdline_cmp);
120}
121
122static struct func_map {
123 unsigned long long addr;
124 char *func;
125 char *mod;
126} *func_list;
127static unsigned int func_count;
128
129static int func_cmp(const void *a, const void *b)
130{
131 const struct func_map *fa = a;
132 const struct func_map *fb = b;
133
134 if (fa->addr < fb->addr)
135 return -1;
136 if (fa->addr > fb->addr)
137 return 1;
138
139 return 0;
140}
141
142void parse_proc_kallsyms(char *file, unsigned int size __unused)
143{
144 struct func_list {
145 struct func_list *next;
146 unsigned long long addr;
147 char *func;
148 char *mod;
149 } *list = NULL, *item;
150 char *line;
151 char *next = NULL;
152 char *addr_str;
153 char ch;
154 int ret __used;
155 int i;
156
157 line = strtok_r(file, "\n", &next);
158 while (line) {
159 item = malloc_or_die(sizeof(*item));
160 item->mod = NULL;
161 ret = sscanf(line, "%as %c %as\t[%as",
162 (float *)(void *)&addr_str, /* workaround gcc warning */
163 &ch,
164 (float *)(void *)&item->func,
165 (float *)(void *)&item->mod);
166 item->addr = strtoull(addr_str, NULL, 16);
167 free(addr_str);
168
169 /* truncate the extra ']' */
170 if (item->mod)
171 item->mod[strlen(item->mod) - 1] = 0;
172
173
174 item->next = list;
175 list = item;
176 line = strtok_r(NULL, "\n", &next);
177 func_count++;
178 }
179
180 func_list = malloc_or_die(sizeof(*func_list) * (func_count + 1));
181
182 i = 0;
183 while (list) {
184 func_list[i].func = list->func;
185 func_list[i].addr = list->addr;
186 func_list[i].mod = list->mod;
187 i++;
188 item = list;
189 list = list->next;
190 free(item);
191 }
192
193 qsort(func_list, func_count, sizeof(*func_list), func_cmp);
194
195 /*
196 * Add a special record at the end.
197 */
198 func_list[func_count].func = NULL;
199 func_list[func_count].addr = 0;
200 func_list[func_count].mod = NULL;
201}
202 39
203/* 40int read_trace_init(int file_bigendian, int host_bigendian)
204 * We are searching for a record in between, not an exact
205 * match.
206 */
207static int func_bcmp(const void *a, const void *b)
208{ 41{
209 const struct func_map *fa = a; 42 if (pevent)
210 const struct func_map *fb = b;
211
212 if ((fa->addr == fb->addr) ||
213
214 (fa->addr > fb->addr &&
215 fa->addr < (fb+1)->addr))
216 return 0; 43 return 0;
217 44
218 if (fa->addr < fb->addr) 45 perf_pevent = pevent_alloc();
219 return -1; 46 pevent = perf_pevent;
220
221 return 1;
222}
223
224static struct func_map *find_func(unsigned long long addr)
225{
226 struct func_map *func;
227 struct func_map key;
228
229 key.addr = addr;
230
231 func = bsearch(&key, func_list, func_count, sizeof(*func_list),
232 func_bcmp);
233
234 return func;
235}
236
237void print_funcs(void)
238{
239 int i;
240
241 for (i = 0; i < (int)func_count; i++) {
242 printf("%016llx %s",
243 func_list[i].addr,
244 func_list[i].func);
245 if (func_list[i].mod)
246 printf(" [%s]\n", func_list[i].mod);
247 else
248 printf("\n");
249 }
250}
251 47
252static struct printk_map { 48 pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
253 unsigned long long addr; 49 pevent_set_file_bigendian(pevent, file_bigendian);
254 char *printk; 50 pevent_set_host_bigendian(pevent, host_bigendian);
255} *printk_list;
256static unsigned int printk_count;
257
258static int printk_cmp(const void *a, const void *b)
259{
260 const struct func_map *fa = a;
261 const struct func_map *fb = b;
262
263 if (fa->addr < fb->addr)
264 return -1;
265 if (fa->addr > fb->addr)
266 return 1;
267 51
268 return 0; 52 return 0;
269} 53}
270 54
271static struct printk_map *find_printk(unsigned long long addr) 55static int get_common_field(struct scripting_context *context,
272{ 56 int *offset, int *size, const char *type)
273 struct printk_map *printk;
274 struct printk_map key;
275
276 key.addr = addr;
277
278 printk = bsearch(&key, printk_list, printk_count, sizeof(*printk_list),
279 printk_cmp);
280
281 return printk;
282}
283
284void parse_ftrace_printk(char *file, unsigned int size __unused)
285{
286 struct printk_list {
287 struct printk_list *next;
288 unsigned long long addr;
289 char *printk;
290 } *list = NULL, *item;
291 char *line;
292 char *next = NULL;
293 char *addr_str;
294 int i;
295
296 line = strtok_r(file, "\n", &next);
297 while (line) {
298 addr_str = strsep(&line, ":");
299 if (!line) {
300 warning("error parsing print strings");
301 break;
302 }
303 item = malloc_or_die(sizeof(*item));
304 item->addr = strtoull(addr_str, NULL, 16);
305 /* fmt still has a space, skip it */
306 item->printk = strdup(line+1);
307 item->next = list;
308 list = item;
309 line = strtok_r(NULL, "\n", &next);
310 printk_count++;
311 }
312
313 printk_list = malloc_or_die(sizeof(*printk_list) * printk_count + 1);
314
315 i = 0;
316 while (list) {
317 printk_list[i].printk = list->printk;
318 printk_list[i].addr = list->addr;
319 i++;
320 item = list;
321 list = list->next;
322 free(item);
323 }
324
325 qsort(printk_list, printk_count, sizeof(*printk_list), printk_cmp);
326}
327
328void print_printk(void)
329{
330 int i;
331
332 for (i = 0; i < (int)printk_count; i++) {
333 printf("%016llx %s\n",
334 printk_list[i].addr,
335 printk_list[i].printk);
336 }
337}
338
339static struct event *alloc_event(void)
340{
341 struct event *event;
342
343 event = malloc_or_die(sizeof(*event));
344 memset(event, 0, sizeof(*event));
345
346 return event;
347}
348
349enum event_type {
350 EVENT_ERROR,
351 EVENT_NONE,
352 EVENT_SPACE,
353 EVENT_NEWLINE,
354 EVENT_OP,
355 EVENT_DELIM,
356 EVENT_ITEM,
357 EVENT_DQUOTE,
358 EVENT_SQUOTE,
359};
360
361static struct event *event_list;
362
363static void add_event(struct event *event)
364{ 57{
365 event->next = event_list; 58 struct event_format *event;
366 event_list = event; 59 struct format_field *field;
367}
368
369static int event_item_type(enum event_type type)
370{
371 switch (type) {
372 case EVENT_ITEM ... EVENT_SQUOTE:
373 return 1;
374 case EVENT_ERROR ... EVENT_DELIM:
375 default:
376 return 0;
377 }
378}
379
380static void free_arg(struct print_arg *arg)
381{
382 if (!arg)
383 return;
384
385 switch (arg->type) {
386 case PRINT_ATOM:
387 if (arg->atom.atom)
388 free(arg->atom.atom);
389 break;
390 case PRINT_NULL:
391 case PRINT_FIELD ... PRINT_OP:
392 default:
393 /* todo */
394 break;
395 }
396
397 free(arg);
398}
399
400static enum event_type get_type(int ch)
401{
402 if (ch == '\n')
403 return EVENT_NEWLINE;
404 if (isspace(ch))
405 return EVENT_SPACE;
406 if (isalnum(ch) || ch == '_')
407 return EVENT_ITEM;
408 if (ch == '\'')
409 return EVENT_SQUOTE;
410 if (ch == '"')
411 return EVENT_DQUOTE;
412 if (!isprint(ch))
413 return EVENT_NONE;
414 if (ch == '(' || ch == ')' || ch == ',')
415 return EVENT_DELIM;
416
417 return EVENT_OP;
418}
419
420static int __read_char(void)
421{
422 if (input_buf_ptr >= input_buf_siz)
423 return -1;
424
425 return input_buf[input_buf_ptr++];
426}
427
428static int __peek_char(void)
429{
430 if (input_buf_ptr >= input_buf_siz)
431 return -1;
432
433 return input_buf[input_buf_ptr];
434}
435
436static enum event_type __read_token(char **tok)
437{
438 char buf[BUFSIZ];
439 int ch, last_ch, quote_ch, next_ch;
440 int i = 0;
441 int tok_size = 0;
442 enum event_type type;
443
444 *tok = NULL;
445
446
447 ch = __read_char();
448 if (ch < 0)
449 return EVENT_NONE;
450
451 type = get_type(ch);
452 if (type == EVENT_NONE)
453 return type;
454
455 buf[i++] = ch;
456
457 switch (type) {
458 case EVENT_NEWLINE:
459 case EVENT_DELIM:
460 *tok = malloc_or_die(2);
461 (*tok)[0] = ch;
462 (*tok)[1] = 0;
463 return type;
464
465 case EVENT_OP:
466 switch (ch) {
467 case '-':
468 next_ch = __peek_char();
469 if (next_ch == '>') {
470 buf[i++] = __read_char();
471 break;
472 }
473 /* fall through */
474 case '+':
475 case '|':
476 case '&':
477 case '>':
478 case '<':
479 last_ch = ch;
480 ch = __peek_char();
481 if (ch != last_ch)
482 goto test_equal;
483 buf[i++] = __read_char();
484 switch (last_ch) {
485 case '>':
486 case '<':
487 goto test_equal;
488 default:
489 break;
490 }
491 break;
492 case '!':
493 case '=':
494 goto test_equal;
495 default: /* what should we do instead? */
496 break;
497 }
498 buf[i] = 0;
499 *tok = strdup(buf);
500 return type;
501
502 test_equal:
503 ch = __peek_char();
504 if (ch == '=')
505 buf[i++] = __read_char();
506 break;
507
508 case EVENT_DQUOTE:
509 case EVENT_SQUOTE:
510 /* don't keep quotes */
511 i--;
512 quote_ch = ch;
513 last_ch = 0;
514 do {
515 if (i == (BUFSIZ - 1)) {
516 buf[i] = 0;
517 if (*tok) {
518 *tok = realloc(*tok, tok_size + BUFSIZ);
519 if (!*tok)
520 return EVENT_NONE;
521 strcat(*tok, buf);
522 } else
523 *tok = strdup(buf);
524
525 if (!*tok)
526 return EVENT_NONE;
527 tok_size += BUFSIZ;
528 i = 0;
529 }
530 last_ch = ch;
531 ch = __read_char();
532 buf[i++] = ch;
533 /* the '\' '\' will cancel itself */
534 if (ch == '\\' && last_ch == '\\')
535 last_ch = 0;
536 } while (ch != quote_ch || last_ch == '\\');
537 /* remove the last quote */
538 i--;
539 goto out;
540
541 case EVENT_ERROR ... EVENT_SPACE:
542 case EVENT_ITEM:
543 default:
544 break;
545 }
546
547 while (get_type(__peek_char()) == type) {
548 if (i == (BUFSIZ - 1)) {
549 buf[i] = 0;
550 if (*tok) {
551 *tok = realloc(*tok, tok_size + BUFSIZ);
552 if (!*tok)
553 return EVENT_NONE;
554 strcat(*tok, buf);
555 } else
556 *tok = strdup(buf);
557
558 if (!*tok)
559 return EVENT_NONE;
560 tok_size += BUFSIZ;
561 i = 0;
562 }
563 ch = __read_char();
564 buf[i++] = ch;
565 }
566
567 out:
568 buf[i] = 0;
569 if (*tok) {
570 *tok = realloc(*tok, tok_size + i);
571 if (!*tok)
572 return EVENT_NONE;
573 strcat(*tok, buf);
574 } else
575 *tok = strdup(buf);
576 if (!*tok)
577 return EVENT_NONE;
578
579 return type;
580}
581
582static void free_token(char *tok)
583{
584 if (tok)
585 free(tok);
586}
587
588static enum event_type read_token(char **tok)
589{
590 enum event_type type;
591
592 for (;;) {
593 type = __read_token(tok);
594 if (type != EVENT_SPACE)
595 return type;
596
597 free_token(*tok);
598 }
599
600 /* not reached */
601 return EVENT_NONE;
602}
603
604/* no newline */
605static enum event_type read_token_item(char **tok)
606{
607 enum event_type type;
608
609 for (;;) {
610 type = __read_token(tok);
611 if (type != EVENT_SPACE && type != EVENT_NEWLINE)
612 return type;
613
614 free_token(*tok);
615 }
616
617 /* not reached */
618 return EVENT_NONE;
619}
620
621static int test_type(enum event_type type, enum event_type expect)
622{
623 if (type != expect) {
624 warning("Error: expected type %d but read %d",
625 expect, type);
626 return -1;
627 }
628 return 0;
629}
630 60
631static int __test_type_token(enum event_type type, char *token, 61 if (!*size) {
632 enum event_type expect, const char *expect_tok, 62 if (!pevent->events)
633 bool warn) 63 return 0;
634{
635 if (type != expect) {
636 if (warn)
637 warning("Error: expected type %d but read %d",
638 expect, type);
639 return -1;
640 }
641 64
642 if (strcmp(token, expect_tok) != 0) { 65 event = pevent->events[0];
643 if (warn) 66 field = pevent_find_common_field(event, type);
644 warning("Error: expected '%s' but read '%s'", 67 if (!field)
645 expect_tok, token); 68 return 0;
646 return -1; 69 *offset = field->offset;
70 *size = field->size;
647 } 71 }
648 return 0;
649}
650 72
651static int test_type_token(enum event_type type, char *token, 73 return pevent_read_number(pevent, context->event_data + *offset, *size);
652 enum event_type expect, const char *expect_tok)
653{
654 return __test_type_token(type, token, expect, expect_tok, true);
655} 74}
656 75
657static int __read_expect_type(enum event_type expect, char **tok, int newline_ok) 76int common_lock_depth(struct scripting_context *context)
658{
659 enum event_type type;
660
661 if (newline_ok)
662 type = read_token(tok);
663 else
664 type = read_token_item(tok);
665 return test_type(type, expect);
666}
667
668static int read_expect_type(enum event_type expect, char **tok)
669{
670 return __read_expect_type(expect, tok, 1);
671}
672
673static int __read_expected(enum event_type expect, const char *str,
674 int newline_ok, bool warn)
675{ 77{
676 enum event_type type; 78 static int offset;
677 char *token; 79 static int size;
678 int ret; 80 int ret;
679 81
680 if (newline_ok) 82 ret = get_common_field(context, &size, &offset,
681 type = read_token(&token); 83 "common_lock_depth");
682 else 84 if (ret < 0)
683 type = read_token_item(&token); 85 return -1;
684
685 ret = __test_type_token(type, token, expect, str, warn);
686
687 free_token(token);
688 86
689 return ret; 87 return ret;
690} 88}
691 89
692static int read_expected(enum event_type expect, const char *str) 90int common_flags(struct scripting_context *context)
693{
694 return __read_expected(expect, str, 1, true);
695}
696
697static int read_expected_item(enum event_type expect, const char *str)
698{
699 return __read_expected(expect, str, 0, true);
700}
701
702static char *event_read_name(void)
703{
704 char *token;
705
706 if (read_expected(EVENT_ITEM, "name") < 0)
707 return NULL;
708
709 if (read_expected(EVENT_OP, ":") < 0)
710 return NULL;
711
712 if (read_expect_type(EVENT_ITEM, &token) < 0)
713 goto fail;
714
715 return token;
716
717 fail:
718 free_token(token);
719 return NULL;
720}
721
722static int event_read_id(void)
723{ 91{
724 char *token; 92 static int offset;
725 int id = -1; 93 static int size;
726 94 int ret;
727 if (read_expected_item(EVENT_ITEM, "ID") < 0)
728 return -1;
729 95
730 if (read_expected(EVENT_OP, ":") < 0) 96 ret = get_common_field(context, &size, &offset,
97 "common_flags");
98 if (ret < 0)
731 return -1; 99 return -1;
732 100
733 if (read_expect_type(EVENT_ITEM, &token) < 0) 101 return ret;
734 goto free;
735
736 id = strtoul(token, NULL, 0);
737
738 free:
739 free_token(token);
740 return id;
741}
742
743static int field_is_string(struct format_field *field)
744{
745 if ((field->flags & FIELD_IS_ARRAY) &&
746 (!strstr(field->type, "char") || !strstr(field->type, "u8") ||
747 !strstr(field->type, "s8")))
748 return 1;
749
750 return 0;
751}
752
753static int field_is_dynamic(struct format_field *field)
754{
755 if (!strncmp(field->type, "__data_loc", 10))
756 return 1;
757
758 return 0;
759}
760
761static int event_read_fields(struct event *event, struct format_field **fields)
762{
763 struct format_field *field = NULL;
764 enum event_type type;
765 char *token;
766 char *last_token;
767 int count = 0;
768
769 do {
770 type = read_token(&token);
771 if (type == EVENT_NEWLINE) {
772 free_token(token);
773 return count;
774 }
775
776 count++;
777
778 if (test_type_token(type, token, EVENT_ITEM, "field"))
779 goto fail;
780 free_token(token);
781
782 type = read_token(&token);
783 /*
784 * The ftrace fields may still use the "special" name.
785 * Just ignore it.
786 */
787 if (event->flags & EVENT_FL_ISFTRACE &&
788 type == EVENT_ITEM && strcmp(token, "special") == 0) {
789 free_token(token);
790 type = read_token(&token);
791 }
792
793 if (test_type_token(type, token, EVENT_OP, ":") < 0)
794 return -1;
795
796 if (read_expect_type(EVENT_ITEM, &token) < 0)
797 goto fail;
798
799 last_token = token;
800
801 field = malloc_or_die(sizeof(*field));
802 memset(field, 0, sizeof(*field));
803
804 /* read the rest of the type */
805 for (;;) {
806 type = read_token(&token);
807 if (type == EVENT_ITEM ||
808 (type == EVENT_OP && strcmp(token, "*") == 0) ||
809 /*
810 * Some of the ftrace fields are broken and have
811 * an illegal "." in them.
812 */
813 (event->flags & EVENT_FL_ISFTRACE &&
814 type == EVENT_OP && strcmp(token, ".") == 0)) {
815
816 if (strcmp(token, "*") == 0)
817 field->flags |= FIELD_IS_POINTER;
818
819 if (field->type) {
820 field->type = realloc(field->type,
821 strlen(field->type) +
822 strlen(last_token) + 2);
823 strcat(field->type, " ");
824 strcat(field->type, last_token);
825 } else
826 field->type = last_token;
827 last_token = token;
828 continue;
829 }
830
831 break;
832 }
833
834 if (!field->type) {
835 die("no type found");
836 goto fail;
837 }
838 field->name = last_token;
839
840 if (test_type(type, EVENT_OP))
841 goto fail;
842
843 if (strcmp(token, "[") == 0) {
844 enum event_type last_type = type;
845 char *brackets = token;
846 int len;
847
848 field->flags |= FIELD_IS_ARRAY;
849
850 type = read_token(&token);
851 while (strcmp(token, "]") != 0) {
852 if (last_type == EVENT_ITEM &&
853 type == EVENT_ITEM)
854 len = 2;
855 else
856 len = 1;
857 last_type = type;
858
859 brackets = realloc(brackets,
860 strlen(brackets) +
861 strlen(token) + len);
862 if (len == 2)
863 strcat(brackets, " ");
864 strcat(brackets, token);
865 free_token(token);
866 type = read_token(&token);
867 if (type == EVENT_NONE) {
868 die("failed to find token");
869 goto fail;
870 }
871 }
872
873 free_token(token);
874
875 brackets = realloc(brackets, strlen(brackets) + 2);
876 strcat(brackets, "]");
877
878 /* add brackets to type */
879
880 type = read_token(&token);
881 /*
882 * If the next token is not an OP, then it is of
883 * the format: type [] item;
884 */
885 if (type == EVENT_ITEM) {
886 field->type = realloc(field->type,
887 strlen(field->type) +
888 strlen(field->name) +
889 strlen(brackets) + 2);
890 strcat(field->type, " ");
891 strcat(field->type, field->name);
892 free_token(field->name);
893 strcat(field->type, brackets);
894 field->name = token;
895 type = read_token(&token);
896 } else {
897 field->type = realloc(field->type,
898 strlen(field->type) +
899 strlen(brackets) + 1);
900 strcat(field->type, brackets);
901 }
902 free(brackets);
903 }
904
905 if (field_is_string(field)) {
906 field->flags |= FIELD_IS_STRING;
907 if (field_is_dynamic(field))
908 field->flags |= FIELD_IS_DYNAMIC;
909 }
910
911 if (test_type_token(type, token, EVENT_OP, ";"))
912 goto fail;
913 free_token(token);
914
915 if (read_expected(EVENT_ITEM, "offset") < 0)
916 goto fail_expect;
917
918 if (read_expected(EVENT_OP, ":") < 0)
919 goto fail_expect;
920
921 if (read_expect_type(EVENT_ITEM, &token))
922 goto fail;
923 field->offset = strtoul(token, NULL, 0);
924 free_token(token);
925
926 if (read_expected(EVENT_OP, ";") < 0)
927 goto fail_expect;
928
929 if (read_expected(EVENT_ITEM, "size") < 0)
930 goto fail_expect;
931
932 if (read_expected(EVENT_OP, ":") < 0)
933 goto fail_expect;
934
935 if (read_expect_type(EVENT_ITEM, &token))
936 goto fail;
937 field->size = strtoul(token, NULL, 0);
938 free_token(token);
939
940 if (read_expected(EVENT_OP, ";") < 0)
941 goto fail_expect;
942
943 type = read_token(&token);
944 if (type != EVENT_NEWLINE) {
945 /* newer versions of the kernel have a "signed" type */
946 if (test_type_token(type, token, EVENT_ITEM, "signed"))
947 goto fail;
948
949 free_token(token);
950
951 if (read_expected(EVENT_OP, ":") < 0)
952 goto fail_expect;
953
954 if (read_expect_type(EVENT_ITEM, &token))
955 goto fail;
956
957 if (strtoul(token, NULL, 0))
958 field->flags |= FIELD_IS_SIGNED;
959
960 free_token(token);
961 if (read_expected(EVENT_OP, ";") < 0)
962 goto fail_expect;
963
964 if (read_expect_type(EVENT_NEWLINE, &token))
965 goto fail;
966 }
967
968 free_token(token);
969
970 *fields = field;
971 fields = &field->next;
972
973 } while (1);
974
975 return 0;
976
977fail:
978 free_token(token);
979fail_expect:
980 if (field)
981 free(field);
982 return -1;
983} 102}
984 103
985static int event_read_format(struct event *event) 104int common_pc(struct scripting_context *context)
986{ 105{
987 char *token; 106 static int offset;
107 static int size;
988 int ret; 108 int ret;
989 109
990 if (read_expected_item(EVENT_ITEM, "format") < 0) 110 ret = get_common_field(context, &size, &offset,
991 return -1; 111 "common_preempt_count");
992
993 if (read_expected(EVENT_OP, ":") < 0)
994 return -1;
995
996 if (read_expect_type(EVENT_NEWLINE, &token))
997 goto fail;
998 free_token(token);
999
1000 ret = event_read_fields(event, &event->format.common_fields);
1001 if (ret < 0) 112 if (ret < 0)
1002 return ret; 113 return -1;
1003 event->format.nr_common = ret;
1004
1005 ret = event_read_fields(event, &event->format.fields);
1006 if (ret < 0)
1007 return ret;
1008 event->format.nr_fields = ret;
1009
1010 return 0;
1011
1012 fail:
1013 free_token(token);
1014 return -1;
1015}
1016
1017enum event_type
1018process_arg_token(struct event *event, struct print_arg *arg,
1019 char **tok, enum event_type type);
1020
1021static enum event_type
1022process_arg(struct event *event, struct print_arg *arg, char **tok)
1023{
1024 enum event_type type;
1025 char *token;
1026
1027 type = read_token(&token);
1028 *tok = token;
1029 114
1030 return process_arg_token(event, arg, tok, type); 115 return ret;
1031} 116}
1032 117
1033static enum event_type 118unsigned long long
1034process_cond(struct event *event, struct print_arg *top, char **tok) 119raw_field_value(struct event_format *event, const char *name, void *data)
1035{ 120{
1036 struct print_arg *arg, *left, *right; 121 struct format_field *field;
1037 enum event_type type; 122 unsigned long long val;
1038 char *token = NULL;
1039
1040 arg = malloc_or_die(sizeof(*arg));
1041 memset(arg, 0, sizeof(*arg));
1042
1043 left = malloc_or_die(sizeof(*left));
1044
1045 right = malloc_or_die(sizeof(*right));
1046
1047 arg->type = PRINT_OP;
1048 arg->op.left = left;
1049 arg->op.right = right;
1050
1051 *tok = NULL;
1052 type = process_arg(event, left, &token);
1053 if (test_type_token(type, token, EVENT_OP, ":"))
1054 goto out_free;
1055
1056 arg->op.op = token;
1057
1058 type = process_arg(event, right, &token);
1059 123
1060 top->op.right = arg; 124 field = pevent_find_any_field(event, name);
125 if (!field)
126 return 0ULL;
1061 127
1062 *tok = token; 128 pevent_read_number_field(field, data, &val);
1063 return type;
1064 129
1065out_free: 130 return val;
1066 free_token(*tok);
1067 free(right);
1068 free(left);
1069 free_arg(arg);
1070 return EVENT_ERROR;
1071} 131}
1072 132
1073static enum event_type 133void *raw_field_ptr(struct event_format *event, const char *name, void *data)
1074process_array(struct event *event, struct print_arg *top, char **tok)
1075{ 134{
1076 struct print_arg *arg; 135 struct format_field *field;
1077 enum event_type type;
1078 char *token = NULL;
1079
1080 arg = malloc_or_die(sizeof(*arg));
1081 memset(arg, 0, sizeof(*arg));
1082
1083 *tok = NULL;
1084 type = process_arg(event, arg, &token);
1085 if (test_type_token(type, token, EVENT_OP, "]"))
1086 goto out_free;
1087
1088 top->op.right = arg;
1089
1090 free_token(token);
1091 type = read_token_item(&token);
1092 *tok = token;
1093
1094 return type;
1095 136
1096out_free: 137 field = pevent_find_any_field(event, name);
1097 free_token(*tok); 138 if (!field)
1098 free_arg(arg); 139 return NULL;
1099 return EVENT_ERROR;
1100}
1101 140
1102static int get_op_prio(char *op) 141 if (field->flags & FIELD_IS_DYNAMIC) {
1103{ 142 int offset;
1104 if (!op[1]) {
1105 switch (op[0]) {
1106 case '*':
1107 case '/':
1108 case '%':
1109 return 6;
1110 case '+':
1111 case '-':
1112 return 7;
1113 /* '>>' and '<<' are 8 */
1114 case '<':
1115 case '>':
1116 return 9;
1117 /* '==' and '!=' are 10 */
1118 case '&':
1119 return 11;
1120 case '^':
1121 return 12;
1122 case '|':
1123 return 13;
1124 case '?':
1125 return 16;
1126 default:
1127 die("unknown op '%c'", op[0]);
1128 return -1;
1129 }
1130 } else {
1131 if (strcmp(op, "++") == 0 ||
1132 strcmp(op, "--") == 0) {
1133 return 3;
1134 } else if (strcmp(op, ">>") == 0 ||
1135 strcmp(op, "<<") == 0) {
1136 return 8;
1137 } else if (strcmp(op, ">=") == 0 ||
1138 strcmp(op, "<=") == 0) {
1139 return 9;
1140 } else if (strcmp(op, "==") == 0 ||
1141 strcmp(op, "!=") == 0) {
1142 return 10;
1143 } else if (strcmp(op, "&&") == 0) {
1144 return 14;
1145 } else if (strcmp(op, "||") == 0) {
1146 return 15;
1147 } else {
1148 die("unknown op '%s'", op);
1149 return -1;
1150 }
1151 }
1152}
1153 143
1154static void set_op_prio(struct print_arg *arg) 144 offset = *(int *)(data + field->offset);
1155{ 145 offset &= 0xffff;
1156 146
1157 /* single ops are the greatest */ 147 return data + offset;
1158 if (!arg->op.left || arg->op.left->type == PRINT_NULL) {
1159 arg->op.prio = 0;
1160 return;
1161 } 148 }
1162 149
1163 arg->op.prio = get_op_prio(arg->op.op); 150 return data + field->offset;
1164} 151}
1165 152
1166static enum event_type 153int trace_parse_common_type(void *data)
1167process_op(struct event *event, struct print_arg *arg, char **tok)
1168{ 154{
1169 struct print_arg *left, *right = NULL; 155 struct pevent_record record;
1170 enum event_type type;
1171 char *token;
1172
1173 /* the op is passed in via tok */
1174 token = *tok;
1175
1176 if (arg->type == PRINT_OP && !arg->op.left) {
1177 /* handle single op */
1178 if (token[1]) {
1179 die("bad op token %s", token);
1180 return EVENT_ERROR;
1181 }
1182 switch (token[0]) {
1183 case '!':
1184 case '+':
1185 case '-':
1186 break;
1187 default:
1188 die("bad op token %s", token);
1189 return EVENT_ERROR;
1190 }
1191
1192 /* make an empty left */
1193 left = malloc_or_die(sizeof(*left));
1194 left->type = PRINT_NULL;
1195 arg->op.left = left;
1196
1197 right = malloc_or_die(sizeof(*right));
1198 arg->op.right = right;
1199
1200 type = process_arg(event, right, tok);
1201
1202 } else if (strcmp(token, "?") == 0) {
1203
1204 left = malloc_or_die(sizeof(*left));
1205 /* copy the top arg to the left */
1206 *left = *arg;
1207
1208 arg->type = PRINT_OP;
1209 arg->op.op = token;
1210 arg->op.left = left;
1211 arg->op.prio = 0;
1212
1213 type = process_cond(event, arg, tok);
1214
1215 } else if (strcmp(token, ">>") == 0 ||
1216 strcmp(token, "<<") == 0 ||
1217 strcmp(token, "&") == 0 ||
1218 strcmp(token, "|") == 0 ||
1219 strcmp(token, "&&") == 0 ||
1220 strcmp(token, "||") == 0 ||
1221 strcmp(token, "-") == 0 ||
1222 strcmp(token, "+") == 0 ||
1223 strcmp(token, "*") == 0 ||
1224 strcmp(token, "^") == 0 ||
1225 strcmp(token, "/") == 0 ||
1226 strcmp(token, "<") == 0 ||
1227 strcmp(token, ">") == 0 ||
1228 strcmp(token, "==") == 0 ||
1229 strcmp(token, "!=") == 0) {
1230
1231 left = malloc_or_die(sizeof(*left));
1232
1233 /* copy the top arg to the left */
1234 *left = *arg;
1235
1236 arg->type = PRINT_OP;
1237 arg->op.op = token;
1238 arg->op.left = left;
1239
1240 set_op_prio(arg);
1241
1242 right = malloc_or_die(sizeof(*right));
1243
1244 type = read_token_item(&token);
1245 *tok = token;
1246
1247 /* could just be a type pointer */
1248 if ((strcmp(arg->op.op, "*") == 0) &&
1249 type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
1250 if (left->type != PRINT_ATOM)
1251 die("bad pointer type");
1252 left->atom.atom = realloc(left->atom.atom,
1253 sizeof(left->atom.atom) + 3);
1254 strcat(left->atom.atom, " *");
1255 *arg = *left;
1256 free(arg);
1257
1258 return type;
1259 }
1260
1261 type = process_arg_token(event, right, tok, type);
1262
1263 arg->op.right = right;
1264
1265 } else if (strcmp(token, "[") == 0) {
1266
1267 left = malloc_or_die(sizeof(*left));
1268 *left = *arg;
1269
1270 arg->type = PRINT_OP;
1271 arg->op.op = token;
1272 arg->op.left = left;
1273
1274 arg->op.prio = 0;
1275 type = process_array(event, arg, tok);
1276
1277 } else {
1278 warning("unknown op '%s'", token);
1279 event->flags |= EVENT_FL_FAILED;
1280 /* the arg is now the left side */
1281 return EVENT_NONE;
1282 }
1283
1284 if (type == EVENT_OP) {
1285 int prio;
1286
1287 /* higher prios need to be closer to the root */
1288 prio = get_op_prio(*tok);
1289
1290 if (prio > arg->op.prio)
1291 return process_op(event, arg, tok);
1292 156
1293 return process_op(event, right, tok); 157 record.data = data;
1294 } 158 return pevent_data_type(pevent, &record);
1295
1296 return type;
1297} 159}
1298 160
1299static enum event_type 161int trace_parse_common_pid(void *data)
1300process_entry(struct event *event __unused, struct print_arg *arg,
1301 char **tok)
1302{ 162{
1303 enum event_type type; 163 struct pevent_record record;
1304 char *field;
1305 char *token;
1306
1307 if (read_expected(EVENT_OP, "->") < 0)
1308 return EVENT_ERROR;
1309
1310 if (read_expect_type(EVENT_ITEM, &token) < 0)
1311 goto fail;
1312 field = token;
1313
1314 arg->type = PRINT_FIELD;
1315 arg->field.name = field;
1316
1317 if (is_flag_field) {
1318 arg->field.field = find_any_field(event, arg->field.name);
1319 arg->field.field->flags |= FIELD_IS_FLAG;
1320 is_flag_field = 0;
1321 } else if (is_symbolic_field) {
1322 arg->field.field = find_any_field(event, arg->field.name);
1323 arg->field.field->flags |= FIELD_IS_SYMBOLIC;
1324 is_symbolic_field = 0;
1325 }
1326
1327 type = read_token(&token);
1328 *tok = token;
1329 164
1330 return type; 165 record.data = data;
1331 166 return pevent_data_pid(pevent, &record);
1332fail:
1333 free_token(token);
1334 return EVENT_ERROR;
1335} 167}
1336 168
1337static char *arg_eval (struct print_arg *arg); 169unsigned long long read_size(void *ptr, int size)
1338
1339static long long arg_num_eval(struct print_arg *arg)
1340{ 170{
1341 long long left, right; 171 return pevent_read_number(pevent, ptr, size);
1342 long long val = 0;
1343
1344 switch (arg->type) {
1345 case PRINT_ATOM:
1346 val = strtoll(arg->atom.atom, NULL, 0);
1347 break;
1348 case PRINT_TYPE:
1349 val = arg_num_eval(arg->typecast.item);
1350 break;
1351 case PRINT_OP:
1352 switch (arg->op.op[0]) {
1353 case '|':
1354 left = arg_num_eval(arg->op.left);
1355 right = arg_num_eval(arg->op.right);
1356 if (arg->op.op[1])
1357 val = left || right;
1358 else
1359 val = left | right;
1360 break;
1361 case '&':
1362 left = arg_num_eval(arg->op.left);
1363 right = arg_num_eval(arg->op.right);
1364 if (arg->op.op[1])
1365 val = left && right;
1366 else
1367 val = left & right;
1368 break;
1369 case '<':
1370 left = arg_num_eval(arg->op.left);
1371 right = arg_num_eval(arg->op.right);
1372 switch (arg->op.op[1]) {
1373 case 0:
1374 val = left < right;
1375 break;
1376 case '<':
1377 val = left << right;
1378 break;
1379 case '=':
1380 val = left <= right;
1381 break;
1382 default:
1383 die("unknown op '%s'", arg->op.op);
1384 }
1385 break;
1386 case '>':
1387 left = arg_num_eval(arg->op.left);
1388 right = arg_num_eval(arg->op.right);
1389 switch (arg->op.op[1]) {
1390 case 0:
1391 val = left > right;
1392 break;
1393 case '>':
1394 val = left >> right;
1395 break;
1396 case '=':
1397 val = left >= right;
1398 break;
1399 default:
1400 die("unknown op '%s'", arg->op.op);
1401 }
1402 break;
1403 case '=':
1404 left = arg_num_eval(arg->op.left);
1405 right = arg_num_eval(arg->op.right);
1406
1407 if (arg->op.op[1] != '=')
1408 die("unknown op '%s'", arg->op.op);
1409
1410 val = left == right;
1411 break;
1412 case '!':
1413 left = arg_num_eval(arg->op.left);
1414 right = arg_num_eval(arg->op.right);
1415
1416 switch (arg->op.op[1]) {
1417 case '=':
1418 val = left != right;
1419 break;
1420 default:
1421 die("unknown op '%s'", arg->op.op);
1422 }
1423 break;
1424 case '+':
1425 left = arg_num_eval(arg->op.left);
1426 right = arg_num_eval(arg->op.right);
1427 val = left + right;
1428 break;
1429 default:
1430 die("unknown op '%s'", arg->op.op);
1431 }
1432 break;
1433
1434 case PRINT_NULL:
1435 case PRINT_FIELD ... PRINT_SYMBOL:
1436 case PRINT_STRING:
1437 default:
1438 die("invalid eval type %d", arg->type);
1439
1440 }
1441 return val;
1442} 172}
1443 173
1444static char *arg_eval (struct print_arg *arg) 174struct event_format *trace_find_event(int type)
1445{ 175{
1446 long long val; 176 return pevent_find_event(pevent, type);
1447 static char buf[20];
1448
1449 switch (arg->type) {
1450 case PRINT_ATOM:
1451 return arg->atom.atom;
1452 case PRINT_TYPE:
1453 return arg_eval(arg->typecast.item);
1454 case PRINT_OP:
1455 val = arg_num_eval(arg);
1456 sprintf(buf, "%lld", val);
1457 return buf;
1458
1459 case PRINT_NULL:
1460 case PRINT_FIELD ... PRINT_SYMBOL:
1461 case PRINT_STRING:
1462 default:
1463 die("invalid eval type %d", arg->type);
1464 break;
1465 }
1466
1467 return NULL;
1468} 177}
1469 178
1470static enum event_type
1471process_fields(struct event *event, struct print_flag_sym **list, char **tok)
1472{
1473 enum event_type type;
1474 struct print_arg *arg = NULL;
1475 struct print_flag_sym *field;
1476 char *token = NULL;
1477 char *value;
1478
1479 do {
1480 free_token(token);
1481 type = read_token_item(&token);
1482 if (test_type_token(type, token, EVENT_OP, "{"))
1483 break;
1484
1485 arg = malloc_or_die(sizeof(*arg));
1486
1487 free_token(token);
1488 type = process_arg(event, arg, &token);
1489
1490 if (type == EVENT_OP)
1491 type = process_op(event, arg, &token);
1492
1493 if (type == EVENT_ERROR)
1494 goto out_free;
1495
1496 if (test_type_token(type, token, EVENT_DELIM, ","))
1497 goto out_free;
1498
1499 field = malloc_or_die(sizeof(*field));
1500 memset(field, 0, sizeof(*field));
1501
1502 value = arg_eval(arg);
1503 field->value = strdup(value);
1504
1505 free_token(token);
1506 type = process_arg(event, arg, &token);
1507 if (test_type_token(type, token, EVENT_OP, "}"))
1508 goto out_free;
1509
1510 value = arg_eval(arg);
1511 field->str = strdup(value);
1512 free_arg(arg);
1513 arg = NULL;
1514
1515 *list = field;
1516 list = &field->next;
1517 179
1518 free_token(token); 180void print_trace_event(int cpu, void *data, int size)
1519 type = read_token_item(&token);
1520 } while (type == EVENT_DELIM && strcmp(token, ",") == 0);
1521
1522 *tok = token;
1523 return type;
1524
1525out_free:
1526 free_arg(arg);
1527 free_token(token);
1528
1529 return EVENT_ERROR;
1530}
1531
1532static enum event_type
1533process_flags(struct event *event, struct print_arg *arg, char **tok)
1534{ 181{
1535 struct print_arg *field; 182 struct event_format *event;
1536 enum event_type type; 183 struct pevent_record record;
1537 char *token; 184 struct trace_seq s;
1538 185 int type;
1539 memset(arg, 0, sizeof(*arg));
1540 arg->type = PRINT_FLAGS;
1541
1542 if (read_expected_item(EVENT_DELIM, "(") < 0)
1543 return EVENT_ERROR;
1544
1545 field = malloc_or_die(sizeof(*field));
1546
1547 type = process_arg(event, field, &token);
1548 while (type == EVENT_OP)
1549 type = process_op(event, field, &token);
1550 if (test_type_token(type, token, EVENT_DELIM, ","))
1551 goto out_free;
1552 186
1553 arg->flags.field = field; 187 type = trace_parse_common_type(data);
1554 188
1555 type = read_token_item(&token); 189 event = trace_find_event(type);
1556 if (event_item_type(type)) { 190 if (!event) {
1557 arg->flags.delim = token; 191 warning("ug! no event found for type %d", type);
1558 type = read_token_item(&token); 192 return;
1559 } 193 }
1560 194
1561 if (test_type_token(type, token, EVENT_DELIM, ",")) 195 memset(&record, 0, sizeof(record));
1562 goto out_free; 196 record.cpu = cpu;
1563 197 record.size = size;
1564 type = process_fields(event, &arg->flags.flags, &token); 198 record.data = data;
1565 if (test_type_token(type, token, EVENT_DELIM, ")"))
1566 goto out_free;
1567 199
1568 free_token(token); 200 trace_seq_init(&s);
1569 type = read_token_item(tok); 201 pevent_print_event(pevent, &s, &record);
1570 return type; 202 trace_seq_do_printf(&s);
1571 203 printf("\n");
1572out_free:
1573 free_token(token);
1574 return EVENT_ERROR;
1575} 204}
1576 205
1577static enum event_type 206void print_event(int cpu, void *data, int size, unsigned long long nsecs,
1578process_symbols(struct event *event, struct print_arg *arg, char **tok) 207 char *comm)
1579{ 208{
1580 struct print_arg *field; 209 struct pevent_record record;
1581 enum event_type type; 210 struct trace_seq s;
1582 char *token; 211 int pid;
1583
1584 memset(arg, 0, sizeof(*arg));
1585 arg->type = PRINT_SYMBOL;
1586
1587 if (read_expected_item(EVENT_DELIM, "(") < 0)
1588 return EVENT_ERROR;
1589
1590 field = malloc_or_die(sizeof(*field));
1591
1592 type = process_arg(event, field, &token);
1593 if (test_type_token(type, token, EVENT_DELIM, ","))
1594 goto out_free;
1595 212
1596 arg->symbol.field = field; 213 pevent->latency_format = latency_format;
1597 214
1598 type = process_fields(event, &arg->symbol.symbols, &token); 215 record.ts = nsecs;
1599 if (test_type_token(type, token, EVENT_DELIM, ")")) 216 record.cpu = cpu;
1600 goto out_free; 217 record.size = size;
218 record.data = data;
219 pid = pevent_data_pid(pevent, &record);
1601 220
1602 free_token(token); 221 if (!pevent_pid_is_registered(pevent, pid))
1603 type = read_token_item(tok); 222 pevent_register_comm(pevent, comm, pid);
1604 return type;
1605 223
1606out_free: 224 trace_seq_init(&s);
1607 free_token(token); 225 pevent_print_event(pevent, &s, &record);
1608 return EVENT_ERROR; 226 trace_seq_do_printf(&s);
227 printf("\n");
1609} 228}
1610 229
1611static enum event_type 230void parse_proc_kallsyms(char *file, unsigned int size __unused)
1612process_paren(struct event *event, struct print_arg *arg, char **tok)
1613{ 231{
1614 struct print_arg *item_arg; 232 unsigned long long addr;
1615 enum event_type type; 233 char *func;
1616 char *token; 234 char *line;
1617 235 char *next = NULL;
1618 type = process_arg(event, arg, &token); 236 char *addr_str;
1619 237 char *mod;
1620 if (type == EVENT_ERROR) 238 char ch;
1621 return EVENT_ERROR;
1622
1623 if (type == EVENT_OP)
1624 type = process_op(event, arg, &token);
1625
1626 if (type == EVENT_ERROR)
1627 return EVENT_ERROR;
1628
1629 if (test_type_token(type, token, EVENT_DELIM, ")")) {
1630 free_token(token);
1631 return EVENT_ERROR;
1632 }
1633
1634 free_token(token);
1635 type = read_token_item(&token);
1636
1637 /*
1638 * If the next token is an item or another open paren, then
1639 * this was a typecast.
1640 */
1641 if (event_item_type(type) ||
1642 (type == EVENT_DELIM && strcmp(token, "(") == 0)) {
1643
1644 /* make this a typecast and contine */
1645 239
1646 /* prevous must be an atom */ 240 line = strtok_r(file, "\n", &next);
1647 if (arg->type != PRINT_ATOM) 241 while (line) {
1648 die("previous needed to be PRINT_ATOM"); 242 mod = NULL;
243 sscanf(line, "%as %c %as\t[%as",
244 (float *)(void *)&addr_str, /* workaround gcc warning */
245 &ch, (float *)(void *)&func, (float *)(void *)&mod);
246 addr = strtoull(addr_str, NULL, 16);
247 free(addr_str);
1649 248
1650 item_arg = malloc_or_die(sizeof(*item_arg)); 249 /* truncate the extra ']' */
250 if (mod)
251 mod[strlen(mod) - 1] = 0;
1651 252
1652 arg->type = PRINT_TYPE; 253 pevent_register_function(pevent, func, addr, mod);
1653 arg->typecast.type = arg->atom.atom; 254 free(func);
1654 arg->typecast.item = item_arg; 255 free(mod);
1655 type = process_arg_token(event, item_arg, &token, type);
1656 256
257 line = strtok_r(NULL, "\n", &next);
1657 } 258 }
1658
1659 *tok = token;
1660 return type;
1661} 259}
1662 260
1663 261void parse_ftrace_printk(char *file, unsigned int size __unused)
1664static enum event_type
1665process_str(struct event *event __unused, struct print_arg *arg, char **tok)
1666{ 262{
1667 enum event_type type; 263 unsigned long long addr;
1668 char *token; 264 char *printk;
1669 265 char *line;
1670 if (read_expected(EVENT_DELIM, "(") < 0) 266 char *next = NULL;
1671 return EVENT_ERROR; 267 char *addr_str;
1672 268 char *fmt;
1673 if (read_expect_type(EVENT_ITEM, &token) < 0)
1674 goto fail;
1675
1676 arg->type = PRINT_STRING;
1677 arg->string.string = token;
1678 arg->string.offset = -1;
1679
1680 if (read_expected(EVENT_DELIM, ")") < 0)
1681 return EVENT_ERROR;
1682
1683 type = read_token(&token);
1684 *tok = token;
1685
1686 return type;
1687fail:
1688 free_token(token);
1689 return EVENT_ERROR;
1690}
1691 269
1692enum event_type 270 line = strtok_r(file, "\n", &next);
1693process_arg_token(struct event *event, struct print_arg *arg, 271 while (line) {
1694 char **tok, enum event_type type) 272 addr_str = strtok_r(line, ":", &fmt);
1695{ 273 if (!addr_str) {
1696 char *token; 274 warning("printk format with empty entry");
1697 char *atom;
1698
1699 token = *tok;
1700
1701 switch (type) {
1702 case EVENT_ITEM:
1703 if (strcmp(token, "REC") == 0) {
1704 free_token(token);
1705 type = process_entry(event, arg, &token);
1706 } else if (strcmp(token, "__print_flags") == 0) {
1707 free_token(token);
1708 is_flag_field = 1;
1709 type = process_flags(event, arg, &token);
1710 } else if (strcmp(token, "__print_symbolic") == 0) {
1711 free_token(token);
1712 is_symbolic_field = 1;
1713 type = process_symbols(event, arg, &token);
1714 } else if (strcmp(token, "__get_str") == 0) {
1715 free_token(token);
1716 type = process_str(event, arg, &token);
1717 } else {
1718 atom = token;
1719 /* test the next token */
1720 type = read_token_item(&token);
1721
1722 /* atoms can be more than one token long */
1723 while (type == EVENT_ITEM) {
1724 atom = realloc(atom, strlen(atom) + strlen(token) + 2);
1725 strcat(atom, " ");
1726 strcat(atom, token);
1727 free_token(token);
1728 type = read_token_item(&token);
1729 }
1730
1731 /* todo, test for function */
1732
1733 arg->type = PRINT_ATOM;
1734 arg->atom.atom = atom;
1735 }
1736 break;
1737 case EVENT_DQUOTE:
1738 case EVENT_SQUOTE:
1739 arg->type = PRINT_ATOM;
1740 arg->atom.atom = token;
1741 type = read_token_item(&token);
1742 break;
1743 case EVENT_DELIM:
1744 if (strcmp(token, "(") == 0) {
1745 free_token(token);
1746 type = process_paren(event, arg, &token);
1747 break; 275 break;
1748 } 276 }
1749 case EVENT_OP: 277 addr = strtoull(addr_str, NULL, 16);
1750 /* handle single ops */ 278 /* fmt still has a space, skip it */
1751 arg->type = PRINT_OP; 279 printk = strdup(fmt+1);
1752 arg->op.op = token; 280 line = strtok_r(NULL, "\n", &next);
1753 arg->op.left = NULL; 281 pevent_register_print_string(pevent, printk, addr);
1754 type = process_op(event, arg, &token);
1755
1756 break;
1757
1758 case EVENT_ERROR ... EVENT_NEWLINE:
1759 default:
1760 die("unexpected type %d", type);
1761 }
1762 *tok = token;
1763
1764 return type;
1765}
1766
1767static int event_read_print_args(struct event *event, struct print_arg **list)
1768{
1769 enum event_type type = EVENT_ERROR;
1770 struct print_arg *arg;
1771 char *token;
1772 int args = 0;
1773
1774 do {
1775 if (type == EVENT_NEWLINE) {
1776 free_token(token);
1777 type = read_token_item(&token);
1778 continue;
1779 }
1780
1781 arg = malloc_or_die(sizeof(*arg));
1782 memset(arg, 0, sizeof(*arg));
1783
1784 type = process_arg(event, arg, &token);
1785
1786 if (type == EVENT_ERROR) {
1787 free_arg(arg);
1788 return -1;
1789 }
1790
1791 *list = arg;
1792 args++;
1793
1794 if (type == EVENT_OP) {
1795 type = process_op(event, arg, &token);
1796 list = &arg->next;
1797 continue;
1798 }
1799
1800 if (type == EVENT_DELIM && strcmp(token, ",") == 0) {
1801 free_token(token);
1802 *list = arg;
1803 list = &arg->next;
1804 continue;
1805 }
1806 break;
1807 } while (type != EVENT_NONE);
1808
1809 if (type != EVENT_NONE)
1810 free_token(token);
1811
1812 return args;
1813}
1814
1815static int event_read_print(struct event *event)
1816{
1817 enum event_type type;
1818 char *token;
1819 int ret;
1820
1821 if (read_expected_item(EVENT_ITEM, "print") < 0)
1822 return -1;
1823
1824 if (read_expected(EVENT_ITEM, "fmt") < 0)
1825 return -1;
1826
1827 if (read_expected(EVENT_OP, ":") < 0)
1828 return -1;
1829
1830 if (read_expect_type(EVENT_DQUOTE, &token) < 0)
1831 goto fail;
1832
1833 concat:
1834 event->print_fmt.format = token;
1835 event->print_fmt.args = NULL;
1836
1837 /* ok to have no arg */
1838 type = read_token_item(&token);
1839
1840 if (type == EVENT_NONE)
1841 return 0;
1842
1843 /* Handle concatination of print lines */
1844 if (type == EVENT_DQUOTE) {
1845 char *cat;
1846
1847 cat = malloc_or_die(strlen(event->print_fmt.format) +
1848 strlen(token) + 1);
1849 strcpy(cat, event->print_fmt.format);
1850 strcat(cat, token);
1851 free_token(token);
1852 free_token(event->print_fmt.format);
1853 event->print_fmt.format = NULL;
1854 token = cat;
1855 goto concat;
1856 }
1857
1858 if (test_type_token(type, token, EVENT_DELIM, ","))
1859 goto fail;
1860
1861 free_token(token);
1862
1863 ret = event_read_print_args(event, &event->print_fmt.args);
1864 if (ret < 0)
1865 return -1;
1866
1867 return ret;
1868
1869 fail:
1870 free_token(token);
1871 return -1;
1872}
1873
1874static struct format_field *
1875find_common_field(struct event *event, const char *name)
1876{
1877 struct format_field *format;
1878
1879 for (format = event->format.common_fields;
1880 format; format = format->next) {
1881 if (strcmp(format->name, name) == 0)
1882 break;
1883 }
1884
1885 return format;
1886}
1887
1888static struct format_field *
1889find_field(struct event *event, const char *name)
1890{
1891 struct format_field *format;
1892
1893 for (format = event->format.fields;
1894 format; format = format->next) {
1895 if (strcmp(format->name, name) == 0)
1896 break;
1897 } 282 }
1898
1899 return format;
1900} 283}
1901 284
1902static struct format_field * 285int parse_ftrace_file(char *buf, unsigned long size)
1903find_any_field(struct event *event, const char *name)
1904{ 286{
1905 struct format_field *format; 287 return pevent_parse_event(pevent, buf, size, "ftrace");
1906
1907 format = find_common_field(event, name);
1908 if (format)
1909 return format;
1910 return find_field(event, name);
1911} 288}
1912 289
1913unsigned long long read_size(void *ptr, int size) 290int parse_event_file(char *buf, unsigned long size, char *sys)
1914{ 291{
1915 switch (size) { 292 return pevent_parse_event(pevent, buf, size, sys);
1916 case 1:
1917 return *(unsigned char *)ptr;
1918 case 2:
1919 return data2host2(ptr);
1920 case 4:
1921 return data2host4(ptr);
1922 case 8:
1923 return data2host8(ptr);
1924 default:
1925 /* BUG! */
1926 return 0;
1927 }
1928} 293}
1929 294
1930unsigned long long 295struct event_format *trace_find_next_event(struct event_format *event)
1931raw_field_value(struct event *event, const char *name, void *data)
1932{ 296{
1933 struct format_field *field; 297 static int idx;
1934
1935 field = find_any_field(event, name);
1936 if (!field)
1937 return 0ULL;
1938 298
1939 return read_size(data + field->offset, field->size); 299 if (!pevent->events)
1940}
1941
1942void *raw_field_ptr(struct event *event, const char *name, void *data)
1943{
1944 struct format_field *field;
1945
1946 field = find_any_field(event, name);
1947 if (!field)
1948 return NULL; 300 return NULL;
1949 301
1950 if (field->flags & FIELD_IS_DYNAMIC) { 302 if (!event) {
1951 int offset; 303 idx = 0;
1952 304 return pevent->events[0];
1953 offset = *(int *)(data + field->offset);
1954 offset &= 0xffff;
1955
1956 return data + offset;
1957 }
1958
1959 return data + field->offset;
1960}
1961
1962static int get_common_info(const char *type, int *offset, int *size)
1963{
1964 struct event *event;
1965 struct format_field *field;
1966
1967 /*
1968 * All events should have the same common elements.
1969 * Pick any event to find where the type is;
1970 */
1971 if (!event_list)
1972 die("no event_list!");
1973
1974 event = event_list;
1975 field = find_common_field(event, type);
1976 if (!field)
1977 die("field '%s' not found", type);
1978
1979 *offset = field->offset;
1980 *size = field->size;
1981
1982 return 0;
1983}
1984
1985static int __parse_common(void *data, int *size, int *offset,
1986 const char *name)
1987{
1988 int ret;
1989
1990 if (!*size) {
1991 ret = get_common_info(name, offset, size);
1992 if (ret < 0)
1993 return ret;
1994 } 305 }
1995 return read_size(data + *offset, *size);
1996}
1997
1998int trace_parse_common_type(void *data)
1999{
2000 static int type_offset;
2001 static int type_size;
2002
2003 return __parse_common(data, &type_size, &type_offset,
2004 "common_type");
2005}
2006
2007int trace_parse_common_pid(void *data)
2008{
2009 static int pid_offset;
2010 static int pid_size;
2011
2012 return __parse_common(data, &pid_size, &pid_offset,
2013 "common_pid");
2014}
2015
2016int parse_common_pc(void *data)
2017{
2018 static int pc_offset;
2019 static int pc_size;
2020
2021 return __parse_common(data, &pc_size, &pc_offset,
2022 "common_preempt_count");
2023}
2024
2025int parse_common_flags(void *data)
2026{
2027 static int flags_offset;
2028 static int flags_size;
2029
2030 return __parse_common(data, &flags_size, &flags_offset,
2031 "common_flags");
2032}
2033
2034int parse_common_lock_depth(void *data)
2035{
2036 static int ld_offset;
2037 static int ld_size;
2038 int ret;
2039 306
2040 ret = __parse_common(data, &ld_size, &ld_offset, 307 if (idx < pevent->nr_events && event == pevent->events[idx]) {
2041 "common_lock_depth"); 308 idx++;
2042 if (ret < 0) 309 if (idx == pevent->nr_events)
2043 return -1; 310 return NULL;
2044 311 return pevent->events[idx];
2045 return ret;
2046}
2047
2048struct event *trace_find_event(int id)
2049{
2050 struct event *event;
2051
2052 for (event = event_list; event; event = event->next) {
2053 if (event->id == id)
2054 break;
2055 } 312 }
2056 return event;
2057}
2058
2059struct event *trace_find_next_event(struct event *event)
2060{
2061 if (!event)
2062 return event_list;
2063
2064 return event->next;
2065}
2066
2067static unsigned long long eval_num_arg(void *data, int size,
2068 struct event *event, struct print_arg *arg)
2069{
2070 unsigned long long val = 0;
2071 unsigned long long left, right;
2072 struct print_arg *larg;
2073 313
2074 switch (arg->type) { 314 for (idx = 1; idx < pevent->nr_events; idx++) {
2075 case PRINT_NULL: 315 if (event == pevent->events[idx - 1])
2076 /* ?? */ 316 return pevent->events[idx];
2077 return 0;
2078 case PRINT_ATOM:
2079 return strtoull(arg->atom.atom, NULL, 0);
2080 case PRINT_FIELD:
2081 if (!arg->field.field) {
2082 arg->field.field = find_any_field(event, arg->field.name);
2083 if (!arg->field.field)
2084 die("field %s not found", arg->field.name);
2085 }
2086 /* must be a number */
2087 val = read_size(data + arg->field.field->offset,
2088 arg->field.field->size);
2089 break;
2090 case PRINT_FLAGS:
2091 case PRINT_SYMBOL:
2092 break;
2093 case PRINT_TYPE:
2094 return eval_num_arg(data, size, event, arg->typecast.item);
2095 case PRINT_STRING:
2096 return 0;
2097 break;
2098 case PRINT_OP:
2099 if (strcmp(arg->op.op, "[") == 0) {
2100 /*
2101 * Arrays are special, since we don't want
2102 * to read the arg as is.
2103 */
2104 if (arg->op.left->type != PRINT_FIELD)
2105 goto default_op; /* oops, all bets off */
2106 larg = arg->op.left;
2107 if (!larg->field.field) {
2108 larg->field.field =
2109 find_any_field(event, larg->field.name);
2110 if (!larg->field.field)
2111 die("field %s not found", larg->field.name);
2112 }
2113 right = eval_num_arg(data, size, event, arg->op.right);
2114 val = read_size(data + larg->field.field->offset +
2115 right * long_size, long_size);
2116 break;
2117 }
2118 default_op:
2119 left = eval_num_arg(data, size, event, arg->op.left);
2120 right = eval_num_arg(data, size, event, arg->op.right);
2121 switch (arg->op.op[0]) {
2122 case '|':
2123 if (arg->op.op[1])
2124 val = left || right;
2125 else
2126 val = left | right;
2127 break;
2128 case '&':
2129 if (arg->op.op[1])
2130 val = left && right;
2131 else
2132 val = left & right;
2133 break;
2134 case '<':
2135 switch (arg->op.op[1]) {
2136 case 0:
2137 val = left < right;
2138 break;
2139 case '<':
2140 val = left << right;
2141 break;
2142 case '=':
2143 val = left <= right;
2144 break;
2145 default:
2146 die("unknown op '%s'", arg->op.op);
2147 }
2148 break;
2149 case '>':
2150 switch (arg->op.op[1]) {
2151 case 0:
2152 val = left > right;
2153 break;
2154 case '>':
2155 val = left >> right;
2156 break;
2157 case '=':
2158 val = left >= right;
2159 break;
2160 default:
2161 die("unknown op '%s'", arg->op.op);
2162 }
2163 break;
2164 case '=':
2165 if (arg->op.op[1] != '=')
2166 die("unknown op '%s'", arg->op.op);
2167 val = left == right;
2168 break;
2169 case '-':
2170 val = left - right;
2171 break;
2172 case '+':
2173 val = left + right;
2174 break;
2175 default:
2176 die("unknown op '%s'", arg->op.op);
2177 }
2178 break;
2179 default: /* not sure what to do there */
2180 return 0;
2181 } 317 }
2182 return val; 318 return NULL;
2183} 319}
2184 320
2185struct flag { 321struct flag {
@@ -2221,933 +357,3 @@ unsigned long long eval_flag(const char *flag)
2221 357
2222 return 0; 358 return 0;
2223} 359}
2224
2225static void print_str_arg(void *data, int size,
2226 struct event *event, struct print_arg *arg)
2227{
2228 struct print_flag_sym *flag;
2229 unsigned long long val, fval;
2230 char *str;
2231 int print;
2232
2233 switch (arg->type) {
2234 case PRINT_NULL:
2235 /* ?? */
2236 return;
2237 case PRINT_ATOM:
2238 printf("%s", arg->atom.atom);
2239 return;
2240 case PRINT_FIELD:
2241 if (!arg->field.field) {
2242 arg->field.field = find_any_field(event, arg->field.name);
2243 if (!arg->field.field)
2244 die("field %s not found", arg->field.name);
2245 }
2246 str = malloc_or_die(arg->field.field->size + 1);
2247 memcpy(str, data + arg->field.field->offset,
2248 arg->field.field->size);
2249 str[arg->field.field->size] = 0;
2250 printf("%s", str);
2251 free(str);
2252 break;
2253 case PRINT_FLAGS:
2254 val = eval_num_arg(data, size, event, arg->flags.field);
2255 print = 0;
2256 for (flag = arg->flags.flags; flag; flag = flag->next) {
2257 fval = eval_flag(flag->value);
2258 if (!val && !fval) {
2259 printf("%s", flag->str);
2260 break;
2261 }
2262 if (fval && (val & fval) == fval) {
2263 if (print && arg->flags.delim)
2264 printf("%s", arg->flags.delim);
2265 printf("%s", flag->str);
2266 print = 1;
2267 val &= ~fval;
2268 }
2269 }
2270 break;
2271 case PRINT_SYMBOL:
2272 val = eval_num_arg(data, size, event, arg->symbol.field);
2273 for (flag = arg->symbol.symbols; flag; flag = flag->next) {
2274 fval = eval_flag(flag->value);
2275 if (val == fval) {
2276 printf("%s", flag->str);
2277 break;
2278 }
2279 }
2280 break;
2281
2282 case PRINT_TYPE:
2283 break;
2284 case PRINT_STRING: {
2285 int str_offset;
2286
2287 if (arg->string.offset == -1) {
2288 struct format_field *f;
2289
2290 f = find_any_field(event, arg->string.string);
2291 arg->string.offset = f->offset;
2292 }
2293 str_offset = *(int *)(data + arg->string.offset);
2294 str_offset &= 0xffff;
2295 printf("%s", ((char *)data) + str_offset);
2296 break;
2297 }
2298 case PRINT_OP:
2299 /*
2300 * The only op for string should be ? :
2301 */
2302 if (arg->op.op[0] != '?')
2303 return;
2304 val = eval_num_arg(data, size, event, arg->op.left);
2305 if (val)
2306 print_str_arg(data, size, event, arg->op.right->op.left);
2307 else
2308 print_str_arg(data, size, event, arg->op.right->op.right);
2309 break;
2310 default:
2311 /* well... */
2312 break;
2313 }
2314}
2315
2316static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event *event)
2317{
2318 static struct format_field *field, *ip_field;
2319 struct print_arg *args, *arg, **next;
2320 unsigned long long ip, val;
2321 char *ptr;
2322 void *bptr;
2323
2324 if (!field) {
2325 field = find_field(event, "buf");
2326 if (!field)
2327 die("can't find buffer field for binary printk");
2328 ip_field = find_field(event, "ip");
2329 if (!ip_field)
2330 die("can't find ip field for binary printk");
2331 }
2332
2333 ip = read_size(data + ip_field->offset, ip_field->size);
2334
2335 /*
2336 * The first arg is the IP pointer.
2337 */
2338 args = malloc_or_die(sizeof(*args));
2339 arg = args;
2340 arg->next = NULL;
2341 next = &arg->next;
2342
2343 arg->type = PRINT_ATOM;
2344 arg->atom.atom = malloc_or_die(32);
2345 sprintf(arg->atom.atom, "%lld", ip);
2346
2347 /* skip the first "%pf : " */
2348 for (ptr = fmt + 6, bptr = data + field->offset;
2349 bptr < data + size && *ptr; ptr++) {
2350 int ls = 0;
2351
2352 if (*ptr == '%') {
2353 process_again:
2354 ptr++;
2355 switch (*ptr) {
2356 case '%':
2357 break;
2358 case 'l':
2359 ls++;
2360 goto process_again;
2361 case 'L':
2362 ls = 2;
2363 goto process_again;
2364 case '0' ... '9':
2365 goto process_again;
2366 case 'p':
2367 ls = 1;
2368 /* fall through */
2369 case 'd':
2370 case 'u':
2371 case 'x':
2372 case 'i':
2373 /* the pointers are always 4 bytes aligned */
2374 bptr = (void *)(((unsigned long)bptr + 3) &
2375 ~3);
2376 switch (ls) {
2377 case 0:
2378 case 1:
2379 ls = long_size;
2380 break;
2381 case 2:
2382 ls = 8;
2383 default:
2384 break;
2385 }
2386 val = read_size(bptr, ls);
2387 bptr += ls;
2388 arg = malloc_or_die(sizeof(*arg));
2389 arg->next = NULL;
2390 arg->type = PRINT_ATOM;
2391 arg->atom.atom = malloc_or_die(32);
2392 sprintf(arg->atom.atom, "%lld", val);
2393 *next = arg;
2394 next = &arg->next;
2395 break;
2396 case 's':
2397 arg = malloc_or_die(sizeof(*arg));
2398 arg->next = NULL;
2399 arg->type = PRINT_STRING;
2400 arg->string.string = strdup(bptr);
2401 bptr += strlen(bptr) + 1;
2402 *next = arg;
2403 next = &arg->next;
2404 default:
2405 break;
2406 }
2407 }
2408 }
2409
2410 return args;
2411}
2412
2413static void free_args(struct print_arg *args)
2414{
2415 struct print_arg *next;
2416
2417 while (args) {
2418 next = args->next;
2419
2420 if (args->type == PRINT_ATOM)
2421 free(args->atom.atom);
2422 else
2423 free(args->string.string);
2424 free(args);
2425 args = next;
2426 }
2427}
2428
2429static char *get_bprint_format(void *data, int size __unused, struct event *event)
2430{
2431 unsigned long long addr;
2432 static struct format_field *field;
2433 struct printk_map *printk;
2434 char *format;
2435 char *p;
2436
2437 if (!field) {
2438 field = find_field(event, "fmt");
2439 if (!field)
2440 die("can't find format field for binary printk");
2441 printf("field->offset = %d size=%d\n", field->offset, field->size);
2442 }
2443
2444 addr = read_size(data + field->offset, field->size);
2445
2446 printk = find_printk(addr);
2447 if (!printk) {
2448 format = malloc_or_die(45);
2449 sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n",
2450 addr);
2451 return format;
2452 }
2453
2454 p = printk->printk;
2455 /* Remove any quotes. */
2456 if (*p == '"')
2457 p++;
2458 format = malloc_or_die(strlen(p) + 10);
2459 sprintf(format, "%s : %s", "%pf", p);
2460 /* remove ending quotes and new line since we will add one too */
2461 p = format + strlen(format) - 1;
2462 if (*p == '"')
2463 *p = 0;
2464
2465 p -= 2;
2466 if (strcmp(p, "\\n") == 0)
2467 *p = 0;
2468
2469 return format;
2470}
2471
2472static void pretty_print(void *data, int size, struct event *event)
2473{
2474 struct print_fmt *print_fmt = &event->print_fmt;
2475 struct print_arg *arg = print_fmt->args;
2476 struct print_arg *args = NULL;
2477 const char *ptr = print_fmt->format;
2478 unsigned long long val;
2479 struct func_map *func;
2480 const char *saveptr;
2481 char *bprint_fmt = NULL;
2482 char format[32];
2483 int show_func;
2484 int len;
2485 int ls;
2486
2487 if (event->flags & EVENT_FL_ISFUNC)
2488 ptr = " %pF <-- %pF";
2489
2490 if (event->flags & EVENT_FL_ISBPRINT) {
2491 bprint_fmt = get_bprint_format(data, size, event);
2492 args = make_bprint_args(bprint_fmt, data, size, event);
2493 arg = args;
2494 ptr = bprint_fmt;
2495 }
2496
2497 for (; *ptr; ptr++) {
2498 ls = 0;
2499 if (*ptr == '\\') {
2500 ptr++;
2501 switch (*ptr) {
2502 case 'n':
2503 printf("\n");
2504 break;
2505 case 't':
2506 printf("\t");
2507 break;
2508 case 'r':
2509 printf("\r");
2510 break;
2511 case '\\':
2512 printf("\\");
2513 break;
2514 default:
2515 printf("%c", *ptr);
2516 break;
2517 }
2518
2519 } else if (*ptr == '%') {
2520 saveptr = ptr;
2521 show_func = 0;
2522 cont_process:
2523 ptr++;
2524 switch (*ptr) {
2525 case '%':
2526 printf("%%");
2527 break;
2528 case 'l':
2529 ls++;
2530 goto cont_process;
2531 case 'L':
2532 ls = 2;
2533 goto cont_process;
2534 case 'z':
2535 case 'Z':
2536 case '0' ... '9':
2537 goto cont_process;
2538 case 'p':
2539 if (long_size == 4)
2540 ls = 1;
2541 else
2542 ls = 2;
2543
2544 if (*(ptr+1) == 'F' ||
2545 *(ptr+1) == 'f') {
2546 ptr++;
2547 show_func = *ptr;
2548 }
2549
2550 /* fall through */
2551 case 'd':
2552 case 'i':
2553 case 'x':
2554 case 'X':
2555 case 'u':
2556 if (!arg)
2557 die("no argument match");
2558
2559 len = ((unsigned long)ptr + 1) -
2560 (unsigned long)saveptr;
2561
2562 /* should never happen */
2563 if (len > 32)
2564 die("bad format!");
2565
2566 memcpy(format, saveptr, len);
2567 format[len] = 0;
2568
2569 val = eval_num_arg(data, size, event, arg);
2570 arg = arg->next;
2571
2572 if (show_func) {
2573 func = find_func(val);
2574 if (func) {
2575 printf("%s", func->func);
2576 if (show_func == 'F')
2577 printf("+0x%llx",
2578 val - func->addr);
2579 break;
2580 }
2581 }
2582 switch (ls) {
2583 case 0:
2584 printf(format, (int)val);
2585 break;
2586 case 1:
2587 printf(format, (long)val);
2588 break;
2589 case 2:
2590 printf(format, (long long)val);
2591 break;
2592 default:
2593 die("bad count (%d)", ls);
2594 }
2595 break;
2596 case 's':
2597 if (!arg)
2598 die("no matching argument");
2599
2600 print_str_arg(data, size, event, arg);
2601 arg = arg->next;
2602 break;
2603 default:
2604 printf(">%c<", *ptr);
2605
2606 }
2607 } else
2608 printf("%c", *ptr);
2609 }
2610
2611 if (args) {
2612 free_args(args);
2613 free(bprint_fmt);
2614 }
2615}
2616
2617static inline int log10_cpu(int nb)
2618{
2619 if (nb / 100)
2620 return 3;
2621 if (nb / 10)
2622 return 2;
2623 return 1;
2624}
2625
2626static void print_lat_fmt(void *data, int size __unused)
2627{
2628 unsigned int lat_flags;
2629 unsigned int pc;
2630 int lock_depth;
2631 int hardirq;
2632 int softirq;
2633
2634 lat_flags = parse_common_flags(data);
2635 pc = parse_common_pc(data);
2636 lock_depth = parse_common_lock_depth(data);
2637
2638 hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
2639 softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
2640
2641 printf("%c%c%c",
2642 (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
2643 (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
2644 'X' : '.',
2645 (lat_flags & TRACE_FLAG_NEED_RESCHED) ?
2646 'N' : '.',
2647 (hardirq && softirq) ? 'H' :
2648 hardirq ? 'h' : softirq ? 's' : '.');
2649
2650 if (pc)
2651 printf("%x", pc);
2652 else
2653 printf(".");
2654
2655 if (lock_depth < 0)
2656 printf(". ");
2657 else
2658 printf("%d ", lock_depth);
2659}
2660
2661#define TRACE_GRAPH_INDENT 2
2662
2663static struct record *
2664get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func,
2665 struct record *next)
2666{
2667 struct format_field *field;
2668 struct event *event;
2669 unsigned long val;
2670 int type;
2671 int pid;
2672
2673 type = trace_parse_common_type(next->data);
2674 event = trace_find_event(type);
2675 if (!event)
2676 return NULL;
2677
2678 if (!(event->flags & EVENT_FL_ISFUNCRET))
2679 return NULL;
2680
2681 pid = trace_parse_common_pid(next->data);
2682 field = find_field(event, "func");
2683 if (!field)
2684 die("function return does not have field func");
2685
2686 val = read_size(next->data + field->offset, field->size);
2687
2688 if (cur_pid != pid || cur_func != val)
2689 return NULL;
2690
2691 /* this is a leaf, now advance the iterator */
2692 return trace_read_data(cpu);
2693}
2694
2695/* Signal a overhead of time execution to the output */
2696static void print_graph_overhead(unsigned long long duration)
2697{
2698 /* Non nested entry or return */
2699 if (duration == ~0ULL)
2700 return (void)printf(" ");
2701
2702 /* Duration exceeded 100 msecs */
2703 if (duration > 100000ULL)
2704 return (void)printf("! ");
2705
2706 /* Duration exceeded 10 msecs */
2707 if (duration > 10000ULL)
2708 return (void)printf("+ ");
2709
2710 printf(" ");
2711}
2712
2713static void print_graph_duration(unsigned long long duration)
2714{
2715 unsigned long usecs = duration / 1000;
2716 unsigned long nsecs_rem = duration % 1000;
2717 /* log10(ULONG_MAX) + '\0' */
2718 char msecs_str[21];
2719 char nsecs_str[5];
2720 int len;
2721 int i;
2722
2723 sprintf(msecs_str, "%lu", usecs);
2724
2725 /* Print msecs */
2726 len = printf("%lu", usecs);
2727
2728 /* Print nsecs (we don't want to exceed 7 numbers) */
2729 if (len < 7) {
2730 snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
2731 len += printf(".%s", nsecs_str);
2732 }
2733
2734 printf(" us ");
2735
2736 /* Print remaining spaces to fit the row's width */
2737 for (i = len; i < 7; i++)
2738 printf(" ");
2739
2740 printf("| ");
2741}
2742
2743static void
2744print_graph_entry_leaf(struct event *event, void *data, struct record *ret_rec)
2745{
2746 unsigned long long rettime, calltime;
2747 unsigned long long duration, depth;
2748 unsigned long long val;
2749 struct format_field *field;
2750 struct func_map *func;
2751 struct event *ret_event;
2752 int type;
2753 int i;
2754
2755 type = trace_parse_common_type(ret_rec->data);
2756 ret_event = trace_find_event(type);
2757
2758 field = find_field(ret_event, "rettime");
2759 if (!field)
2760 die("can't find rettime in return graph");
2761 rettime = read_size(ret_rec->data + field->offset, field->size);
2762
2763 field = find_field(ret_event, "calltime");
2764 if (!field)
2765 die("can't find rettime in return graph");
2766 calltime = read_size(ret_rec->data + field->offset, field->size);
2767
2768 duration = rettime - calltime;
2769
2770 /* Overhead */
2771 print_graph_overhead(duration);
2772
2773 /* Duration */
2774 print_graph_duration(duration);
2775
2776 field = find_field(event, "depth");
2777 if (!field)
2778 die("can't find depth in entry graph");
2779 depth = read_size(data + field->offset, field->size);
2780
2781 /* Function */
2782 for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
2783 printf(" ");
2784
2785 field = find_field(event, "func");
2786 if (!field)
2787 die("can't find func in entry graph");
2788 val = read_size(data + field->offset, field->size);
2789 func = find_func(val);
2790
2791 if (func)
2792 printf("%s();", func->func);
2793 else
2794 printf("%llx();", val);
2795}
2796
2797static void print_graph_nested(struct event *event, void *data)
2798{
2799 struct format_field *field;
2800 unsigned long long depth;
2801 unsigned long long val;
2802 struct func_map *func;
2803 int i;
2804
2805 /* No overhead */
2806 print_graph_overhead(-1);
2807
2808 /* No time */
2809 printf(" | ");
2810
2811 field = find_field(event, "depth");
2812 if (!field)
2813 die("can't find depth in entry graph");
2814 depth = read_size(data + field->offset, field->size);
2815
2816 /* Function */
2817 for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
2818 printf(" ");
2819
2820 field = find_field(event, "func");
2821 if (!field)
2822 die("can't find func in entry graph");
2823 val = read_size(data + field->offset, field->size);
2824 func = find_func(val);
2825
2826 if (func)
2827 printf("%s() {", func->func);
2828 else
2829 printf("%llx() {", val);
2830}
2831
2832static void
2833pretty_print_func_ent(void *data, int size, struct event *event,
2834 int cpu, int pid)
2835{
2836 struct format_field *field;
2837 struct record *rec;
2838 void *copy_data;
2839 unsigned long val;
2840
2841 if (latency_format) {
2842 print_lat_fmt(data, size);
2843 printf(" | ");
2844 }
2845
2846 field = find_field(event, "func");
2847 if (!field)
2848 die("function entry does not have func field");
2849
2850 val = read_size(data + field->offset, field->size);
2851
2852 /*
2853 * peek_data may unmap the data pointer. Copy it first.
2854 */
2855 copy_data = malloc_or_die(size);
2856 memcpy(copy_data, data, size);
2857 data = copy_data;
2858
2859 rec = trace_peek_data(cpu);
2860 if (rec) {
2861 rec = get_return_for_leaf(cpu, pid, val, rec);
2862 if (rec) {
2863 print_graph_entry_leaf(event, data, rec);
2864 goto out_free;
2865 }
2866 }
2867 print_graph_nested(event, data);
2868out_free:
2869 free(data);
2870}
2871
2872static void
2873pretty_print_func_ret(void *data, int size __unused, struct event *event)
2874{
2875 unsigned long long rettime, calltime;
2876 unsigned long long duration, depth;
2877 struct format_field *field;
2878 int i;
2879
2880 if (latency_format) {
2881 print_lat_fmt(data, size);
2882 printf(" | ");
2883 }
2884
2885 field = find_field(event, "rettime");
2886 if (!field)
2887 die("can't find rettime in return graph");
2888 rettime = read_size(data + field->offset, field->size);
2889
2890 field = find_field(event, "calltime");
2891 if (!field)
2892 die("can't find calltime in return graph");
2893 calltime = read_size(data + field->offset, field->size);
2894
2895 duration = rettime - calltime;
2896
2897 /* Overhead */
2898 print_graph_overhead(duration);
2899
2900 /* Duration */
2901 print_graph_duration(duration);
2902
2903 field = find_field(event, "depth");
2904 if (!field)
2905 die("can't find depth in entry graph");
2906 depth = read_size(data + field->offset, field->size);
2907
2908 /* Function */
2909 for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
2910 printf(" ");
2911
2912 printf("}");
2913}
2914
2915static void
2916pretty_print_func_graph(void *data, int size, struct event *event,
2917 int cpu, int pid)
2918{
2919 if (event->flags & EVENT_FL_ISFUNCENT)
2920 pretty_print_func_ent(data, size, event, cpu, pid);
2921 else if (event->flags & EVENT_FL_ISFUNCRET)
2922 pretty_print_func_ret(data, size, event);
2923 printf("\n");
2924}
2925
2926void print_trace_event(int cpu, void *data, int size)
2927{
2928 struct event *event;
2929 int type;
2930 int pid;
2931
2932 type = trace_parse_common_type(data);
2933
2934 event = trace_find_event(type);
2935 if (!event) {
2936 warning("ug! no event found for type %d", type);
2937 return;
2938 }
2939
2940 pid = trace_parse_common_pid(data);
2941
2942 if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET))
2943 return pretty_print_func_graph(data, size, event, cpu, pid);
2944
2945 if (latency_format)
2946 print_lat_fmt(data, size);
2947
2948 if (event->flags & EVENT_FL_FAILED) {
2949 printf("EVENT '%s' FAILED TO PARSE\n",
2950 event->name);
2951 return;
2952 }
2953
2954 pretty_print(data, size, event);
2955}
2956
2957static void print_fields(struct print_flag_sym *field)
2958{
2959 printf("{ %s, %s }", field->value, field->str);
2960 if (field->next) {
2961 printf(", ");
2962 print_fields(field->next);
2963 }
2964}
2965
2966static void print_args(struct print_arg *args)
2967{
2968 int print_paren = 1;
2969
2970 switch (args->type) {
2971 case PRINT_NULL:
2972 printf("null");
2973 break;
2974 case PRINT_ATOM:
2975 printf("%s", args->atom.atom);
2976 break;
2977 case PRINT_FIELD:
2978 printf("REC->%s", args->field.name);
2979 break;
2980 case PRINT_FLAGS:
2981 printf("__print_flags(");
2982 print_args(args->flags.field);
2983 printf(", %s, ", args->flags.delim);
2984 print_fields(args->flags.flags);
2985 printf(")");
2986 break;
2987 case PRINT_SYMBOL:
2988 printf("__print_symbolic(");
2989 print_args(args->symbol.field);
2990 printf(", ");
2991 print_fields(args->symbol.symbols);
2992 printf(")");
2993 break;
2994 case PRINT_STRING:
2995 printf("__get_str(%s)", args->string.string);
2996 break;
2997 case PRINT_TYPE:
2998 printf("(%s)", args->typecast.type);
2999 print_args(args->typecast.item);
3000 break;
3001 case PRINT_OP:
3002 if (strcmp(args->op.op, ":") == 0)
3003 print_paren = 0;
3004 if (print_paren)
3005 printf("(");
3006 print_args(args->op.left);
3007 printf(" %s ", args->op.op);
3008 print_args(args->op.right);
3009 if (print_paren)
3010 printf(")");
3011 break;
3012 default:
3013 /* we should warn... */
3014 return;
3015 }
3016 if (args->next) {
3017 printf("\n");
3018 print_args(args->next);
3019 }
3020}
3021
3022int parse_ftrace_file(char *buf, unsigned long size)
3023{
3024 struct format_field *field;
3025 struct print_arg *arg, **list;
3026 struct event *event;
3027 int ret;
3028
3029 init_input_buf(buf, size);
3030
3031 event = alloc_event();
3032 if (!event)
3033 return -ENOMEM;
3034
3035 event->flags |= EVENT_FL_ISFTRACE;
3036
3037 event->name = event_read_name();
3038 if (!event->name)
3039 die("failed to read ftrace event name");
3040
3041 if (strcmp(event->name, "function") == 0)
3042 event->flags |= EVENT_FL_ISFUNC;
3043
3044 else if (strcmp(event->name, "funcgraph_entry") == 0)
3045 event->flags |= EVENT_FL_ISFUNCENT;
3046
3047 else if (strcmp(event->name, "funcgraph_exit") == 0)
3048 event->flags |= EVENT_FL_ISFUNCRET;
3049
3050 else if (strcmp(event->name, "bprint") == 0)
3051 event->flags |= EVENT_FL_ISBPRINT;
3052
3053 event->id = event_read_id();
3054 if (event->id < 0)
3055 die("failed to read ftrace event id");
3056
3057 add_event(event);
3058
3059 ret = event_read_format(event);
3060 if (ret < 0)
3061 die("failed to read ftrace event format");
3062
3063 ret = event_read_print(event);
3064 if (ret < 0)
3065 die("failed to read ftrace event print fmt");
3066
3067 /* New ftrace handles args */
3068 if (ret > 0)
3069 return 0;
3070 /*
3071 * The arguments for ftrace files are parsed by the fields.
3072 * Set up the fields as their arguments.
3073 */
3074 list = &event->print_fmt.args;
3075 for (field = event->format.fields; field; field = field->next) {
3076 arg = malloc_or_die(sizeof(*arg));
3077 memset(arg, 0, sizeof(*arg));
3078 *list = arg;
3079 list = &arg->next;
3080 arg->type = PRINT_FIELD;
3081 arg->field.name = field->name;
3082 arg->field.field = field;
3083 }
3084 return 0;
3085}
3086
3087int parse_event_file(char *buf, unsigned long size, char *sys)
3088{
3089 struct event *event;
3090 int ret;
3091
3092 init_input_buf(buf, size);
3093
3094 event = alloc_event();
3095 if (!event)
3096 return -ENOMEM;
3097
3098 event->name = event_read_name();
3099 if (!event->name)
3100 die("failed to read event name");
3101
3102 event->id = event_read_id();
3103 if (event->id < 0)
3104 die("failed to read event id");
3105
3106 ret = event_read_format(event);
3107 if (ret < 0) {
3108 warning("failed to read event format for %s", event->name);
3109 goto event_failed;
3110 }
3111
3112 ret = event_read_print(event);
3113 if (ret < 0) {
3114 warning("failed to read event print fmt for %s", event->name);
3115 goto event_failed;
3116 }
3117
3118 event->system = strdup(sys);
3119
3120#define PRINT_ARGS 0
3121 if (PRINT_ARGS && event->print_fmt.args)
3122 print_args(event->print_fmt.args);
3123
3124 add_event(event);
3125 return 0;
3126
3127 event_failed:
3128 event->flags |= EVENT_FL_FAILED;
3129 /* still add it even if it failed */
3130 add_event(event);
3131 return -1;
3132}
3133
3134void parse_set_info(int nr_cpus, int long_sz)
3135{
3136 cpus = nr_cpus;
3137 long_size = long_sz;
3138}
3139
3140int common_pc(struct scripting_context *context)
3141{
3142 return parse_common_pc(context->event_data);
3143}
3144
3145int common_flags(struct scripting_context *context)
3146{
3147 return parse_common_flags(context->event_data);
3148}
3149
3150int common_lock_depth(struct scripting_context *context)
3151{
3152 return parse_common_lock_depth(context->event_data);
3153}
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index b9592e0de8d7..f097e0dd6c5c 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -52,6 +52,16 @@ static unsigned long page_size;
52static ssize_t calc_data_size; 52static ssize_t calc_data_size;
53static bool repipe; 53static bool repipe;
54 54
55static void *malloc_or_die(int size)
56{
57 void *ret;
58
59 ret = malloc(size);
60 if (!ret)
61 die("malloc");
62 return ret;
63}
64
55static int do_read(int fd, void *buf, int size) 65static int do_read(int fd, void *buf, int size)
56{ 66{
57 int rsize = size; 67 int rsize = size;
@@ -109,7 +119,7 @@ static unsigned int read4(void)
109 unsigned int data; 119 unsigned int data;
110 120
111 read_or_die(&data, 4); 121 read_or_die(&data, 4);
112 return __data2host4(data); 122 return __data2host4(perf_pevent, data);
113} 123}
114 124
115static unsigned long long read8(void) 125static unsigned long long read8(void)
@@ -117,7 +127,7 @@ static unsigned long long read8(void)
117 unsigned long long data; 127 unsigned long long data;
118 128
119 read_or_die(&data, 8); 129 read_or_die(&data, 8);
120 return __data2host8(data); 130 return __data2host8(perf_pevent, data);
121} 131}
122 132
123static char *read_string(void) 133static char *read_string(void)
@@ -282,7 +292,7 @@ struct cpu_data {
282 unsigned long long offset; 292 unsigned long long offset;
283 unsigned long long size; 293 unsigned long long size;
284 unsigned long long timestamp; 294 unsigned long long timestamp;
285 struct record *next; 295 struct pevent_record *next;
286 char *page; 296 char *page;
287 int cpu; 297 int cpu;
288 int index; 298 int index;
@@ -367,9 +377,9 @@ static int calc_index(void *ptr, int cpu)
367 return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page; 377 return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
368} 378}
369 379
370struct record *trace_peek_data(int cpu) 380struct pevent_record *trace_peek_data(int cpu)
371{ 381{
372 struct record *data; 382 struct pevent_record *data;
373 void *page = cpu_data[cpu].page; 383 void *page = cpu_data[cpu].page;
374 int idx = cpu_data[cpu].index; 384 int idx = cpu_data[cpu].index;
375 void *ptr = page + idx; 385 void *ptr = page + idx;
@@ -389,15 +399,15 @@ struct record *trace_peek_data(int cpu)
389 /* FIXME: handle header page */ 399 /* FIXME: handle header page */
390 if (header_page_ts_size != 8) 400 if (header_page_ts_size != 8)
391 die("expected a long long type for timestamp"); 401 die("expected a long long type for timestamp");
392 cpu_data[cpu].timestamp = data2host8(ptr); 402 cpu_data[cpu].timestamp = data2host8(perf_pevent, ptr);
393 ptr += 8; 403 ptr += 8;
394 switch (header_page_size_size) { 404 switch (header_page_size_size) {
395 case 4: 405 case 4:
396 cpu_data[cpu].page_size = data2host4(ptr); 406 cpu_data[cpu].page_size = data2host4(perf_pevent, ptr);
397 ptr += 4; 407 ptr += 4;
398 break; 408 break;
399 case 8: 409 case 8:
400 cpu_data[cpu].page_size = data2host8(ptr); 410 cpu_data[cpu].page_size = data2host8(perf_pevent, ptr);
401 ptr += 8; 411 ptr += 8;
402 break; 412 break;
403 default: 413 default:
@@ -414,7 +424,7 @@ read_again:
414 return trace_peek_data(cpu); 424 return trace_peek_data(cpu);
415 } 425 }
416 426
417 type_len_ts = data2host4(ptr); 427 type_len_ts = data2host4(perf_pevent, ptr);
418 ptr += 4; 428 ptr += 4;
419 429
420 type_len = type_len4host(type_len_ts); 430 type_len = type_len4host(type_len_ts);
@@ -424,14 +434,14 @@ read_again:
424 case RINGBUF_TYPE_PADDING: 434 case RINGBUF_TYPE_PADDING:
425 if (!delta) 435 if (!delta)
426 die("error, hit unexpected end of page"); 436 die("error, hit unexpected end of page");
427 length = data2host4(ptr); 437 length = data2host4(perf_pevent, ptr);
428 ptr += 4; 438 ptr += 4;
429 length *= 4; 439 length *= 4;
430 ptr += length; 440 ptr += length;
431 goto read_again; 441 goto read_again;
432 442
433 case RINGBUF_TYPE_TIME_EXTEND: 443 case RINGBUF_TYPE_TIME_EXTEND:
434 extend = data2host4(ptr); 444 extend = data2host4(perf_pevent, ptr);
435 ptr += 4; 445 ptr += 4;
436 extend <<= TS_SHIFT; 446 extend <<= TS_SHIFT;
437 extend += delta; 447 extend += delta;
@@ -442,7 +452,7 @@ read_again:
442 ptr += 12; 452 ptr += 12;
443 break; 453 break;
444 case 0: 454 case 0:
445 length = data2host4(ptr); 455 length = data2host4(perf_pevent, ptr);
446 ptr += 4; 456 ptr += 4;
447 die("here! length=%d", length); 457 die("here! length=%d", length);
448 break; 458 break;
@@ -467,9 +477,9 @@ read_again:
467 return data; 477 return data;
468} 478}
469 479
470struct record *trace_read_data(int cpu) 480struct pevent_record *trace_read_data(int cpu)
471{ 481{
472 struct record *data; 482 struct pevent_record *data;
473 483
474 data = trace_peek_data(cpu); 484 data = trace_peek_data(cpu);
475 cpu_data[cpu].next = NULL; 485 cpu_data[cpu].next = NULL;
@@ -509,6 +519,8 @@ ssize_t trace_report(int fd, bool __repipe)
509 file_bigendian = buf[0]; 519 file_bigendian = buf[0];
510 host_bigendian = bigendian(); 520 host_bigendian = bigendian();
511 521
522 read_trace_init(file_bigendian, host_bigendian);
523
512 read_or_die(buf, 1); 524 read_or_die(buf, 1);
513 long_size = buf[0]; 525 long_size = buf[0];
514 526
@@ -526,11 +538,11 @@ ssize_t trace_report(int fd, bool __repipe)
526 repipe = false; 538 repipe = false;
527 539
528 if (show_funcs) { 540 if (show_funcs) {
529 print_funcs(); 541 pevent_print_funcs(perf_pevent);
530 return size; 542 return size;
531 } 543 }
532 if (show_printk) { 544 if (show_printk) {
533 print_printk(); 545 pevent_print_printk(perf_pevent);
534 return size; 546 return size;
535 } 547 }
536 548
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 58ae14c5baac..639852ac1117 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,20 +1,21 @@
1#ifndef __PERF_TRACE_EVENTS_H 1#ifndef _PERF_UTIL_TRACE_EVENT_H
2#define __PERF_TRACE_EVENTS_H 2#define _PERF_UTIL_TRACE_EVENT_H
3 3
4#include <stdbool.h>
5#include "parse-events.h" 4#include "parse-events.h"
5#include "event-parse.h"
6#include "session.h"
6 7
7struct machine; 8struct machine;
8struct perf_sample; 9struct perf_sample;
9union perf_event; 10union perf_event;
10struct thread; 11struct thread;
11 12
12#define __unused __attribute__((unused)) 13extern int header_page_size_size;
13 14extern int header_page_ts_size;
15extern int header_page_data_offset;
14 16
15#ifndef PAGE_MASK 17extern bool latency_format;
16#define PAGE_MASK (page_size - 1) 18extern struct pevent *perf_pevent;
17#endif
18 19
19enum { 20enum {
20 RINGBUF_TYPE_PADDING = 29, 21 RINGBUF_TYPE_PADDING = 29,
@@ -26,246 +27,37 @@ enum {
26#define TS_SHIFT 27 27#define TS_SHIFT 27
27#endif 28#endif
28 29
29#define NSECS_PER_SEC 1000000000ULL 30int bigendian(void);
30#define NSECS_PER_USEC 1000ULL
31
32enum format_flags {
33 FIELD_IS_ARRAY = 1,
34 FIELD_IS_POINTER = 2,
35 FIELD_IS_SIGNED = 4,
36 FIELD_IS_STRING = 8,
37 FIELD_IS_DYNAMIC = 16,
38 FIELD_IS_FLAG = 32,
39 FIELD_IS_SYMBOLIC = 64,
40};
41
42struct format_field {
43 struct format_field *next;
44 char *type;
45 char *name;
46 int offset;
47 int size;
48 unsigned long flags;
49};
50
51struct format {
52 int nr_common;
53 int nr_fields;
54 struct format_field *common_fields;
55 struct format_field *fields;
56};
57
58struct print_arg_atom {
59 char *atom;
60};
61
62struct print_arg_string {
63 char *string;
64 int offset;
65};
66
67struct print_arg_field {
68 char *name;
69 struct format_field *field;
70};
71
72struct print_flag_sym {
73 struct print_flag_sym *next;
74 char *value;
75 char *str;
76};
77
78struct print_arg_typecast {
79 char *type;
80 struct print_arg *item;
81};
82
83struct print_arg_flags {
84 struct print_arg *field;
85 char *delim;
86 struct print_flag_sym *flags;
87};
88
89struct print_arg_symbol {
90 struct print_arg *field;
91 struct print_flag_sym *symbols;
92};
93
94struct print_arg;
95
96struct print_arg_op {
97 char *op;
98 int prio;
99 struct print_arg *left;
100 struct print_arg *right;
101};
102
103struct print_arg_func {
104 char *name;
105 struct print_arg *args;
106};
107
108enum print_arg_type {
109 PRINT_NULL,
110 PRINT_ATOM,
111 PRINT_FIELD,
112 PRINT_FLAGS,
113 PRINT_SYMBOL,
114 PRINT_TYPE,
115 PRINT_STRING,
116 PRINT_OP,
117};
118
119struct print_arg {
120 struct print_arg *next;
121 enum print_arg_type type;
122 union {
123 struct print_arg_atom atom;
124 struct print_arg_field field;
125 struct print_arg_typecast typecast;
126 struct print_arg_flags flags;
127 struct print_arg_symbol symbol;
128 struct print_arg_func func;
129 struct print_arg_string string;
130 struct print_arg_op op;
131 };
132};
133
134struct print_fmt {
135 char *format;
136 struct print_arg *args;
137};
138
139struct event {
140 struct event *next;
141 char *name;
142 int id;
143 int flags;
144 struct format format;
145 struct print_fmt print_fmt;
146 char *system;
147};
148
149enum {
150 EVENT_FL_ISFTRACE = 0x01,
151 EVENT_FL_ISPRINT = 0x02,
152 EVENT_FL_ISBPRINT = 0x04,
153 EVENT_FL_ISFUNC = 0x08,
154 EVENT_FL_ISFUNCENT = 0x10,
155 EVENT_FL_ISFUNCRET = 0x20,
156
157 EVENT_FL_FAILED = 0x80000000
158};
159
160struct record {
161 unsigned long long ts;
162 int size;
163 void *data;
164};
165
166struct record *trace_peek_data(int cpu);
167struct record *trace_read_data(int cpu);
168
169void parse_set_info(int nr_cpus, int long_sz);
170
171ssize_t trace_report(int fd, bool repipe);
172
173void *malloc_or_die(unsigned int size);
174 31
175void parse_cmdlines(char *file, int size); 32int read_trace_init(int file_bigendian, int host_bigendian);
176void parse_proc_kallsyms(char *file, unsigned int size); 33void print_trace_event(int cpu, void *data, int size);
177void parse_ftrace_printk(char *file, unsigned int size);
178 34
179void print_funcs(void); 35void print_event(int cpu, void *data, int size, unsigned long long nsecs,
180void print_printk(void); 36 char *comm);
181 37
182int parse_ftrace_file(char *buf, unsigned long size); 38int parse_ftrace_file(char *buf, unsigned long size);
183int parse_event_file(char *buf, unsigned long size, char *sys); 39int parse_event_file(char *buf, unsigned long size, char *sys);
184void print_trace_event(int cpu, void *data, int size);
185
186extern int file_bigendian;
187extern int host_bigendian;
188
189int bigendian(void);
190
191static inline unsigned short __data2host2(unsigned short data)
192{
193 unsigned short swap;
194
195 if (host_bigendian == file_bigendian)
196 return data;
197 40
198 swap = ((data & 0xffULL) << 8) | 41struct pevent_record *trace_peek_data(int cpu);
199 ((data & (0xffULL << 8)) >> 8); 42struct event_format *trace_find_event(int type);
200 43
201 return swap; 44unsigned long long
202} 45raw_field_value(struct event_format *event, const char *name, void *data);
203 46void *raw_field_ptr(struct event_format *event, const char *name, void *data);
204static inline unsigned int __data2host4(unsigned int data)
205{
206 unsigned int swap;
207
208 if (host_bigendian == file_bigendian)
209 return data;
210
211 swap = ((data & 0xffULL) << 24) |
212 ((data & (0xffULL << 8)) << 8) |
213 ((data & (0xffULL << 16)) >> 8) |
214 ((data & (0xffULL << 24)) >> 24);
215
216 return swap;
217}
218
219static inline unsigned long long __data2host8(unsigned long long data)
220{
221 unsigned long long swap;
222
223 if (host_bigendian == file_bigendian)
224 return data;
225
226 swap = ((data & 0xffULL) << 56) |
227 ((data & (0xffULL << 8)) << 40) |
228 ((data & (0xffULL << 16)) << 24) |
229 ((data & (0xffULL << 24)) << 8) |
230 ((data & (0xffULL << 32)) >> 8) |
231 ((data & (0xffULL << 40)) >> 24) |
232 ((data & (0xffULL << 48)) >> 40) |
233 ((data & (0xffULL << 56)) >> 56);
234
235 return swap;
236}
237 47
238#define data2host2(ptr) __data2host2(*(unsigned short *)ptr) 48void parse_proc_kallsyms(char *file, unsigned int size __unused);
239#define data2host4(ptr) __data2host4(*(unsigned int *)ptr) 49void parse_ftrace_printk(char *file, unsigned int size __unused);
240#define data2host8(ptr) ({ \
241 unsigned long long __val; \
242 \
243 memcpy(&__val, (ptr), sizeof(unsigned long long)); \
244 __data2host8(__val); \
245})
246 50
247extern int header_page_ts_offset; 51ssize_t trace_report(int fd, bool repipe);
248extern int header_page_ts_size;
249extern int header_page_size_offset;
250extern int header_page_size_size;
251extern int header_page_data_offset;
252extern int header_page_data_size;
253
254extern bool latency_format;
255 52
256int trace_parse_common_type(void *data); 53int trace_parse_common_type(void *data);
257int trace_parse_common_pid(void *data); 54int trace_parse_common_pid(void *data);
258int parse_common_pc(void *data); 55
259int parse_common_flags(void *data); 56struct event_format *trace_find_next_event(struct event_format *event);
260int parse_common_lock_depth(void *data);
261struct event *trace_find_event(int id);
262struct event *trace_find_next_event(struct event *event);
263unsigned long long read_size(void *ptr, int size); 57unsigned long long read_size(void *ptr, int size);
264unsigned long long
265raw_field_value(struct event *event, const char *name, void *data);
266void *raw_field_ptr(struct event *event, const char *name, void *data);
267unsigned long long eval_flag(const char *flag); 58unsigned long long eval_flag(const char *flag);
268 59
60struct pevent_record *trace_read_data(int cpu);
269int read_tracing_data(int fd, struct list_head *pattrs); 61int read_tracing_data(int fd, struct list_head *pattrs);
270 62
271struct tracing_data { 63struct tracing_data {
@@ -280,15 +72,6 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
280void tracing_data_put(struct tracing_data *tdata); 72void tracing_data_put(struct tracing_data *tdata);
281 73
282 74
283/* taken from kernel/trace/trace.h */
284enum trace_flag_type {
285 TRACE_FLAG_IRQS_OFF = 0x01,
286 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
287 TRACE_FLAG_NEED_RESCHED = 0x04,
288 TRACE_FLAG_HARDIRQ = 0x08,
289 TRACE_FLAG_SOFTIRQ = 0x10,
290};
291
292struct scripting_ops { 75struct scripting_ops {
293 const char *name; 76 const char *name;
294 int (*start_script) (const char *script, int argc, const char **argv); 77 int (*start_script) (const char *script, int argc, const char **argv);
@@ -314,4 +97,4 @@ int common_pc(struct scripting_context *context);
314int common_flags(struct scripting_context *context); 97int common_flags(struct scripting_context *context);
315int common_lock_depth(struct scripting_context *context); 98int common_lock_depth(struct scripting_context *context);
316 99
317#endif /* __PERF_TRACE_EVENTS_H */ 100#endif /* _PERF_UTIL_TRACE_EVENT_H */
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
deleted file mode 100644
index 57a4c6ef3fd2..000000000000
--- a/tools/perf/util/ui/browsers/annotate.c
+++ /dev/null
@@ -1,433 +0,0 @@
1#include "../../util.h"
2#include "../browser.h"
3#include "../helpline.h"
4#include "../libslang.h"
5#include "../ui.h"
6#include "../util.h"
7#include "../../annotate.h"
8#include "../../hist.h"
9#include "../../sort.h"
10#include "../../symbol.h"
11#include <pthread.h>
12#include <newt.h>
13
14struct annotate_browser {
15 struct ui_browser b;
16 struct rb_root entries;
17 struct rb_node *curr_hot;
18 struct objdump_line *selection;
19 int nr_asm_entries;
20 int nr_entries;
21 bool hide_src_code;
22};
23
24struct objdump_line_rb_node {
25 struct rb_node rb_node;
26 double percent;
27 u32 idx;
28 int idx_asm;
29};
30
31static inline
32struct objdump_line_rb_node *objdump_line__rb(struct objdump_line *self)
33{
34 return (struct objdump_line_rb_node *)(self + 1);
35}
36
37static bool objdump_line__filter(struct ui_browser *browser, void *entry)
38{
39 struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
40
41 if (ab->hide_src_code) {
42 struct objdump_line *ol = list_entry(entry, struct objdump_line, node);
43 return ol->offset == -1;
44 }
45
46 return false;
47}
48
49static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
50{
51 struct annotate_browser *ab = container_of(self, struct annotate_browser, b);
52 struct objdump_line *ol = list_entry(entry, struct objdump_line, node);
53 bool current_entry = ui_browser__is_current_entry(self, row);
54 int width = self->width;
55
56 if (ol->offset != -1) {
57 struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
58 ui_browser__set_percent_color(self, olrb->percent, current_entry);
59 slsmg_printf(" %7.2f ", olrb->percent);
60 } else {
61 ui_browser__set_percent_color(self, 0, current_entry);
62 slsmg_write_nstring(" ", 9);
63 }
64
65 SLsmg_write_char(':');
66 slsmg_write_nstring(" ", 8);
67
68 /* The scroll bar isn't being used */
69 if (!self->navkeypressed)
70 width += 1;
71
72 if (!ab->hide_src_code && ol->offset != -1)
73 if (!current_entry || (self->use_navkeypressed &&
74 !self->navkeypressed))
75 ui_browser__set_color(self, HE_COLORSET_CODE);
76
77 if (!*ol->line)
78 slsmg_write_nstring(" ", width - 18);
79 else
80 slsmg_write_nstring(ol->line, width - 18);
81
82 if (current_entry)
83 ab->selection = ol;
84}
85
86static double objdump_line__calc_percent(struct objdump_line *self,
87 struct symbol *sym, int evidx)
88{
89 double percent = 0.0;
90
91 if (self->offset != -1) {
92 int len = sym->end - sym->start;
93 unsigned int hits = 0;
94 struct annotation *notes = symbol__annotation(sym);
95 struct source_line *src_line = notes->src->lines;
96 struct sym_hist *h = annotation__histogram(notes, evidx);
97 s64 offset = self->offset;
98 struct objdump_line *next;
99
100 next = objdump__get_next_ip_line(&notes->src->source, self);
101 while (offset < (s64)len &&
102 (next == NULL || offset < next->offset)) {
103 if (src_line) {
104 percent += src_line[offset].percent;
105 } else
106 hits += h->addr[offset];
107
108 ++offset;
109 }
110 /*
111 * If the percentage wasn't already calculated in
112 * symbol__get_source_line, do it now:
113 */
114 if (src_line == NULL && h->sum)
115 percent = 100.0 * hits / h->sum;
116 }
117
118 return percent;
119}
120
121static void objdump__insert_line(struct rb_root *self,
122 struct objdump_line_rb_node *line)
123{
124 struct rb_node **p = &self->rb_node;
125 struct rb_node *parent = NULL;
126 struct objdump_line_rb_node *l;
127
128 while (*p != NULL) {
129 parent = *p;
130 l = rb_entry(parent, struct objdump_line_rb_node, rb_node);
131 if (line->percent < l->percent)
132 p = &(*p)->rb_left;
133 else
134 p = &(*p)->rb_right;
135 }
136 rb_link_node(&line->rb_node, parent, p);
137 rb_insert_color(&line->rb_node, self);
138}
139
140static void annotate_browser__set_top(struct annotate_browser *self,
141 struct rb_node *nd)
142{
143 struct objdump_line_rb_node *rbpos;
144 struct objdump_line *pos;
145 unsigned back;
146
147 ui_browser__refresh_dimensions(&self->b);
148 back = self->b.height / 2;
149 rbpos = rb_entry(nd, struct objdump_line_rb_node, rb_node);
150 pos = ((struct objdump_line *)rbpos) - 1;
151 self->b.top_idx = self->b.index = rbpos->idx;
152
153 while (self->b.top_idx != 0 && back != 0) {
154 pos = list_entry(pos->node.prev, struct objdump_line, node);
155
156 --self->b.top_idx;
157 --back;
158 }
159
160 self->b.top = pos;
161 self->curr_hot = nd;
162}
163
164static void annotate_browser__calc_percent(struct annotate_browser *browser,
165 int evidx)
166{
167 struct map_symbol *ms = browser->b.priv;
168 struct symbol *sym = ms->sym;
169 struct annotation *notes = symbol__annotation(sym);
170 struct objdump_line *pos;
171
172 browser->entries = RB_ROOT;
173
174 pthread_mutex_lock(&notes->lock);
175
176 list_for_each_entry(pos, &notes->src->source, node) {
177 struct objdump_line_rb_node *rbpos = objdump_line__rb(pos);
178 rbpos->percent = objdump_line__calc_percent(pos, sym, evidx);
179 if (rbpos->percent < 0.01) {
180 RB_CLEAR_NODE(&rbpos->rb_node);
181 continue;
182 }
183 objdump__insert_line(&browser->entries, rbpos);
184 }
185 pthread_mutex_unlock(&notes->lock);
186
187 browser->curr_hot = rb_last(&browser->entries);
188}
189
190static bool annotate_browser__toggle_source(struct annotate_browser *browser)
191{
192 struct objdump_line *ol;
193 struct objdump_line_rb_node *olrb;
194 off_t offset = browser->b.index - browser->b.top_idx;
195
196 browser->b.seek(&browser->b, offset, SEEK_CUR);
197 ol = list_entry(browser->b.top, struct objdump_line, node);
198 olrb = objdump_line__rb(ol);
199
200 if (browser->hide_src_code) {
201 if (olrb->idx_asm < offset)
202 offset = olrb->idx;
203
204 browser->b.nr_entries = browser->nr_entries;
205 browser->hide_src_code = false;
206 browser->b.seek(&browser->b, -offset, SEEK_CUR);
207 browser->b.top_idx = olrb->idx - offset;
208 browser->b.index = olrb->idx;
209 } else {
210 if (olrb->idx_asm < 0) {
211 ui_helpline__puts("Only available for assembly lines.");
212 browser->b.seek(&browser->b, -offset, SEEK_CUR);
213 return false;
214 }
215
216 if (olrb->idx_asm < offset)
217 offset = olrb->idx_asm;
218
219 browser->b.nr_entries = browser->nr_asm_entries;
220 browser->hide_src_code = true;
221 browser->b.seek(&browser->b, -offset, SEEK_CUR);
222 browser->b.top_idx = olrb->idx_asm - offset;
223 browser->b.index = olrb->idx_asm;
224 }
225
226 return true;
227}
228
229static int annotate_browser__run(struct annotate_browser *self, int evidx,
230 void(*timer)(void *arg),
231 void *arg, int delay_secs)
232{
233 struct rb_node *nd = NULL;
234 struct map_symbol *ms = self->b.priv;
235 struct symbol *sym = ms->sym;
236 const char *help = "<-/ESC: Exit, TAB/shift+TAB: Cycle hot lines, "
237 "H: Go to hottest line, ->/ENTER: Line action, "
238 "S: Toggle source code view";
239 int key;
240
241 if (ui_browser__show(&self->b, sym->name, help) < 0)
242 return -1;
243
244 annotate_browser__calc_percent(self, evidx);
245
246 if (self->curr_hot)
247 annotate_browser__set_top(self, self->curr_hot);
248
249 nd = self->curr_hot;
250
251 while (1) {
252 key = ui_browser__run(&self->b, delay_secs);
253
254 if (delay_secs != 0) {
255 annotate_browser__calc_percent(self, evidx);
256 /*
257 * Current line focus got out of the list of most active
258 * lines, NULL it so that if TAB|UNTAB is pressed, we
259 * move to curr_hot (current hottest line).
260 */
261 if (nd != NULL && RB_EMPTY_NODE(nd))
262 nd = NULL;
263 }
264
265 switch (key) {
266 case K_TIMER:
267 if (timer != NULL)
268 timer(arg);
269
270 if (delay_secs != 0)
271 symbol__annotate_decay_histogram(sym, evidx);
272 continue;
273 case K_TAB:
274 if (nd != NULL) {
275 nd = rb_prev(nd);
276 if (nd == NULL)
277 nd = rb_last(&self->entries);
278 } else
279 nd = self->curr_hot;
280 break;
281 case K_UNTAB:
282 if (nd != NULL)
283 nd = rb_next(nd);
284 if (nd == NULL)
285 nd = rb_first(&self->entries);
286 else
287 nd = self->curr_hot;
288 break;
289 case 'H':
290 case 'h':
291 nd = self->curr_hot;
292 break;
293 case 'S':
294 case 's':
295 if (annotate_browser__toggle_source(self))
296 ui_helpline__puts(help);
297 continue;
298 case K_ENTER:
299 case K_RIGHT:
300 if (self->selection == NULL) {
301 ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
302 continue;
303 }
304
305 if (self->selection->offset == -1) {
306 ui_helpline__puts("Actions are only available for assembly lines.");
307 continue;
308 } else {
309 char *s = strstr(self->selection->line, "callq ");
310 struct annotation *notes;
311 struct symbol *target;
312 u64 ip;
313
314 if (s == NULL) {
315 ui_helpline__puts("Actions are only available for the 'callq' instruction.");
316 continue;
317 }
318
319 s = strchr(s, ' ');
320 if (s++ == NULL) {
321 ui_helpline__puts("Invallid callq instruction.");
322 continue;
323 }
324
325 ip = strtoull(s, NULL, 16);
326 ip = ms->map->map_ip(ms->map, ip);
327 target = map__find_symbol(ms->map, ip, NULL);
328 if (target == NULL) {
329 ui_helpline__puts("The called function was not found.");
330 continue;
331 }
332
333 notes = symbol__annotation(target);
334 pthread_mutex_lock(&notes->lock);
335
336 if (notes->src == NULL && symbol__alloc_hist(target) < 0) {
337 pthread_mutex_unlock(&notes->lock);
338 ui__warning("Not enough memory for annotating '%s' symbol!\n",
339 target->name);
340 continue;
341 }
342
343 pthread_mutex_unlock(&notes->lock);
344 symbol__tui_annotate(target, ms->map, evidx,
345 timer, arg, delay_secs);
346 ui_browser__show_title(&self->b, sym->name);
347 }
348 continue;
349 case K_LEFT:
350 case K_ESC:
351 case 'q':
352 case CTRL('c'):
353 goto out;
354 default:
355 continue;
356 }
357
358 if (nd != NULL)
359 annotate_browser__set_top(self, nd);
360 }
361out:
362 ui_browser__hide(&self->b);
363 return key;
364}
365
366int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
367 void(*timer)(void *arg), void *arg, int delay_secs)
368{
369 return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx,
370 timer, arg, delay_secs);
371}
372
373int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
374 void(*timer)(void *arg), void *arg,
375 int delay_secs)
376{
377 struct objdump_line *pos, *n;
378 struct annotation *notes;
379 struct map_symbol ms = {
380 .map = map,
381 .sym = sym,
382 };
383 struct annotate_browser browser = {
384 .b = {
385 .refresh = ui_browser__list_head_refresh,
386 .seek = ui_browser__list_head_seek,
387 .write = annotate_browser__write,
388 .filter = objdump_line__filter,
389 .priv = &ms,
390 .use_navkeypressed = true,
391 },
392 };
393 int ret;
394
395 if (sym == NULL)
396 return -1;
397
398 if (map->dso->annotate_warned)
399 return -1;
400
401 if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) {
402 ui__error("%s", ui_helpline__last_msg);
403 return -1;
404 }
405
406 ui_helpline__push("Press <- or ESC to exit");
407
408 notes = symbol__annotation(sym);
409
410 list_for_each_entry(pos, &notes->src->source, node) {
411 struct objdump_line_rb_node *rbpos;
412 size_t line_len = strlen(pos->line);
413
414 if (browser.b.width < line_len)
415 browser.b.width = line_len;
416 rbpos = objdump_line__rb(pos);
417 rbpos->idx = browser.nr_entries++;
418 if (pos->offset != -1)
419 rbpos->idx_asm = browser.nr_asm_entries++;
420 else
421 rbpos->idx_asm = -1;
422 }
423
424 browser.b.nr_entries = browser.nr_entries;
425 browser.b.entries = &notes->src->source,
426 browser.b.width += 18; /* Percentage */
427 ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs);
428 list_for_each_entry_safe(pos, n, &notes->src->source, node) {
429 list_del(&pos->node);
430 objdump_line__free(pos);
431 }
432 return ret;
433}
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index 52bb07c6442a..4007aca8e0ca 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -82,41 +82,3 @@ void warning(const char *warn, ...)
82 warn_routine(warn, params); 82 warn_routine(warn, params);
83 va_end(params); 83 va_end(params);
84} 84}
85
86uid_t parse_target_uid(const char *str, const char *tid, const char *pid)
87{
88 struct passwd pwd, *result;
89 char buf[1024];
90
91 if (str == NULL)
92 return UINT_MAX;
93
94 /* UID and PID are mutually exclusive */
95 if (tid || pid) {
96 ui__warning("PID/TID switch overriding UID\n");
97 sleep(1);
98 return UINT_MAX;
99 }
100
101 getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
102
103 if (result == NULL) {
104 char *endptr;
105 int uid = strtol(str, &endptr, 10);
106
107 if (*endptr != '\0') {
108 ui__error("Invalid user %s\n", str);
109 return UINT_MAX - 1;
110 }
111
112 getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
113
114 if (result == NULL) {
115 ui__error("Problems obtaining information for user %s\n",
116 str);
117 return UINT_MAX - 1;
118 }
119 }
120
121 return result->pw_uid;
122}
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 8109a907841e..d03599fbe78b 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -148,3 +148,13 @@ int readn(int fd, void *buf, size_t n)
148 148
149 return buf - buf_start; 149 return buf - buf_start;
150} 150}
151
152size_t hex_width(u64 v)
153{
154 size_t n = 1;
155
156 while ((v >>= 4))
157 ++n;
158
159 return n;
160}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 0f99f394d8e0..2daaedb83d84 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -74,7 +74,6 @@
74#include <netinet/tcp.h> 74#include <netinet/tcp.h>
75#include <arpa/inet.h> 75#include <arpa/inet.h>
76#include <netdb.h> 76#include <netdb.h>
77#include <pwd.h>
78#include <inttypes.h> 77#include <inttypes.h>
79#include "../../../include/linux/magic.h" 78#include "../../../include/linux/magic.h"
80#include "types.h" 79#include "types.h"
@@ -249,8 +248,6 @@ struct perf_event_attr;
249 248
250void event_attr_init(struct perf_event_attr *attr); 249void event_attr_init(struct perf_event_attr *attr);
251 250
252uid_t parse_target_uid(const char *str, const char *tid, const char *pid);
253
254#define _STR(x) #x 251#define _STR(x) #x
255#define STR(x) _STR(x) 252#define STR(x) _STR(x)
256 253
@@ -265,4 +262,6 @@ bool is_power_of_2(unsigned long n)
265 return (n != 0 && ((n & (n - 1)) == 0)); 262 return (n != 0 && ((n & (n - 1)) == 0));
266} 263}
267 264
265size_t hex_width(u64 v);
266
268#endif 267#endif
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
new file mode 100644
index 000000000000..bde8521d56bb
--- /dev/null
+++ b/tools/scripts/Makefile.include
@@ -0,0 +1,58 @@
1ifeq ("$(origin O)", "command line")
2 OUTPUT := $(O)/
3 COMMAND_O := O=$(O)
4endif
5
6ifneq ($(OUTPUT),)
7# check that the output directory actually exists
8OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
9$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
10endif
11
12#
13# Include saner warnings here, which can catch bugs:
14#
15EXTRA_WARNINGS := -Wbad-function-cast
16EXTRA_WARNINGS += -Wdeclaration-after-statement
17EXTRA_WARNINGS += -Wformat-security
18EXTRA_WARNINGS += -Wformat-y2k
19EXTRA_WARNINGS += -Winit-self
20EXTRA_WARNINGS += -Wmissing-declarations
21EXTRA_WARNINGS += -Wmissing-prototypes
22EXTRA_WARNINGS += -Wnested-externs
23EXTRA_WARNINGS += -Wno-system-headers
24EXTRA_WARNINGS += -Wold-style-definition
25EXTRA_WARNINGS += -Wpacked
26EXTRA_WARNINGS += -Wredundant-decls
27EXTRA_WARNINGS += -Wshadow
28EXTRA_WARNINGS += -Wstrict-aliasing=3
29EXTRA_WARNINGS += -Wstrict-prototypes
30EXTRA_WARNINGS += -Wswitch-default
31EXTRA_WARNINGS += -Wswitch-enum
32EXTRA_WARNINGS += -Wundef
33EXTRA_WARNINGS += -Wwrite-strings
34EXTRA_WARNINGS += -Wformat
35
36ifneq ($(findstring $(MAKEFLAGS), w),w)
37PRINT_DIR = --no-print-directory
38else
39NO_SUBDIR = :
40endif
41
42QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
43QUIET_SUBDIR1 =
44
45ifneq ($(findstring $(MAKEFLAGS),s),s)
46ifndef V
47 QUIET_CC = @echo ' ' CC $@;
48 QUIET_AR = @echo ' ' AR $@;
49 QUIET_LINK = @echo ' ' LINK $@;
50 QUIET_MKDIR = @echo ' ' MKDIR $@;
51 QUIET_GEN = @echo ' ' GEN $@;
52 QUIET_SUBDIR0 = +@subdir=
53 QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
54 $(MAKE) $(PRINT_DIR) -C $$subdir
55 QUIET_FLEX = @echo ' ' FLEX $@;
56 QUIET_BISON = @echo ' ' BISON $@;
57endif
58endif