From 3acbf0849bcbb639fde53dc627e3b55a4c6429d2 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 31 Aug 2010 10:44:17 +0200 Subject: oprofile, x86: Add support for AMD family 12h This patch adds support for AMD family 12h (Llano) cpus. Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index bd1489c3ce09..0b0d1d628207 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -726,6 +726,9 @@ int __init op_nmi_init(struct oprofile_operations *ops) case 0x11: cpu_type = "x86-64/family11h"; break; + case 0x12: + cpu_type = "x86-64/family12h"; + break; default: return -ENODEV; } -- cgit v1.2.2 From e63414740e15b4e2dc54c63fb9ea501b257fb0b5 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 26 Aug 2010 12:30:17 +0200 Subject: oprofile, x86: Add support for AMD family 14h This patch adds support for AMD family 14h (Ontario/Zacate) cpus. Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 0b0d1d628207..4e8baad36d37 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -729,6 +729,9 @@ int __init op_nmi_init(struct oprofile_operations *ops) case 0x12: cpu_type = "x86-64/family12h"; break; + case 0x14: + cpu_type = "x86-64/family14h"; + break; default: return -ENODEV; } -- cgit v1.2.2 From 4ac945f002c0bebdeb530cbc3729e22895e64a7e Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 21 Sep 2010 15:58:32 +0200 Subject: oprofile, x86: Check IBS capability bits 1 and 2 There are IBS CPUID feature flags in CPUID Fn8000_001B to detect if the cpu supports IBS fetch sampling (FetchSam) and/or IBS execution sampling (OpSam). This patch adds checks if the both features are available. Spec: http://support.amd.com/us/Processor_TechDocs/31116.pdf Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 59 ++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index b67a6b5aa8d4..96852d5480e0 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -70,9 +70,22 @@ static u64 ibs_op_ctl; * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but * bit 0 is used to indicate the existence of IBS. 
*/ -#define IBS_CAPS_AVAIL (1LL<<0) -#define IBS_CAPS_RDWROPCNT (1LL<<3) -#define IBS_CAPS_OPCNT (1LL<<4) +#define IBS_CAPS_AVAIL (1U<<0) +#define IBS_CAPS_FETCHSAM (1U<<1) +#define IBS_CAPS_OPSAM (1U<<2) +#define IBS_CAPS_RDWROPCNT (1U<<3) +#define IBS_CAPS_OPCNT (1U<<4) + +#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \ + | IBS_CAPS_FETCHSAM \ + | IBS_CAPS_OPSAM) + +/* + * IBS APIC setup + */ +#define IBSCTL 0x1cc +#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) +#define IBSCTL_LVT_OFFSET_MASK 0x0F /* * IBS randomization macros @@ -92,12 +105,12 @@ static u32 get_ibs_caps(void) /* check IBS cpuid feature flags */ max_level = cpuid_eax(0x80000000); if (max_level < IBS_CPUID_FEATURES) - return IBS_CAPS_AVAIL; + return IBS_CAPS_DEFAULT; ibs_caps = cpuid_eax(IBS_CPUID_FEATURES); if (!(ibs_caps & IBS_CAPS_AVAIL)) /* cpuid flags not valid */ - return IBS_CAPS_AVAIL; + return IBS_CAPS_DEFAULT; return ibs_caps; } @@ -527,22 +540,26 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root) ibs_config.op_enabled = 0; ibs_config.dispatched_ops = 0; - dir = oprofilefs_mkdir(sb, root, "ibs_fetch"); - oprofilefs_create_ulong(sb, dir, "enable", - &ibs_config.fetch_enabled); - oprofilefs_create_ulong(sb, dir, "max_count", - &ibs_config.max_cnt_fetch); - oprofilefs_create_ulong(sb, dir, "rand_enable", - &ibs_config.rand_en); - - dir = oprofilefs_mkdir(sb, root, "ibs_op"); - oprofilefs_create_ulong(sb, dir, "enable", - &ibs_config.op_enabled); - oprofilefs_create_ulong(sb, dir, "max_count", - &ibs_config.max_cnt_op); - if (ibs_caps & IBS_CAPS_OPCNT) - oprofilefs_create_ulong(sb, dir, "dispatched_ops", - &ibs_config.dispatched_ops); + if (ibs_caps & IBS_CAPS_FETCHSAM) { + dir = oprofilefs_mkdir(sb, root, "ibs_fetch"); + oprofilefs_create_ulong(sb, dir, "enable", + &ibs_config.fetch_enabled); + oprofilefs_create_ulong(sb, dir, "max_count", + &ibs_config.max_cnt_fetch); + oprofilefs_create_ulong(sb, dir, "rand_enable", + &ibs_config.rand_en); + } + + if (ibs_caps & IBS_CAPS_OPSAM) { + dir = oprofilefs_mkdir(sb, root, "ibs_op"); + oprofilefs_create_ulong(sb, dir, "enable", + &ibs_config.op_enabled); + oprofilefs_create_ulong(sb, dir, "max_count", + &ibs_config.max_cnt_op); + if (ibs_caps & IBS_CAPS_OPCNT) + oprofilefs_create_ulong(sb, dir, "dispatched_ops", + &ibs_config.dispatched_ops); + } return 0; } -- cgit v1.2.2 From fc889aa23f4767c1c3f77fce11e17bb0a638971f Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 21 Sep 2010 18:09:00 +0200 Subject: oprofile, x86: Remove duplicate check for IBS_CAPS_OPCNT Since oprofile is setting up ibs_op/dispatched_ops in the fs only if the feature is available, its corresponding variable ibs_config.dispatched_ops is only set, if the feature is available. Thus the check is duplicate and can be removed. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 96852d5480e0..d5e9dab71bec 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -257,8 +257,7 @@ static inline void op_amd_start_ibs(void) ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET, IBS_OP_MAX_CNT); } - if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops) - ibs_op_ctl |= IBS_OP_CNT_CTL; + ibs_op_ctl |= ibs_config.dispatched_ops ? 
IBS_OP_CNT_CTL : 0; ibs_op_ctl |= IBS_OP_ENABLE; val = op_amd_randomize_ibs_op(ibs_op_ctl); wrmsrl(MSR_AMD64_IBSOPCTL, val); -- cgit v1.2.2 From 53b39e9480ef8a286cef9899c455a979acd0eed9 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 21 Sep 2010 17:58:15 +0200 Subject: oprofile, x86: Introduce struct ibs_state This patch introduces struct ibs_state that will extended by additinal members in follow-on patches. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index d5e9dab71bec..9d4509736772 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -48,7 +48,7 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS]; static u32 ibs_caps; -struct op_ibs_config { +struct ibs_config { unsigned long op_enabled; unsigned long fetch_enabled; unsigned long max_cnt_fetch; @@ -57,8 +57,12 @@ struct op_ibs_config { unsigned long dispatched_ops; }; -static struct op_ibs_config ibs_config; -static u64 ibs_op_ctl; +struct ibs_state { + u64 ibs_op_ctl; +}; + +static struct ibs_config ibs_config; +static struct ibs_state ibs_state; /* * IBS cpuid feature detection @@ -219,7 +223,7 @@ op_amd_handle_ibs(struct pt_regs * const regs, oprofile_write_commit(&entry); /* reenable the IRQ */ - ctl = op_amd_randomize_ibs_op(ibs_op_ctl); + ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl); wrmsrl(MSR_AMD64_IBSOPCTL, ctl); } } @@ -232,6 +236,8 @@ static inline void op_amd_start_ibs(void) if (!ibs_caps) return; + memset(&ibs_state, 0, sizeof(ibs_state)); + if (ibs_config.fetch_enabled) { val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT; val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; @@ -240,13 +246,13 @@ static inline void op_amd_start_ibs(void) } if (ibs_config.op_enabled) { - ibs_op_ctl = ibs_config.max_cnt_op >> 4; + val = ibs_config.max_cnt_op >> 4; if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) { /* * IbsOpCurCnt not supported. See * op_amd_randomize_ibs_op() for details. */ - ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL); + val = clamp(val, 0x0081ULL, 0xFF80ULL); } else { /* * The start value is randomized with a @@ -254,12 +260,13 @@ static inline void op_amd_start_ibs(void) * with the half of the randomized range. Also * avoid underflows. */ - ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET, - IBS_OP_MAX_CNT); + val = min(val + IBS_RANDOM_MAXCNT_OFFSET, + IBS_OP_MAX_CNT); } - ibs_op_ctl |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0; - ibs_op_ctl |= IBS_OP_ENABLE; - val = op_amd_randomize_ibs_op(ibs_op_ctl); + val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0; + val |= IBS_OP_ENABLE; + ibs_state.ibs_op_ctl = val; + val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl); wrmsrl(MSR_AMD64_IBSOPCTL, val); } } -- cgit v1.2.2 From 25da6950475becb35d7a3bb3b5fbdc715a76887e Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 21 Sep 2010 15:49:31 +0200 Subject: oprofile, x86: Add support for IBS branch target address reporting This patch adds support for IBS branch target address reporting. A new MSR (MSRC001_103B IBS Branch Target Address) has been added that provides the logical address in canonical form for the branch target. The size of the IBS sample that is transferred to the userland has been increased. 
For backward compatibility, the userland daemon must explicit enable the feature by writing to the oprofilefs file ibs_op/branch_target After enabling branch target address reporting, the userland daemon must handle the extended size of the IBS sample. Signed-off-by: Robert Richter --- arch/x86/include/asm/msr-index.h | 1 + arch/x86/oprofile/op_model_amd.c | 26 ++++++++++++++++++++------ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 986f7790fdb2..91ba8e6b630a 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -121,6 +121,7 @@ #define MSR_AMD64_IBSDCLINAD 0xc0011038 #define MSR_AMD64_IBSDCPHYSAD 0xc0011039 #define MSR_AMD64_IBSCTL 0xc001103a +#define MSR_AMD64_IBSBRTARGET 0xc001103b /* Fam 10h MSRs */ #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 9d4509736772..9de33fa9531a 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -55,10 +55,13 @@ struct ibs_config { unsigned long max_cnt_op; unsigned long rand_en; unsigned long dispatched_ops; + unsigned long branch_target; }; struct ibs_state { - u64 ibs_op_ctl; + u64 ibs_op_ctl; + int branch_target; + unsigned long sample_size; }; static struct ibs_config ibs_config; @@ -79,6 +82,7 @@ static struct ibs_state ibs_state; #define IBS_CAPS_OPSAM (1U<<2) #define IBS_CAPS_RDWROPCNT (1U<<3) #define IBS_CAPS_OPCNT (1U<<4) +#define IBS_CAPS_BRNTRGT (1U<<5) #define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \ | IBS_CAPS_FETCHSAM \ @@ -207,8 +211,8 @@ op_amd_handle_ibs(struct pt_regs * const regs, rdmsrl(MSR_AMD64_IBSOPCTL, ctl); if (ctl & IBS_OP_VAL) { rdmsrl(MSR_AMD64_IBSOPRIP, val); - oprofile_write_reserve(&entry, regs, val, - IBS_OP_CODE, IBS_OP_SIZE); + oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE, + ibs_state.sample_size); oprofile_add_data64(&entry, val); rdmsrl(MSR_AMD64_IBSOPDATA, val); oprofile_add_data64(&entry, val); @@ -220,6 +224,10 @@ op_amd_handle_ibs(struct pt_regs * const regs, oprofile_add_data64(&entry, val); rdmsrl(MSR_AMD64_IBSDCPHYSAD, val); oprofile_add_data64(&entry, val); + if (ibs_state.branch_target) { + rdmsrl(MSR_AMD64_IBSBRTARGET, val); + oprofile_add_data(&entry, (unsigned long)val); + } oprofile_write_commit(&entry); /* reenable the IRQ */ @@ -266,6 +274,11 @@ static inline void op_amd_start_ibs(void) val |= ibs_config.dispatched_ops ? 
IBS_OP_CNT_CTL : 0; val |= IBS_OP_ENABLE; ibs_state.ibs_op_ctl = val; + ibs_state.sample_size = IBS_OP_SIZE; + if (ibs_config.branch_target) { + ibs_state.branch_target = 1; + ibs_state.sample_size++; + } val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl); wrmsrl(MSR_AMD64_IBSOPCTL, val); } @@ -540,11 +553,9 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root) /* model specific files */ /* setup some reasonable defaults */ + memset(&ibs_config, 0, sizeof(ibs_config)); ibs_config.max_cnt_fetch = 250000; - ibs_config.fetch_enabled = 0; ibs_config.max_cnt_op = 250000; - ibs_config.op_enabled = 0; - ibs_config.dispatched_ops = 0; if (ibs_caps & IBS_CAPS_FETCHSAM) { dir = oprofilefs_mkdir(sb, root, "ibs_fetch"); @@ -565,6 +576,9 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root) if (ibs_caps & IBS_CAPS_OPCNT) oprofilefs_create_ulong(sb, dir, "dispatched_ops", &ibs_config.dispatched_ops); + if (ibs_caps & IBS_CAPS_BRNTRGT) + oprofilefs_create_ulong(sb, dir, "branch_target", + &ibs_config.branch_target); } return 0; -- cgit v1.2.2 From b47fad3bfb5940cc3e28a1c69716f6dc44e4b7e6 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 22 Sep 2010 17:45:39 +0200 Subject: oprofile, x86: Add support for IBS periodic op counter extension The count value for IBS op sampling has been extended by 7 bits. The feature is reflected in bit 6 (OpCntExt) of the IBS capability register (CPUID Fn8000_001B_EAX). Signed-off-by: Robert Richter --- arch/x86/include/asm/perf_event.h | 19 ++++++++++--------- arch/x86/oprofile/op_model_amd.c | 22 +++++++++++++++++++--- 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 6e742cc4251b..550e26b1dbb3 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -111,17 +111,18 @@ union cpuid10_edx { #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) /* IbsFetchCtl bits/masks */ -#define IBS_FETCH_RAND_EN (1ULL<<57) -#define IBS_FETCH_VAL (1ULL<<49) -#define IBS_FETCH_ENABLE (1ULL<<48) -#define IBS_FETCH_CNT 0xFFFF0000ULL -#define IBS_FETCH_MAX_CNT 0x0000FFFFULL +#define IBS_FETCH_RAND_EN (1ULL<<57) +#define IBS_FETCH_VAL (1ULL<<49) +#define IBS_FETCH_ENABLE (1ULL<<48) +#define IBS_FETCH_CNT 0xFFFF0000ULL +#define IBS_FETCH_MAX_CNT 0x0000FFFFULL /* IbsOpCtl bits */ -#define IBS_OP_CNT_CTL (1ULL<<19) -#define IBS_OP_VAL (1ULL<<18) -#define IBS_OP_ENABLE (1ULL<<17) -#define IBS_OP_MAX_CNT 0x0000FFFFULL +#define IBS_OP_CNT_CTL (1ULL<<19) +#define IBS_OP_VAL (1ULL<<18) +#define IBS_OP_ENABLE (1ULL<<17) +#define IBS_OP_MAX_CNT 0x0000FFFFULL +#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */ #ifdef CONFIG_PERF_EVENTS extern void init_hw_perf_events(void); diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 9de33fa9531a..65f0a1eb6b86 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -83,6 +83,7 @@ static struct ibs_state ibs_state; #define IBS_CAPS_RDWROPCNT (1U<<3) #define IBS_CAPS_OPCNT (1U<<4) #define IBS_CAPS_BRNTRGT (1U<<5) +#define IBS_CAPS_OPCNTEXT (1U<<6) #define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \ | IBS_CAPS_FETCHSAM \ @@ -246,8 +247,16 @@ static inline void op_amd_start_ibs(void) memset(&ibs_state, 0, sizeof(ibs_state)); + /* + * Note: Since the max count settings may out of range we + * write back the actual used values so that userland can read + * it. 
+ */ + if (ibs_config.fetch_enabled) { - val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT; + val = ibs_config.max_cnt_fetch >> 4; + val = min(val, IBS_FETCH_MAX_CNT); + ibs_config.max_cnt_fetch = val << 4; val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; val |= IBS_FETCH_ENABLE; wrmsrl(MSR_AMD64_IBSFETCHCTL, val); @@ -261,6 +270,7 @@ static inline void op_amd_start_ibs(void) * op_amd_randomize_ibs_op() for details. */ val = clamp(val, 0x0081ULL, 0xFF80ULL); + ibs_config.max_cnt_op = val << 4; } else { /* * The start value is randomized with a @@ -268,9 +278,15 @@ static inline void op_amd_start_ibs(void) * with the half of the randomized range. Also * avoid underflows. */ - val = min(val + IBS_RANDOM_MAXCNT_OFFSET, - IBS_OP_MAX_CNT); + val += IBS_RANDOM_MAXCNT_OFFSET; + if (ibs_caps & IBS_CAPS_OPCNTEXT) + val = min(val, IBS_OP_MAX_CNT_EXT); + else + val = min(val, IBS_OP_MAX_CNT); + ibs_config.max_cnt_op = + (val - IBS_RANDOM_MAXCNT_OFFSET) << 4; } + val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT); val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0; val |= IBS_OP_ENABLE; ibs_state.ibs_op_ctl = val; -- cgit v1.2.2 From 747e94ae3d1b4c9bf5380e569f614eb9040b79e7 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 8 Oct 2010 13:51:48 -0400 Subject: ring-buffer: Make write slow path out of line Gcc inlines the slow path of the ring buffer write which can hurt performance. This patch simply forces the slow path function rb_move_tail() to always be a function. The ring_buffer_benchmark module with reader_disabled=1 shows that this patch changes the time to record an event from 135 ns to 132 ns. (3 ns or 2.22% improvement) Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index bca96377fd4e..0b88df849a59 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1823,7 +1823,10 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, local_sub(length, &tail_page->write); } -static struct ring_buffer_event * +/* + * This is the slow path, force gcc not to inline it. + */ +static noinline struct ring_buffer_event * rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, unsigned long length, unsigned long tail, struct buffer_page *tail_page, u64 *ts) @@ -1943,7 +1946,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, tail = write - length; /* See if we shot pass the end of this buffer page */ - if (write > BUF_PAGE_SIZE) + if (unlikely(write > BUF_PAGE_SIZE)) return rb_move_tail(cpu_buffer, length, tail, tail_page, ts); -- cgit v1.2.2 From e8bc43e84fada397af1b677b07dbf26e6ac78fcc Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Oct 2010 10:58:02 -0400 Subject: ring-buffer: Pass timestamp by value and not by reference The original code for the ring buffer had locations that modified the timestamp and that change was used by the callers. Now, the timestamp is not reused by the callers and there is no reason to pass it by reference. By changing the call to pass by value, lets gcc optimize the code a bit more where it can store the timestamp in a register and not worry about updating the reference. 
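A minimal sketch of the difference (illustrative stand-in types and names, not the ring-buffer code itself):

	#include <linux/types.h>	/* u64 */

	struct page_hdr { u64 time_stamp; };	/* stand-in for the page header */

	/* By reference: the callee writes through the pointer, so a caller that
	 * keeps using the value afterwards has to reload it from memory. */
	static void stamp_by_ref(struct page_hdr *p, u64 *ts)
	{
		p->time_stamp = *ts;
	}

	/* By value: ts is a private copy that can stay in a register for the
	 * whole call; nothing can update it behind the caller's back. */
	static void stamp_by_val(struct page_hdr *p, u64 ts)
	{
		p->time_stamp = ts;
	}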
Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 0b88df849a59..c8ce6bde7fa4 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1829,7 +1829,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, static noinline struct ring_buffer_event * rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, unsigned long length, unsigned long tail, - struct buffer_page *tail_page, u64 *ts) + struct buffer_page *tail_page, u64 ts) { struct buffer_page *commit_page = cpu_buffer->commit_page; struct ring_buffer *buffer = cpu_buffer->buffer; @@ -1912,8 +1912,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, * Nested commits always have zero deltas, so * just reread the time stamp */ - *ts = rb_time_stamp(buffer); - next_page->page->time_stamp = *ts; + ts = rb_time_stamp(buffer); + next_page->page->time_stamp = ts; } out_again: @@ -1932,7 +1932,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, static struct ring_buffer_event * __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, - unsigned type, unsigned long length, u64 *ts) + unsigned type, unsigned long length, u64 ts) { struct buffer_page *tail_page; struct ring_buffer_event *event; @@ -1965,7 +1965,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, * its timestamp. */ if (!tail) - tail_page->page->time_stamp = *ts; + tail_page->page->time_stamp = ts; return event; } @@ -2008,7 +2008,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, static int rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, - u64 *ts, u64 *delta) + u64 ts, u64 *delta) { struct ring_buffer_event *event; int ret; @@ -2016,7 +2016,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, WARN_ONCE(*delta > (1ULL << 59), KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n", (unsigned long long)*delta, - (unsigned long long)*ts, + (unsigned long long)ts, (unsigned long long)cpu_buffer->write_stamp); /* @@ -2051,7 +2051,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, event->array[0] = 0; } } - cpu_buffer->write_stamp = *ts; + cpu_buffer->write_stamp = ts; /* let the caller know this was the commit */ ret = 1; } else { @@ -2175,7 +2175,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, delta = diff; if (unlikely(test_time_stamp(delta))) { - commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); + commit = rb_add_time_stamp(cpu_buffer, ts, &delta); if (commit == -EBUSY) goto out_fail; @@ -2187,7 +2187,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, } get_event: - event = __rb_reserve_next(cpu_buffer, 0, length, &ts); + event = __rb_reserve_next(cpu_buffer, 0, length, ts); if (unlikely(PTR_ERR(event) == -EAGAIN)) goto again; -- cgit v1.2.2 From f25106aeab7408394b9dd707e5ecf557e269c723 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Oct 2010 12:40:12 -0400 Subject: ring-buffer: Pass delta by value and not by reference The delta between events is passed to the timestamp code by reference and the timestamp code will reset the value. But it can be reset from the caller. No need to pass it in by reference. By changing the call to pass by value, lets gcc optimize the code a bit more where it can store the delta in a register and not worry about updating the reference. 
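Condensed caller-side view of the change (lifted from the hunk below): the reset that used to happen through the pointer inside rb_add_time_stamp() is now done locally by the caller, so delta never has to live in memory:

	/* before: the callee zeroed the caller's delta via the pointer */
	commit = rb_add_time_stamp(cpu_buffer, ts, &delta);

	/* after: delta is passed by value and reset where the caller can see it */
	commit = rb_add_time_stamp(cpu_buffer, ts, delta);
	delta = 0;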
Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index c8ce6bde7fa4..3af77cd47f21 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2008,14 +2008,14 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, static int rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, - u64 ts, u64 *delta) + u64 ts, u64 delta) { struct ring_buffer_event *event; int ret; - WARN_ONCE(*delta > (1ULL << 59), + WARN_ONCE(delta > (1ULL << 59), KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n", - (unsigned long long)*delta, + (unsigned long long)delta, (unsigned long long)ts, (unsigned long long)cpu_buffer->write_stamp); @@ -2041,8 +2041,8 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, * and if we can't just make it zero. */ if (rb_event_index(event)) { - event->time_delta = *delta & TS_MASK; - event->array[0] = *delta >> TS_SHIFT; + event->time_delta = delta & TS_MASK; + event->array[0] = delta >> TS_SHIFT; } else { /* try to discard, since we do not need this */ if (!rb_try_to_discard(cpu_buffer, event)) { @@ -2064,8 +2064,6 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, ret = 0; } - *delta = 0; - return ret; } @@ -2175,7 +2173,9 @@ rb_reserve_next_event(struct ring_buffer *buffer, delta = diff; if (unlikely(test_time_stamp(delta))) { - commit = rb_add_time_stamp(cpu_buffer, ts, &delta); + commit = rb_add_time_stamp(cpu_buffer, ts, delta); + delta = 0; + if (commit == -EBUSY) goto out_fail; -- cgit v1.2.2 From afcc5c6872f0215d515a637041bb51f8691a8ea7 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Oct 2010 13:37:56 -0400 Subject: ring-buffer: Remove ring_buffer_event_time_delta() The ring_buffer_event_time_delta() static inline function does not have any users. Remove it. Signed-off-by: Steven Rostedt --- include/linux/ring_buffer.h | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 25b4f686d918..8d3a2486544d 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -62,18 +62,6 @@ enum ring_buffer_type { unsigned ring_buffer_event_length(struct ring_buffer_event *event); void *ring_buffer_event_data(struct ring_buffer_event *event); -/** - * ring_buffer_event_time_delta - return the delta timestamp of the event - * @event: the event to get the delta timestamp of - * - * The delta timestamp is the 27 bit timestamp since the last event. - */ -static inline unsigned -ring_buffer_event_time_delta(struct ring_buffer_event *event) -{ - return event->time_delta; -} - /* * ring_buffer_discard_commit will remove an event that has not * ben committed yet. If this is used, then ring_buffer_unlock_commit -- cgit v1.2.2 From 69d1b839f7eee347e357b3f6cce7f630cc6ff93d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 7 Oct 2010 18:18:05 -0400 Subject: ring-buffer: Bind time extend and data events together When the time between two timestamps is greater than 2^27 nanosecs (~134 ms) a time extend event is added that extends the time difference to 59 bits (~18 years). This is due to events only having a 27 bit field to store time. Currently this time extend is a separate event. We add it just before the event data that is being written to the buffer. But before the event data is committed, the event data can also be discarded (as with the case of filters). 
But because the time extend has already been committed, it will stay in the buffer. If lots of events are being filtered and no event is being written, then every 134ms a time extend can be added to the buffer without any data attached. To keep from filling the entire buffer with time extends, a time extend will never be the first event in a page because the page timestamp can be used. Time extends can only fill the rest of a page with some data at the beginning. This patch binds the time extend with the data. The difference here is that the time extend is not committed before the data is added. Instead, when a time extend is needed, the space reserved on the ring buffer is the time extend + the data event size. The time extend is added to the first part of the reserved block and the data is added to the second. The time extend event is passed back to the reserver, but since the reserver also uses a function to find the data portion of the reserved block, no changes to the ring buffer interface need to be made. When a commit is discarded, we now remove both the time extend and the event. With this approach no more than one time extend can be in the buffer in a row. Data must always follow a time extend. Thanks to Mathieu Desnoyers for suggesting this idea. Suggested-by: Mathieu Desnoyers Cc: Thomas Gleixner Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 266 ++++++++++++++++++++++++--------------------- 1 file changed, 142 insertions(+), 124 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 3af77cd47f21..f50f43107e93 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -224,6 +224,9 @@ enum { RB_LEN_TIME_STAMP = 16, }; +#define skip_time_extend(event) \ + ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) + static inline int rb_null_event(struct ring_buffer_event *event) { return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; @@ -248,8 +251,12 @@ rb_event_data_length(struct ring_buffer_event *event) return length + RB_EVNT_HDR_SIZE; } -/* inline for ring buffer fast paths */ -static unsigned +/* + * Return the length of the given event. Will return + * the length of the time extend if the event is a + * time extend. + */ +static inline unsigned rb_event_length(struct ring_buffer_event *event) { switch (event->type_len) { @@ -274,13 +281,41 @@ rb_event_length(struct ring_buffer_event *event) return 0; } +/* + * Return total length of time extend and data, + * or just the event length for all other events. + */ +static inline unsigned +rb_event_ts_length(struct ring_buffer_event *event) +{ + unsigned len = 0; + + if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { + /* time extends include the data event after it */ + len = RB_LEN_TIME_EXTEND; + event = skip_time_extend(event); + } + return len + rb_event_length(event); +} + /** * ring_buffer_event_length - return the length of the event * @event: the event to get the length of + * + * Returns the size of the data load of a data event. + * If the event is something other than a data event, it + * returns the size of the event itself. With the exception + * of a TIME EXTEND, where it still returns the size of the + * data load of the data event after it. 
*/ unsigned ring_buffer_event_length(struct ring_buffer_event *event) { - unsigned length = rb_event_length(event); + unsigned length; + + if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) + event = skip_time_extend(event); + + length = rb_event_length(event); if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) return length; length -= RB_EVNT_HDR_SIZE; @@ -294,6 +329,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_length); static void * rb_event_data(struct ring_buffer_event *event) { + if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) + event = skip_time_extend(event); BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); /* If length is in len field, then array[0] has the data */ if (event->type_len) @@ -1546,6 +1583,25 @@ static void rb_inc_iter(struct ring_buffer_iter *iter) iter->head = 0; } +/* Slow path, do not inline */ +static noinline struct ring_buffer_event * +rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) +{ + event->type_len = RINGBUF_TYPE_TIME_EXTEND; + + /* Not the first event on the page? */ + if (rb_event_index(event)) { + event->time_delta = delta & TS_MASK; + event->array[0] = delta >> TS_SHIFT; + } else { + /* nope, just zero it */ + event->time_delta = 0; + event->array[0] = 0; + } + + return skip_time_extend(event); +} + /** * ring_buffer_update_event - update event type and data * @event: the even to update @@ -1558,28 +1614,31 @@ static void rb_inc_iter(struct ring_buffer_iter *iter) * data field. */ static void -rb_update_event(struct ring_buffer_event *event, - unsigned type, unsigned length) +rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, + struct ring_buffer_event *event, unsigned length, + int add_timestamp, u64 delta) { - event->type_len = type; - - switch (type) { - - case RINGBUF_TYPE_PADDING: - case RINGBUF_TYPE_TIME_EXTEND: - case RINGBUF_TYPE_TIME_STAMP: - break; + /* Only a commit updates the timestamp */ + if (unlikely(!rb_event_is_commit(cpu_buffer, event))) + delta = 0; - case 0: - length -= RB_EVNT_HDR_SIZE; - if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) - event->array[0] = length; - else - event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); - break; - default: - BUG(); + /* + * If we need to add a timestamp, then we + * add it to the start of the resevered space. + */ + if (unlikely(add_timestamp)) { + event = rb_add_time_stamp(event, delta); + length -= RB_LEN_TIME_EXTEND; + delta = 0; } + + event->time_delta = delta; + length -= RB_EVNT_HDR_SIZE; + if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { + event->type_len = 0; + event->array[0] = length; + } else + event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); } /* @@ -1932,12 +1991,21 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, static struct ring_buffer_event * __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, - unsigned type, unsigned long length, u64 ts) + unsigned long length, u64 ts, + u64 delta, int add_timestamp) { struct buffer_page *tail_page; struct ring_buffer_event *event; unsigned long tail, write; + /* + * If the time delta since the last event is too big to + * hold in the time field of the event, then we append a + * TIME EXTEND event ahead of the data event. 
+ */ + if (unlikely(add_timestamp)) + length += RB_LEN_TIME_EXTEND; + tail_page = cpu_buffer->tail_page; write = local_add_return(length, &tail_page->write); @@ -1954,11 +2022,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, event = __rb_page_index(tail_page, tail); kmemcheck_annotate_bitfield(event, bitfield); - rb_update_event(event, type, length); + rb_update_event(cpu_buffer, event, length, add_timestamp, delta); - /* The passed in type is zero for DATA */ - if (likely(!type)) - local_inc(&tail_page->entries); + local_inc(&tail_page->entries); /* * If this is the first commit on the page, then update @@ -1980,7 +2046,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, unsigned long addr; new_index = rb_event_index(event); - old_index = new_index + rb_event_length(event); + old_index = new_index + rb_event_ts_length(event); addr = (unsigned long)event; addr &= PAGE_MASK; @@ -2006,67 +2072,6 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, return 0; } -static int -rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, - u64 ts, u64 delta) -{ - struct ring_buffer_event *event; - int ret; - - WARN_ONCE(delta > (1ULL << 59), - KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n", - (unsigned long long)delta, - (unsigned long long)ts, - (unsigned long long)cpu_buffer->write_stamp); - - /* - * The delta is too big, we to add a - * new timestamp. - */ - event = __rb_reserve_next(cpu_buffer, - RINGBUF_TYPE_TIME_EXTEND, - RB_LEN_TIME_EXTEND, - ts); - if (!event) - return -EBUSY; - - if (PTR_ERR(event) == -EAGAIN) - return -EAGAIN; - - /* Only a commited time event can update the write stamp */ - if (rb_event_is_commit(cpu_buffer, event)) { - /* - * If this is the first on the page, then it was - * updated with the page itself. Try to discard it - * and if we can't just make it zero. - */ - if (rb_event_index(event)) { - event->time_delta = delta & TS_MASK; - event->array[0] = delta >> TS_SHIFT; - } else { - /* try to discard, since we do not need this */ - if (!rb_try_to_discard(cpu_buffer, event)) { - /* nope, just zero it */ - event->time_delta = 0; - event->array[0] = 0; - } - } - cpu_buffer->write_stamp = ts; - /* let the caller know this was the commit */ - ret = 1; - } else { - /* Try to discard the event */ - if (!rb_try_to_discard(cpu_buffer, event)) { - /* Darn, this is just wasted space */ - event->time_delta = 0; - event->array[0] = 0; - } - ret = 0; - } - - return ret; -} - static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) { local_inc(&cpu_buffer->committing); @@ -2111,9 +2116,9 @@ rb_reserve_next_event(struct ring_buffer *buffer, unsigned long length) { struct ring_buffer_event *event; - u64 ts, delta = 0; - int commit = 0; + u64 ts, delta; int nr_loops = 0; + int add_timestamp; rb_start_commit(cpu_buffer); @@ -2134,6 +2139,9 @@ rb_reserve_next_event(struct ring_buffer *buffer, length = rb_calculate_event_length(length); again: + add_timestamp = 0; + delta = 0; + /* * We allow for interrupts to reenter here and do a trace. * If one does, it will cause this original code to loop @@ -2172,33 +2180,24 @@ rb_reserve_next_event(struct ring_buffer *buffer, delta = diff; if (unlikely(test_time_stamp(delta))) { - - commit = rb_add_time_stamp(cpu_buffer, ts, delta); - delta = 0; - - if (commit == -EBUSY) - goto out_fail; - - if (commit == -EAGAIN) - goto again; - - RB_WARN_ON(cpu_buffer, commit < 0); + WARN_ONCE(delta > (1ULL << 59), + KERN_WARNING "Delta way too big! 
%llu ts=%llu write stamp = %llu\n", + (unsigned long long)delta, + (unsigned long long)ts, + (unsigned long long)cpu_buffer->write_stamp); + add_timestamp = 1; } } get_event: - event = __rb_reserve_next(cpu_buffer, 0, length, ts); + event = __rb_reserve_next(cpu_buffer, length, ts, + delta, add_timestamp); if (unlikely(PTR_ERR(event) == -EAGAIN)) goto again; if (!event) goto out_fail; - if (!rb_event_is_commit(cpu_buffer, event)) - delta = 0; - - event->time_delta = delta; - return event; out_fail: @@ -2311,12 +2310,28 @@ static void rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) { + u64 delta; + /* * The event first in the commit queue updates the * time stamp. */ - if (rb_event_is_commit(cpu_buffer, event)) - cpu_buffer->write_stamp += event->time_delta; + if (rb_event_is_commit(cpu_buffer, event)) { + /* + * A commit event that is first on a page + * updates the write timestamp with the page stamp + */ + if (!rb_event_index(event)) + cpu_buffer->write_stamp = + cpu_buffer->commit_page->page->time_stamp; + else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { + delta = event->array[0]; + delta <<= TS_SHIFT; + delta += event->time_delta; + cpu_buffer->write_stamp += delta; + } else + cpu_buffer->write_stamp += event->time_delta; + } } static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, @@ -2356,6 +2371,9 @@ EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); static inline void rb_event_discard(struct ring_buffer_event *event) { + if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) + event = skip_time_extend(event); + /* array[0] holds the actual length for the discarded event */ event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; event->type_len = RINGBUF_TYPE_PADDING; @@ -3043,12 +3061,12 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, again: /* - * We repeat when a timestamp is encountered. It is possible - * to get multiple timestamps from an interrupt entering just - * as one timestamp is about to be written, or from discarded - * commits. The most that we can have is the number on a single page. + * We repeat when a time extend is encountered. + * Since the time extend is always attached to a data event, + * we should never loop more than once. + * (We never hit the following condition more than twice). */ - if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE)) + if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) return NULL; reader = rb_get_reader_page(cpu_buffer); @@ -3124,14 +3142,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) return NULL; /* - * We repeat when a timestamp is encountered. - * We can get multiple timestamps by nested interrupts or also - * if filtering is on (discarding commits). Since discarding - * commits can be frequent we can get a lot of timestamps. - * But we limit them by not adding timestamps if they begin - * at the start of a page. + * We repeat when a time extend is encountered. + * Since the time extend is always attached to a data event, + * we should never loop more than once. + * (We never hit the following condition more than twice). 
*/ - if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE)) + if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) return NULL; if (rb_per_cpu_empty(cpu_buffer)) @@ -3829,7 +3845,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer, if (len > (commit - read)) len = (commit - read); - size = rb_event_length(event); + /* Always keep the time extend and data together */ + size = rb_event_ts_length(event); if (len < size) goto out_unlock; @@ -3851,7 +3868,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer, break; event = rb_reader_event(cpu_buffer); - size = rb_event_length(event); + /* Always keep the time extend and data together */ + size = rb_event_ts_length(event); } while (len > size); /* update bpage */ -- cgit v1.2.2 From 140ff89127c74b1b1c1b0152a36ea3720ccf6bc3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 8 Oct 2010 10:50:30 -0400 Subject: ring-buffer: Remove condition to add timestamp in fast path There's a condition to check if we should add a time extend or not in the fast path. But this condition is racey (in the sense that we can add a unnecessary time extend, but nothing that can break anything). We later check if the time or event time delta should be zero or have real data in it (not racey), making this first check redundant. This check may help save space once in a while, but really is not worth the hassle to try to save some space that happens at most 134 ms at a time. Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index f50f43107e93..d9f3e7a82137 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2119,6 +2119,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, u64 ts, delta; int nr_loops = 0; int add_timestamp; + u64 diff; rb_start_commit(cpu_buffer); @@ -2155,29 +2156,13 @@ rb_reserve_next_event(struct ring_buffer *buffer, goto out_fail; ts = rb_time_stamp(cpu_buffer->buffer); + diff = ts - cpu_buffer->write_stamp; - /* - * Only the first commit can update the timestamp. - * Yes there is a race here. If an interrupt comes in - * just after the conditional and it traces too, then it - * will also check the deltas. More than one timestamp may - * also be made. But only the entry that did the actual - * commit will be something other than zero. - */ - if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page && - rb_page_write(cpu_buffer->tail_page) == - rb_commit_index(cpu_buffer))) { - u64 diff; - - diff = ts - cpu_buffer->write_stamp; - - /* make sure this diff is calculated here */ - barrier(); - - /* Did the write stamp get updated already? */ - if (unlikely(ts < cpu_buffer->write_stamp)) - goto get_event; + /* make sure this diff is calculated here */ + barrier(); + /* Did the write stamp get updated already? */ + if (likely(ts >= cpu_buffer->write_stamp)) { delta = diff; if (unlikely(test_time_stamp(delta))) { WARN_ONCE(delta > (1ULL << 59), @@ -2189,7 +2174,6 @@ rb_reserve_next_event(struct ring_buffer *buffer, } } - get_event: event = __rb_reserve_next(cpu_buffer, length, ts, delta, add_timestamp); if (unlikely(PTR_ERR(event) == -EAGAIN)) -- cgit v1.2.2 From d9abde2138e0a00a0d7e44676928efa0ef629d48 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 19 Oct 2010 13:17:08 -0400 Subject: ring-buffer: Micro-optimize with some strategic inlining By using inline and noinline, we are able to make the fast path of recording an event 4% faster. 
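The pattern in reduced form (illustrative names and limit, not the actual trace_recursive_*() code): force the hot check inline and push the rare failure handling out of line so it stays off the fast path:

	#include <linux/compiler.h>	/* inline, noinline, likely() */

	#define DEPTH_LIMIT	16	/* illustrative limit */

	static noinline void report_recursion(void)
	{
		/* cold path: disable tracing, print warnings, ... */
	}

	static inline int check_recursion(int depth)
	{
		if (likely(depth < DEPTH_LIMIT))
			return 0;	/* hot path: one compare and a return */
		report_recursion();
		return -1;
	}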
Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d9f3e7a82137..f5007d0d932d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2078,7 +2078,7 @@ static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) local_inc(&cpu_buffer->commits); } -static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) +static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) { unsigned long commits; @@ -2193,13 +2193,9 @@ rb_reserve_next_event(struct ring_buffer *buffer, #define TRACE_RECURSIVE_DEPTH 16 -static int trace_recursive_lock(void) +/* Keep this code out of the fast path cache */ +static noinline void trace_recursive_fail(void) { - current->trace_recursion++; - - if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH)) - return 0; - /* Disable all tracing before we do anything else */ tracing_off_permanent(); @@ -2211,10 +2207,21 @@ static int trace_recursive_lock(void) in_nmi()); WARN_ON_ONCE(1); +} + +static inline int trace_recursive_lock(void) +{ + current->trace_recursion++; + + if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH)) + return 0; + + trace_recursive_fail(); + return -1; } -static void trace_recursive_unlock(void) +static inline void trace_recursive_unlock(void) { WARN_ON_ONCE(!current->trace_recursion); -- cgit v1.2.2 From b8b2663bd7c9da04ac804659b9f617c199d0252c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 19 Oct 2010 13:23:25 -0400 Subject: ring-buffer: Remove unused macro RB_TIMESTAMPS_PER_PAGE With the binding of time extends to events we no longer need to use the macro RB_TIMESTAMPS_PER_PAGE. Remove it. Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index f5007d0d932d..ad25490f8b40 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -441,9 +441,6 @@ static inline int test_time_stamp(u64 delta) /* Max payload is BUF_PAGE_SIZE - header (8bytes) */ #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) -/* Max number of timestamps that can fit on a page */ -#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND) - int ring_buffer_print_page_header(struct trace_seq *s) { struct buffer_data_page field; -- cgit v1.2.2 From dd49a38cf30944be27892c10b1c0e5b3fa73bcb2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Oct 2010 21:51:26 -0400 Subject: tracing: Do not limit the size of the number of CPU buffers The tracing per_cpu buffers were limited to 999 CPUs for a mear savings in stack space of a char array. Up the array to 30 characters which is more than enough to hold a 64 bit number. 
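The sizing in short form (assuming long is at most 64 bits):

	/* Worst case for "cpu%ld":
	 *   3 ("cpu") + at most 20 decimal digits (or 19 digits plus a sign)
	 *   + 1 ('\0') = 24 bytes,
	 * so the 30-byte buffer always fits and snprintf() never truncates. */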
Reported-by: Robin Holt Suggested-by: Ingo Molnar Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 001bcd2ccf4a..82d9b8106cd0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3996,13 +3996,9 @@ static void tracing_init_debugfs_percpu(long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(); struct dentry *d_cpu; - /* strlen(cpu) + MAX(log10(cpu)) + '\0' */ - char cpu_dir[7]; + char cpu_dir[30]; /* 30 characters should be more than enough */ - if (cpu > 999 || cpu < 0) - return; - - sprintf(cpu_dir, "cpu%ld", cpu); + snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = debugfs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); -- cgit v1.2.2 From f4bc6bb2d562703eafc895c37e7be20906de139d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 19 Oct 2010 15:00:13 +0200 Subject: tracing: Cleanup the convoluted softirq tracepoints With the addition of trace_softirq_raise() the softirq tracepoint got even more convoluted. Why the tracepoints take two pointers to assign an integer is beyond my comprehension. But adding an extra case which treats the first pointer as an unsigned long when the second pointer is NULL including the back and forth type casting is just horrible. Convert the softirq tracepoints to take a single unsigned int argument for the softirq vector number and fix the call sites. Signed-off-by: Thomas Gleixner LKML-Reference: Acked-by: Peter Zijlstra Acked-by: mathieu.desnoyers@efficios.com Cc: Frederic Weisbecker Cc: Steven Rostedt --- include/linux/interrupt.h | 2 +- include/trace/events/irq.h | 54 +++++++++++++++++----------------------------- kernel/softirq.c | 16 ++++++++------ 3 files changed, 30 insertions(+), 42 deletions(-) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 531495db1708..0ac194946fec 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -410,7 +410,7 @@ extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); static inline void __raise_softirq_irqoff(unsigned int nr) { - trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL); + trace_softirq_raise(nr); or_softirq_pending(1UL << nr); } diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index 6fa7cbab7d93..1c09820df585 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -86,76 +86,62 @@ TRACE_EVENT(irq_handler_exit, DECLARE_EVENT_CLASS(softirq, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec), + TP_ARGS(vec_nr), TP_STRUCT__entry( - __field( int, vec ) + __field( unsigned int, vec ) ), TP_fast_assign( - if (vec) - __entry->vec = (int)(h - vec); - else - __entry->vec = (int)(long)h; + __entry->vec = vec_nr; ), - TP_printk("vec=%d [action=%s]", __entry->vec, + TP_printk("vec=%u [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); /** * softirq_entry - called immediately before the softirq handler - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array + * @vec_nr: softirq vector number * - * The @h parameter, contains a pointer to the struct softirq_action - * which has a pointer to the action handler that is called. By subtracting - * the @vec pointer from the @h pointer, we can determine the softirq - * number. 
Also, when used in combination with the softirq_exit tracepoint - * we can determine the softirq latency. + * When used in combination with the softirq_exit tracepoint + * we can determine the softirq handler runtine. */ DEFINE_EVENT(softirq, softirq_entry, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec) + TP_ARGS(vec_nr) ); /** * softirq_exit - called immediately after the softirq handler returns - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array + * @vec_nr: softirq vector number * - * The @h parameter contains a pointer to the struct softirq_action - * that has handled the softirq. By subtracting the @vec pointer from - * the @h pointer, we can determine the softirq number. Also, when used in - * combination with the softirq_entry tracepoint we can determine the softirq - * latency. + * When used in combination with the softirq_entry tracepoint + * we can determine the softirq handler runtine. */ DEFINE_EVENT(softirq, softirq_exit, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec) + TP_ARGS(vec_nr) ); /** * softirq_raise - called immediately when a softirq is raised - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array + * @vec_nr: softirq vector number * - * The @h parameter contains a pointer to the softirq vector number which is - * raised. @vec is NULL and it means @h includes vector number not - * softirq_action. When used in combination with the softirq_entry tracepoint - * we can determine the softirq raise latency. + * When used in combination with the softirq_entry tracepoint + * we can determine the softirq raise to run latency. */ DEFINE_EVENT(softirq, softirq_raise, - TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + TP_PROTO(unsigned int vec_nr), - TP_ARGS(h, vec) + TP_ARGS(vec_nr) ); #endif /* _TRACE_IRQ_H */ diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b1a73a..b3cb1dc15795 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -212,18 +212,20 @@ restart: do { if (pending & 1) { + unsigned int vec_nr = h - softirq_vec; int prev_count = preempt_count(); - kstat_incr_softirqs_this_cpu(h - softirq_vec); - trace_softirq_entry(h, softirq_vec); + kstat_incr_softirqs_this_cpu(vec_nr); + + trace_softirq_entry(vec_nr); h->action(h); - trace_softirq_exit(h, softirq_vec); + trace_softirq_exit(vec_nr); if (unlikely(prev_count != preempt_count())) { - printk(KERN_ERR "huh, entered softirq %td %s %p" + printk(KERN_ERR "huh, entered softirq %u %s %p" "with preempt_count %08x," - " exited with %08x?\n", h - softirq_vec, - softirq_to_name[h - softirq_vec], - h->action, prev_count, preempt_count()); + " exited with %08x?\n", vec_nr, + softirq_to_name[vec_nr], h->action, + prev_count, preempt_count()); preempt_count() = prev_count; } -- cgit v1.2.2 From 4046b8bb5ffae9ee916e504cdadee804f10e0c56 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 21 Oct 2010 19:13:02 +0900 Subject: perf probe: Fix type searching Fix to get the actual type die of variables by using dwarf_attr_integrate() which gets attribute from die even if the type die is connected by DW_AT_abstract_origin. 
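For reference, the behavioural difference between the two libdw lookups (return-value handling elided):

	Dwarf_Attribute attr;

	/* Plain lookup: only sees attributes present on vr_die itself, so it
	 * misses DW_AT_type when the DIE merely points at its abstract origin. */
	dwarf_attr(vr_die, DW_AT_type, &attr);

	/* Integrating lookup: also follows DW_AT_abstract_origin (and
	 * DW_AT_specification) and finds DW_AT_type on the referenced DIE. */
	dwarf_attr_integrate(vr_die, DW_AT_type, &attr);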
Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar LKML-Reference: <20101021101302.3542.38549.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/probe-finder.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 32b81f707ff5..a2d1f790d937 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -160,26 +160,35 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname) return name ? (strcmp(tname, name) == 0) : false; } +/* Get type die */ +static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) +{ + Dwarf_Attribute attr; + + if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) && + dwarf_formref_die(&attr, die_mem)) + return die_mem; + else + return NULL; +} + /* Get type die, but skip qualifiers and typedef */ static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) { - Dwarf_Attribute attr; int tag; do { - if (dwarf_attr(vr_die, DW_AT_type, &attr) == NULL || - dwarf_formref_die(&attr, die_mem) == NULL) - return NULL; - - tag = dwarf_tag(die_mem); - vr_die = die_mem; + vr_die = die_get_type(vr_die, die_mem); + if (!vr_die) + break; + tag = dwarf_tag(vr_die); } while (tag == DW_TAG_const_type || tag == DW_TAG_restrict_type || tag == DW_TAG_volatile_type || tag == DW_TAG_shared_type || tag == DW_TAG_typedef); - return die_mem; + return vr_die; } static bool die_is_signed_type(Dwarf_Die *tp_die) -- cgit v1.2.2 From 378eeaad3e1cfea7f6614018fb335de93df2ba1f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 21 Oct 2010 19:13:09 +0900 Subject: perf probe: Fix local variable searching loop Fix to check the die's address and search into the die only if it has given address. This will avoid finding wrong variables in wrong basic block. 
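A contrived example of what can go wrong without the address check — two distinct locals share a name in sibling lexical blocks, and only the block whose PC range covers the probe address holds the right one:

	extern void use(long v);

	void f(int x)
	{
		if (x) {
			int v = 1;	/* "v" valid only for this block's PC range */
			use(v);
		} else {
			long v = 2;	/* a different "v" in a sibling block */
			use(v);
		}
	}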
Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar LKML-Reference: <20101021101309.3542.46434.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/probe-finder.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index a2d1f790d937..abcaec555204 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -329,25 +329,35 @@ static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem); } +struct __find_variable_param { + const char *name; + Dwarf_Addr addr; +}; + static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) { - const char *name = data; + struct __find_variable_param *fvp = data; int tag; tag = dwarf_tag(die_mem); if ((tag == DW_TAG_formal_parameter || tag == DW_TAG_variable) && - die_compare_name(die_mem, name)) + die_compare_name(die_mem, fvp->name)) return DIE_FIND_CB_FOUND; - return DIE_FIND_CB_CONTINUE; + if (dwarf_haspc(die_mem, fvp->addr)) + return DIE_FIND_CB_CONTINUE; + else + return DIE_FIND_CB_SIBLING; } -/* Find a variable called 'name' */ -static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name, - Dwarf_Die *die_mem) +/* Find a variable called 'name' at given address */ +static Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name, + Dwarf_Addr addr, Dwarf_Die *die_mem) { - return die_find_child(sp_die, __die_find_variable_cb, (void *)name, + struct __find_variable_param fvp = { .name = name, .addr = addr}; + + return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp, die_mem); } @@ -731,7 +741,7 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); /* Search child die for local variables and parameters. */ - if (die_find_variable(sp_die, pf->pvar->var, &vr_die)) + if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die)) ret = convert_variable(&vr_die, pf); else { /* Search upper class */ -- cgit v1.2.2 From 632941c4f8fbd5b90dcb1672cd0422dfd7332bc9 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 21 Oct 2010 19:13:16 +0900 Subject: perf probe: Support global variables Allow users to set external defined global variables as event arguments (e.g. jiffies). 
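Example usage once this is supported (illustrative probe point; needs a vmlinux with debuginfo):

	# perf probe --add 'do_timer jiffies'
	# perf record -e probe:do_timer -a sleep 1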
Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar LKML-Reference: <20101021101316.3542.1999.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/probe-finder.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index abcaec555204..c6fe80ebb262 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -406,6 +406,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) struct probe_trace_arg *tvar = pf->tvar; int ret; + if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL) + goto static_var; + /* TODO: handle more than 1 exprs */ if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL || dwarf_getlocation_addr(&attr, pf->addr, &op, &nops, 1) <= 0 || @@ -417,6 +420,7 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) } if (op->atom == DW_OP_addr) { +static_var: /* Static variables on memory (not stack), make @varname */ ret = strlen(dwarf_diename(vr_die)); tvar->value = zalloc(ret + 2); @@ -746,17 +750,22 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) else { /* Search upper class */ nscopes = dwarf_getscopes_die(sp_die, &scopes); - if (nscopes > 0) { - ret = dwarf_getscopevar(scopes, nscopes, pf->pvar->var, - 0, NULL, 0, 0, &vr_die); - if (ret >= 0) + while (nscopes-- > 1) { + pr_debug("Searching variables in %s\n", + dwarf_diename(&scopes[nscopes])); + /* We should check this scope, so give dummy address */ + if (die_find_variable_at(&scopes[nscopes], + pf->pvar->var, 0, + &vr_die)) { ret = convert_variable(&vr_die, pf); - else - ret = -ENOENT; + goto found; + } + } + if (scopes) free(scopes); - } else - ret = -ENOENT; + ret = -ENOENT; } +found: if (ret < 0) pr_warning("Failed to find '%s' in this function.\n", pf->pvar->var); -- cgit v1.2.2 From cf6eb489e5c04c8f8d5fd7bf90b8346c987688bc Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 21 Oct 2010 19:13:23 +0900 Subject: perf probe: Show accessible local variables Add -V (--vars) option for listing accessible local variables at given probe point. This will help finding which local variables are available for event arguments. e.g.) 
# perf probe -V call_timer_fn:23 Available variables at call_timer_fn:23 @ function_type* fn int preempt_count long unsigned int data struct list_head work_list struct list_head* head struct timer_list* timer struct tvec_base* base Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Frederic Weisbecker LKML-Reference: <20101021101323.3542.40282.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-probe.txt | 7 + tools/perf/builtin-probe.c | 61 ++++- tools/perf/util/probe-event.c | 72 ++++++ tools/perf/util/probe-event.h | 8 + tools/perf/util/probe-finder.c | 412 ++++++++++++++++++++++++-------- tools/perf/util/probe-finder.h | 25 +- 6 files changed, 480 insertions(+), 105 deletions(-) diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index 27d52dae5a43..72f5d9e21432 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt @@ -17,6 +17,8 @@ or 'perf probe' --list or 'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]' +or +'perf probe' --vars='PROBEPOINT' DESCRIPTION ----------- @@ -57,6 +59,11 @@ OPTIONS Show source code lines which can be probed. This needs an argument which specifies a range of the source code. (see LINE SYNTAX for detail) +-V:: +--vars=:: + Show available local variables at given probe point. The argument + syntax is same as PROBE SYNTAX, but NO ARGs. + -f:: --force:: Forcibly add events with existing name. diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 199d5e19554f..91bb6cf4e025 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -50,6 +50,8 @@ static struct { bool list_events; bool force_add; bool show_lines; + bool show_vars; + bool mod_events; int nevents; struct perf_probe_event events[MAX_PROBES]; struct strlist *dellist; @@ -57,7 +59,6 @@ static struct { int max_probe_points; } params; - /* Parse an event definition. Note that any error must die. 
*/ static int parse_probe_event(const char *str) { @@ -92,6 +93,7 @@ static int parse_probe_event_argv(int argc, const char **argv) len = 0; for (i = 0; i < argc; i++) len += sprintf(&buf[len], "%s ", argv[i]); + params.mod_events = true; ret = parse_probe_event(buf); free(buf); return ret; @@ -100,9 +102,10 @@ static int parse_probe_event_argv(int argc, const char **argv) static int opt_add_probe_event(const struct option *opt __used, const char *str, int unset __used) { - if (str) + if (str) { + params.mod_events = true; return parse_probe_event(str); - else + } else return 0; } @@ -110,6 +113,7 @@ static int opt_del_probe_event(const struct option *opt __used, const char *str, int unset __used) { if (str) { + params.mod_events = true; if (!params.dellist) params.dellist = strlist__new(true, NULL); strlist__add(params.dellist, str); @@ -130,6 +134,25 @@ static int opt_show_lines(const struct option *opt __used, return ret; } + +static int opt_show_vars(const struct option *opt __used, + const char *str, int unset __used) +{ + struct perf_probe_event *pev = ¶ms.events[params.nevents]; + int ret; + + if (!str) + return 0; + + ret = parse_probe_event(str); + if (!ret && pev->nargs != 0) { + pr_err(" Error: '--vars' doesn't accept arguments.\n"); + return -EINVAL; + } + params.show_vars = true; + + return ret; +} #endif static const char * const probe_usage[] = { @@ -139,6 +162,7 @@ static const char * const probe_usage[] = { "perf probe --list", #ifdef DWARF_SUPPORT "perf probe --line 'LINEDESC'", + "perf probe --vars 'PROBEPOINT'", #endif NULL }; @@ -180,6 +204,9 @@ static const struct option options[] = { OPT_CALLBACK('L', "line", NULL, "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]", "Show source code lines.", opt_show_lines), + OPT_CALLBACK('V', "vars", NULL, + "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT", + "Show accessible variables on PROBEDEF", opt_show_vars), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_STRING('s', "source", &symbol_conf.source_prefix, @@ -217,7 +244,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) usage_with_options(probe_usage, options); if (params.list_events) { - if (params.nevents != 0 || params.dellist) { + if (params.mod_events) { pr_err(" Error: Don't use --list with --add/--del.\n"); usage_with_options(probe_usage, options); } @@ -225,6 +252,10 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) pr_err(" Error: Don't use --list with --line.\n"); usage_with_options(probe_usage, options); } + if (params.show_vars) { + pr_err(" Error: Don't use --list with --vars.\n"); + usage_with_options(probe_usage, options); + } ret = show_perf_probe_events(); if (ret < 0) pr_err(" Error: Failed to show event list. (%d)\n", @@ -234,9 +265,13 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) #ifdef DWARF_SUPPORT if (params.show_lines) { - if (params.nevents != 0 || params.dellist) { - pr_warning(" Error: Don't use --line with" - " --add/--del.\n"); + if (params.mod_events) { + pr_err(" Error: Don't use --line with" + " --add/--del.\n"); + usage_with_options(probe_usage, options); + } + if (params.show_vars) { + pr_err(" Error: Don't use --line with --vars.\n"); usage_with_options(probe_usage, options); } @@ -245,6 +280,18 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) pr_err(" Error: Failed to show lines. 
(%d)\n", ret); return ret; } + if (params.show_vars) { + if (params.mod_events) { + pr_err(" Error: Don't use --vars with" + " --add/--del.\n"); + usage_with_options(probe_usage, options); + } + ret = show_available_vars(params.events, params.nevents, + params.max_probe_points); + if (ret < 0) + pr_err(" Error: Failed to show vars. (%d)\n", ret); + return ret; + } #endif if (params.dellist) { diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index fcc16e4349df..288ebe8279d2 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -378,6 +378,72 @@ end: return ret; } +static int show_available_vars_at(int fd, struct perf_probe_event *pev, + int max_vls) +{ + char *buf; + int ret, i; + struct str_node *node; + struct variable_list *vls = NULL, *vl; + + buf = synthesize_perf_probe_point(&pev->point); + if (!buf) + return -EINVAL; + pr_debug("Searching variables at %s\n", buf); + + ret = find_available_vars_at(fd, pev, &vls, max_vls); + if (ret > 0) { + /* Some variables were found */ + fprintf(stdout, "Available variables at %s\n", buf); + for (i = 0; i < ret; i++) { + vl = &vls[i]; + /* + * A probe point might be converted to + * several trace points. + */ + fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol, + vl->point.offset); + free(vl->point.symbol); + if (vl->vars) { + strlist__for_each(node, vl->vars) + fprintf(stdout, "\t\t%s\n", node->s); + strlist__delete(vl->vars); + } else + fprintf(stdout, "(No variables)\n"); + } + free(vls); + } else + pr_err("Failed to find variables at %s (%d)\n", buf, ret); + + free(buf); + return ret; +} + +/* Show available variables on given probe point */ +int show_available_vars(struct perf_probe_event *pevs, int npevs, + int max_vls) +{ + int i, fd, ret = 0; + + ret = init_vmlinux(); + if (ret < 0) + return ret; + + fd = open_vmlinux(); + if (fd < 0) { + pr_warning("Failed to open debuginfo file.\n"); + return fd; + } + + setup_pager(); + + for (i = 0; i < npevs && ret >= 0; i++) + ret = show_available_vars_at(fd, &pevs[i], max_vls); + + close(fd); + return ret; +} + #else /* !DWARF_SUPPORT */ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, @@ -409,6 +475,12 @@ int show_line_range(struct line_range *lr __unused) return -ENOSYS; } +int show_available_vars(struct perf_probe_event *pevs __unused, + int npevs __unused, int max_probe_points __unused) +{ + pr_warning("Debuginfo-analysis is not supported.\n"); + return -ENOSYS; +} #endif int parse_line_range_desc(const char *arg, struct line_range *lr) diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 5af39243a25b..727a7fe81a02 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -90,6 +90,12 @@ struct line_range { struct list_head line_list; /* Visible lines */ }; +/* List of variables */ +struct variable_list { + struct probe_trace_point point; /* Actual probepoint */ + struct strlist *vars; /* Available variables */ +}; + /* Command string to events */ extern int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev); @@ -115,6 +121,8 @@ extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, extern int del_perf_probe_events(struct strlist *dellist); extern int show_perf_probe_events(void); extern int show_line_range(struct line_range *lr); +extern int show_available_vars(struct perf_probe_event *pevs, int npevs, + int max_probe_points); /* Maximum index number of event-name postfix */ diff --git a/tools/perf/util/probe-finder.c 
b/tools/perf/util/probe-finder.c index c6fe80ebb262..986027fa495f 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -172,8 +172,8 @@ static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) return NULL; } -/* Get type die, but skip qualifiers and typedef */ -static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) +/* Get a type die, but skip qualifiers */ +static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) { int tag; @@ -185,8 +185,17 @@ static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) } while (tag == DW_TAG_const_type || tag == DW_TAG_restrict_type || tag == DW_TAG_volatile_type || - tag == DW_TAG_shared_type || - tag == DW_TAG_typedef); + tag == DW_TAG_shared_type); + + return vr_die; +} + +/* Get a type die, but skip qualifiers and typedef */ +static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) +{ + do { + vr_die = __die_get_real_type(vr_die, die_mem); + } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef); return vr_die; } @@ -380,6 +389,60 @@ static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, die_mem); } +/* Get the name of given variable DIE */ +static int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) +{ + Dwarf_Die type; + int tag, ret, ret2; + const char *tmp = ""; + + if (__die_get_real_type(vr_die, &type) == NULL) + return -ENOENT; + + tag = dwarf_tag(&type); + if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type) + tmp = "*"; + else if (tag == DW_TAG_subroutine_type) { + /* Function pointer */ + ret = snprintf(buf, len, "(function_type)"); + return (ret >= len) ? -E2BIG : ret; + } else { + if (!dwarf_diename(&type)) + return -ENOENT; + if (tag == DW_TAG_union_type) + tmp = "union "; + else if (tag == DW_TAG_structure_type) + tmp = "struct "; + /* Write a base name */ + ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type)); + return (ret >= len) ? -E2BIG : ret; + } + ret = die_get_typename(&type, buf, len); + if (ret > 0) { + ret2 = snprintf(buf + ret, len - ret, "%s", tmp); + ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; + } + return ret; +} + +/* Get the name and type of given variable DIE, stored as "type\tname" */ +static int die_get_varname(Dwarf_Die *vr_die, char *buf, int len) +{ + int ret, ret2; + + ret = die_get_typename(vr_die, buf, len); + if (ret < 0) { + pr_debug("Failed to get type, make it unknown.\n"); + ret = snprintf(buf, len, "(unknown_type)"); + } + if (ret > 0) { + ret2 = snprintf(buf + ret, len - ret, "\t%s", + dwarf_diename(vr_die)); + ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; + } + return ret; +} + /* * Probe finder related functions */ @@ -393,8 +456,13 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs) return ref; } -/* Show a location */ -static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) +/* + * Convert a location into trace_arg. + * If tvar == NULL, this just checks variable can be converted. 
+ */ +static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, + Dwarf_Op *fb_ops, + struct probe_trace_arg *tvar) { Dwarf_Attribute attr; Dwarf_Op *op; @@ -403,7 +471,6 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) Dwarf_Word offs = 0; bool ref = false; const char *regs; - struct probe_trace_arg *tvar = pf->tvar; int ret; if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL) @@ -411,16 +478,16 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) /* TODO: handle more than 1 exprs */ if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL || - dwarf_getlocation_addr(&attr, pf->addr, &op, &nops, 1) <= 0 || + dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 || nops == 0) { /* TODO: Support const_value */ - pr_err("Failed to find the location of %s at this address.\n" - " Perhaps, it has been optimized out.\n", pf->pvar->var); return -ENOENT; } if (op->atom == DW_OP_addr) { static_var: + if (!tvar) + return 0; /* Static variables on memory (not stack), make @varname */ ret = strlen(dwarf_diename(vr_die)); tvar->value = zalloc(ret + 2); @@ -435,14 +502,11 @@ static_var: /* If this is based on frame buffer, set the offset */ if (op->atom == DW_OP_fbreg) { - if (pf->fb_ops == NULL) { - pr_warning("The attribute of frame base is not " - "supported.\n"); + if (fb_ops == NULL) return -ENOTSUP; - } ref = true; offs = op->number; - op = &pf->fb_ops[0]; + op = &fb_ops[0]; } if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) { @@ -458,13 +522,18 @@ static_var: } else if (op->atom == DW_OP_regx) { regn = op->number; } else { - pr_warning("DW_OP %x is not supported.\n", op->atom); + pr_debug("DW_OP %x is not supported.\n", op->atom); return -ENOTSUP; } + if (!tvar) + return 0; + regs = get_arch_regstr(regn); if (!regs) { - pr_warning("Mapping for DWARF register number %u missing on this architecture.", regn); + /* This should be a bug in DWARF or this tool */ + pr_warning("Mapping for DWARF register number %u " + "missing on this architecture.", regn); return -ERANGE; } @@ -689,8 +758,14 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) pr_debug("Converting variable %s into trace event.\n", dwarf_diename(vr_die)); - ret = convert_variable_location(vr_die, pf); - if (ret == 0 && pf->pvar->field) { + ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops, + pf->tvar); + if (ret == -ENOENT) + pr_err("Failed to find the location of %s at this address.\n" + " Perhaps, it has been optimized out.\n", pf->pvar->var); + else if (ret == -ENOTSUP) + pr_err("Sorry, we don't support this variable location yet.\n"); + else if (pf->pvar->field) { ret = convert_variable_fields(vr_die, pf->pvar->var, pf->pvar->field, &pf->tvar->ref, &die_mem); @@ -772,34 +847,12 @@ found: return ret; } -/* Show a probe point to output buffer */ -static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) +/* Convert subprogram DIE to trace point */ +static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, + bool retprobe, struct probe_trace_point *tp) { - struct probe_trace_event *tev; Dwarf_Addr eaddr; - Dwarf_Die die_mem; const char *name; - int ret, i; - Dwarf_Attribute fb_attr; - size_t nops; - - if (pf->ntevs == pf->max_tevs) { - pr_warning("Too many( > %d) probe point found.\n", - pf->max_tevs); - return -ERANGE; - } - tev = &pf->tevs[pf->ntevs++]; - - /* If no real subprogram, find a real one */ - if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { - sp_die = 
die_find_real_subprogram(&pf->cu_die, - pf->addr, &die_mem); - if (!sp_die) { - pr_warning("Failed to find probe point in any " - "functions.\n"); - return -ENOENT; - } - } /* Copy the name of probe point */ name = dwarf_diename(sp_die); @@ -809,26 +862,45 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) dwarf_diename(sp_die)); return -ENOENT; } - tev->point.symbol = strdup(name); - if (tev->point.symbol == NULL) + tp->symbol = strdup(name); + if (tp->symbol == NULL) return -ENOMEM; - tev->point.offset = (unsigned long)(pf->addr - eaddr); + tp->offset = (unsigned long)(paddr - eaddr); } else /* This function has no name. */ - tev->point.offset = (unsigned long)pf->addr; + tp->offset = (unsigned long)paddr; /* Return probe must be on the head of a subprogram */ - if (pf->pev->point.retprobe) { - if (tev->point.offset != 0) { + if (retprobe) { + if (eaddr != paddr) { pr_warning("Return probe must be on the head of" " a real function\n"); return -EINVAL; } - tev->point.retprobe = true; + tp->retprobe = true; } - pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, - tev->point.offset); + return 0; +} + +/* Call probe_finder callback with real subprogram DIE */ +static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) +{ + Dwarf_Die die_mem; + Dwarf_Attribute fb_attr; + size_t nops; + int ret; + + /* If no real subprogram, find a real one */ + if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { + sp_die = die_find_real_subprogram(&pf->cu_die, + pf->addr, &die_mem); + if (!sp_die) { + pr_warning("Failed to find probe point in any " + "functions.\n"); + return -ENOENT; + } + } /* Get the frame base attribute/ops */ dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr); @@ -848,22 +920,13 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) #endif } - /* Find each argument */ - tev->nargs = pf->pev->nargs; - tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); - if (tev->args == NULL) - return -ENOMEM; - for (i = 0; i < pf->pev->nargs; i++) { - pf->pvar = &pf->pev->args[i]; - pf->tvar = &tev->args[i]; - ret = find_variable(sp_die, pf); - if (ret != 0) - return ret; - } + /* Call finder's callback handler */ + ret = pf->callback(sp_die, pf); /* *pf->fb_ops will be cached in libdw. Don't free it. */ pf->fb_ops = NULL; - return 0; + + return ret; } /* Find probe point from its line number */ @@ -899,7 +962,7 @@ static int find_probe_point_by_line(struct probe_finder *pf) (int)i, lineno, (uintmax_t)addr); pf->addr = addr; - ret = convert_probe_point(NULL, pf); + ret = call_probe_finder(NULL, pf); /* Continuing, because target line might be inlined. */ } return ret; @@ -1012,7 +1075,7 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) (int)i, lineno, (unsigned long long)addr); pf->addr = addr; - ret = convert_probe_point(sp_die, pf); + ret = call_probe_finder(sp_die, pf); /* Continuing, because target line might be inlined. */ } /* TODO: deallocate lines, but how? 
*/ @@ -1047,7 +1110,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) pr_debug("found inline addr: 0x%jx\n", (uintmax_t)pf->addr); - param->retval = convert_probe_point(in_die, pf); + param->retval = call_probe_finder(in_die, pf); if (param->retval < 0) return DWARF_CB_ABORT; } @@ -1085,7 +1148,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) } pf->addr += pp->offset; /* TODO: Check the address in this function */ - param->retval = convert_probe_point(sp_die, pf); + param->retval = call_probe_finder(sp_die, pf); } } else { struct dwarf_callback_param _param = {.data = (void *)pf, @@ -1107,70 +1170,229 @@ static int find_probe_point_by_func(struct probe_finder *pf) return _param.retval; } -/* Find probe_trace_events specified by perf_probe_event from debuginfo */ -int find_probe_trace_events(int fd, struct perf_probe_event *pev, - struct probe_trace_event **tevs, int max_tevs) +/* Find probe points from debuginfo */ +static int find_probes(int fd, struct probe_finder *pf) { - struct probe_finder pf = {.pev = pev, .max_tevs = max_tevs}; - struct perf_probe_point *pp = &pev->point; + struct perf_probe_point *pp = &pf->pev->point; Dwarf_Off off, noff; size_t cuhl; Dwarf_Die *diep; Dwarf *dbg; int ret = 0; - pf.tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs); - if (pf.tevs == NULL) - return -ENOMEM; - *tevs = pf.tevs; - pf.ntevs = 0; - dbg = dwarf_begin(fd, DWARF_C_READ); if (!dbg) { pr_warning("No dwarf info found in the vmlinux - " "please rebuild with CONFIG_DEBUG_INFO=y.\n"); - free(pf.tevs); - *tevs = NULL; return -EBADF; } #if _ELFUTILS_PREREQ(0, 142) /* Get the call frame information from this dwarf */ - pf.cfi = dwarf_getcfi(dbg); + pf->cfi = dwarf_getcfi(dbg); #endif off = 0; - line_list__init(&pf.lcache); + line_list__init(&pf->lcache); /* Loop on CUs (Compilation Unit) */ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) && ret >= 0) { /* Get the DIE(Debugging Information Entry) of this CU */ - diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die); + diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die); if (!diep) continue; /* Check if target file is included. */ if (pp->file) - pf.fname = cu_find_realpath(&pf.cu_die, pp->file); + pf->fname = cu_find_realpath(&pf->cu_die, pp->file); else - pf.fname = NULL; + pf->fname = NULL; - if (!pp->file || pf.fname) { + if (!pp->file || pf->fname) { if (pp->function) - ret = find_probe_point_by_func(&pf); + ret = find_probe_point_by_func(pf); else if (pp->lazy_line) - ret = find_probe_point_lazy(NULL, &pf); + ret = find_probe_point_lazy(NULL, pf); else { - pf.lno = pp->line; - ret = find_probe_point_by_line(&pf); + pf->lno = pp->line; + ret = find_probe_point_by_line(pf); } } off = noff; } - line_list__free(&pf.lcache); + line_list__free(&pf->lcache); dwarf_end(dbg); - return (ret < 0) ? 
ret : pf.ntevs; + return ret; +} + +/* Add a found probe point into trace event list */ +static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) +{ + struct trace_event_finder *tf = + container_of(pf, struct trace_event_finder, pf); + struct probe_trace_event *tev; + int ret, i; + + /* Check number of tevs */ + if (tf->ntevs == tf->max_tevs) { + pr_warning("Too many( > %d) probe point found.\n", + tf->max_tevs); + return -ERANGE; + } + tev = &tf->tevs[tf->ntevs++]; + + ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, + &tev->point); + if (ret < 0) + return ret; + + pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, + tev->point.offset); + + /* Find each argument */ + tev->nargs = pf->pev->nargs; + tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); + if (tev->args == NULL) + return -ENOMEM; + for (i = 0; i < pf->pev->nargs; i++) { + pf->pvar = &pf->pev->args[i]; + pf->tvar = &tev->args[i]; + ret = find_variable(sp_die, pf); + if (ret != 0) + return ret; + } + + return 0; +} + +/* Find probe_trace_events specified by perf_probe_event from debuginfo */ +int find_probe_trace_events(int fd, struct perf_probe_event *pev, + struct probe_trace_event **tevs, int max_tevs) +{ + struct trace_event_finder tf = { + .pf = {.pev = pev, .callback = add_probe_trace_event}, + .max_tevs = max_tevs}; + int ret; + + /* Allocate result tevs array */ + *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs); + if (*tevs == NULL) + return -ENOMEM; + + tf.tevs = *tevs; + tf.ntevs = 0; + + ret = find_probes(fd, &tf.pf); + if (ret < 0) { + free(*tevs); + *tevs = NULL; + return ret; + } + + return (ret < 0) ? ret : tf.ntevs; +} + +#define MAX_VAR_LEN 64 + +/* Collect available variables in this scope */ +static int collect_variables_cb(Dwarf_Die *die_mem, void *data) +{ + struct available_var_finder *af = data; + struct variable_list *vl; + char buf[MAX_VAR_LEN]; + int tag, ret; + + vl = &af->vls[af->nvls - 1]; + + tag = dwarf_tag(die_mem); + if (tag == DW_TAG_formal_parameter || + tag == DW_TAG_variable) { + ret = convert_variable_location(die_mem, af->pf.addr, + af->pf.fb_ops, NULL); + if (ret == 0) { + ret = die_get_varname(die_mem, buf, MAX_VAR_LEN); + if (ret > 0) + strlist__add(vl->vars, buf); + } + } + + if (dwarf_haspc(die_mem, af->pf.addr)) + return DIE_FIND_CB_CONTINUE; + else + return DIE_FIND_CB_SIBLING; +} + +/* Add a found vars into available variables list */ +static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) +{ + struct available_var_finder *af = + container_of(pf, struct available_var_finder, pf); + struct variable_list *vl; + Dwarf_Die die_mem; + int ret; + + /* Check number of tevs */ + if (af->nvls == af->max_vls) { + pr_warning("Too many( > %d) probe point found.\n", af->max_vls); + return -ERANGE; + } + vl = &af->vls[af->nvls++]; + + ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, + &vl->point); + if (ret < 0) + return ret; + + pr_debug("Probe point found: %s+%lu\n", vl->point.symbol, + vl->point.offset); + + /* Find local variables */ + vl->vars = strlist__new(true, NULL); + if (vl->vars == NULL) + return -ENOMEM; + die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem); + + if (strlist__empty(vl->vars)) { + strlist__delete(vl->vars); + vl->vars = NULL; + } + + return ret; +} + +/* Find available variables at given probe point */ +int find_available_vars_at(int fd, struct perf_probe_event *pev, + struct variable_list **vls, int max_vls) +{ + struct 
available_var_finder af = { + .pf = {.pev = pev, .callback = add_available_vars}, + .max_vls = max_vls}; + int ret; + + /* Allocate result vls array */ + *vls = zalloc(sizeof(struct variable_list) * max_vls); + if (*vls == NULL) + return -ENOMEM; + + af.vls = *vls; + af.nvls = 0; + + ret = find_probes(fd, &af.pf); + if (ret < 0) { + /* Free vlist for error */ + while (af.nvls--) { + if (af.vls[af.nvls].point.symbol) + free(af.vls[af.nvls].point.symbol); + if (af.vls[af.nvls].vars) + strlist__delete(af.vls[af.nvls].vars); + } + free(af.vls); + *vls = NULL; + return ret; + } + + return (ret < 0) ? ret : af.nvls; } /* Reverse search */ diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 4507d519f183..baffd25f39c9 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -25,17 +25,22 @@ extern int find_probe_trace_events(int fd, struct perf_probe_event *pev, extern int find_perf_probe_point(int fd, unsigned long addr, struct perf_probe_point *ppt); +/* Find a line range */ extern int find_line_range(int fd, struct line_range *lr); +/* Find available variables */ +extern int find_available_vars_at(int fd, struct perf_probe_event *pev, + struct variable_list **vls, int max_points); + #include #include #include struct probe_finder { struct perf_probe_event *pev; /* Target probe event */ - struct probe_trace_event *tevs; /* Result trace events */ - int ntevs; /* Number of trace events */ - int max_tevs; /* Max number of trace events */ + + /* Callback when a probe point is found */ + int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf); /* For function searching */ int lno; /* Line number */ @@ -53,6 +58,20 @@ struct probe_finder { struct probe_trace_arg *tvar; /* Current result variable */ }; +struct trace_event_finder { + struct probe_finder pf; + struct probe_trace_event *tevs; /* Found trace events */ + int ntevs; /* Number of trace events */ + int max_tevs; /* Max number of trace events */ +}; + +struct available_var_finder { + struct probe_finder pf; + struct variable_list *vls; /* Found variable lists */ + int nvls; /* Number of variable lists */ + int max_vls; /* Max no. of variable lists */ +}; + struct line_finder { struct line_range *lr; /* Target line range */ -- cgit v1.2.2 From c82ec0a2bd7725a2d2ac3065d8cde13e1f717d3c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 21 Oct 2010 19:13:29 +0900 Subject: perf probe: Function style fix Just change the order of function arguments for ease of read; moving optional bool flag to the last. Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Frederic Weisbecker LKML-Reference: <20101021101329.3542.51200.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-probe.c | 4 ++-- tools/perf/util/probe-event.c | 2 +- tools/perf/util/probe-event.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 91bb6cf4e025..c777bec28466 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -305,8 +305,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) if (params.nevents) { ret = add_perf_probe_events(params.events, params.nevents, - params.force_add, - params.max_probe_points); + params.max_probe_points, + params.force_add); if (ret < 0) { pr_err(" Error: Failed to add events. 
(%d)\n", ret); return ret; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 288ebe8279d2..83192a59f02a 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -1668,7 +1668,7 @@ struct __event_package { }; int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, - bool force_add, int max_tevs) + int max_tevs, bool force_add) { int i, j, ret; struct __event_package *pkgs; diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 727a7fe81a02..83130f634f01 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -117,7 +117,7 @@ extern int parse_line_range_desc(const char *cmd, struct line_range *lr); extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, - bool force_add, int max_probe_points); + int max_probe_points, bool force_add); extern int del_perf_probe_events(struct strlist *dellist); extern int show_perf_probe_events(void); extern int show_line_range(struct line_range *lr); -- cgit v1.2.2 From fb8c5a56c7ddbc2b0d2ee7a8da60fe1355f75141 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 21 Oct 2010 19:13:35 +0900 Subject: perf probe: Show accessible global variables Add --externs for allowing --vars to show accessible global (externally defined) variables from a given probe point too. This will give you a hint which globals can be accessible from the probe point. Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Frederic Weisbecker LKML-Reference: <20101021101335.3542.31003.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-probe.txt | 6 +++++- tools/perf/builtin-probe.c | 8 ++++++-- tools/perf/util/probe-event.c | 8 ++++---- tools/perf/util/probe-event.h | 2 +- tools/perf/util/probe-finder.c | 26 +++++++++++++++++++++----- tools/perf/util/probe-finder.h | 5 ++++- 6 files changed, 41 insertions(+), 14 deletions(-) diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index 72f5d9e21432..148c178a35aa 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt @@ -18,7 +18,7 @@ or or 'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]' or -'perf probe' --vars='PROBEPOINT' +'perf probe' [--externs] --vars='PROBEPOINT' DESCRIPTION ----------- @@ -64,6 +64,10 @@ OPTIONS Show available local variables at given probe point. The argument syntax is same as PROBE SYNTAX, but NO ARGs. +--externs:: + (Only for --vars) Show external defined variables in addition to local + variables. + -f:: --force:: Forcibly add events with existing name. 
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index c777bec28466..bdf60cfdf70f 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -51,6 +51,7 @@ static struct { bool force_add; bool show_lines; bool show_vars; + bool show_ext_vars; bool mod_events; int nevents; struct perf_probe_event events[MAX_PROBES]; @@ -162,7 +163,7 @@ static const char * const probe_usage[] = { "perf probe --list", #ifdef DWARF_SUPPORT "perf probe --line 'LINEDESC'", - "perf probe --vars 'PROBEPOINT'", + "perf probe [--externs] --vars 'PROBEPOINT'", #endif NULL }; @@ -207,6 +208,8 @@ static const struct option options[] = { OPT_CALLBACK('V', "vars", NULL, "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT", "Show accessible variables on PROBEDEF", opt_show_vars), + OPT_BOOLEAN('\0', "externs", ¶ms.show_ext_vars, + "Show external variables too (with --vars only)"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_STRING('s', "source", &symbol_conf.source_prefix, @@ -287,7 +290,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) usage_with_options(probe_usage, options); } ret = show_available_vars(params.events, params.nevents, - params.max_probe_points); + params.max_probe_points, + params.show_ext_vars); if (ret < 0) pr_err(" Error: Failed to show vars. (%d)\n", ret); return ret; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 83192a59f02a..82b0976e2053 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -379,7 +379,7 @@ end: } static int show_available_vars_at(int fd, struct perf_probe_event *pev, - int max_vls) + int max_vls, bool externs) { char *buf; int ret, i; @@ -391,7 +391,7 @@ static int show_available_vars_at(int fd, struct perf_probe_event *pev, return -EINVAL; pr_debug("Searching variables at %s\n", buf); - ret = find_available_vars_at(fd, pev, &vls, max_vls); + ret = find_available_vars_at(fd, pev, &vls, max_vls, externs); if (ret > 0) { /* Some variables were found */ fprintf(stdout, "Available variables at %s\n", buf); @@ -421,7 +421,7 @@ static int show_available_vars_at(int fd, struct perf_probe_event *pev, /* Show available variables on given probe point */ int show_available_vars(struct perf_probe_event *pevs, int npevs, - int max_vls) + int max_vls, bool externs) { int i, fd, ret = 0; @@ -438,7 +438,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs, setup_pager(); for (i = 0; i < npevs && ret >= 0; i++) - ret = show_available_vars_at(fd, &pevs[i], max_vls); + ret = show_available_vars_at(fd, &pevs[i], max_vls, externs); close(fd); return ret; diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 83130f634f01..c74b1fd2c1fa 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -122,7 +122,7 @@ extern int del_perf_probe_events(struct strlist *dellist); extern int show_perf_probe_events(void); extern int show_line_range(struct line_range *lr); extern int show_available_vars(struct perf_probe_event *pevs, int npevs, - int max_probe_points); + int max_probe_points, bool externs); /* Maximum index number of event-name postfix */ diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 986027fa495f..a274fd0c143e 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1312,12 +1312,13 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data) af->pf.fb_ops, NULL); if (ret == 0) { ret = 
die_get_varname(die_mem, buf, MAX_VAR_LEN); + pr_debug2("Add new var: %s\n", buf); if (ret > 0) strlist__add(vl->vars, buf); } } - if (dwarf_haspc(die_mem, af->pf.addr)) + if (af->child && dwarf_haspc(die_mem, af->pf.addr)) return DIE_FIND_CB_CONTINUE; else return DIE_FIND_CB_SIBLING; @@ -1329,8 +1330,8 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) struct available_var_finder *af = container_of(pf, struct available_var_finder, pf); struct variable_list *vl; - Dwarf_Die die_mem; - int ret; + Dwarf_Die die_mem, *scopes = NULL; + int ret, nscopes; /* Check number of tevs */ if (af->nvls == af->max_vls) { @@ -1351,8 +1352,22 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) vl->vars = strlist__new(true, NULL); if (vl->vars == NULL) return -ENOMEM; + af->child = true; die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem); + /* Find external variables */ + if (!af->externs) + goto out; + /* Don't need to search child DIE for externs. */ + af->child = false; + nscopes = dwarf_getscopes_die(sp_die, &scopes); + while (nscopes-- > 1) + die_find_child(&scopes[nscopes], collect_variables_cb, + (void *)af, &die_mem); + if (scopes) + free(scopes); + +out: if (strlist__empty(vl->vars)) { strlist__delete(vl->vars); vl->vars = NULL; @@ -1363,11 +1378,12 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) /* Find available variables at given probe point */ int find_available_vars_at(int fd, struct perf_probe_event *pev, - struct variable_list **vls, int max_vls) + struct variable_list **vls, int max_vls, + bool externs) { struct available_var_finder af = { .pf = {.pev = pev, .callback = add_available_vars}, - .max_vls = max_vls}; + .max_vls = max_vls, .externs = externs}; int ret; /* Allocate result vls array */ diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index baffd25f39c9..516912a04011 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -30,7 +30,8 @@ extern int find_line_range(int fd, struct line_range *lr); /* Find available variables */ extern int find_available_vars_at(int fd, struct perf_probe_event *pev, - struct variable_list **vls, int max_points); + struct variable_list **vls, int max_points, + bool externs); #include #include @@ -70,6 +71,8 @@ struct available_var_finder { struct variable_list *vls; /* Found variable lists */ int nvls; /* Number of variable lists */ int max_vls; /* Max no. of variable lists */ + bool externs; /* Find external vars too */ + bool child; /* Search child scopes */ }; struct line_finder { -- cgit v1.2.2 From 469b9b88488e89114bb3e9ac5ee7906b7b96123f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 21 Oct 2010 19:13:41 +0900 Subject: perf probe: Add basic module support Add basic module probe support on perf probe. This introduces "--module " option to perf probe for putting probes and showing lines and variables in the given module. Currently, this supports only probing on running modules. Supporting off-line module probing is the next step. e.g.) [show lines] # ./perf probe --module drm -L drm_vblank_info 0 int drm_vblank_info(struct seq_file *m, void *data) 1 { struct drm_info_node *node = (struct drm_info_node *) m->private 3 struct drm_device *dev = node->minor->dev; ... 
[show vars] # ./perf probe --module drm -V drm_vblank_info:3 Available variables at drm_vblank_info:3 @ (unknown_type) data struct drm_info_node* node struct seq_file* m [put a probe] # ./perf probe --module drm drm_vblank_info:3 node m Add new event: probe:drm_vblank_info (on drm_vblank_info:3 with node m) You can now use it on all perf tools, such as: perf record -e probe:drm_vblank_info -aR sleep 1 [list probes] # ./perf probe -l probe:drm_vblank_info (on drm_vblank_info:3@drivers/gpu/drm/drm_info.c with ... Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Frederic Weisbecker LKML-Reference: <20101021101341.3542.71638.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-probe.txt | 9 +- tools/perf/builtin-probe.c | 11 ++- tools/perf/util/map.h | 10 +++ tools/perf/util/probe-event.c | 123 +++++++++++++++++---------- tools/perf/util/probe-event.h | 10 ++- tools/perf/util/probe-finder.c | 142 ++++++++++++++++++++++++++++---- tools/perf/util/probe-finder.h | 3 +- 7 files changed, 239 insertions(+), 69 deletions(-) diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index 148c178a35aa..62de1b7f4e76 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt @@ -16,9 +16,9 @@ or or 'perf probe' --list or -'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]' +'perf probe' [options] --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]' or -'perf probe' [--externs] --vars='PROBEPOINT' +'perf probe' [options] --vars='PROBEPOINT' DESCRIPTION ----------- @@ -33,6 +33,11 @@ OPTIONS --vmlinux=PATH:: Specify vmlinux path which has debuginfo (Dwarf binary). +-m:: +--module=MODNAME:: + Specify module name in which perf-probe searches probe points + or lines. + -s:: --source=PATH:: Specify path to kernel source. diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index bdf60cfdf70f..2e000c068cc5 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -57,6 +57,7 @@ static struct { struct perf_probe_event events[MAX_PROBES]; struct strlist *dellist; struct line_range line_range; + const char *target_module; int max_probe_points; } params; @@ -162,8 +163,8 @@ static const char * const probe_usage[] = { "perf probe [] --del '[GROUP:]EVENT' ...", "perf probe --list", #ifdef DWARF_SUPPORT - "perf probe --line 'LINEDESC'", - "perf probe [--externs] --vars 'PROBEPOINT'", + "perf probe [] --line 'LINEDESC'", + "perf probe [] --vars 'PROBEPOINT'", #endif NULL }; @@ -214,6 +215,8 @@ static const struct option options[] = { "file", "vmlinux pathname"), OPT_STRING('s', "source", &symbol_conf.source_prefix, "directory", "path to kernel source"), + OPT_STRING('m', "module", ¶ms.target_module, + "modname", "target module name"), #endif OPT__DRY_RUN(&probe_event_dry_run), OPT_INTEGER('\0', "max-probes", ¶ms.max_probe_points, @@ -278,7 +281,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) usage_with_options(probe_usage, options); } - ret = show_line_range(¶ms.line_range); + ret = show_line_range(¶ms.line_range, params.target_module); if (ret < 0) pr_err(" Error: Failed to show lines. (%d)\n", ret); return ret; @@ -291,6 +294,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) } ret = show_available_vars(params.events, params.nevents, params.max_probe_points, + params.target_module, params.show_ext_vars); if (ret < 0) pr_err(" Error: Failed to show vars. 
(%d)\n", ret); @@ -310,6 +314,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) if (params.nevents) { ret = add_perf_probe_events(params.events, params.nevents, params.max_probe_points, + params.target_module, params.force_add); if (ret < 0) { pr_err(" Error: Failed to add events. (%d)\n", ret); diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 78575796d5f3..b397c0383728 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -215,6 +215,16 @@ struct symbol *map_groups__find_function_by_name(struct map_groups *self, return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter); } +static inline +struct symbol *machine__find_kernel_function_by_name(struct machine *self, + const char *name, + struct map **mapp, + symbol_filter_t filter) +{ + return map_groups__find_function_by_name(&self->kmaps, name, mapp, + filter); +} + int map_groups__fixup_overlappings(struct map_groups *self, struct map *map, int verbose, FILE *fp); diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 82b0976e2053..3b6a5297bf16 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -74,10 +74,9 @@ static int e_snprintf(char *str, size_t size, const char *format, ...) static char *synthesize_perf_probe_point(struct perf_probe_point *pp); static struct machine machine; -/* Initialize symbol maps and path of vmlinux */ +/* Initialize symbol maps and path of vmlinux/modules */ static int init_vmlinux(void) { - struct dso *kernel; int ret; symbol_conf.sort_by_name = true; @@ -91,33 +90,61 @@ static int init_vmlinux(void) goto out; } - ret = machine__init(&machine, "/", 0); + ret = machine__init(&machine, "", HOST_KERNEL_ID); if (ret < 0) goto out; - kernel = dso__new_kernel(symbol_conf.vmlinux_name); - if (kernel == NULL) - die("Failed to create kernel dso."); - - ret = __machine__create_kernel_maps(&machine, kernel); - if (ret < 0) - pr_debug("Failed to create kernel maps.\n"); - + if (machine__create_kernel_maps(&machine) < 0) { + pr_debug("machine__create_kernel_maps "); + goto out; + } out: if (ret < 0) pr_warning("Failed to init vmlinux path.\n"); return ret; } +static struct symbol *__find_kernel_function_by_name(const char *name, + struct map **mapp) +{ + return machine__find_kernel_function_by_name(&machine, name, mapp, + NULL); +} + +const char *kernel_get_module_path(const char *module) +{ + struct dso *dso; + + if (module) { + list_for_each_entry(dso, &machine.kernel_dsos, node) { + if (strncmp(dso->short_name + 1, module, + dso->short_name_len - 2) == 0) + goto found; + } + pr_debug("Failed to find module %s.\n", module); + return NULL; + } else { + dso = machine.vmlinux_maps[MAP__FUNCTION]->dso; + if (dso__load_vmlinux_path(dso, + machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) { + pr_debug("Failed to load kernel map.\n"); + return NULL; + } + } +found: + return dso->long_name; +} + #ifdef DWARF_SUPPORT -static int open_vmlinux(void) +static int open_vmlinux(const char *module) { - if (map__load(machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) { - pr_debug("Failed to load kernel map.\n"); - return -EINVAL; + const char *path = kernel_get_module_path(module); + if (!path) { + pr_err("Failed to find path of %s module", module ?: "kernel"); + return -ENOENT; } - pr_debug("Try to open %s\n", machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name); - return open(machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name, O_RDONLY); + pr_debug("Try to open %s\n", path); + return open(path, O_RDONLY); } /* 
@@ -125,20 +152,19 @@ static int open_vmlinux(void) * Currently only handles kprobes. */ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, - struct perf_probe_point *pp) + struct perf_probe_point *pp) { struct symbol *sym; - int fd, ret = -ENOENT; + struct map *map; + u64 addr; + int ret = -ENOENT; - sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION], - tp->symbol, NULL); + sym = __find_kernel_function_by_name(tp->symbol, &map); if (sym) { - fd = open_vmlinux(); - if (fd >= 0) { - ret = find_perf_probe_point(fd, - sym->start + tp->offset, pp); - close(fd); - } + addr = map->unmap_ip(map, sym->start + tp->offset); + pr_debug("try to find %s+%ld@%llx\n", tp->symbol, + tp->offset, addr); + ret = find_perf_probe_point((unsigned long)addr, pp); } if (ret <= 0) { pr_debug("Failed to find corresponding probes from " @@ -156,12 +182,12 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, /* Try to find perf_probe_event with debuginfo */ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event **tevs, - int max_tevs) + int max_tevs, const char *module) { bool need_dwarf = perf_probe_event_need_dwarf(pev); int fd, ntevs; - fd = open_vmlinux(); + fd = open_vmlinux(module); if (fd < 0) { if (need_dwarf) { pr_warning("Failed to open debuginfo file.\n"); @@ -300,7 +326,7 @@ error: * Show line-range always requires debuginfo to find source file and * line number. */ -int show_line_range(struct line_range *lr) +int show_line_range(struct line_range *lr, const char *module) { int l = 1; struct line_node *ln; @@ -313,7 +339,7 @@ int show_line_range(struct line_range *lr) if (ret < 0) return ret; - fd = open_vmlinux(); + fd = open_vmlinux(module); if (fd < 0) { pr_warning("Failed to open debuginfo file.\n"); return fd; @@ -421,7 +447,7 @@ static int show_available_vars_at(int fd, struct perf_probe_event *pev, /* Show available variables on given probe point */ int show_available_vars(struct perf_probe_event *pevs, int npevs, - int max_vls, bool externs) + int max_vls, const char *module, bool externs) { int i, fd, ret = 0; @@ -429,7 +455,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs, if (ret < 0) return ret; - fd = open_vmlinux(); + fd = open_vmlinux(module); if (fd < 0) { pr_warning("Failed to open debuginfo file.\n"); return fd; @@ -447,8 +473,15 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs, #else /* !DWARF_SUPPORT */ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, - struct perf_probe_point *pp) + struct perf_probe_point *pp) { + struct symbol *sym; + + sym = __find_kernel_function_by_name(tp->symbol, NULL); + if (!sym) { + pr_err("Failed to find symbol %s in kernel.\n", tp->symbol); + return -ENOENT; + } pp->function = strdup(tp->symbol); if (pp->function == NULL) return -ENOMEM; @@ -460,7 +493,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, static int try_to_find_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event **tevs __unused, - int max_tevs __unused) + int max_tevs __unused, const char *mod __unused) { if (perf_probe_event_need_dwarf(pev)) { pr_warning("Debuginfo-analysis is not supported.\n"); @@ -469,14 +502,15 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, return 0; } -int show_line_range(struct line_range *lr __unused) +int show_line_range(struct line_range *lr __unused, const char *module __unused) { pr_warning("Debuginfo-analysis is not supported.\n"); 
return -ENOSYS; } int show_available_vars(struct perf_probe_event *pevs __unused, - int npevs __unused, int max_probe_points __unused) + int npevs __unused, int max_vls __unused, + const char *module __unused, bool externs __unused) { pr_warning("Debuginfo-analysis is not supported.\n"); return -ENOSYS; @@ -1159,7 +1193,7 @@ error: } static int convert_to_perf_probe_event(struct probe_trace_event *tev, - struct perf_probe_event *pev) + struct perf_probe_event *pev) { char buf[64] = ""; int i, ret; @@ -1588,14 +1622,14 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, static int convert_to_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event **tevs, - int max_tevs) + int max_tevs, const char *module) { struct symbol *sym; int ret = 0, i; struct probe_trace_event *tev; /* Convert perf_probe_event with debuginfo */ - ret = try_to_find_probe_trace_events(pev, tevs, max_tevs); + ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, module); if (ret != 0) return ret; @@ -1644,8 +1678,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, } /* Currently just checking function name from symbol map */ - sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION], - tev->point.symbol, NULL); + sym = __find_kernel_function_by_name(tev->point.symbol, NULL); if (!sym) { pr_warning("Kernel symbol \'%s\' not found.\n", tev->point.symbol); @@ -1668,7 +1701,7 @@ struct __event_package { }; int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, - int max_tevs, bool force_add) + int max_tevs, const char *module, bool force_add) { int i, j, ret; struct __event_package *pkgs; @@ -1689,7 +1722,9 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, pkgs[i].pev = &pevs[i]; /* Convert with or without debuginfo */ ret = convert_to_probe_trace_events(pkgs[i].pev, - &pkgs[i].tevs, max_tevs); + &pkgs[i].tevs, + max_tevs, + module); if (ret < 0) goto end; pkgs[i].ntevs = ret; diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index c74b1fd2c1fa..5accbedfea37 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -115,14 +115,18 @@ extern void clear_perf_probe_event(struct perf_probe_event *pev); /* Command string to line-range */ extern int parse_line_range_desc(const char *cmd, struct line_range *lr); +/* Internal use: Return kernel/module path */ +extern const char *kernel_get_module_path(const char *module); extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, - int max_probe_points, bool force_add); + int max_probe_points, const char *module, + bool force_add); extern int del_perf_probe_events(struct strlist *dellist); extern int show_perf_probe_events(void); -extern int show_line_range(struct line_range *lr); +extern int show_line_range(struct line_range *lr, const char *module); extern int show_available_vars(struct perf_probe_event *pevs, int npevs, - int max_probe_points, bool externs); + int max_probe_points, const char *module, + bool externs); /* Maximum index number of event-name postfix */ diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index a274fd0c143e..c20bd52833aa 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -116,6 +116,101 @@ static void line_list__free(struct list_head *head) } } +/* Dwarf FL wrappers */ + +static int __linux_kernel_find_elf(Dwfl_Module *mod, + void **userdata, + const char *module_name, + Dwarf_Addr base, + char **file_name, Elf **elfp) +{ + 
int fd; + const char *path = kernel_get_module_path(module_name); + + if (path) { + fd = open(path, O_RDONLY); + if (fd >= 0) { + *file_name = strdup(path); + return fd; + } + } + /* If failed, try to call standard method */ + return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base, + file_name, elfp); +} + +static char *debuginfo_path; /* Currently dummy */ + +static const Dwfl_Callbacks offline_callbacks = { + .find_debuginfo = dwfl_standard_find_debuginfo, + .debuginfo_path = &debuginfo_path, + + .section_address = dwfl_offline_section_address, + + /* We use this table for core files too. */ + .find_elf = dwfl_build_id_find_elf, +}; + +static const Dwfl_Callbacks kernel_callbacks = { + .find_debuginfo = dwfl_standard_find_debuginfo, + .debuginfo_path = &debuginfo_path, + + .find_elf = __linux_kernel_find_elf, + .section_address = dwfl_linux_kernel_module_section_address, +}; + +/* Get a Dwarf from offline image */ +static Dwarf *dwfl_init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias) +{ + Dwfl_Module *mod; + Dwarf *dbg = NULL; + + if (!dwflp) + return NULL; + + *dwflp = dwfl_begin(&offline_callbacks); + if (!*dwflp) + return NULL; + + mod = dwfl_report_offline(*dwflp, "", "", fd); + if (!mod) + goto error; + + dbg = dwfl_module_getdwarf(mod, bias); + if (!dbg) { +error: + dwfl_end(*dwflp); + *dwflp = NULL; + } + return dbg; +} + +/* Get a Dwarf from live kernel image */ +static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp, + Dwarf_Addr *bias) +{ + Dwarf *dbg; + + if (!dwflp) + return NULL; + + *dwflp = dwfl_begin(&kernel_callbacks); + if (!*dwflp) + return NULL; + + /* Load the kernel dwarves: Don't care the result here */ + dwfl_linux_kernel_report_kernel(*dwflp); + dwfl_linux_kernel_report_modules(*dwflp); + + dbg = dwfl_addrdwarf(*dwflp, addr, bias); + /* Here, check whether we could get a real dwarf */ + if (!dbg) { + dwfl_end(*dwflp); + *dwflp = NULL; + } + return dbg; +} + /* Dwarf wrappers */ /* Find the realpath of the target file. 
*/ @@ -1177,10 +1272,12 @@ static int find_probes(int fd, struct probe_finder *pf) Dwarf_Off off, noff; size_t cuhl; Dwarf_Die *diep; - Dwarf *dbg; + Dwarf *dbg = NULL; + Dwfl *dwfl; + Dwarf_Addr bias; /* Currently ignored */ int ret = 0; - dbg = dwarf_begin(fd, DWARF_C_READ); + dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); if (!dbg) { pr_warning("No dwarf info found in the vmlinux - " "please rebuild with CONFIG_DEBUG_INFO=y.\n"); @@ -1221,7 +1318,8 @@ static int find_probes(int fd, struct probe_finder *pf) off = noff; } line_list__free(&pf->lcache); - dwarf_end(dbg); + if (dwfl) + dwfl_end(dwfl); return ret; } @@ -1412,23 +1510,31 @@ int find_available_vars_at(int fd, struct perf_probe_event *pev, } /* Reverse search */ -int find_perf_probe_point(int fd, unsigned long addr, - struct perf_probe_point *ppt) +int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt) { Dwarf_Die cudie, spdie, indie; - Dwarf *dbg; + Dwarf *dbg = NULL; + Dwfl *dwfl = NULL; Dwarf_Line *line; - Dwarf_Addr laddr, eaddr; + Dwarf_Addr laddr, eaddr, bias = 0; const char *tmp; int lineno, ret = 0; bool found = false; - dbg = dwarf_begin(fd, DWARF_C_READ); - if (!dbg) - return -EBADF; + /* Open the live linux kernel */ + dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias); + if (!dbg) { + pr_warning("No dwarf info found in the vmlinux - " + "please rebuild with CONFIG_DEBUG_INFO=y.\n"); + ret = -EINVAL; + goto end; + } + /* Adjust address with bias */ + addr += bias; /* Find cu die */ - if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr, &cudie)) { + if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) { + pr_warning("No CU DIE is found at %lx\n", addr); ret = -EINVAL; goto end; } @@ -1491,7 +1597,8 @@ found: } end: - dwarf_end(dbg); + if (dwfl) + dwfl_end(dwfl); if (ret >= 0) ret = found ? 1 : 0; return ret; @@ -1624,6 +1731,8 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) struct line_finder *lf = param->data; struct line_range *lr = lf->lr; + pr_debug("find (%lx) %s\n", dwarf_dieoffset(sp_die), + dwarf_diename(sp_die)); if (dwarf_tag(sp_die) == DW_TAG_subprogram && die_compare_name(sp_die, lr->function)) { lf->fname = dwarf_decl_file(sp_die); @@ -1667,10 +1776,12 @@ int find_line_range(int fd, struct line_range *lr) Dwarf_Off off = 0, noff; size_t cuhl; Dwarf_Die *diep; - Dwarf *dbg; + Dwarf *dbg = NULL; + Dwfl *dwfl; + Dwarf_Addr bias; /* Currently ignored */ const char *comp_dir; - dbg = dwarf_begin(fd, DWARF_C_READ); + dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); if (!dbg) { pr_warning("No dwarf info found in the vmlinux - " "please rebuild with CONFIG_DEBUG_INFO=y.\n"); @@ -1716,8 +1827,7 @@ int find_line_range(int fd, struct line_range *lr) } pr_debug("path: %s\n", lr->path); - dwarf_end(dbg); - + dwfl_end(dwfl); return (ret < 0) ? 
ret : lf.found; } diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 516912a04011..bba69d455699 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -22,7 +22,7 @@ extern int find_probe_trace_events(int fd, struct perf_probe_event *pev, int max_tevs); /* Find a perf_probe_point from debuginfo */ -extern int find_perf_probe_point(int fd, unsigned long addr, +extern int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt); /* Find a line range */ @@ -35,6 +35,7 @@ extern int find_available_vars_at(int fd, struct perf_probe_event *pev, #include #include +#include #include struct probe_finder { -- cgit v1.2.2 From b39f88acd7d989b6b247ba87c480fc24ed71d9c5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 19 Oct 2010 14:08:29 +0200 Subject: perf, x86: Extract PEBS/BTS buffer free routines So that we may grow additional call-sites.. Signed-off-by: Peter Zijlstra Acked-by: Stephane Eranian LKML-Reference: <20101019134808.196793164@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_ds.c | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 4977f9c400e5..1bc13518dd59 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -74,6 +74,28 @@ static void fini_debug_store_on_cpu(int cpu) wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); } +static void release_pebs_buffer(int cpu) +{ + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + + if (!ds || !x86_pmu.pebs) + return; + + kfree((void *)(unsigned long)ds->pebs_buffer_base); + ds->pebs_buffer_base = 0; +} + +static void release_bts_buffer(int cpu) +{ + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + + if (!ds || !x86_pmu.bts) + return; + + kfree((void *)(unsigned long)ds->bts_buffer_base); + ds->bts_buffer_base = 0; +} + static void release_ds_buffers(void) { int cpu; @@ -82,7 +104,6 @@ static void release_ds_buffers(void) return; get_online_cpus(); - for_each_online_cpu(cpu) fini_debug_store_on_cpu(cpu); @@ -92,13 +113,12 @@ static void release_ds_buffers(void) if (!ds) continue; - per_cpu(cpu_hw_events, cpu).ds = NULL; + release_pebs_buffer(cpu); + release_bts_buffer(cpu); - kfree((void *)(unsigned long)ds->pebs_buffer_base); - kfree((void *)(unsigned long)ds->bts_buffer_base); + per_cpu(cpu_hw_events, cpu).ds = NULL; kfree(ds); } - put_online_cpus(); } -- cgit v1.2.2 From 5ee25c87318fa3722026fd77089fa7ba0db8d447 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 19 Oct 2010 14:15:04 +0200 Subject: perf, x86: Extract PEBS/BTS allocation functions Mostly a cleanup.. it reduces code indentation and makes the code flow of reserve_ds_buffers() clearer. 
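For illustration only, a minimal userspace sketch of the same refactoring pattern (hypothetical names, plain calloc instead of the kernel allocators; this is not the perf code): give each buffer type its own alloc/release helper so the top-level reservation routine becomes a flat sequence of calls, each with a local failure path.

    #include <stdlib.h>
    #include <string.h>

    struct store { void *bts; void *pebs; };

    static int alloc_bts(struct store *s)       /* stands in for alloc_bts_buffer(cpu) */
    {
            s->bts = calloc(1, 64 * 1024);
            return s->bts ? 0 : -1;             /* -ENOMEM in the kernel */
    }

    static int alloc_pebs(struct store *s)      /* stands in for alloc_pebs_buffer(cpu) */
    {
            s->pebs = calloc(1, 4 * 1024);
            return s->pebs ? 0 : -1;
    }

    static void release_store(struct store *s)  /* helpers tolerate NULL members */
    {
            free(s->bts);
            free(s->pebs);
            memset(s, 0, sizeof(*s));
    }

    static int reserve_store(struct store *s)
    {
            if (alloc_bts(s) || alloc_pebs(s)) {
                    release_store(s);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct store s = { 0 };
            int err = reserve_store(&s);
            if (!err)
                    release_store(&s);
            return err ? 1 : 0;
    }

The point of the extraction shows up in reserve_store(): the conditionals that used to be nested inside one large loop body collapse into two calls, which is what the hunks below do for reserve_ds_buffers().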
Signed-off-by: Peter Zijlstra Acked-by: Stephane Eranian LKML-Reference: <20101019134808.253453452@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_ds.c | 90 +++++++++++++++++++------------ 1 file changed, 56 insertions(+), 34 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 1bc13518dd59..14d98bd52055 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -74,6 +74,32 @@ static void fini_debug_store_on_cpu(int cpu) wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); } +static int alloc_pebs_buffer(int cpu) +{ + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + int max, thresh = 1; /* always use a single PEBS record */ + void *buffer; + + if (!x86_pmu.pebs) + return 0; + + buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL); + if (unlikely(!buffer)) + return -ENOMEM; + + max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size; + + ds->pebs_buffer_base = (u64)(unsigned long)buffer; + ds->pebs_index = ds->pebs_buffer_base; + ds->pebs_absolute_maximum = ds->pebs_buffer_base + + max * x86_pmu.pebs_record_size; + + ds->pebs_interrupt_threshold = ds->pebs_buffer_base + + thresh * x86_pmu.pebs_record_size; + + return 0; +} + static void release_pebs_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; @@ -85,6 +111,32 @@ static void release_pebs_buffer(int cpu) ds->pebs_buffer_base = 0; } +static int alloc_bts_buffer(int cpu) +{ + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + int max, thresh; + void *buffer; + + if (!x86_pmu.bts) + return 0; + + buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); + if (unlikely(!buffer)) + return -ENOMEM; + + max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; + thresh = max / 16; + + ds->bts_buffer_base = (u64)(unsigned long)buffer; + ds->bts_index = ds->bts_buffer_base; + ds->bts_absolute_maximum = ds->bts_buffer_base + + max * BTS_RECORD_SIZE; + ds->bts_interrupt_threshold = ds->bts_absolute_maximum - + thresh * BTS_RECORD_SIZE; + + return 0; +} + static void release_bts_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; @@ -133,8 +185,6 @@ static int reserve_ds_buffers(void) for_each_possible_cpu(cpu) { struct debug_store *ds; - void *buffer; - int max, thresh; err = -ENOMEM; ds = kzalloc(sizeof(*ds), GFP_KERNEL); @@ -142,39 +192,11 @@ static int reserve_ds_buffers(void) break; per_cpu(cpu_hw_events, cpu).ds = ds; - if (x86_pmu.bts) { - buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); - if (unlikely(!buffer)) - break; - - max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; - thresh = max / 16; - - ds->bts_buffer_base = (u64)(unsigned long)buffer; - ds->bts_index = ds->bts_buffer_base; - ds->bts_absolute_maximum = ds->bts_buffer_base + - max * BTS_RECORD_SIZE; - ds->bts_interrupt_threshold = ds->bts_absolute_maximum - - thresh * BTS_RECORD_SIZE; - } + if (alloc_bts_buffer(cpu)) + break; - if (x86_pmu.pebs) { - buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL); - if (unlikely(!buffer)) - break; - - max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size; - - ds->pebs_buffer_base = (u64)(unsigned long)buffer; - ds->pebs_index = ds->pebs_buffer_base; - ds->pebs_absolute_maximum = ds->pebs_buffer_base + - max * x86_pmu.pebs_record_size; - /* - * Always use single record PEBS - */ - ds->pebs_interrupt_threshold = ds->pebs_buffer_base + - x86_pmu.pebs_record_size; - } + if (alloc_pebs_buffer(cpu)) + break; err = 0; } -- cgit v1.2.2 From 65af94baca56beb3514d6cfce782634db9cf676d Mon Sep 17 00:00:00 2001 From: 
Peter Zijlstra Date: Tue, 19 Oct 2010 14:37:23 +0200 Subject: perf, x86: Extract DS alloc/free functions Again, mostly a cleanup to unclutter the reserve_ds_buffer() code. Signed-off-by: Peter Zijlstra Acked-by: Stephane Eranian LKML-Reference: <20101019134808.304495776@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_ds.c | 40 ++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 14d98bd52055..3c86f4d2f02d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -148,6 +148,30 @@ static void release_bts_buffer(int cpu) ds->bts_buffer_base = 0; } +static int alloc_ds_buffer(int cpu) +{ + struct debug_store *ds; + + ds = kzalloc(sizeof(*ds), GFP_KERNEL); + if (unlikely(!ds)) + return -ENOMEM; + + per_cpu(cpu_hw_events, cpu).ds = ds; + + return 0; +} + +static void release_ds_buffer(int cpu) +{ + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + + if (!ds) + return; + + per_cpu(cpu_hw_events, cpu).ds = NULL; + kfree(ds); +} + static void release_ds_buffers(void) { int cpu; @@ -160,16 +184,9 @@ static void release_ds_buffers(void) fini_debug_store_on_cpu(cpu); for_each_possible_cpu(cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - - if (!ds) - continue; - release_pebs_buffer(cpu); release_bts_buffer(cpu); - - per_cpu(cpu_hw_events, cpu).ds = NULL; - kfree(ds); + release_ds_buffer(cpu); } put_online_cpus(); } @@ -184,13 +201,8 @@ static int reserve_ds_buffers(void) get_online_cpus(); for_each_possible_cpu(cpu) { - struct debug_store *ds; - - err = -ENOMEM; - ds = kzalloc(sizeof(*ds), GFP_KERNEL); - if (unlikely(!ds)) + if (alloc_ds_buffer(cpu)) break; - per_cpu(cpu_hw_events, cpu).ds = ds; if (alloc_bts_buffer(cpu)) break; -- cgit v1.2.2 From 5553be2620ac901c21a25657bd5b59f73254e6d5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 19 Oct 2010 14:38:11 +0200 Subject: perf, x86: Fixup the precise_ip computation In case we don't have PEBS, the LBR fixup doesn't make sense. Signed-off-by: Peter Zijlstra Acked-by: Stephane Eranian LKML-Reference: <20101019134808.354429461@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index fe73c1844a9a..f369c53315a5 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -497,12 +497,13 @@ static int x86_pmu_hw_config(struct perf_event *event) int precise = 0; /* Support for constant skid */ - if (x86_pmu.pebs) + if (x86_pmu.pebs) { precise++; - /* Support for IP fixup */ - if (x86_pmu.lbr_nr) - precise++; + /* Support for IP fixup */ + if (x86_pmu.lbr_nr) + precise++; + } if (event->attr.precise_ip > precise) return -EOPNOTSUPP; -- cgit v1.2.2 From 6809b6ea73f7291f2e495d40397f1172c9caa77e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 19 Oct 2010 14:22:50 +0200 Subject: perf, x86: Less disastrous PEBS/BTS buffer allocation failure Currently PEBS/BTS buffers are allocated when we instantiate the first event, when this fails everything fails. This is a problem because esp. BTS tries to allocate a rather large buffer (64K), which can easily fail. 
This patch changes the logic such that when either buffer allocation fails, we simply don't allow events that would use these facilities, but continue functioning for all other events. This logic comes from a much larger patch proposed by Stephane. Suggested-by: Stephane Eranian Signed-off-by: Peter Zijlstra Acked-by: Stephane Eranian LKML-Reference: <20101019134808.354429461@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 5 +-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 58 +++++++++++++++++++++++-------- 2 files changed, 47 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index f369c53315a5..61e78f651060 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -238,6 +238,7 @@ struct x86_pmu { * Intel DebugStore bits */ int bts, pebs; + int bts_active, pebs_active; int pebs_record_size; void (*drain_pebs)(struct pt_regs *regs); struct event_constraint *pebs_constraints; @@ -478,7 +479,7 @@ static int x86_setup_perfctr(struct perf_event *event) if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && (hwc->sample_period == 1)) { /* BTS is not supported by this architecture. */ - if (!x86_pmu.bts) + if (!x86_pmu.bts_active) return -EOPNOTSUPP; /* BTS is currently only allowed for user-mode. */ @@ -497,7 +498,7 @@ static int x86_pmu_hw_config(struct perf_event *event) int precise = 0; /* Support for constant skid */ - if (x86_pmu.pebs) { + if (x86_pmu.pebs_active) { precise++; /* Support for IP fixup */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 3c86f4d2f02d..05c7db68277b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -193,36 +193,66 @@ static void release_ds_buffers(void) static int reserve_ds_buffers(void) { - int cpu, err = 0; + int bts_err = 0, pebs_err = 0; + int cpu; + + x86_pmu.bts_active = 0; + x86_pmu.pebs_active = 0; if (!x86_pmu.bts && !x86_pmu.pebs) return 0; + if (!x86_pmu.bts) + bts_err = 1; + + if (!x86_pmu.pebs) + pebs_err = 1; + get_online_cpus(); for_each_possible_cpu(cpu) { - if (alloc_ds_buffer(cpu)) - break; + if (alloc_ds_buffer(cpu)) { + bts_err = 1; + pebs_err = 1; + } - if (alloc_bts_buffer(cpu)) - break; + if (!bts_err && alloc_bts_buffer(cpu)) + bts_err = 1; + + if (!pebs_err && alloc_pebs_buffer(cpu)) + pebs_err = 1; - if (alloc_pebs_buffer(cpu)) + if (bts_err && pebs_err) break; + } + + if (bts_err) { + for_each_possible_cpu(cpu) + release_bts_buffer(cpu); + } - err = 0; + if (pebs_err) { + for_each_possible_cpu(cpu) + release_pebs_buffer(cpu); } - if (err) - release_ds_buffers(); - else { + if (bts_err && pebs_err) { + for_each_possible_cpu(cpu) + release_ds_buffer(cpu); + } else { + if (x86_pmu.bts && !bts_err) + x86_pmu.bts_active = 1; + + if (x86_pmu.pebs && !pebs_err) + x86_pmu.pebs_active = 1; + for_each_online_cpu(cpu) init_debug_store_on_cpu(cpu); } put_online_cpus(); - return err; + return 0; } /* @@ -287,7 +317,7 @@ static int intel_pmu_drain_bts_buffer(void) if (!event) return 0; - if (!ds) + if (!x86_pmu.bts_active) return 0; at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; @@ -557,7 +587,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) struct pebs_record_core *at, *top; int n; - if (!ds || !x86_pmu.pebs) + if (!x86_pmu.pebs_active) return; at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; @@ -599,7 +629,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs 
*iregs) u64 status = 0; int bit, n; - if (!ds || !x86_pmu.pebs) + if (!x86_pmu.pebs_active) return; at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; -- cgit v1.2.2 From f80c9e304b8e8062230b0cda2c2fdd586149c771 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 19 Oct 2010 14:50:02 +0200 Subject: perf, x86: Clean up reserve_ds_buffers() signature Now that reserve_ds_buffers() never fails, change it to return void and remove all code dealing with the error return. Signed-off-by: Peter Zijlstra Acked-by: Stephane Eranian LKML-Reference: <20101019134808.462621937@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 9 +++------ arch/x86/kernel/cpu/perf_event_intel_ds.c | 9 +++------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 61e78f651060..a333bf9189f6 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -382,7 +382,7 @@ static void release_pmc_hardware(void) {} #endif -static int reserve_ds_buffers(void); +static void reserve_ds_buffers(void); static void release_ds_buffers(void); static void hw_perf_event_destroy(struct perf_event *event) @@ -546,11 +546,8 @@ static int __x86_pmu_event_init(struct perf_event *event) if (atomic_read(&active_events) == 0) { if (!reserve_pmc_hardware()) err = -EBUSY; - else { - err = reserve_ds_buffers(); - if (err) - release_pmc_hardware(); - } + else + reserve_ds_buffers(); } if (!err) atomic_inc(&active_events); diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 05c7db68277b..8a7f81cbd617 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -191,7 +191,7 @@ static void release_ds_buffers(void) put_online_cpus(); } -static int reserve_ds_buffers(void) +static void reserve_ds_buffers(void) { int bts_err = 0, pebs_err = 0; int cpu; @@ -200,7 +200,7 @@ static int reserve_ds_buffers(void) x86_pmu.pebs_active = 0; if (!x86_pmu.bts && !x86_pmu.pebs) - return 0; + return; if (!x86_pmu.bts) bts_err = 1; @@ -251,8 +251,6 @@ static int reserve_ds_buffers(void) } put_online_cpus(); - - return 0; } /* @@ -714,9 +712,8 @@ static void intel_ds_init(void) #else /* CONFIG_CPU_SUP_INTEL */ -static int reserve_ds_buffers(void) +static void reserve_ds_buffers(void) { - return 0; } static void release_ds_buffers(void) -- cgit v1.2.2 From 96681fc3c9e7d1f89ab64e5eec40b6467c97680f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 19 Oct 2010 14:55:33 +0200 Subject: perf, x86: Use NUMA aware allocations for PEBS/BTS/DS allocations For performance reasons its best to use memory node local memory for per-cpu buffers. This logic comes from a much larger patch proposed by Stephane. 
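As a rough userspace illustration of the same placement policy (this uses libnuma rather than the kernel's kmalloc_node()/cpu_to_node(), and the buffer size and file name are only assumptions): allocate the buffer on the memory node of the CPU that will consume it.

    /* build with: gcc -D_GNU_SOURCE demo.c -lnuma */
    #include <numa.h>
    #include <sched.h>
    #include <stdio.h>

    #define BUF_SIZE (64 * 1024)                  /* same order as the BTS buffer */

    int main(void)
    {
            int cpu, node;
            void *buf;

            if (numa_available() < 0) {
                    fprintf(stderr, "no NUMA support on this system\n");
                    return 1;
            }

            cpu  = sched_getcpu();                /* the CPU this thread runs on */
            node = numa_node_of_cpu(cpu);         /* kernel analogue: cpu_to_node(cpu) */

            buf = numa_alloc_onnode(BUF_SIZE, node); /* node-local, like kmalloc_node() */
            if (!buf)
                    return 1;

            printf("cpu %d -> node %d, buffer at %p\n", cpu, node, buf);
            numa_free(buf, BUF_SIZE);
            return 0;
    }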
Suggested-by: Stephane Eranian Signed-off-by: Peter Zijlstra Acked-by: Stephane Eranian LKML-Reference: <20101019134808.514465326@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_ds.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 8a7f81cbd617..b7dcd9f2b8a0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -77,13 +77,14 @@ static void fini_debug_store_on_cpu(int cpu) static int alloc_pebs_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + int node = cpu_to_node(cpu); int max, thresh = 1; /* always use a single PEBS record */ void *buffer; if (!x86_pmu.pebs) return 0; - buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL); + buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); if (unlikely(!buffer)) return -ENOMEM; @@ -114,13 +115,14 @@ static void release_pebs_buffer(int cpu) static int alloc_bts_buffer(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + int node = cpu_to_node(cpu); int max, thresh; void *buffer; if (!x86_pmu.bts) return 0; - buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); + buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); if (unlikely(!buffer)) return -ENOMEM; @@ -150,9 +152,10 @@ static void release_bts_buffer(int cpu) static int alloc_ds_buffer(int cpu) { + int node = cpu_to_node(cpu); struct debug_store *ds; - ds = kzalloc(sizeof(*ds), GFP_KERNEL); + ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node); if (unlikely(!ds)) return -ENOMEM; -- cgit v1.2.2 From 9ffcfa6f1f63eeac15555b745c292eb9f59130f6 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 20 Oct 2010 15:25:01 +0200 Subject: perf_events: Revert: Fix transaction recovery in group_sched_in() This patch reverts commit 8e5fc1a (perf_events: Fix transaction recovery in group_sched_in()) because it had one flaw in case the group could never be scheduled. It would cause time_enabled to get negative. 
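A simplified model of why (plain C, not the kernel code; the field names mirror struct perf_event but the arithmetic is reduced to the essentials): for an event whose group is never scheduled, tstamp_stopped is never advanced, so the enabled-time delta derived from it underflows.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* ctx->time when the event was enabled */
            uint64_t tstamp_enabled = 1000;
            /* never updated, because the group never got to run */
            uint64_t tstamp_stopped = 0;

            /* roughly what the timing update computes for an inactive event */
            uint64_t time_enabled = tstamp_stopped - tstamp_enabled;

            printf("time_enabled = %llu (a negative value read as u64)\n",
                   (unsigned long long)time_enabled);
            return 0;
    }

The follow-up patch below avoids this by always moving tstamp_stopped forward, even for events whose scheduling was only simulated.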
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <4cbeeeb7.0aefd80a.6e40.0e2f@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 76 +++++++++-------------------------------------------- 1 file changed, 13 insertions(+), 63 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index f309e8014c78..39afdb07d758 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -417,8 +417,8 @@ event_filter_match(struct perf_event *event) return event->cpu == -1 || event->cpu == smp_processor_id(); } -static int -__event_sched_out(struct perf_event *event, +static void +event_sched_out(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { @@ -437,13 +437,14 @@ __event_sched_out(struct perf_event *event, } if (event->state != PERF_EVENT_STATE_ACTIVE) - return 0; + return; event->state = PERF_EVENT_STATE_INACTIVE; if (event->pending_disable) { event->pending_disable = 0; event->state = PERF_EVENT_STATE_OFF; } + event->tstamp_stopped = ctx->time; event->pmu->del(event, 0); event->oncpu = -1; @@ -452,19 +453,6 @@ __event_sched_out(struct perf_event *event, ctx->nr_active--; if (event->attr.exclusive || !cpuctx->active_oncpu) cpuctx->exclusive = 0; - return 1; -} - -static void -event_sched_out(struct perf_event *event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - int ret; - - ret = __event_sched_out(event, cpuctx, ctx); - if (ret) - event->tstamp_stopped = ctx->time; } static void @@ -664,7 +652,7 @@ retry: } static int -__event_sched_in(struct perf_event *event, +event_sched_in(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { @@ -684,6 +672,8 @@ __event_sched_in(struct perf_event *event, return -EAGAIN; } + event->tstamp_running += ctx->time - event->tstamp_stopped; + if (!is_software_event(event)) cpuctx->active_oncpu++; ctx->nr_active++; @@ -694,35 +684,6 @@ __event_sched_in(struct perf_event *event, return 0; } -static inline int -event_sched_in(struct perf_event *event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - int ret = __event_sched_in(event, cpuctx, ctx); - if (ret) - return ret; - event->tstamp_running += ctx->time - event->tstamp_stopped; - return 0; -} - -static void -group_commit_event_sched_in(struct perf_event *group_event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - struct perf_event *event; - u64 now = ctx->time; - - group_event->tstamp_running += now - group_event->tstamp_stopped; - /* - * Schedule in siblings as one group (if any): - */ - list_for_each_entry(event, &group_event->sibling_list, group_entry) { - event->tstamp_running += now - event->tstamp_stopped; - } -} - static int group_sched_in(struct perf_event *group_event, struct perf_cpu_context *cpuctx, @@ -736,13 +697,7 @@ group_sched_in(struct perf_event *group_event, pmu->start_txn(pmu); - /* - * use __event_sched_in() to delay updating tstamp_running - * until the transaction is committed. 
In case of failure - * we will keep an unmodified tstamp_running which is a - * requirement to get correct timing information - */ - if (__event_sched_in(group_event, cpuctx, ctx)) { + if (event_sched_in(group_event, cpuctx, ctx)) { pmu->cancel_txn(pmu); return -EAGAIN; } @@ -751,31 +706,26 @@ group_sched_in(struct perf_event *group_event, * Schedule in siblings as one group (if any): */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { - if (__event_sched_in(event, cpuctx, ctx)) { + if (event_sched_in(event, cpuctx, ctx)) { partial_group = event; goto group_error; } } - if (!pmu->commit_txn(pmu)) { - /* commit tstamp_running */ - group_commit_event_sched_in(group_event, cpuctx, ctx); + if (!pmu->commit_txn(pmu)) return 0; - } + group_error: /* * Groups can be scheduled in as one unit only, so undo any * partial group before returning: - * - * use __event_sched_out() to avoid updating tstamp_stopped - * because the event never actually ran */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { if (event == partial_group) break; - __event_sched_out(event, cpuctx, ctx); + event_sched_out(event, cpuctx, ctx); } - __event_sched_out(group_event, cpuctx, ctx); + event_sched_out(group_event, cpuctx, ctx); pmu->cancel_txn(pmu); -- cgit v1.2.2 From d7842da470f244d258f21c5f72cd8388b3541d04 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 20 Oct 2010 15:25:01 +0200 Subject: perf_events: Fix for transaction recovery in group_sched_in() This new version (see commit 8e5fc1a) is much simpler and ensures that in case of error in group_sched_in() during event_sched_in(), the events up to the failed event go through regular event_sched_out(). But the failed event and the remaining events in the group have their timings adjusted as if they had also gone through event_sched_in() and event_sched_out(). This ensures timing uniformity across all events in a group. This also takes care of the tstamp_stopped problem in case the group could never be scheduled. The tstamp_stopped is updated as if the event had actually run. 
With this patch, the following now reports correct time_enabled, in case the NMI watchdog is active: $ task -e unhalted_core_cycles,instructions_retired,baclears,baclears noploop 1 noploop for 1 seconds 0 unhalted_core_cycles (100.00% scaling, ena=997,552,872, run=0) 0 instructions_retired (100.00% scaling, ena=997,552,872, run=0) 0 baclears (100.00% scaling, ena=997,552,872, run=0) 0 baclears (100.00% scaling, ena=997,552,872, run=0) And the older test case also works: $ task -einstructions_retired,baclears,baclears -e unhalted_core_cycles,baclears,baclears sleep 5 1680885 instructions_retired (69.39% scaling, ena=950756, run=291006) 10735 baclears (69.39% scaling, ena=950756, run=291006) 10735 baclears (69.39% scaling, ena=950756, run=291006) 0 unhalted_core_cycles (100.00% scaling, ena=817932, run=0) 0 baclears (100.00% scaling, ena=817932, run=0) 0 baclears (100.00% scaling, ena=817932, run=0) Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <4cbeeebc.8ee7d80a.5a28.0d5f@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 39afdb07d758..517d827f4982 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -691,6 +691,8 @@ group_sched_in(struct perf_event *group_event, { struct perf_event *event, *partial_group = NULL; struct pmu *pmu = group_event->pmu; + u64 now = ctx->time; + bool simulate = false; if (group_event->state == PERF_EVENT_STATE_OFF) return 0; @@ -719,11 +721,27 @@ group_error: /* * Groups can be scheduled in as one unit only, so undo any * partial group before returning: + * The events up to the failed event are scheduled out normally, + * tstamp_stopped will be updated. + * + * The failed events and the remaining siblings need to have + * their timings updated as if they had gone thru event_sched_in() + * and event_sched_out(). This is required to get consistent timings + * across the group. This also takes care of the case where the group + * could never be scheduled by ensuring tstamp_stopped is set to mark + * the time the event was actually stopped, such that time delta + * calculation in update_event_times() is correct. */ list_for_each_entry(event, &group_event->sibling_list, group_entry) { if (event == partial_group) - break; - event_sched_out(event, cpuctx, ctx); + simulate = true; + + if (simulate) { + event->tstamp_running += now - event->tstamp_stopped; + event->tstamp_stopped = now; + } else { + event_sched_out(event, cpuctx, ctx); + } } event_sched_out(group_event, cpuctx, ctx); -- cgit v1.2.2 From 8bfb5e7d6a14b29cffddd113f4b0be7d9aafc1e8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 19 Oct 2010 20:47:56 -0200 Subject: perf tools: Remove direct slang.h include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We wrap it in libslang.h because we need to deal with older slang release where HAVE_LONG_LONG is referenced as: So we need to define it. Noticed when rebuilding the perf tools on a RHEL5 machine. 
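For context, a wrapper of this kind typically looks something like the sketch below (illustrative only; the value the real tools/perf/util/ui/libslang.h assigns, and the exact construct in the old slang headers that the commit message elides, are not reproduced here): define the symbol before including <slang.h>, and have every perf source file include the wrapper instead of slang.h directly.

    #ifndef _PERF_LIBSLANG_H_
    #define _PERF_LIBSLANG_H_

    /* assumption: old slang releases test HAVE_LONG_LONG with #if,
     * so make sure it is defined before pulling the header in */
    #ifndef HAVE_LONG_LONG
    #define HAVE_LONG_LONG 1
    #endif

    #include <slang.h>

    #endif /* _PERF_LIBSLANG_H_ */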
Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Stephane Eranian LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/ui/browser.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c index 6d0df809a2ed..8bc010edca25 100644 --- a/tools/perf/util/ui/browser.c +++ b/tools/perf/util/ui/browser.c @@ -1,4 +1,3 @@ -#include #include "libslang.h" #include #include -- cgit v1.2.2 From ffec516976f64b4e3e59ba1bb8a5020008cc678a Mon Sep 17 00:00:00 2001 From: Sonny Rao Date: Thu, 14 Oct 2010 20:51:00 -0500 Subject: perf tools: Document event modifiers Existing documentation doesn't discuss event modifiers, so add a description of what's currently possible to the documentation of perf-list. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Robert Richter LKML-Reference: <1287107460-12112-1-git-send-email-sonnyrao@linux.vnet.ibm.com> Signed-off-by: Sonny Rao Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-list.txt | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index 43e3dd284b90..399751befeed 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -15,6 +15,23 @@ DESCRIPTION This command displays the symbolic event types which can be selected in the various perf commands with the -e option. +EVENT MODIFIERS +--------------- + +Events can optionally have a modifer by appending a colon and one or +more modifiers. Modifiers allow the user to restrict when events are +counted with 'u' for user-space, 'k' for kernel, 'h' for hypervisor. + +The 'p' modifier can be used for specifying how precise the instruction +address should be. The 'p' modifier is currently only implemented for +Intel PEBS and can be specified multiple times: + 0 - SAMPLE_IP can have arbitrary skid + 1 - SAMPLE_IP must have constant skid + 2 - SAMPLE_IP requested to have 0 skid + 3 - SAMPLE_IP must have 0 skid + +The PEBS implementation now supports up to 2. + RAW HARDWARE EVENT DESCRIPTOR ----------------------------- Even when an event is not available in a symbolic form within perf right now, -- cgit v1.2.2 From 44e668c6faa9a6c477a32788e7e88f0754c54a4e Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sun, 10 Oct 2010 16:10:03 +0100 Subject: perf trace: Use $PERF_EXEC_PATH in canned report scripts Set $PERF_EXEC_PATH before starting the record and report scripts, and make them use it where necessary. 
Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1286723403.2955.205.camel@localhost> Signed-off-by: Ben Hutchings Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-trace.c | 6 +++--- tools/perf/scripts/perl/bin/failed-syscalls-report | 2 +- tools/perf/scripts/perl/bin/rw-by-file-report | 2 +- tools/perf/scripts/perl/bin/rw-by-pid-report | 2 +- tools/perf/scripts/perl/bin/rwtop-report | 2 +- tools/perf/scripts/perl/bin/wakeup-latency-report | 2 +- tools/perf/scripts/perl/bin/workqueue-stats-report | 2 +- tools/perf/scripts/python/bin/failed-syscalls-by-pid-report | 2 +- tools/perf/scripts/python/bin/netdev-times-report | 2 +- tools/perf/scripts/python/bin/sched-migration-report | 2 +- tools/perf/scripts/python/bin/sctop-report | 2 +- tools/perf/scripts/python/bin/syscall-counts-by-pid-report | 2 +- tools/perf/scripts/python/bin/syscall-counts-report | 2 +- 13 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 40a6a2992d15..3ea62199ffe1 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -46,9 +46,6 @@ static struct scripting_ops *scripting_ops; static void setup_scripting(void) { - /* make sure PERF_EXEC_PATH is set for scripts */ - perf_set_argv_exec_path(perf_exec_path()); - setup_perl_scripting(); setup_python_scripting(); @@ -593,6 +590,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used) suffix = REPORT_SUFFIX; } + /* make sure PERF_EXEC_PATH is set for scripts */ + perf_set_argv_exec_path(perf_exec_path()); + if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) { char *record_script_path, *report_script_path; int live_pipe[2]; diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-report b/tools/perf/scripts/perl/bin/failed-syscalls-report index e3a5e55d54ff..4028d92dc4ae 100644 --- a/tools/perf/scripts/perl/bin/failed-syscalls-report +++ b/tools/perf/scripts/perl/bin/failed-syscalls-report @@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then shift fi fi -perf trace $@ -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $comm +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/failed-syscalls.pl $comm diff --git a/tools/perf/scripts/perl/bin/rw-by-file-report b/tools/perf/scripts/perl/bin/rw-by-file-report index d83070b7eeb5..ba25f4d41fb0 100644 --- a/tools/perf/scripts/perl/bin/rw-by-file-report +++ b/tools/perf/scripts/perl/bin/rw-by-file-report @@ -7,7 +7,7 @@ if [ $# -lt 1 ] ; then fi comm=$1 shift -perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $comm +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-file.pl $comm diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-report b/tools/perf/scripts/perl/bin/rw-by-pid-report index 7ef46983f62f..641a3f5d085c 100644 --- a/tools/perf/scripts/perl/bin/rw-by-pid-report +++ b/tools/perf/scripts/perl/bin/rw-by-pid-report @@ -1,6 +1,6 @@ #!/bin/bash # description: system-wide r/w activity -perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rw-by-pid.pl diff --git a/tools/perf/scripts/perl/bin/rwtop-report b/tools/perf/scripts/perl/bin/rwtop-report index 93e698cd3f38..4918dba77021 100644 --- a/tools/perf/scripts/perl/bin/rwtop-report +++ b/tools/perf/scripts/perl/bin/rwtop-report @@ -17,7 +17,7 @@ if [ "$n_args" -gt 0 ] ; then interval=$1 shift fi -perf trace $@ -s ~/libexec/perf-core/scripts/perl/rwtop.pl $interval +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/rwtop.pl $interval diff --git 
a/tools/perf/scripts/perl/bin/wakeup-latency-report b/tools/perf/scripts/perl/bin/wakeup-latency-report index a0d898f9ca1d..49052ebcb632 100644 --- a/tools/perf/scripts/perl/bin/wakeup-latency-report +++ b/tools/perf/scripts/perl/bin/wakeup-latency-report @@ -1,6 +1,6 @@ #!/bin/bash # description: system-wide min/max/avg wakeup latency -perf trace $@ -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/wakeup-latency.pl diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report index 35081132ef97..df0c65f4ca93 100644 --- a/tools/perf/scripts/perl/bin/workqueue-stats-report +++ b/tools/perf/scripts/perl/bin/workqueue-stats-report @@ -1,6 +1,6 @@ #!/bin/bash # description: workqueue stats (ins/exe/create/destroy) -perf trace $@ -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report index 30293545fcc2..03587021463d 100644 --- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report +++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report @@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then shift fi fi -perf trace $@ -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $comm +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/failed-syscalls-by-pid.py $comm diff --git a/tools/perf/scripts/python/bin/netdev-times-report b/tools/perf/scripts/python/bin/netdev-times-report index c3d0a638123d..4ad361b31249 100644 --- a/tools/perf/scripts/python/bin/netdev-times-report +++ b/tools/perf/scripts/python/bin/netdev-times-report @@ -2,4 +2,4 @@ # description: display a process of packet and processing time # args: [tx] [rx] [dev=] [debug] -perf trace -s ~/libexec/perf-core/scripts/python/netdev-times.py $@ +perf trace -s "$PERF_EXEC_PATH"/scripts/python/netdev-times.py $@ diff --git a/tools/perf/scripts/python/bin/sched-migration-report b/tools/perf/scripts/python/bin/sched-migration-report index 61d05f72e443..df1791f07c24 100644 --- a/tools/perf/scripts/python/bin/sched-migration-report +++ b/tools/perf/scripts/python/bin/sched-migration-report @@ -1,3 +1,3 @@ #!/bin/bash # description: sched migration overview -perf trace $@ -s ~/libexec/perf-core/scripts/python/sched-migration.py +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py diff --git a/tools/perf/scripts/python/bin/sctop-report b/tools/perf/scripts/python/bin/sctop-report index b01c842ae7b4..36b409c05e50 100644 --- a/tools/perf/scripts/python/bin/sctop-report +++ b/tools/perf/scripts/python/bin/sctop-report @@ -21,4 +21,4 @@ elif [ "$n_args" -gt 0 ] ; then interval=$1 shift fi -perf trace $@ -s ~/libexec/perf-core/scripts/python/sctop.py $comm $interval +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/sctop.py $comm $interval diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report index 9e9d8ddd72ce..4eb88c9fc83c 100644 --- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report +++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report @@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then shift fi fi -perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $comm +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm diff --git a/tools/perf/scripts/python/bin/syscall-counts-report 
b/tools/perf/scripts/python/bin/syscall-counts-report index dc076b618796..cb2f9c5cf17e 100644 --- a/tools/perf/scripts/python/bin/syscall-counts-report +++ b/tools/perf/scripts/python/bin/syscall-counts-report @@ -7,4 +7,4 @@ if [ $# -gt 0 ] ; then shift fi fi -perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts.py $comm +perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm -- cgit v1.2.2 From d1e95bb5309f98368dd4fbef6ff3a8f573b24506 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sun, 10 Oct 2010 16:11:02 +0100 Subject: perf trace: Fix detection of script extension The extension starts with the last dot in the name, not the first. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1286723462.2955.206.camel@localhost> Signed-off-by: Ben Hutchings Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 3ea62199ffe1..deda1a93131e 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -282,7 +282,7 @@ static int parse_scriptname(const struct option *opt __used, script++; } else { script = str; - ext = strchr(script, '.'); + ext = strrchr(script, '.'); if (!ext) { fprintf(stderr, "invalid script extension"); return -1; -- cgit v1.2.2 From 66a301c380d4e463424db572b348de4913ec191a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 23 Oct 2010 15:12:29 -0200 Subject: perf probe: Fix format specified for Dwarf_Off parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixing the following error on 32-bit arches: util/probe-finder.c: In function ‘line_range_search_cb’: util/probe-finder.c:1734: error: format ‘%lx’ expects type ‘long unsigned int’, but argument 3 has type ‘Dwarf_Off’ Reported-by: Ingo Molnar Cc: Frederic Weisbecker Cc: Masami Hiramatsu LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/probe-finder.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index c20bd52833aa..3991d73d1cff 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1731,7 +1731,8 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) struct line_finder *lf = param->data; struct line_range *lr = lf->lr; - pr_debug("find (%lx) %s\n", dwarf_dieoffset(sp_die), + pr_debug("find (%llx) %s\n", + (unsigned long long)dwarf_dieoffset(sp_die), dwarf_diename(sp_die)); if (dwarf_tag(sp_die) == DW_TAG_subprogram && die_compare_name(sp_die, lr->function)) { -- cgit v1.2.2 From aa7b250c252cc8e6b1daf0e1eada5eba42a1a68d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 21 Oct 2010 22:17:19 -0700 Subject: tracing: Fix 'faild' -> 'failed' typo Signed-off-by: Joe Perches Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Jiri Kosina LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 544301d29dee..b8d2852baa4a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -648,7 +648,7 @@ static int register_trace_probe(struct trace_probe *tp) } ret = register_probe_event(tp); if (ret) { - pr_warning("Faild to register probe event(%d)\n", ret); + pr_warning("Failed to register probe event(%d)\n", ret); goto end; } -- cgit v1.2.2 From 
2c78ffeca98fcd5a1dfd4a322438944506ed5e64 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 25 Oct 2010 08:41:09 +0200 Subject: x86/oprofile: Fix uninitialized variable use in debug printk Stephen Rothwell reported this build warning: arch/x86/oprofile/op_model_amd.c: In function 'ibs_eilvt_valid': arch/x86/oprofile/op_model_amd.c:289: warning: 'offset' may be used uninitialized in this function And correctly observed that indeed the variable is used uninitialized in this function. The result of this bug can be a debug printk with a bogus value. Also fix a few more small details that made this function hard to read and which probably contributed to the bug being introduced to begin with: - Use more symmetric error conditions - Remove the !0 obfuscation - Add newlines to the printk output - Remove bogus linebreaks in printk strings and elsewhere Reported-by: Stephen Rothwell Cc: Robert Richter Cc: Linus Torvalds LKML-Reference: <20101025115736.41d51abe.sfr@canb.auug.org.au> Signed-off-by: Ingo Molnar --- arch/x86/oprofile/op_model_amd.c | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 42fb46f83883..68759e716f0f 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -281,29 +281,25 @@ static inline int eilvt_is_available(int offset) static inline int ibs_eilvt_valid(void) { - u64 val; int offset; + u64 val; rdmsrl(MSR_AMD64_IBSCTL, val); + offset = val & IBSCTL_LVT_OFFSET_MASK; + if (!(val & IBSCTL_LVT_OFFSET_VALID)) { - pr_err(FW_BUG "cpu %d, invalid IBS " - "interrupt offset %d (MSR%08X=0x%016llx)", - smp_processor_id(), offset, - MSR_AMD64_IBSCTL, val); + pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n", + smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); return 0; } - offset = val & IBSCTL_LVT_OFFSET_MASK; - - if (eilvt_is_available(offset)) - return !0; - - pr_err(FW_BUG "cpu %d, IBS interrupt offset %d " - "not available (MSR%08X=0x%016llx)", - smp_processor_id(), offset, - MSR_AMD64_IBSCTL, val); + if (!eilvt_is_available(offset)) { + pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n", + smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); + return 0; + } - return 0; + return 1; } static inline int get_ibs_offset(void) -- cgit v1.2.2 From 43948f50276eca010a22726860dfe9a4e8130136 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 25 Oct 2010 22:18:01 +0900 Subject: kprobes: Remove redundant text_mutex lock in optimize Remove text_mutex locking in optimize_all_kprobes, because this function doesn't modify text. It simply queues probes on optimization list for kprobe_optimizer worker thread. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. 
Miller Cc: Namhyung Kim Cc: Jason Baron Cc: Peter Zijlstra LKML-Reference: <20101025131801.19160.70939.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ec4210c6501e..7c44133f51ec 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -74,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; /* NOTE: change this value only with kprobe_mutex held */ static bool kprobes_all_disarmed; -static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ +/* This protects kprobe_table and optimizing_list */ +static DEFINE_MUTEX(kprobe_mutex); static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; static struct { spinlock_t lock ____cacheline_aligned_in_smp; @@ -595,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) } #ifdef CONFIG_SYSCTL +/* This should be called with kprobe_mutex locked */ static void __kprobes optimize_all_kprobes(void) { struct hlist_head *head; @@ -607,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void) return; kprobes_allow_optimization = true; - mutex_lock(&text_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry_rcu(p, node, head, hlist) if (!kprobe_disabled(p)) optimize_kprobe(p); } - mutex_unlock(&text_mutex); printk(KERN_INFO "Kprobes globally optimized\n"); } +/* This should be called with kprobe_mutex locked */ static void __kprobes unoptimize_all_kprobes(void) { struct hlist_head *head; -- cgit v1.2.2 From 6cc7361440e499abb3a30cdbcfedad03e43c92ae Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 25 Oct 2010 15:15:10 -0200 Subject: perf python scripting: Improve the failed-syscalls-by-pid script . Print message at script start telling how to get te summary . Print the syscall name using the audit-lib-python package, if installed . Print the errno string . Accept both pid (if numeric) or COMM name Now it looks like this: [root@emilia ~]# perf trace failed-syscalls-by-pid Press control+C to stop and show the summary ^C syscall errors: comm [pid] count ------------------------------ ---------- automount [1670] syscall: futex err = ETIMEDOUT 39 irqbalance [1462] syscall: openat err = ENOENT 4 perf [7888] syscall: lseek err = ESPIPE 1 syscall: open err = ENOENT 24 perf [7889] syscall: ioctl err = EINVAL 1 syscall: readlink err = EINVAL 2 syscall: open err = ENOENT 389 syscall: stat err = ENOENT 141 syscall: lseek err = ESPIPE 3 [root@emilia ~]# [root@emilia ~]# perf trace failed-syscalls-by-pid 1670 Press control+C to stop and show the summary ^C syscall errors: comm [pid] count ------------------------------ ---------- automount [1670] syscall: futex err = ETIMEDOUT 2 [root@emilia ~]# [root@emilia ~]# [root@emilia ~]# [root@emilia ~]# perf trace failed-syscalls-by-pid automount Press control+C to stop and show the summary ^C syscall errors for automount: comm [pid] count ------------------------------ ---------- automount [1669] syscall: futex err = ETIMEDOUT 1 automount [1670] syscall: futex err = ETIMEDOUT 5 [root@emilia ~]# Cc: David S. 
Miller Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- .../python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 37 ++++++++++++++++++++++ .../perf/scripts/python/failed-syscalls-by-pid.py | 21 +++++++----- 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py index 9689bc0acd9f..9d15f484308c 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py @@ -6,6 +6,8 @@ # Public License ("GPL") version 2 as published by the Free Software # Foundation. +import errno, os + NSECS_PER_SEC = 1000000000 def avg(total, n): @@ -26,3 +28,38 @@ def nsecs_str(nsecs): def clear_term(): print("\x1b[H\x1b[2J") + +audit_package_warned = False + +try: + import audit + machine_to_id = { + 'x86_64': audit.MACH_86_64, + 'alpha' : audit.MACH_ALPHA, + 'armeb' : audit.MACH_ARMEB, + 'ia64' : audit.MACH_IA64, + 'ppc' : audit.MACH_PPC, + 'ppc64' : audit.MACH_PPC64, + 's390' : audit.MACH_S390, + 's390x' : audit.MACH_S390X, + 'i386' : audit.MACH_X86, + 'i586' : audit.MACH_X86, + 'i686' : audit.MACH_X86, + } + machine_id = machine_to_id[os.uname()[4]] +except: + if not audit_package_warned: + audit_package_warned = True + print "Install the audit-libs-python package to get syscall names" + +def syscall_name(id): + try: + return audit.audit_syscall_to_name(id, machine_id) + except: + return str(id) + +def strerror(nr): + try: + return errno.errorcode[abs(nr)] + except: + return "Unknown %d errno" % nr diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py index 0ca02278fe69..acd7848717b3 100644 --- a/tools/perf/scripts/python/failed-syscalls-by-pid.py +++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py @@ -13,21 +13,26 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \ from perf_trace_context import * from Core import * +from Util import * -usage = "perf trace -s syscall-counts-by-pid.py [comm]\n"; +usage = "perf trace -s syscall-counts-by-pid.py [comm|pid]\n"; for_comm = None +for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: - for_comm = sys.argv[1] + try: + for_pid = int(sys.argv[1]) + except: + for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): - pass + print "Press control+C to stop and show the summary" def trace_end(): print_error_totals() @@ -35,9 +40,9 @@ def trace_end(): def raw_syscalls__sys_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, ret): - if for_comm is not None: - if common_comm != for_comm: - return + if (for_comm and common_comm != for_comm) or \ + (for_pid and common_pid != for_pid ): + return if ret < 0: try: @@ -62,7 +67,7 @@ def print_error_totals(): print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id in id_keys: - print " syscall: %-16d\n" % (id), + print " syscall: %-16s\n" % syscall_name(id), ret_keys = syscalls[comm][pid][id].keys() for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True): - print " err = %-20d %10d\n" % (ret, val), + print " err = %-20s %10d\n" % (strerror(ret), val), -- cgit v1.2.2 From 6545aaa561b5678c497e94dea22cb2d1af1d6859 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo 
Date: Mon, 25 Oct 2010 17:11:25 -0200 Subject: perf python scripting: Improve the syscalls-counts script . Print message at script start telling how to get te summary . Print the syscall name Now it looks like this: [root@emilia ~]# perf trace syscall-counts Press control+C to stop and show the summary ^C syscall events: event count ---------------------------------------- ----------- read 102752 open 1293 close 878 write 319 stat 185 fstat 149 getdents 116 mmap 98 brk 80 rt_sigaction 66 munmap 42 mprotect 24 lseek 21 lstat 7 rt_sigprocmask 4 futex 3 statfs 3 ioctl 3 readlink 2 select 2 getegid 1 geteuid 1 getgid 1 getuid 1 getrlimit 1 fcntl 1 uname 1 [root@emilia ~]# Cc: David S. Miller Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/syscall-counts.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py index f977e85ff049..ea183dc82d29 100644 --- a/tools/perf/scripts/python/syscall-counts.py +++ b/tools/perf/scripts/python/syscall-counts.py @@ -13,6 +13,7 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \ from perf_trace_context import * from Core import * +from Util import syscall_name usage = "perf trace -s syscall-counts.py [comm]\n"; @@ -27,7 +28,7 @@ if len(sys.argv) > 1: syscalls = autodict() def trace_begin(): - pass + print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() @@ -55,4 +56,4 @@ def print_syscall_totals(): for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): - print "%-40d %10d\n" % (id, val), + print "%-40s %10d\n" % (syscall_name(id), val), -- cgit v1.2.2 From 2e7d1e3fb8043380a2fc5d759eb357bf05acf935 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 25 Oct 2010 18:39:20 -0200 Subject: perf python scripting: print the syscall name on sctop [root@emilia tmp]# perf trace sctop 1 syscall events: event count ---------------------------------------- ---------- read 215400 futex 4029 write 376 brk 33 rt_sigprocmask 24 select 17 lseek 2 fsync 1 ^C[root@emilia tmp]# Cc: David S. Miller Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/sctop.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py index 6cafad40c296..547cbe99de68 100644 --- a/tools/perf/scripts/python/sctop.py +++ b/tools/perf/scripts/python/sctop.py @@ -8,10 +8,7 @@ # will be refreshed every [interval] seconds. The default interval is # 3 seconds. 
-import thread -import time -import os -import sys +import os, sys, thread, time sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') @@ -71,7 +68,7 @@ def print_syscall_totals(interval): for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): try: - print "%-40d %10d\n" % (id, val), + print "%-40s %10d\n" % (syscall_name(id), val), except TypeError: pass syscalls.clear() -- cgit v1.2.2 From a64fa198ba1cd232871710c37476e006ed5516ed Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 25 Oct 2010 18:44:09 -0200 Subject: perf python scripting: Improve the syscalls-by-pid script . Print message at script start telling how to get te summary . Print the syscall names . Accept both pid (if numeric) or COMM name Now it looks like this: [root@emilia tmp]# perf trace syscall-counts-by-pid Press control+C to stop and show the summary ^C syscall events by comm/pid: comm [pid]/syscalls count ---------------------------------------- ---------- automount [1670] futex 2 sshd [2322] rt_sigprocmask 4 select 2 write 1 read 1 perf [15178] read 2506 open 794 close 769 write 240 getdents 112 lseek 16 stat 9 perf_counter_open 5 fcntl 5 mmap 5 statfs 2 perf [15179] read 56701 open 499 stat 176 fstat 149 close 109 mmap 98 brk 75 rt_sigaction 66 munmap 42 mprotect 24 lstat 7 lseek 5 getdents 4 ioctl 3 readlink 2 futex 1 statfs 1 getegid 1 geteuid 1 getgid 1 getuid 1 getrlimit 1 fcntl 1 uname 1 write 1 [root@emilia tmp]# fg -bash: fg: current: no such job [root@emilia tmp]# perf trace syscall-counts-by-pid 2322 Press control+C to stop and show the summary ^C syscall events by comm/pid: comm [pid]/syscalls count ---------------------------------------- ---------- sshd [2322] rt_sigprocmask 4 select 2 write 1 read 1 [root@emilia tmp]# perf trace syscall-counts-by-pid sshd Press control+C to stop and show the summary ^C syscall events for sshd: comm [pid]/syscalls count ---------------------------------------- ---------- sshd [2322] rt_sigprocmask 4 select 2 write 1 read 1 [root@emilia tmp]# Cc: David S. Miller Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/syscall-counts-by-pid.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py index af722d6a4b3f..d1ee3ec10cf2 100644 --- a/tools/perf/scripts/python/syscall-counts-by-pid.py +++ b/tools/perf/scripts/python/syscall-counts-by-pid.py @@ -5,29 +5,33 @@ # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. 
-import os -import sys +import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * +from Util import syscall_name usage = "perf trace -s syscall-counts-by-pid.py [comm]\n"; for_comm = None +for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: - for_comm = sys.argv[1] + try: + for_pid = int(sys.argv[1]) + except: + for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): - pass + print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() @@ -35,9 +39,10 @@ def trace_end(): def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): - if for_comm is not None: - if common_comm != for_comm: - return + + if (for_comm and common_comm != for_comm) or \ + (for_pid and common_pid != for_pid ): + return try: syscalls[common_comm][common_pid][id] += 1 except TypeError: @@ -61,4 +66,4 @@ def print_syscall_totals(): id_keys = syscalls[comm][pid].keys() for id, val in sorted(syscalls[comm][pid].iteritems(), \ key = lambda(k, v): (v, k), reverse = True): - print " %-38d %10d\n" % (id, val), + print " %-38s %10d\n" % (syscall_name(id), val), -- cgit v1.2.2 From 7f6c1bd50d73d12f8b4ea09edb4515997f6527f5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 25 Oct 2010 22:12:01 -0200 Subject: perf python scripting: Support fedora 11 (audit 1.7.17) Where we don't have the audit.MACH_ARMEB constant. Cc: David S. Miller Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py index 9d15f484308c..99ff1b7a0d2c 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py @@ -36,7 +36,6 @@ try: machine_to_id = { 'x86_64': audit.MACH_86_64, 'alpha' : audit.MACH_ALPHA, - 'armeb' : audit.MACH_ARMEB, 'ia64' : audit.MACH_IA64, 'ppc' : audit.MACH_PPC, 'ppc64' : audit.MACH_PPC64, @@ -46,6 +45,10 @@ try: 'i586' : audit.MACH_X86, 'i686' : audit.MACH_X86, } + try: + machine_to_id['armeb'] = audit.MACH_ARMEB + except: + pass machine_id = machine_to_id[os.uname()[4]] except: if not audit_package_warned: -- cgit v1.2.2 From 0ab7368f8dad561b6164b3e942cab00f9565bd37 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 28 Aug 2010 16:46:19 +0100 Subject: perf record: Remove newline character from perror() argument If we include a newline character in the string argument to perror() then the output will be split across two lines like so, Unable to read perf file descriptor : No space left on device Deleting the newline character prints a much more readable error, Unable to read perf file descriptor: No space left on device Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Frederic Weisbecker Cc: Tom Zanussi LKML-Reference: <89e77b54659bc3798b23a5596c2debb7f6f4cf27.1283010281.git.matt@console-pimps.org> Signed-off-by: Matt Fleming Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-record.c 
b/tools/perf/builtin-record.c index ff77b805de71..b530beefd9ad 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -353,7 +353,7 @@ try_again: } if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) { - perror("Unable to read perf file descriptor\n"); + perror("Unable to read perf file descriptor"); exit(-1); } @@ -626,7 +626,7 @@ static int __cmd_record(int argc, const char **argv) nr_cpus = read_cpu_map(cpu_list); if (nr_cpus < 1) { - perror("failed to collect number of CPUs\n"); + perror("failed to collect number of CPUs"); return -1; } -- cgit v1.2.2 From b44308f54062a4c1dee2602946f481f03739b76d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 Oct 2010 15:20:09 -0200 Subject: perf scripting: Shut up 'perf record' final status We want just the script output, not internal details about the record phase. Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-record.txt | 4 ++++ tools/perf/builtin-record.c | 4 ++++ tools/perf/builtin-trace.c | 9 +++++---- tools/perf/util/debug.c | 4 ++-- tools/perf/util/debug.h | 2 +- 5 files changed, 16 insertions(+), 7 deletions(-) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 3ee27dccfde9..a91f9f9e6e5c 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -83,6 +83,10 @@ OPTIONS --call-graph:: Do call-graph (stack chain/backtrace) recording. +-q:: +--quiet:: + Don't print any message, useful for scripting. + -v:: --verbose:: Be more verbose (show counter open errors, etc). diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b530beefd9ad..4e75583ddd6d 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -761,6 +761,9 @@ static int __cmd_record(int argc, const char **argv) } } + if (quiet) + return 0; + fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking); /* @@ -820,6 +823,7 @@ static const struct option options[] = { "do call-graph (stack chain/backtrace) recording"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), + OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), OPT_BOOLEAN('s', "stat", &inherit_stat, "per thread counts"), OPT_BOOLEAN('d', "data", &sample_address, diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index deda1a93131e..2f8df45c4dcb 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -625,12 +625,13 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used) dup2(live_pipe[1], 1); close(live_pipe[0]); - __argv = malloc(5 * sizeof(const char *)); + __argv = malloc(6 * sizeof(const char *)); __argv[0] = "/bin/sh"; __argv[1] = record_script_path; - __argv[2] = "-o"; - __argv[3] = "-"; - __argv[4] = NULL; + __argv[2] = "-q"; + __argv[3] = "-o"; + __argv[4] = "-"; + __argv[5] = NULL; execvp("/bin/sh", (char **)__argv); exit(-1); diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index f9c7e3ad1aa7..c8d81b00089d 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -12,8 +12,8 @@ #include "debug.h" #include "util.h" -int verbose = 0; -bool dump_trace = false; +int verbose; +bool dump_trace = false, quiet = false; int eprintf(int level, const char *fmt, ...) 
{ diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 7a17ee061bcb..7b514082bbaf 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -6,7 +6,7 @@ #include "event.h" extern int verbose; -extern bool dump_trace; +extern bool quiet, dump_trace; int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); void trace_event(event_t *event); -- cgit v1.2.2 From 22d0594b31046793dfb58b7ce866d7cb0a9862e5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 Oct 2010 15:21:15 -0200 Subject: perf python scripting: Fixup cut'n'paste error in sctop script Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/sctop.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py index 547cbe99de68..7a6ec2c7d8ab 100644 --- a/tools/perf/scripts/python/sctop.py +++ b/tools/perf/scripts/python/sctop.py @@ -17,7 +17,7 @@ from perf_trace_context import * from Core import * from Util import * -usage = "perf trace -s syscall-counts.py [comm] [interval]\n"; +usage = "perf trace -s sctop.py [comm] [interval]\n"; for_comm = None default_interval = 3 -- cgit v1.2.2 From 00204c3396469f407bac56e1475ea16e4a279b13 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 26 Oct 2010 17:07:33 -0200 Subject: perf python scripting: Add futex-contention script The equivalent to this SystemTAP script: http://sourceware.org/systemtap/wiki/WSFutexContention [root@doppio ~]# perf trace futex-contention Press control+C to stop and show the summary ^Cnpviewer.bin[15242] lock 7f0a8be19104 contended 29 times, 72806 avg ns npviewer.bin[15242] lock 7f0a8be19130 contended 2 times, 1355 avg ns synergyc[17245] lock f127f4 contended 1 times, 1830569 avg ns firefox[15116] lock 7f2b7238af0c contended 168 times, 1230390 avg ns synergyc[17245] lock f2fc20 contended 1 times, 33149 avg ns npviewer.bin[15255] lock 7f0a8be19074 contended 155 times, 73047 avg ns npviewer.bin[15255] lock 7f0a8be190a0 contended 127 times, 7088 avg ns synergyc[17247] lock f12854 contended 1 times, 46741 avg ns synergyc[17245] lock f12610 contended 1 times, 7358 avg ns [root@doppio ~]# Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- .../python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 18 ++++++++ .../scripts/python/bin/futex-contention-record | 2 + .../scripts/python/bin/futex-contention-report | 4 ++ tools/perf/scripts/python/futex-contention.py | 50 ++++++++++++++++++++++ 4 files changed, 74 insertions(+) create mode 100644 tools/perf/scripts/python/bin/futex-contention-record create mode 100644 tools/perf/scripts/python/bin/futex-contention-report create mode 100644 tools/perf/scripts/python/futex-contention.py diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py index 99ff1b7a0d2c..13cc02b5893a 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py +++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py @@ -8,6 +8,12 @@ import errno, os +FUTEX_WAIT = 0 +FUTEX_WAKE = 1 +FUTEX_PRIVATE_FLAG = 128 +FUTEX_CLOCK_REALTIME = 256 +FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) + 
 NSECS_PER_SEC = 1000000000

 def avg(total, n):
@@ -26,6 +32,18 @@ def nsecs_str(nsecs):
     str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
     return str

+def add_stats(dict, key, value):
+    if not dict.has_key(key):
+        dict[key] = (value, value, value, 1)
+    else:
+        min, max, avg, count = dict[key]
+        if value < min:
+            min = value
+        if value > max:
+            max = value
+        avg = (avg + value) / 2
+        dict[key] = (min, max, avg, count + 1)
+
 def clear_term():
     print("\x1b[H\x1b[2J")

diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
new file mode 100644
index 000000000000..5ecbb433caf4
--- /dev/null
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-report b/tools/perf/scripts/python/bin/futex-contention-report
new file mode 100644
index 000000000000..c8268138fb7e
--- /dev/null
+++ b/tools/perf/scripts/python/bin/futex-contention-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+# description: futex contention measurement
+
+perf trace $@ -s "$PERF_EXEC_PATH"/scripts/python/futex-contention.py
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
new file mode 100644
index 000000000000..11e70a388d41
--- /dev/null
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -0,0 +1,50 @@
+# futex contention
+# (c) 2010, Arnaldo Carvalho de Melo
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Translation of:
+#
+# http://sourceware.org/systemtap/wiki/WSFutexContention
+#
+# to perf python scripting.
+#
+# Measures futex contention
+
+import os, sys
+sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+from Util import *
+
+process_names = {}
+thread_thislock = {}
+thread_blocktime = {}
+
+lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
+process_names = {} # long-lived pid-to-execname mapping
+
+def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
+                              nr, uaddr, op, val, utime, uaddr2, val3):
+    cmd = op & FUTEX_CMD_MASK
+    if cmd != FUTEX_WAIT:
+        return # we don't care about originators of WAKE events
+
+    process_names[tid] = comm
+    thread_thislock[tid] = uaddr
+    thread_blocktime[tid] = nsecs(s, ns)
+
+def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
+                             nr, ret):
+    if thread_blocktime.has_key(tid):
+        elapsed = nsecs(s, ns) - thread_blocktime[tid]
+        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
+        del thread_blocktime[tid]
+        del thread_thislock[tid]
+
+def trace_begin():
+    print "Press control+C to stop and show the summary"
+
+def trace_end():
+    for (tid, lock) in lock_waits:
+        min, max, avg, count = lock_waits[tid, lock]
+        print "%s[%d] lock %x contended %d times, %d avg ns" % \
+              (process_names[tid], tid, lock, count, avg)
+
-- cgit v1.2.2
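
As a companion to the futex-contention.py script above, here is a minimal standalone sketch of the same accounting: filter on FUTEX_WAIT via FUTEX_CMD_MASK, feed each wait's elapsed time to an add_stats()-style helper, and print a per-lock summary. This is not the perf script itself: the event tuples are made-up sample data, the (comm, tid, uaddr) key and names such as t_enter/t_exit are choices made here for readability, and the average intentionally mirrors the helper's (avg + value) / 2 running blend rather than a strict arithmetic mean.

# futex-contention-sketch.py (hypothetical name): offline replay of the
# accounting done by futex-contention.py, driven by made-up sample events.
FUTEX_WAIT           = 0
FUTEX_PRIVATE_FLAG   = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK       = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

def add_stats(stats, key, value):
    # Same (min, max, avg, count) bookkeeping as the Util.py helper; the avg
    # is a running blend of old average and new value, not a strict mean.
    if key not in stats:
        stats[key] = (value, value, value, 1)
    else:
        lo, hi, avg, count = stats[key]
        stats[key] = (min(lo, value), max(hi, value), (avg + value) / 2, count + 1)

# (comm, tid, lock address, futex op, enter ns, exit ns) -- illustrative values only
events = [
    ("firefox",  15116, 0x7f2b7238af0c, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,  1000,  91000),
    ("firefox",  15116, 0x7f2b7238af0c, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 95000, 180000),
    ("synergyc", 17245, 0x00f127f4,     FUTEX_WAIT,                       2000,  52000),
]

lock_waits = {}
for comm, tid, uaddr, op, t_enter, t_exit in events:
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        continue  # as in the script: ignore WAKE and other futex commands
    add_stats(lock_waits, (comm, tid, uaddr), t_exit - t_enter)

for (comm, tid, uaddr), (lo, hi, avg, count) in lock_waits.items():
    print("%s[%d] lock %x contended %d times, %d avg ns" % (comm, tid, uaddr, count, avg))

Run on its own, the sketch prints one summary line per contended lock in the same format as the script's trace_end(), which makes it easy to check the accounting without a live perf session.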
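
The comm-or-pid handling added to syscall-counts-by-pid.py earlier in this series boils down to two small pieces: try int() on the single optional argument, and apply one combined filter in the enter hook. A self-contained sketch of that idea follows; parse_filter(), wanted(), and the sample event tuples are hypothetical and not part of the perf scripts.

# comm-or-pid filter sketch (hypothetical helper names), on made-up events.
import sys

def parse_filter(argv):
    # A single optional argument is a pid if it parses as an integer,
    # otherwise it is treated as a comm.
    for_comm = for_pid = None
    if len(argv) > 1:
        try:
            for_pid = int(argv[1])
        except ValueError:
            for_comm = argv[1]
    return for_comm, for_pid

def wanted(comm, pid, for_comm, for_pid):
    # Mirrors the filter in raw_syscalls__sys_enter(): drop events matching
    # neither the requested comm nor the requested pid.
    return not ((for_comm and comm != for_comm) or (for_pid and pid != for_pid))

if __name__ == '__main__':
    for_comm, for_pid = parse_filter(sys.argv)
    samples = [("bash", 1234, 3), ("sshd", 999, 0)]  # (comm, pid, syscall id), made up
    for comm, pid, nr in samples:
        if wanted(comm, pid, for_comm, for_pid):
            print("%s[%d] entered syscall %d" % (comm, pid, nr))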