author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-06-22 10:35:24 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-06-25 15:39:06 -0400
commit    194002b274e9169a04beb1b23dcc132159bb566c (patch)
tree      6977e48df4a1429dae999d4afe23e5e28febc99e
parent    7f8b4e4e0988dadfd22330fd147ad2453e19f510 (diff)
perf_counter, x86: Add mmap counter read support
Update the mmap control page with the needed information to use the
userspace RDPMC instruction for self monitoring.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 arch/powerpc/include/asm/perf_counter.h |  2 ++
 arch/x86/include/asm/perf_counter.h     |  3 +++
 arch/x86/kernel/cpu/perf_counter.c      |  6 ++++++
 kernel/perf_counter.c                   | 10 +++++++++-
 4 files changed, 20 insertions(+), 1 deletion(-)
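For illustration, here is a minimal sketch of the userspace read path this
patch enables; it is not part of the patch. It assumes user-mode RDPMC is
permitted (CR4.PCE set), uses only the lock/index/offset fields of
struct perf_counter_mmap_page as laid out in this series, and ignores
counter-width sign extension; rdpmc() and read_self_counter() are
hypothetical helper names, not kernel ABI:

#include <stdint.h>

#define barrier() asm volatile("" ::: "memory")

/* The control-page fields used below (a subset of
 * struct perf_counter_mmap_page as of this series). */
struct perf_counter_mmap_page {
	uint32_t version;
	uint32_t compat_version;
	uint32_t lock;		/* seqlock count; odd = update in flight */
	uint32_t index;		/* 1-based hw counter id; 0 = not active */
	int64_t  offset;	/* add to the raw hw counter value */
};

static inline uint64_t rdpmc(uint32_t ecx)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (ecx));
	return ((uint64_t)hi << 32) | lo;
}

/* Hypothetical helper: read one's own counter without a syscall. */
static uint64_t read_self_counter(volatile struct perf_counter_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;
	int64_t offset;

	do {
		do {			/* wait out an in-flight update */
			seq = pc->lock;
		} while (seq & 1);
		barrier();

		idx = pc->index;
		offset = pc->offset;
		count = idx ? rdpmc(idx - 1) : 0;

		barrier();
	} while (pc->lock != seq);	/* kernel bumped the seqlock: retry */

	return count + offset;
}

Note that index is published 1-based, with 0 reserved to mean "not
currently on hardware"; this is what PERF_COUNTER_INDEX_OFFSET encodes.
x86 numbers its PMCs from 0, so an offset of 0 publishes hw.idx + 1,
while powerpc's hw.idx already counts from 1 and the offset of 1 cancels
the increment; userspace therefore subtracts 1 again before issuing
RDPMC (or the architecture's equivalent).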
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 8ccd4e155768..0ea0639fcf75 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
+#define PERF_COUNTER_INDEX_OFFSET	1
+
 /*
  * Only override the default definitions in include/linux/perf_counter.h
  * if we have hardware PMU support.
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 5fb33e160ea0..fa64e401589d 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -87,6 +87,9 @@ union cpuid10_edx {
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(void);
+
+#define PERF_COUNTER_INDEX_OFFSET	0
+
 #else
 static inline void init_hw_perf_counters(void)		{ }
 static inline void perf_counters_lapic_init(void)	{ }
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index a310d19faca3..b83474b6021a 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 		err = checking_wrmsrl(hwc->counter_base + idx,
 			     (u64)(-left) & x86_pmu.counter_mask);
 
+	perf_counter_update_userpage(counter);
+
 	return ret;
 }
 
@@ -1034,6 +1036,8 @@ try_generic:
 	x86_perf_counter_set_period(counter, hwc, idx);
 	x86_pmu.enable(hwc, idx);
 
+	perf_counter_update_userpage(counter);
+
 	return 0;
 }
 
@@ -1126,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	x86_perf_counter_update(counter, hwc, idx);
 	cpuc->counters[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);
+
+	perf_counter_update_userpage(counter);
 }
 
 /*
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 23614adab475..02994a719e27 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1753,6 +1753,14 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+static int perf_counter_index(struct perf_counter *counter)
+{
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+		return 0;
+
+	return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
+}
+
 /*
  * Callers need to ensure there can be no nesting of this function, otherwise
  * the seqlock logic goes bad. We can not serialize this because the arch
@@ -1777,7 +1785,7 @@ void perf_counter_update_userpage(struct perf_counter *counter)
 	preempt_disable();
 	++userpg->lock;
 	barrier();
-	userpg->index = counter->hw.idx;
+	userpg->index = perf_counter_index(counter);
 	userpg->offset = atomic64_read(&counter->count);
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		userpg->offset -= atomic64_read(&counter->hw.prev_count);
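Equally hedged, a sketch of the setup that would precede the read loop
above: open a self-monitoring counter and map its control page. The names
follow the 2009 perf_counter ABI as I understand it (struct
perf_counter_attr, __NR_perf_counter_open, PERF_COUNT_HW_CPU_CYCLES, all
since renamed to perf_event); attr fields beyond type/config and all
error handling are glossed over:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>	/* attr + mmap page layout of this era */

int main(void)
{
	struct perf_counter_attr attr;
	volatile struct perf_counter_mmap_page *pc;
	uint64_t cycles;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid 0 = calling task, cpu -1 = any, no group leader, no flags */
	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);

	/* the first page of the mapping is the control page the kernel
	 * updates via perf_counter_update_userpage() */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);

	/* ... do work, then sample without entering the kernel ... */
	cycles = read_self_counter(pc);	/* from the sketch above */
	(void)cycles;
	return 0;
}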