author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-02 14:16:01 -0500
committer Ingo Molnar <mingo@elte.hu>                2010-03-10 07:22:27 -0500
commit    07088edb88164c2a2406cd2d9a7be19d8515214b (patch)
tree      b46d8db19f4fedd149219a0122be9fd4cc669e4e /arch/x86/kernel
parent    3fb2b8ddcc6a7aa62af6bd2cb939edfd4c460506 (diff)
perf, x86: Remove superfluous arguments to x86_perf_event_set_period()
The second and third argument to x86_perf_event_set_period() are
superfluous since they are simple expressions of the first argument.
Hence remove them.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
LKML-Reference: <20100304140100.006500906@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
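[Editor's note, not part of the patch: a minimal userspace C sketch of the
refactoring pattern this commit applies, using hypothetical struct and
function names. Parameters that are pure expressions of the first argument
are recomputed inside the callee instead of being passed redundantly by
every caller.]

	#include <stdio.h>

	/* Toy stand-ins, mirroring how hwc == &event->hw and
	 * idx == hwc->idx in the patched kernel function. */
	struct hw_state {
		int idx;
		long period_left;
	};

	struct event {
		struct hw_state hw;
	};

	/* Before: 'hwc' and 'idx' are derivable from 'event', so
	 * every caller repeats the same expressions. */
	static int set_period_old(struct event *event,
				  struct hw_state *hwc, int idx)
	{
		(void)event;	/* unused in this toy; kept to mirror the shape */
		return hwc->period_left > 0 && idx >= 0;
	}

	/* After: derive them locally; call sites shrink to one argument
	 * and can no longer pass an inconsistent hwc/idx pair. */
	static int set_period_new(struct event *event)
	{
		struct hw_state *hwc = &event->hw;
		int idx = hwc->idx;

		return hwc->period_left > 0 && idx >= 0;
	}

	int main(void)
	{
		struct event e = { .hw = { .idx = 0, .period_left = 100 } };

		/* Both forms compute the same result; callers just got simpler. */
		printf("%d %d\n", set_period_old(&e, &e.hw, e.hw.idx),
		       set_period_new(&e));
		return 0;
	}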
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	| 15 +++++++--------
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	|  2 +-
2 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 585d5608ae6b..fcf1788f9626 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -170,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
 };
 
-static int x86_perf_event_set_period(struct perf_event *event,
-				     struct hw_perf_event *hwc, int idx);
+static int x86_perf_event_set_period(struct perf_event *event);
 
 /*
  * Generalized hw caching related hw_event table, filled
@@ -835,7 +834,7 @@ void hw_perf_enable(void)
 
 	if (hwc->idx == -1) {
 		x86_assign_hw_event(event, cpuc, i);
-		x86_perf_event_set_period(event, hwc, hwc->idx);
+		x86_perf_event_set_period(event);
 	}
 	/*
 	 * need to mark as active because x86_pmu_disable()
@@ -876,12 +875,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
  * To be called with the event disabled in hw:
  */
 static int
-x86_perf_event_set_period(struct perf_event *event,
-			  struct hw_perf_event *hwc, int idx)
+x86_perf_event_set_period(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	s64 left = atomic64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
-	int err, ret = 0;
+	int err, ret = 0, idx = hwc->idx;
 
 	if (idx == X86_PMC_IDX_FIXED_BTS)
 		return 0;
@@ -979,7 +978,7 @@ static int x86_pmu_start(struct perf_event *event)
 	if (hwc->idx == -1)
 		return -EAGAIN;
 
-	x86_perf_event_set_period(event, hwc, hwc->idx);
+	x86_perf_event_set_period(event);
 	x86_pmu.enable(hwc, hwc->idx);
 
 	return 0;
@@ -1123,7 +1122,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		handled = 1;
 		data.period = event->hw.last_period;
 
-		if (!x86_perf_event_set_period(event, hwc, idx))
+		if (!x86_perf_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index c582449163fa..6dbdf91ab342 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -699,7 +699,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
 	int ret;
 
 	x86_perf_event_update(event, hwc, idx);
-	ret = x86_perf_event_set_period(event, hwc, idx);
+	ret = x86_perf_event_set_period(event);
 
 	return ret;
 }