author		Ingo Molnar <mingo@elte.hu>	2008-12-23 06:04:16 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-23 06:45:08 -0500
commit		8fb9331391af95ca1f4e5c0a0da8120b13cbae01 (patch)
tree		a0c0ac8e2dffd562ed023a012db3fd56540e7db6 /arch
parent		94c46572a6d9bb497eda0a14099d9f1360d57d5d (diff)
perfcounters: remove warnings
Impact: remove debug checks

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	7
1 file changed, 0 insertions, 7 deletions
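
All seven deleted lines below are WARN_ON_ONCE() debug assertions whose return value is ignored, so removing them does not change the counter logic. As a rough illustration of the warn-once pattern, here is a minimal userspace sketch (GCC/Clang statement expressions; an assumption for illustration only, not the kernel's actual macro):

#include <stdio.h>

/* Report a condition at most once per call site; control flow is unchanged. */
#define WARN_ON_ONCE(cond) ({					\
	static int __warned;					\
	int __ret = !!(cond);					\
	if (__ret && !__warned) {				\
		__warned = 1;					\
		fprintf(stderr, "WARNING at %s:%d\n",		\
			__FILE__, __LINE__);			\
	}							\
	__ret;							\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		WARN_ON_ONCE(i > 0);	/* warns only on the first true evaluation */
	return 0;
}
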
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index bdbdb56eaa34..89fad5d4fb37 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -64,7 +64,6 @@ x86_perf_counter_update(struct perf_counter *counter,
 {
 	u64 prev_raw_count, new_raw_count, delta;
 
-	WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE);
 	/*
 	 * Careful: an NMI might modify the previous counter value.
 	 *
@@ -89,7 +88,6 @@ again:
 	 * of the count, so we do that by clipping the delta to 32 bits:
 	 */
 	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
-	WARN_ON_ONCE((int)delta < 0);
 
 	atomic64_add(delta, &counter->count);
 	atomic64_sub(delta, &hwc->period_left);
@@ -193,7 +191,6 @@ __x86_perf_counter_disable(struct perf_counter *counter,
 	int err;
 
 	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
-	WARN_ON_ONCE(err);
 }
 
 static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);
@@ -209,8 +206,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
 	s32 left = atomic64_read(&hwc->period_left);
 	s32 period = hwc->irq_period;
 
-	WARN_ON_ONCE(period <= 0);
-
 	/*
 	 * If we are way outside a reasoable range then just skip forward:
 	 */
@@ -224,8 +219,6 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
 		atomic64_set(&hwc->period_left, left);
 	}
 
-	WARN_ON_ONCE(left <= 0);
-
 	per_cpu(prev_left[idx], smp_processor_id()) = left;
 
 	/*
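
For context on the code surviving around the second hunk: the delta between two raw counter reads is deliberately clipped to 32 bits. A standalone sketch of that arithmetic (illustrative only, not the kernel function; counter_delta() is a made-up name):

#include <stdint.h>
#include <stdio.h>

/* Narrow the difference of two raw readings to 32 bits, as in the
 * x86_perf_counter_update() context lines above, so a wrap of the
 * 32-bit hardware value still yields a small, positive delta. */
static uint64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
	return (uint64_t)(uint32_t)((int32_t)new_raw - (int32_t)prev_raw);
}

int main(void)
{
	uint64_t prev = 0xfffffff0u;	/* just below the 32-bit wrap point */
	uint64_t next = 0x00000010u;	/* shortly after the wrap */

	printf("delta = %llu\n", (unsigned long long)counter_delta(prev, next));
	/* prints 32, not a huge bogus value */
	return 0;
}
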