| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-06 12:30:52 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-06 12:30:52 -0400 |
| commit | 4aed2fd8e3181fea7c09ba79cf64e7e3f4413bf9 (patch) | |
| tree | 1f69733e5daab4915a76a41de0e4d1dc61e12cfb /arch/arm/kernel | |
| parent | 3a3527b6461b1298cc53ce72f336346739297ac8 (diff) | |
| parent | fc9ea5a1e53ee54f681e226d735008e2a6f8f470 (diff) | |
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (162 commits)
tracing/kprobes: unregister_trace_probe needs to be called under mutex
perf: expose event__process function
perf events: Fix mmap offset determination
perf, powerpc: fsl_emb: Restore setting perf_sample_data.period
perf, powerpc: Convert the FSL driver to use local64_t
perf tools: Don't keep unreferenced maps when unmaps are detected
perf session: Invalidate last_match when removing threads from rb_tree
perf session: Free the ref_reloc_sym memory at the right place
x86,mmiotrace: Add support for tracing STOS instruction
perf, sched migration: Librarize task states and event headers helpers
perf, sched migration: Librarize the GUI class
perf, sched migration: Make the GUI class client agnostic
perf, sched migration: Make it vertically scrollable
perf, sched migration: Parameterize cpu height and spacing
perf, sched migration: Fix key bindings
perf, sched migration: Ignore unhandled task states
perf, sched migration: Handle ignored migrate out events
perf: New migration tool overview
tracing: Drop cpparg() macro
perf: Use tracepoint_synchronize_unregister() to flush any pending tracepoint call
...
Fix up trivial conflicts in Makefile and drivers/cpufreq/cpufreq.c
Diffstat (limited to 'arch/arm/kernel')
| -rw-r--r-- | arch/arm/kernel/perf_event.c | 18 |
1 file changed, 9 insertions, 9 deletions
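The hunks below convert the ARM PMU driver's per-event bookkeeping from `atomic64_t` to `local64_t`, the cheaper primitive introduced for state that is only ever touched from one CPU. For readers unfamiliar with the pattern being converted, here is a minimal user-space sketch of the same lock-free read/cmpxchg/retry loop, using C11 atomics in place of the kernel's `local64_t`; `read_counter()` is a hypothetical stub standing in for the PMU register read:

```c
/*
 * Minimal user-space sketch (not the kernel code) of the
 * read/cmpxchg/retry loop in armpmu_event_update() below, using C11
 * atomics in place of local64_t. read_counter() is a hypothetical
 * stub standing in for armpmu->read_counter(idx).
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t prev_count;	/* last raw value accounted for */
static _Atomic uint64_t event_count;	/* accumulated total */

/* Hypothetical stand-in for the hardware counter read. */
static uint64_t read_counter(void)
{
	static uint64_t fake;
	return fake += 100;		/* pretend the counter advanced */
}

static uint64_t event_update(void)
{
	uint64_t prev_raw, new_raw;

	do {
		prev_raw = atomic_load(&prev_count);
		new_raw = read_counter();
		/*
		 * If prev_count moved under us (e.g. an overflow
		 * handler ran in between), the CAS fails and we retry
		 * with a fresh reading.
		 */
	} while (!atomic_compare_exchange_strong(&prev_count, &prev_raw,
						 new_raw));

	atomic_fetch_add(&event_count, new_raw - prev_raw);
	return new_raw;
}

int main(void)
{
	event_update();
	event_update();
	printf("total: %llu\n",
	       (unsigned long long)atomic_load(&event_count));
	return 0;
}
```

The retry is the point of the loop: between loading `prev_count` and publishing the new reading, something else may have folded in a delta, and the compare-and-swap detects that instead of silently double-counting.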
```diff
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index de12536d687f..417c392ddf1c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -164,20 +164,20 @@ armpmu_event_set_period(struct perf_event *event,
 		       struct hw_perf_event *hwc,
 		       int idx)
 {
-	s64 left = atomic64_read(&hwc->period_left);
+	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
 
 	if (unlikely(left <= -period)) {
 		left = period;
-		atomic64_set(&hwc->period_left, left);
+		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
-		atomic64_set(&hwc->period_left, left);
+		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		ret = 1;
 	}
@@ -185,7 +185,7 @@ armpmu_event_set_period(struct perf_event *event,
 	if (left > (s64)armpmu->max_period)
 		left = armpmu->max_period;
 
-	atomic64_set(&hwc->prev_count, (u64)-left);
+	local64_set(&hwc->prev_count, (u64)-left);
 
 	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
 
@@ -204,18 +204,18 @@ armpmu_event_update(struct perf_event *event,
 	u64 delta;
 
 again:
-	prev_raw_count = atomic64_read(&hwc->prev_count);
+	prev_raw_count = local64_read(&hwc->prev_count);
 	new_raw_count = armpmu->read_counter(idx);
 
-	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 			    new_raw_count) != prev_raw_count)
 		goto again;
 
 	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 	delta >>= shift;
 
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &hwc->period_left);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
 
 	return new_raw_count;
 }
@@ -478,7 +478,7 @@ __hw_perf_event_init(struct perf_event *event)
 	if (!hwc->sample_period) {
 		hwc->sample_period = armpmu->max_period;
 		hwc->last_period = hwc->sample_period;
-		atomic64_set(&hwc->period_left, hwc->sample_period);
+		local64_set(&hwc->period_left, hwc->sample_period);
 	}
 
 	err = 0;
```
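One detail of `armpmu_event_update()` worth calling out: the hardware counters here are 32 bits wide (note the `& 0xffffffff` in the `write_counter` call), while the accumulated counts are 64-bit. Shifting both raw readings into the top 32 bits of a `u64` before subtracting makes the difference wrap modulo 2^32, so a counter that overflowed between two reads still yields the correct delta. A standalone sketch of just that computation, assuming the 32-bit width:

```c
/*
 * Sketch of the wraparound-safe delta computation from
 * armpmu_event_update(), assuming a 32-bit hardware counter.
 * Subtracting in the top half of a u64 wraps modulo 2^32.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
	const int shift = 64 - 32;	/* 32-bit counter width */
	uint64_t delta;

	delta = (new_raw << shift) - (prev_raw << shift);
	delta >>= shift;		/* unsigned shift: keep low 32 bits */
	return delta;
}

int main(void)
{
	/* Counter overflowed between reads: 0xfffffff0 -> 0x10. */
	printf("%llu\n",
	       (unsigned long long)counter_delta(0xfffffff0u, 0x10u));
	/* Prints 32: 16 ticks before the wrap plus 16 after it. */
	return 0;
}
```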