-rw-r--r--   arch/arm/kernel/perf_event.c     |  18
-rw-r--r--   arch/powerpc/kernel/perf_event.c |  34
-rw-r--r--   arch/sh/kernel/perf_event.c      |   6
-rw-r--r--   arch/sparc/kernel/perf_event.c   |  18
-rw-r--r--   arch/x86/kernel/cpu/perf_event.c |  18
-rw-r--r--   include/linux/perf_event.h       |   7
-rw-r--r--   kernel/perf_event.c              |  42
7 files changed, 72 insertions(+), 71 deletions(-)
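
The whole patch is a mechanical conversion: perf-event counters that are only ever updated from the CPU the event is running on (event->count, hw.prev_count, hw.period_left) move from the atomic64_* accessors to the cheaper local64_* ones, and the surrounding logic is untouched. A minimal sketch of the update pattern the converted arch code uses, with read_hw_counter() as a hypothetical stand-in for the per-architecture counter read (armpmu->read_counter(), read_pmc(), rdmsrl(), ...) and the sign-extension shift of the real code omitted:

/*
 * Sketch only, not part of the patch: read_hw_counter() is a
 * hypothetical helper standing in for the per-arch counter read.
 */
#include <linux/perf_event.h>

extern u64 read_hw_counter(int idx);    /* hypothetical */

static u64 sketch_event_update(struct perf_event *event,
                               struct hw_perf_event *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count;
        s64 delta;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = read_hw_counter(idx);

        /*
         * Publish the new raw value; retry if an IRQ/NMI on this CPU
         * updated prev_count between the read and the cmpxchg.
         */
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = new_raw_count - prev_raw_count;

        local64_add(delta, &event->count);      /* total events counted */
        local64_sub(delta, &hwc->period_left);  /* consume sample period */

        return new_raw_count;
}

The cmpxchg only has to be safe against interrupts and NMIs on the same CPU, which is exactly why the per-CPU local64_t primitives are sufficient here.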
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c45768614c8a..5b7cfafc0720 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -164,20 +164,20 @@ armpmu_event_set_period(struct perf_event *event,
                         struct hw_perf_event *hwc,
                         int idx)
 {
-        s64 left = atomic64_read(&hwc->period_left);
+        s64 left = local64_read(&hwc->period_left);
         s64 period = hwc->sample_period;
         int ret = 0;

         if (unlikely(left <= -period)) {
                 left = period;
-                atomic64_set(&hwc->period_left, left);
+                local64_set(&hwc->period_left, left);
                 hwc->last_period = period;
                 ret = 1;
         }

         if (unlikely(left <= 0)) {
                 left += period;
-                atomic64_set(&hwc->period_left, left);
+                local64_set(&hwc->period_left, left);
                 hwc->last_period = period;
                 ret = 1;
         }
@@ -185,7 +185,7 @@ armpmu_event_set_period(struct perf_event *event,
         if (left > (s64)armpmu->max_period)
                 left = armpmu->max_period;

-        atomic64_set(&hwc->prev_count, (u64)-left);
+        local64_set(&hwc->prev_count, (u64)-left);

         armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

@@ -204,18 +204,18 @@ armpmu_event_update(struct perf_event *event,
         s64 delta;

 again:
-        prev_raw_count = atomic64_read(&hwc->prev_count);
+        prev_raw_count = local64_read(&hwc->prev_count);
         new_raw_count = armpmu->read_counter(idx);

-        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                              new_raw_count) != prev_raw_count)
                 goto again;

         delta = (new_raw_count << shift) - (prev_raw_count << shift);
         delta >>= shift;

-        atomic64_add(delta, &event->count);
-        atomic64_sub(delta, &hwc->period_left);
+        local64_add(delta, &event->count);
+        local64_sub(delta, &hwc->period_left);

         return new_raw_count;
 }
@@ -478,7 +478,7 @@ __hw_perf_event_init(struct perf_event *event)
         if (!hwc->sample_period) {
                 hwc->sample_period = armpmu->max_period;
                 hwc->last_period = hwc->sample_period;
-                atomic64_set(&hwc->period_left, hwc->sample_period);
+                local64_set(&hwc->period_left, hwc->sample_period);
         }

         err = 0;
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index ac2a8c2554d9..af1d9a7c65d1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -410,15 +410,15 @@ static void power_pmu_read(struct perf_event *event)
          * Therefore we treat them like NMIs.
          */
         do {
-                prev = atomic64_read(&event->hw.prev_count);
+                prev = local64_read(&event->hw.prev_count);
                 barrier();
                 val = read_pmc(event->hw.idx);
-        } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

         /* The counters are only 32 bits wide */
         delta = (val - prev) & 0xfffffffful;
-        atomic64_add(delta, &event->count);
-        atomic64_sub(delta, &event->hw.period_left);
+        local64_add(delta, &event->count);
+        local64_sub(delta, &event->hw.period_left);
 }

 /*
@@ -444,10 +444,10 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
                 if (!event->hw.idx)
                         continue;
                 val = (event->hw.idx == 5) ? pmc5 : pmc6;
-                prev = atomic64_read(&event->hw.prev_count);
+                prev = local64_read(&event->hw.prev_count);
                 event->hw.idx = 0;
                 delta = (val - prev) & 0xfffffffful;
-                atomic64_add(delta, &event->count);
+                local64_add(delta, &event->count);
         }
 }

@@ -462,7 +462,7 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
                 event = cpuhw->limited_counter[i];
                 event->hw.idx = cpuhw->limited_hwidx[i];
                 val = (event->hw.idx == 5) ? pmc5 : pmc6;
-                atomic64_set(&event->hw.prev_count, val);
+                local64_set(&event->hw.prev_count, val);
                 perf_event_update_userpage(event);
         }
 }
@@ -666,11 +666,11 @@ void hw_perf_enable(void)
                 }
                 val = 0;
                 if (event->hw.sample_period) {
-                        left = atomic64_read(&event->hw.period_left);
+                        left = local64_read(&event->hw.period_left);
                         if (left < 0x80000000L)
                                 val = 0x80000000L - left;
                 }
-                atomic64_set(&event->hw.prev_count, val);
+                local64_set(&event->hw.prev_count, val);
                 event->hw.idx = idx;
                 write_pmc(idx, val);
                 perf_event_update_userpage(event);
@@ -842,8 +842,8 @@ static void power_pmu_unthrottle(struct perf_event *event)
         if (left < 0x80000000L)
                 val = 0x80000000L - left;
         write_pmc(event->hw.idx, val);
-        atomic64_set(&event->hw.prev_count, val);
-        atomic64_set(&event->hw.period_left, left);
+        local64_set(&event->hw.prev_count, val);
+        local64_set(&event->hw.period_left, left);
         perf_event_update_userpage(event);
         perf_enable();
         local_irq_restore(flags);
@@ -1109,7 +1109,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
         event->hw.config = events[n];
         event->hw.event_base = cflags[n];
         event->hw.last_period = event->hw.sample_period;
-        atomic64_set(&event->hw.period_left, event->hw.last_period);
+        local64_set(&event->hw.period_left, event->hw.last_period);

         /*
          * See if we need to reserve the PMU.
@@ -1147,16 +1147,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         int record = 0;

         /* we don't have to worry about interrupts here */
-        prev = atomic64_read(&event->hw.prev_count);
+        prev = local64_read(&event->hw.prev_count);
         delta = (val - prev) & 0xfffffffful;
-        atomic64_add(delta, &event->count);
+        local64_add(delta, &event->count);

         /*
          * See if the total period for this event has expired,
          * and update for the next period.
          */
         val = 0;
-        left = atomic64_read(&event->hw.period_left) - delta;
+        left = local64_read(&event->hw.period_left) - delta;
         if (period) {
                 if (left <= 0) {
                         left += period;
@@ -1194,8 +1194,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         }

         write_pmc(event->hw.idx, val);
-        atomic64_set(&event->hw.prev_count, val);
-        atomic64_set(&event->hw.period_left, left);
+        local64_set(&event->hw.prev_count, val);
+        local64_set(&event->hw.period_left, left);
         perf_event_update_userpage(event);
 }

diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 81b6de41ae5d..7a3dc3567258 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -185,10 +185,10 @@ static void sh_perf_event_update(struct perf_event *event,
          * this is the simplest approach for maintaining consistency.
          */
 again:
-        prev_raw_count = atomic64_read(&hwc->prev_count);
+        prev_raw_count = local64_read(&hwc->prev_count);
         new_raw_count = sh_pmu->read(idx);

-        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                              new_raw_count) != prev_raw_count)
                 goto again;

@@ -203,7 +203,7 @@ again:
         delta = (new_raw_count << shift) - (prev_raw_count << shift);
         delta >>= shift;

-        atomic64_add(delta, &event->count);
+        local64_add(delta, &event->count);
 }

 static void sh_pmu_disable(struct perf_event *event)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index beeb92fa3acd..8a6660da8e08 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -572,18 +572,18 @@ static u64 sparc_perf_event_update(struct perf_event *event,
         s64 delta;

 again:
-        prev_raw_count = atomic64_read(&hwc->prev_count);
+        prev_raw_count = local64_read(&hwc->prev_count);
         new_raw_count = read_pmc(idx);

-        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                              new_raw_count) != prev_raw_count)
                 goto again;

         delta = (new_raw_count << shift) - (prev_raw_count << shift);
         delta >>= shift;

-        atomic64_add(delta, &event->count);
-        atomic64_sub(delta, &hwc->period_left);
+        local64_add(delta, &event->count);
+        local64_sub(delta, &hwc->period_left);

         return new_raw_count;
 }
@@ -591,27 +591,27 @@ again:
 static int sparc_perf_event_set_period(struct perf_event *event,
                                        struct hw_perf_event *hwc, int idx)
 {
-        s64 left = atomic64_read(&hwc->period_left);
+        s64 left = local64_read(&hwc->period_left);
         s64 period = hwc->sample_period;
         int ret = 0;

         if (unlikely(left <= -period)) {
                 left = period;
-                atomic64_set(&hwc->period_left, left);
+                local64_set(&hwc->period_left, left);
                 hwc->last_period = period;
                 ret = 1;
         }

         if (unlikely(left <= 0)) {
                 left += period;
-                atomic64_set(&hwc->period_left, left);
+                local64_set(&hwc->period_left, left);
                 hwc->last_period = period;
                 ret = 1;
         }
         if (left > MAX_PERIOD)
                 left = MAX_PERIOD;

-        atomic64_set(&hwc->prev_count, (u64)-left);
+        local64_set(&hwc->prev_count, (u64)-left);

         write_pmc(idx, (u64)(-left) & 0xffffffff);

@@ -1087,7 +1087,7 @@ static int __hw_perf_event_init(struct perf_event *event)
         if (!hwc->sample_period) {
                 hwc->sample_period = MAX_PERIOD;
                 hwc->last_period = hwc->sample_period;
-                atomic64_set(&hwc->period_left, hwc->sample_period);
+                local64_set(&hwc->period_left, hwc->sample_period);
         }

         return 0;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 79e199843db6..2d0d29069275 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -296,10 +296,10 @@ x86_perf_event_update(struct perf_event *event)
          * count to the generic event atomically:
          */
 again:
-        prev_raw_count = atomic64_read(&hwc->prev_count);
+        prev_raw_count = local64_read(&hwc->prev_count);
         rdmsrl(hwc->event_base + idx, new_raw_count);

-        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                              new_raw_count) != prev_raw_count)
                 goto again;

@@ -314,8 +314,8 @@ again:
         delta = (new_raw_count << shift) - (prev_raw_count << shift);
         delta >>= shift;

-        atomic64_add(delta, &event->count);
-        atomic64_sub(delta, &hwc->period_left);
+        local64_add(delta, &event->count);
+        local64_sub(delta, &hwc->period_left);

         return new_raw_count;
 }
@@ -439,7 +439,7 @@ static int x86_setup_perfctr(struct perf_event *event)
         if (!hwc->sample_period) {
                 hwc->sample_period = x86_pmu.max_period;
                 hwc->last_period = hwc->sample_period;
-                atomic64_set(&hwc->period_left, hwc->sample_period);
+                local64_set(&hwc->period_left, hwc->sample_period);
         } else {
                 /*
                  * If we have a PMU initialized but no APIC
@@ -886,7 +886,7 @@ static int
 x86_perf_event_set_period(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
-        s64 left = atomic64_read(&hwc->period_left);
+        s64 left = local64_read(&hwc->period_left);
         s64 period = hwc->sample_period;
         int ret = 0, idx = hwc->idx;

@@ -898,14 +898,14 @@ x86_perf_event_set_period(struct perf_event *event)
          */
         if (unlikely(left <= -period)) {
                 left = period;
-                atomic64_set(&hwc->period_left, left);
+                local64_set(&hwc->period_left, left);
                 hwc->last_period = period;
                 ret = 1;
         }

         if (unlikely(left <= 0)) {
                 left += period;
-                atomic64_set(&hwc->period_left, left);
+                local64_set(&hwc->period_left, left);
                 hwc->last_period = period;
                 ret = 1;
         }
@@ -924,7 +924,7 @@ x86_perf_event_set_period(struct perf_event *event)
          * The hw event starts counting from this event offset,
          * mark it to be able to extra future deltas:
          */
-        atomic64_set(&hwc->prev_count, (u64)-left);
+        local64_set(&hwc->prev_count, (u64)-left);

         wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);

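The *_set_period() functions above all seed the hardware counter with (u64)-left so that an up-counting PMC overflows, and raises its interrupt, after exactly 'left' more events. A small sketch of that arithmetic; the 48-bit width and mask value in the comment are illustrative, not taken from the patch:

/*
 * Sketch, not from the patch: seed value for an up-counting PMC that
 * should overflow after 'left' more events; this mirrors the
 * (u64)(-left) & mask expressions used by the set_period functions.
 */
static u64 sketch_counter_seed(s64 left, u64 cntval_mask)
{
        /*
         * Example (illustrative 48-bit counter): left = 1000,
         * cntval_mask = 0xffffffffffff -> 0xfffffffffc18; after
         * 1000 increments the counter wraps and the PMU interrupts.
         */
        return (u64)(-left) & cntval_mask;
}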
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f34dab9b275e..7342979f95f2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -487,6 +487,7 @@ struct perf_guest_info_callbacks {
 #include <linux/cpu.h>
 #include <asm/atomic.h>
 #include <asm/local.h>
+#include <asm/local64.h>

 #define PERF_MAX_STACK_DEPTH 255

@@ -536,10 +537,10 @@ struct hw_perf_event {
                 struct arch_hw_breakpoint info;
 #endif
         };
-        atomic64_t prev_count;
+        local64_t prev_count;
         u64 sample_period;
         u64 last_period;
-        atomic64_t period_left;
+        local64_t period_left;
         u64 interrupts;

         u64 freq_time_stamp;
@@ -670,7 +671,7 @@ struct perf_event {

         enum perf_event_active_state state;
         unsigned int attach_state;
-        atomic64_t count;
+        local64_t count;
         atomic64_t child_count;

         /*
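The header hunks are the core of the change: hw_perf_event::prev_count, hw_perf_event::period_left and perf_event::count become local64_t because only the CPU the event is scheduled on ever writes them (including from IRQ/NMI context), while perf_event::child_count stays atomic64_t because exiting child events on other CPUs fold their totals into it; perf_event_count() in the kernel/perf_event.c hunks below reads one field of each kind. A sketch of that ownership split, modelled loosely on the child-count fold-in path and not code from the patch:

/*
 * Sketch (assumed, not from the patch) of the rule the new types
 * encode: fields written only by the owning CPU are local64_t;
 * child_count can be written from other CPUs, so it stays atomic64_t.
 */
static void sketch_fold_child_count(struct perf_event *parent,
                                    struct perf_event *child)
{
        u64 child_total = local64_read(&child->count);  /* CPU-local */

        /* may race with other exiting children -> cross-CPU atomic */
        atomic64_add(child_total, &parent->child_count);
}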
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a395fda2d94c..97c73018592e 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1148,9 +1148,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
          * In order to keep per-task stats reliable we need to flip the event
          * values when we flip the contexts.
          */
-        value = atomic64_read(&next_event->count);
-        value = atomic64_xchg(&event->count, value);
-        atomic64_set(&next_event->count, value);
+        value = local64_read(&next_event->count);
+        value = local64_xchg(&event->count, value);
+        local64_set(&next_event->count, value);

         swap(event->total_time_enabled, next_event->total_time_enabled);
         swap(event->total_time_running, next_event->total_time_running);
@@ -1540,10 +1540,10 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)

         hwc->sample_period = sample_period;

-        if (atomic64_read(&hwc->period_left) > 8*sample_period) {
+        if (local64_read(&hwc->period_left) > 8*sample_period) {
                 perf_disable();
                 perf_event_stop(event);
-                atomic64_set(&hwc->period_left, 0);
+                local64_set(&hwc->period_left, 0);
                 perf_event_start(event);
                 perf_enable();
         }
@@ -1584,7 +1584,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)

                 perf_disable();
                 event->pmu->read(event);
-                now = atomic64_read(&event->count);
+                now = local64_read(&event->count);
                 delta = now - hwc->freq_count_stamp;
                 hwc->freq_count_stamp = now;

@@ -1738,7 +1738,7 @@ static void __perf_event_read(void *info)

 static inline u64 perf_event_count(struct perf_event *event)
 {
-        return atomic64_read(&event->count) + atomic64_read(&event->child_count);
+        return local64_read(&event->count) + atomic64_read(&event->child_count);
 }

 static u64 perf_event_read(struct perf_event *event)
@@ -2141,7 +2141,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 static void perf_event_reset(struct perf_event *event)
 {
         (void)perf_event_read(event);
-        atomic64_set(&event->count, 0);
+        local64_set(&event->count, 0);
         perf_event_update_userpage(event);
 }

@@ -2359,7 +2359,7 @@ void perf_event_update_userpage(struct perf_event *event)
         userpg->index = perf_event_index(event);
         userpg->offset = perf_event_count(event);
         if (event->state == PERF_EVENT_STATE_ACTIVE)
-                userpg->offset -= atomic64_read(&event->hw.prev_count);
+                userpg->offset -= local64_read(&event->hw.prev_count);

         userpg->time_enabled = event->total_time_enabled +
                         atomic64_read(&event->child_total_time_enabled);
@@ -4035,14 +4035,14 @@ static u64 perf_swevent_set_period(struct perf_event *event)
         hwc->last_period = hwc->sample_period;

 again:
-        old = val = atomic64_read(&hwc->period_left);
+        old = val = local64_read(&hwc->period_left);
         if (val < 0)
                 return 0;

         nr = div64_u64(period + val, period);
         offset = nr * period;
         val -= offset;
-        if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+        if (local64_cmpxchg(&hwc->period_left, old, val) != old)
                 goto again;

         return nr;
@@ -4081,7 +4081,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 {
         struct hw_perf_event *hwc = &event->hw;

-        atomic64_add(nr, &event->count);
+        local64_add(nr, &event->count);

         if (!regs)
                 return;
@@ -4092,7 +4092,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
         if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
                 return perf_swevent_overflow(event, 1, nmi, data, regs);

-        if (atomic64_add_negative(nr, &hwc->period_left))
+        if (local64_add_negative(nr, &hwc->period_left))
                 return;

         perf_swevent_overflow(event, 0, nmi, data, regs);
@@ -4383,8 +4383,8 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
         u64 now;

         now = cpu_clock(cpu);
-        prev = atomic64_xchg(&event->hw.prev_count, now);
-        atomic64_add(now - prev, &event->count);
+        prev = local64_xchg(&event->hw.prev_count, now);
+        local64_add(now - prev, &event->count);
 }

 static int cpu_clock_perf_event_enable(struct perf_event *event)
@@ -4392,7 +4392,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
         struct hw_perf_event *hwc = &event->hw;
         int cpu = raw_smp_processor_id();

-        atomic64_set(&hwc->prev_count, cpu_clock(cpu));
+        local64_set(&hwc->prev_count, cpu_clock(cpu));
         perf_swevent_start_hrtimer(event);

         return 0;
@@ -4424,9 +4424,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
         u64 prev;
         s64 delta;

-        prev = atomic64_xchg(&event->hw.prev_count, now);
+        prev = local64_xchg(&event->hw.prev_count, now);
         delta = now - prev;
-        atomic64_add(delta, &event->count);
+        local64_add(delta, &event->count);
 }

 static int task_clock_perf_event_enable(struct perf_event *event)
@@ -4436,7 +4436,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)

         now = event->ctx->time;

-        atomic64_set(&hwc->prev_count, now);
+        local64_set(&hwc->prev_count, now);

         perf_swevent_start_hrtimer(event);

@@ -4879,7 +4879,7 @@ perf_event_alloc(struct perf_event_attr *attr,
                 hwc->sample_period = 1;
         hwc->last_period = hwc->sample_period;

-        atomic64_set(&hwc->period_left, hwc->sample_period);
+        local64_set(&hwc->period_left, hwc->sample_period);

         /*
          * we currently do not support PERF_FORMAT_GROUP on inherited events
@@ -5313,7 +5313,7 @@ inherit_event(struct perf_event *parent_event,
                 hwc->sample_period = sample_period;
                 hwc->last_period = sample_period;

-                atomic64_set(&hwc->period_left, sample_period);
+                local64_set(&hwc->period_left, sample_period);
         }

         child_event->overflow_handler = parent_event->overflow_handler;