Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/local64.h     |  1
-rw-r--r--  arch/sparc/include/asm/perf_event.h  |  8
-rw-r--r--  arch/sparc/kernel/helpers.S          |  6
-rw-r--r--  arch/sparc/kernel/perf_event.c       | 25
4 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/arch/sparc/include/asm/local64.h b/arch/sparc/include/asm/local64.h
new file mode 100644
index 000000000000..36c93b5cc239
--- /dev/null
+++ b/arch/sparc/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
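
The new header just pulls in the generic local64 implementation; the local64_t counter type and its accessors are what the perf_event.c hunks below switch to. Roughly, on a 64-bit architecture such as sparc64 the generic header wraps local_t, so the accessors stay cheap because each counter is only updated by the CPU that owns it. A loose sketch of the shape of that API (illustrative, not a verbatim copy of asm-generic/local64.h):

	/* Sketch of the local64 API used by the hunks below (illustrative). */
	typedef struct {
		local_t a;			/* 64-bit, owner-CPU-only counter */
	} local64_t;

	#define local64_read(l)			local_read(&(l)->a)
	#define local64_set(l, i)		local_set(&(l)->a, (i))
	#define local64_add(i, l)		local_add((i), &(l)->a)
	#define local64_sub(i, l)		local_sub((i), &(l)->a)
	#define local64_cmpxchg(l, o, n)	local_cmpxchg(&(l)->a, (o), (n))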
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
index 7e2669894ce8..74c4e0cd889c 100644
--- a/arch/sparc/include/asm/perf_event.h
+++ b/arch/sparc/include/asm/perf_event.h
@@ -6,7 +6,15 @@ extern void set_perf_event_pending(void);
 #define PERF_EVENT_INDEX_OFFSET	0
 
 #ifdef CONFIG_PERF_EVENTS
+#include <asm/ptrace.h>
+
 extern void init_hw_perf_events(void);
+
+extern void
+__perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+#define perf_arch_fetch_caller_regs(pt_regs, ip)	\
+	__perf_arch_fetch_caller_regs(pt_regs, ip, 1);
 #else
 static inline void init_hw_perf_events(void) { }
 #endif
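
The rename pairs with the new wrapper macro: callers keep using perf_arch_fetch_caller_regs(), which now expands to the assembly helper with a fixed skip count of 1. A hypothetical call site (the local variables here are illustrative, not from the patch):

	struct pt_regs regs;
	unsigned long ip = 0;	/* instruction pointer of interest (illustrative) */

	perf_arch_fetch_caller_regs(&regs, ip);
	/* ...which the macro above expands to:
	 *	__perf_arch_fetch_caller_regs(&regs, ip, 1);
	 */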
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S
index 92090cc9e829..682fee06a16b 100644
--- a/arch/sparc/kernel/helpers.S
+++ b/arch/sparc/kernel/helpers.S
@@ -47,9 +47,9 @@ stack_trace_flush:
 	.size		stack_trace_flush,.-stack_trace_flush
 
 #ifdef CONFIG_PERF_EVENTS
-	.globl		perf_arch_fetch_caller_regs
-	.type		perf_arch_fetch_caller_regs,#function
-perf_arch_fetch_caller_regs:
+	.globl		__perf_arch_fetch_caller_regs
+	.type		__perf_arch_fetch_caller_regs,#function
+__perf_arch_fetch_caller_regs:
 	/* We always read the %pstate into %o5 since we will use
 	 * that to construct a fake %tstate to store into the regs.
 	 */
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 0ec92c8861dd..8a6660da8e08 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -572,18 +572,18 @@ static u64 sparc_perf_event_update(struct perf_event *event,
 	s64 delta;
 
 again:
-	prev_raw_count = atomic64_read(&hwc->prev_count);
+	prev_raw_count = local64_read(&hwc->prev_count);
 	new_raw_count = read_pmc(idx);
 
-	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
 	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 	delta >>= shift;
 
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &hwc->period_left);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
 
 	return new_raw_count;
 }
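
The update loop is untouched apart from the counter type: read the previous raw value, read the PMC, and retry via the local64_cmpxchg() if another update raced in between. The shift pair on the delta is what makes wrap-around deltas come out right: the PMCs programmed here are 32 bits wide (write_pmc() masks with 0xffffffff in the next hunk), so shifting both raw values up by 64 - 32 and arithmetically shifting the difference back down sign-extends it. A self-contained sketch of that arithmetic with made-up raw values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int shift = 64 - 32;			/* 32-bit hardware counter */
		uint64_t prev_raw = 0xfffffff0;		/* old raw PMC value, near wrap  */
		uint64_t new_raw  = 0x00000010;		/* new raw PMC value, after wrap */

		/* Same idiom as sparc_perf_event_update(): shift up so the
		 * counter's top bit becomes the sign bit, subtract, then
		 * arithmetic-shift back down to sign-extend the delta.
		 */
		int64_t delta = (new_raw << shift) - (prev_raw << shift);
		delta >>= shift;

		printf("delta = %lld\n", (long long)delta);	/* prints 32 */
		return 0;
	}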
@@ -591,27 +591,27 @@ again:
 static int sparc_perf_event_set_period(struct perf_event *event,
 				       struct hw_perf_event *hwc, int idx)
 {
-	s64 left = atomic64_read(&hwc->period_left);
+	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
 
 	if (unlikely(left <= -period)) {
 		left = period;
-		atomic64_set(&hwc->period_left, left);
+		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
-		atomic64_set(&hwc->period_left, left);
+		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		ret = 1;
 	}
 	if (left > MAX_PERIOD)
 		left = MAX_PERIOD;
 
-	atomic64_set(&hwc->prev_count, (u64)-left);
+	local64_set(&hwc->prev_count, (u64)-left);
 
 	write_pmc(idx, (u64)(-left) & 0xffffffff);
 
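
sparc_perf_event_set_period() likewise only changes counter type; the preload logic stays the same. The PMC is written with the low 32 bits of -left, so counting up from there reaches the 32-bit wrap (and hence the overflow) after exactly `left` more events. A small worked example of that preload arithmetic with an arbitrary period:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t left = 100000;	/* events remaining in this sample period */

		/* Same computation as write_pmc(idx, (u64)(-left) & 0xffffffff). */
		uint64_t preload = (uint64_t)(-left) & 0xffffffff;

		printf("preload        = 0x%08llx\n", (unsigned long long)preload);
		printf("events to wrap = %llu\n",
		       (unsigned long long)((1ULL << 32) - preload));	/* 100000 */
		return 0;
	}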
@@ -1005,7 +1005,7 @@ static int sparc_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto nocheck;
 
 	if (check_excludes(cpuc->event, n0, 1))
@@ -1087,7 +1087,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (!hwc->sample_period) {
 		hwc->sample_period = MAX_PERIOD;
 		hwc->last_period = hwc->sample_period;
-		atomic64_set(&hwc->period_left, hwc->sample_period);
+		local64_set(&hwc->period_left, hwc->sample_period);
 	}
 
 	return 0;
@@ -1102,7 +1102,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
 /*
@@ -1114,7 +1114,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -1137,6 +1137,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
 		return -EAGAIN;
 
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }
 
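
The one behavioral change in perf_event.c is in this transaction path: besides the PERF_EVENT_TXN_STARTED to PERF_EVENT_TXN rename, a successful sparc_pmu_commit_txn() now clears the flag itself instead of leaving it set. A condensed model of the flag's lifecycle after this patch (only PERF_EVENT_TXN and group_flag come from the diff; the rest is illustrative):

	/* Illustrative model, not kernel code. */
	#define PERF_EVENT_TXN	0x1		/* flag value is illustrative */

	struct cpu_hw_events_model {
		unsigned int group_flag;
	};

	static void start_txn(struct cpu_hw_events_model *cpuc)
	{
		cpuc->group_flag |= PERF_EVENT_TXN;	/* sparc_pmu_start_txn() */
	}

	static void cancel_txn(struct cpu_hw_events_model *cpuc)
	{
		cpuc->group_flag &= ~PERF_EVENT_TXN;	/* sparc_pmu_cancel_txn() */
	}

	static int commit_txn(struct cpu_hw_events_model *cpuc, int constraints_ok)
	{
		if (!constraints_ok)
			return -1;			/* -EAGAIN in the real code */
		/* New in this patch: success also clears the flag. */
		cpuc->group_flag &= ~PERF_EVENT_TXN;
		return 0;
	}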