Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/local64.h     |  1
-rw-r--r--  arch/sparc/include/asm/perf_event.h  |  8
-rw-r--r--  arch/sparc/kernel/helpers.S          |  6
-rw-r--r--  arch/sparc/kernel/perf_event.c       | 25
4 files changed, 25 insertions, 15 deletions
diff --git a/arch/sparc/include/asm/local64.h b/arch/sparc/include/asm/local64.h
new file mode 100644
index 000000000000..36c93b5cc239
--- /dev/null
+++ b/arch/sparc/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
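
For context, a minimal sketch of the local64 API that this one-line wrapper pulls in from asm-generic and that the perf_event.c hunks below switch to. This is illustrative only and not part of the patch; local64_t is meant for counters that are only updated from their own CPU (such as the PMU overflow path), so on 64-bit it can avoid the bus locking a full atomic64_t needs.

/* Hypothetical example (not from this patch): basic local64_t usage,
 * mirroring the counter updates in sparc_perf_event_update() below.
 */
#include <asm/local64.h>

static long local64_usage_example(void)
{
	local64_t count;

	local64_set(&count, 0);
	local64_add(5, &count);		/* count is now 5 */
	local64_sub(2, &count);		/* count is now 3 */

	/* compare-and-exchange, the pattern used in the update loop below */
	if (local64_cmpxchg(&count, 3, 10) != 3)
		return -1;		/* lost a race (cannot happen on a local variable) */

	return local64_read(&count);	/* returns 10 */
}
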
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
index 7e2669894ce8..74c4e0cd889c 100644
--- a/arch/sparc/include/asm/perf_event.h
+++ b/arch/sparc/include/asm/perf_event.h
@@ -6,7 +6,15 @@ extern void set_perf_event_pending(void);
 #define PERF_EVENT_INDEX_OFFSET	0
 
 #ifdef CONFIG_PERF_EVENTS
+#include <asm/ptrace.h>
+
 extern void init_hw_perf_events(void);
+
+extern void
+__perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+#define perf_arch_fetch_caller_regs(pt_regs, ip)	\
+	__perf_arch_fetch_caller_regs(pt_regs, ip, 1);
 #else
 static inline void init_hw_perf_events(void)	{ }
 #endif
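
A hedged illustration of how the new wrapper macro might be used. The caller below is hypothetical (it does not appear in this patch); it only shows the shape of the two-argument interface that now hides the frame-skip count.

/* Hypothetical caller (not from this patch). */
#include <asm/ptrace.h>
#include <asm/perf_event.h>

static void example_capture_caller_state(unsigned long ip)
{
	struct pt_regs regs;

	/* Expands to __perf_arch_fetch_caller_regs(&regs, ip, 1); the
	 * two-argument macro supplies the frame-skip count that the sparc
	 * asm helper in helpers.S still expects as its third argument.
	 */
	perf_arch_fetch_caller_regs(&regs, ip);

	/* ... "regs" would then be handed to the perf sampling code ... */
}
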
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S
index 92090cc9e829..682fee06a16b 100644
--- a/arch/sparc/kernel/helpers.S
+++ b/arch/sparc/kernel/helpers.S
@@ -47,9 +47,9 @@ stack_trace_flush:
 	.size		stack_trace_flush,.-stack_trace_flush
 
 #ifdef CONFIG_PERF_EVENTS
-	.globl		perf_arch_fetch_caller_regs
-	.type		perf_arch_fetch_caller_regs,#function
-perf_arch_fetch_caller_regs:
+	.globl		__perf_arch_fetch_caller_regs
+	.type		__perf_arch_fetch_caller_regs,#function
+__perf_arch_fetch_caller_regs:
 	/* We always read the %pstate into %o5 since we will use
 	 * that to construct a fake %tstate to store into the regs.
 	 */
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 44faabc3c02c..357ced3c33ff 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -572,18 +572,18 @@ static u64 sparc_perf_event_update(struct perf_event *event,
 	s64 delta;
 
 again:
-	prev_raw_count = atomic64_read(&hwc->prev_count);
+	prev_raw_count = local64_read(&hwc->prev_count);
 	new_raw_count = read_pmc(idx);
 
-	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
-			     new_raw_count) != prev_raw_count)
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			    new_raw_count) != prev_raw_count)
 		goto again;
 
 	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 	delta >>= shift;
 
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &hwc->period_left);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
 
 	return new_raw_count;
 }
@@ -591,27 +591,27 @@ again:
 static int sparc_perf_event_set_period(struct perf_event *event,
 				       struct hw_perf_event *hwc, int idx)
 {
-	s64 left = atomic64_read(&hwc->period_left);
+	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
 
 	if (unlikely(left <= -period)) {
 		left = period;
-		atomic64_set(&hwc->period_left, left);
+		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
-		atomic64_set(&hwc->period_left, left);
+		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		ret = 1;
 	}
 	if (left > MAX_PERIOD)
 		left = MAX_PERIOD;
 
-	atomic64_set(&hwc->prev_count, (u64)-left);
+	local64_set(&hwc->prev_count, (u64)-left);
 
 	write_pmc(idx, (u64)(-left) & 0xffffffff);
 
@@ -1006,7 +1006,7 @@ static int sparc_pmu_enable(struct perf_event *event)
 	 * skip the schedulability test here, it will be peformed
 	 * at commit time(->commit_txn) as a whole
 	 */
-	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto nocheck;
 
 	if (check_excludes(cpuc->event, n0, 1))
@@ -1088,7 +1088,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (!hwc->sample_period) {
 		hwc->sample_period = MAX_PERIOD;
 		hwc->last_period = hwc->sample_period;
-		atomic64_set(&hwc->period_left, hwc->sample_period);
+		local64_set(&hwc->period_left, hwc->sample_period);
 	}
 
 	return 0;
@@ -1103,7 +1103,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
 /*
@@ -1115,7 +1115,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -1138,6 +1138,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
 		return -EAGAIN;
 
+	cpuc->group_flag &= ~PERF_EVENT_TXN;
 	return 0;
 }
 
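
For context, a hedged sketch of the group-transaction flow that the last three hunks adjust. The driver loop below is illustrative only (the function name schedule_event_group and its error handling are made up, not taken from the core perf code); it shows why commit_txn now clears PERF_EVENT_TXN itself after a successful group check.

/* Hypothetical core-side caller (not from this patch). */
#include <linux/perf_event.h>

static int schedule_event_group(const struct pmu *pmu,
				struct perf_event **group, int n)
{
	int i;

	pmu->start_txn(pmu);		/* sets PERF_EVENT_TXN in group_flag */

	for (i = 0; i < n; i++) {
		/* sparc_pmu_enable() sees the flag and jumps to "nocheck",
		 * deferring the schedulability test to commit time.
		 */
		if (pmu->enable(group[i]))
			goto fail;
	}

	if (pmu->commit_txn(pmu) == 0)	/* validates the whole group and,
					 * with this patch, also clears the
					 * flag on success */
		return 0;
fail:
	pmu->cancel_txn(pmu);		/* clears PERF_EVENT_TXN on failure */
	return -EAGAIN;
}
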