Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/atomic_32.h |   2
-rw-r--r--  arch/sparc/include/asm/atomic_64.h |   4
-rw-r--r--  arch/sparc/include/asm/bitops_64.h |  11
-rw-r--r--  arch/sparc/kernel/perf_event.c     | 108
4 files changed, 70 insertions(+), 55 deletions(-)
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index f0d343c3b956..7ae128b19d3f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -25,7 +25,7 @@ extern int atomic_cmpxchg(atomic_t *, int, int);
 extern int atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
-#define atomic_read(v)		((v)->counter)
+#define atomic_read(v)		(*(volatile int *)&(v)->counter)
 
 #define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
 #define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index f2e48009989e..2050ca02c423 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -13,8 +13,8 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		((v)->counter)
-#define atomic64_read(v)	((v)->counter)
+#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
 
 #define atomic_set(v, i)	(((v)->counter) = i)
 #define atomic64_set(v, i)	(((v)->counter) = i)
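
The two hunks above change atomic_read()/atomic64_read() from a plain structure access to a load through a volatile-qualified pointer, so the compiler must re-read the counter from memory on every use instead of caching it in a register across a loop. A minimal userspace sketch of the difference, using made-up names (my_atomic_t, my_read_plain, my_read_once) rather than the kernel macros:

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* Plain access: the compiler is free to keep the value in a register
 * and reuse it, e.g. hoisting the load out of a polling loop. */
static inline int my_read_plain(my_atomic_t *v)
{
	return v->counter;
}

/* Volatile access: mirrors (*(volatile int *)&(v)->counter); every call
 * forces a fresh load from memory. */
static inline int my_read_once(my_atomic_t *v)
{
	return *(volatile int *)&v->counter;
}

int main(void)
{
	my_atomic_t a = { .counter = 42 };

	printf("plain=%d once=%d\n", my_read_plain(&a), my_read_once(&a));
	return 0;
}
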
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index e72ac9cdfb98..766121a67a24 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -44,7 +44,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #ifdef ULTRA_HAS_POPULATION_COUNT
 
-static inline unsigned int hweight64(unsigned long w)
+static inline unsigned int __arch_hweight64(unsigned long w)
 {
 	unsigned int res;
 
@@ -52,7 +52,7 @@ static inline unsigned int hweight64(unsigned long w)
 	return res;
 }
 
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
 {
 	unsigned int res;
 
@@ -60,7 +60,7 @@ static inline unsigned int hweight32(unsigned int w)
 	return res;
 }
 
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
 {
 	unsigned int res;
 
@@ -68,7 +68,7 @@ static inline unsigned int hweight16(unsigned int w)
 	return res;
 }
 
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
 {
 	unsigned int res;
 
@@ -78,9 +78,10 @@ static inline unsigned int hweight8(unsigned int w)
 
 #else
 
-#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/arch_hweight.h>
 
 #endif
+#include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
 #endif /* __KERNEL__ */
 
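
Renaming the popc-based helpers to __arch_hweight*() and pulling in <asm-generic/bitops/const_hweight.h> splits hweight into two layers: a wrapper that can be evaluated at compile time for constant arguments, and the architecture routine for runtime values. A rough sketch of that layering (a simplified stand-in, not the exact kernel macros):

/* Runtime population count; stands in for the UltraSPARC popc path or
 * the generic C fallback from arch_hweight.h. */
static inline unsigned int __arch_hweight32(unsigned int w)
{
	return (unsigned int)__builtin_popcount(w);
}

/* const_hweight.h-style wrapper: a compile-time constant argument lets
 * the whole expression fold away; anything else calls the arch routine. */
#define hweight32(w) \
	(__builtin_constant_p(w) ? (unsigned int)__builtin_popcount(w) \
				 : __arch_hweight32(w))

With this split, an expression such as hweight32(0xff) can fold to the constant 8 with no call emitted, while hweight32(runtime_mask) still reaches the architecture routine.
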
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e2771939341d..cf4ce263ff81 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -91,6 +91,8 @@ struct cpu_hw_events {
 
 	/* Enabled/disable state.  */
 	int			enabled;
+
+	unsigned int		group_flag;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
@@ -980,53 +982,6 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event)
-{
-	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = smp_processor_id();
-	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-	if (is_software_event(event))
-		event->pmu->enable(event);
-}
-
-int hw_perf_group_sched_in(struct perf_event *group_leader,
-			   struct perf_cpu_context *cpuctx,
-			   struct perf_event_context *ctx)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct perf_event *sub;
-	int n0, n;
-
-	if (!sparc_pmu)
-		return 0;
-
-	n0 = cpuc->n_events;
-	n = collect_events(group_leader, perf_max_events - n0,
-			   &cpuc->event[n0], &cpuc->events[n0],
-			   &cpuc->current_idx[n0]);
-	if (n < 0)
-		return -EAGAIN;
-	if (check_excludes(cpuc->event, n0, n))
-		return -EINVAL;
-	if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
-		return -EAGAIN;
-	cpuc->n_events = n0 + n;
-	cpuc->n_added += n;
-
-	cpuctx->active_oncpu += n;
-	n = 1;
-	event_sched_in(group_leader);
-	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
-		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub);
-			n++;
-		}
-	}
-	ctx->nr_active += n;
-
-	return 1;
-}
-
 static int sparc_pmu_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1044,11 +999,20 @@ static int sparc_pmu_enable(struct perf_event *event)
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	/*
+	 * If a group events scheduling transaction was started,
+	 * skip the schedulability test here; it will be performed
+	 * at commit time (->commit_txn) as a whole.
+	 */
+	if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+		goto nocheck;
+
 	if (check_excludes(cpuc->event, n0, 1))
 		goto out;
 	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
 		goto out;
 
+nocheck:
 	cpuc->n_events++;
 	cpuc->n_added++;
 
@@ -1128,11 +1092,61 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
+/*
+ * Start a group events scheduling transaction.
+ * Set the flag to make pmu::enable() not perform the
+ * schedulability test; it will be performed at commit time.
+ */
+static void sparc_pmu_start_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Stop a group events scheduling transaction.
+ * Clear the flag so that pmu::enable() will perform the
+ * schedulability test again.
+ */
+static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit a group events scheduling transaction.
+ * Perform the group schedulability test as a whole.
+ * Return 0 on success.
+ */
+static int sparc_pmu_commit_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int n;
+
+	if (!sparc_pmu)
+		return -EINVAL;
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	n = cpuc->n_events;
+	if (check_excludes(cpuc->event, 0, n))
+		return -EINVAL;
+	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
+		return -EAGAIN;
+
+	return 0;
+}
+
 static const struct pmu pmu = {
 	.enable		= sparc_pmu_enable,
 	.disable	= sparc_pmu_disable,
 	.read		= sparc_pmu_read,
 	.unthrottle	= sparc_pmu_unthrottle,
+	.start_txn	= sparc_pmu_start_txn,
+	.cancel_txn	= sparc_pmu_cancel_txn,
+	.commit_txn	= sparc_pmu_commit_txn,
 };
 
 const struct pmu *hw_perf_event_init(struct perf_event *event)
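
Taken together, group_flag, the nocheck path in sparc_pmu_enable(), and the three new callbacks replace the removed hw_perf_group_sched_in(): a caller can open a transaction, add each event of a group without per-event validation, and have the whole group checked once at commit. The stand-alone model below (made-up model_* names, with a simple counter-limit standing in for the real constraints) only illustrates that call sequence; it is not the real perf core or sparc code:

#include <stdio.h>

#define TXN_STARTED	0x1
#define MAX_COUNTERS	4

struct model_pmu {
	unsigned int group_flag;	/* plays the role of cpu_hw_events.group_flag */
	int n_events;
};

/* Stand-in for check_excludes()/sparc_check_constraints(): the only
 * "constraint" here is the number of hardware counters. */
static int model_check(const struct model_pmu *p, int n)
{
	return n > MAX_COUNTERS ? -1 : 0;
}

static int model_enable(struct model_pmu *p)
{
	/* Inside a transaction the schedulability test is deferred,
	 * like the "goto nocheck" path in sparc_pmu_enable(). */
	if (!(p->group_flag & TXN_STARTED) && model_check(p, p->n_events + 1))
		return -1;
	p->n_events++;
	return 0;
}

static void model_start_txn(struct model_pmu *p)  { p->group_flag |= TXN_STARTED; }
static void model_cancel_txn(struct model_pmu *p) { p->group_flag &= ~TXN_STARTED; }

/* Validate the accumulated group as a whole, as sparc_pmu_commit_txn() does. */
static int model_commit_txn(const struct model_pmu *p)
{
	return model_check(p, p->n_events);
}

int main(void)
{
	struct model_pmu pmu = { 0 };
	int i;

	model_start_txn(&pmu);		/* begin the group transaction */
	for (i = 0; i < 3; i++)
		model_enable(&pmu);	/* no per-event check while in the txn */

	if (model_commit_txn(&pmu) == 0)
		printf("group of %d events accepted\n", pmu.n_events);
	else
		model_cancel_txn(&pmu);	/* roll back the deferred-check flag */

	return 0;
}
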