aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2012-08-17 05:37:06 -0400
committerDavid S. Miller <davem@davemloft.net>2012-08-19 02:26:20 -0400
commit5344303ca8dad9881def6cfb45ad01201dba16de (patch)
tree66c0d286e26f414a0353b2da7e296a89c046a612 /arch
parent59660495e80e7eabc726c301ddc46afd2ce1bcac (diff)
sparc64: Abstract PMC read/write behind sparc_pmu.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--arch/sparc/kernel/perf_event.c68
1 files changed, 38 insertions, 30 deletions
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index f7b9ae39c264..fbd80299a4bb 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -141,6 +141,8 @@ struct sparc_pmu {
 	const struct perf_event_map	*(*event_map)(int);
 	const cache_map_t		*cache_map;
 	int				max_events;
+	u32				(*read_pmc)(int);
+	void				(*write_pmc)(int, u64);
 	int				upper_shift;
 	int				lower_shift;
 	int				event_mask;
@@ -154,6 +156,34 @@ struct sparc_pmu {
 	int				max_hw_events;
 };
 
+static u32 sparc_default_read_pmc(int idx)
+{
+	u64 val;
+
+	val = pcr_ops->read_pic(0);
+	if (idx == PIC_UPPER_INDEX)
+		val >>= 32;
+
+	return val & 0xffffffff;
+}
+
+static void sparc_default_write_pmc(int idx, u64 val)
+{
+	u64 shift, mask, pic;
+
+	shift = 0;
+	if (idx == PIC_UPPER_INDEX)
+		shift = 32;
+
+	mask = ((u64) 0xffffffff) << shift;
+	val <<= shift;
+
+	pic = pcr_ops->read_pic(0);
+	pic &= ~mask;
+	pic |= val;
+	pcr_ops->write_pic(0, pic);
+}
+
 static const struct perf_event_map ultra3_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
@@ -271,6 +301,8 @@ static const struct sparc_pmu ultra3_pmu = {
 	.event_map	= ultra3_event_map,
 	.cache_map	= &ultra3_cache_map,
 	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
+	.read_pmc	= sparc_default_read_pmc,
+	.write_pmc	= sparc_default_write_pmc,
 	.upper_shift	= 11,
 	.lower_shift	= 4,
 	.event_mask	= 0x3f,
@@ -403,6 +435,8 @@ static const struct sparc_pmu niagara1_pmu = {
 	.event_map	= niagara1_event_map,
 	.cache_map	= &niagara1_cache_map,
 	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
+	.read_pmc	= sparc_default_read_pmc,
+	.write_pmc	= sparc_default_write_pmc,
 	.upper_shift	= 0,
 	.lower_shift	= 4,
 	.event_mask	= 0x7,
@@ -532,6 +566,8 @@ static const struct sparc_pmu niagara2_pmu = {
 	.event_map	= niagara2_event_map,
 	.cache_map	= &niagara2_cache_map,
 	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
+	.read_pmc	= sparc_default_read_pmc,
+	.write_pmc	= sparc_default_write_pmc,
 	.upper_shift	= 19,
 	.lower_shift	= 6,
 	.event_mask	= 0xfff,
@@ -593,34 +629,6 @@ static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw
 	pcr_ops->write_pcr(0, cpuc->pcr);
 }
 
-static u32 read_pmc(int idx)
-{
-	u64 val;
-
-	val = pcr_ops->read_pic(0);
-	if (idx == PIC_UPPER_INDEX)
-		val >>= 32;
-
-	return val & 0xffffffff;
-}
-
-static void write_pmc(int idx, u64 val)
-{
-	u64 shift, mask, pic;
-
-	shift = 0;
-	if (idx == PIC_UPPER_INDEX)
-		shift = 32;
-
-	mask = ((u64) 0xffffffff) << shift;
-	val <<= shift;
-
-	pic = pcr_ops->read_pic(0);
-	pic &= ~mask;
-	pic |= val;
-	pcr_ops->write_pic(0, pic);
-}
-
 static u64 sparc_perf_event_update(struct perf_event *event,
 				   struct hw_perf_event *hwc, int idx)
 {
@@ -630,7 +638,7 @@ static u64 sparc_perf_event_update(struct perf_event *event,
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
-	new_raw_count = read_pmc(idx);
+	new_raw_count = sparc_pmu->read_pmc(idx);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 			    new_raw_count) != prev_raw_count)
@@ -670,7 +678,7 @@ static int sparc_perf_event_set_period(struct perf_event *event,
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
-	write_pmc(idx, (u64)(-left) & 0xffffffff);
+	sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);
 
 	perf_event_update_userpage(event);
 