author    David S. Miller <davem@davemloft.net>    2009-09-26 23:42:10 -0400
committer David S. Miller <davem@davemloft.net>    2009-09-26 23:42:10 -0400
commit    2ce4da2efcaca0dcbfed7a1f24177f18e75e0e89 (patch)
tree      1fe33c39e4ae903340a9a5cc604ffade10bb9e85
parent    0d9df2515dbceb67d343c0f10fd3ff218380d524 (diff)

sparc: Support HW cache events.

First supported chip for HW cache events is Ultra-IIIi.

Signed-off-by: David S. Miller <davem@davemloft.net>

Diffstat:

 arch/sparc/kernel/perf_event.c | 145 +++++++++++++++++++++++++++++++++++----
 1 file changed, 139 insertions(+), 6 deletions(-)
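
As background for the decoding this patch adds: the perf ABI packs a
PERF_TYPE_HW_CACHE request into attr.config as three byte-wide fields,
with the cache id in bits 0-7, the operation in bits 8-15 and the
result in bits 16-23. A minimal userspace sketch of counting L1
data-cache read misses on a kernel with this patch (error handling
elided; assumes __NR_perf_event_open is exposed by the system headers):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>

    /* Sketch only: request L1 data-cache read misses through the
     * hardware-cache event type this patch implements for sparc. */
    static int open_l1d_read_misses(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HW_CACHE;
            attr.config = PERF_COUNT_HW_CACHE_L1D |
                          (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                          (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

            /* calling task, any CPU, no group leader, no flags */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }

Reading eight bytes from the returned file descriptor yields the
current counter value.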
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2d6a1b10c81d..48375f694673 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -68,8 +68,19 @@ struct perf_event_map {
 #define PIC_LOWER	0x02
 };
 
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+#define CACHE_OP_UNSUPPORTED	0xfffe
+#define CACHE_OP_NONSENSE	0xffff
+
+typedef struct perf_event_map cache_map_t
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX];
+
 struct sparc_pmu {
 	const struct perf_event_map	*(*event_map)(int);
+	const cache_map_t		*cache_map;
 	int				max_events;
 	int				upper_shift;
 	int				lower_shift;
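
The C(x) macro above token-pastes the shared PERF_COUNT_HW_CACHE_
prefix, and cache_map_t names a three-dimensional array type, so one
pointer in struct sparc_pmu carries a chip's entire (cache, op, result)
table. Purely as an illustration of the expansion, not part of the
patch:

    /* [C(L1D)][C(OP_READ)][C(RESULT_MISS)] expands to
     *     [PERF_COUNT_HW_CACHE_L1D]
     *     [PERF_COUNT_HW_CACHE_OP_READ]
     *     [PERF_COUNT_HW_CACHE_RESULT_MISS]
     * which keeps the designated initializers below readable. */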
@@ -92,8 +103,96 @@ static const struct perf_event_map *ultra3i_event_map(int event_id)
 	return &ultra3i_perfmon_event_map[event_id];
 }
 
+static const cache_map_t ultra3i_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x09, PIC_UPPER },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x09, PIC_UPPER },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
+		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x12, PIC_UPPER },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x11, PIC_UPPER },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
 static const struct sparc_pmu ultra3i_pmu = {
 	.event_map	= ultra3i_event_map,
+	.cache_map	= &ultra3i_cache_map,
 	.max_events	= ARRAY_SIZE(ultra3i_perfmon_event_map),
 	.upper_shift	= 11,
 	.lower_shift	= 4,
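
With the table wired into ultra3i_pmu, resolving an event is plain
array indexing. An illustrative lookup (not part of the patch, using
names from the hunk above):

    /* Resolve the Ultra-IIIi encoding for an L1D read miss. */
    const struct perf_event_map *ent =
            &(*ultra3i_pmu.cache_map)[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
    /* ent->encoding == 0x09, ent->pic_mask == PIC_UPPER */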
@@ -375,6 +474,37 @@ void perf_event_release_pmc(void)
 	}
 }
 
+static const struct perf_event_map *sparc_map_cache_event(u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result;
+	const struct perf_event_map *pmap;
+
+	if (!sparc_pmu->cache_map)
+		return ERR_PTR(-ENOENT);
+
+	cache_type = (config >> 0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_op = (config >> 8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
+
+	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
+		return ERR_PTR(-ENOENT);
+
+	if (pmap->encoding == CACHE_OP_NONSENSE)
+		return ERR_PTR(-EINVAL);
+
+	return pmap;
+}
+
 static void hw_perf_event_destroy(struct perf_event *event)
 {
 	perf_event_release_pmc();
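
sparc_map_cache_event() simply reverses the ABI's packing of
attr.config and rejects combinations the table marks unsupported or
nonsensical. A hypothetical helper going the other direction, using
the same shifts the function decodes (this macro is not in the kernel):

    #define HW_CACHE_EVENT(type, op, result) \
            ((u64)(type) | ((u64)(op) << 8) | ((u64)(result) << 16))

    /* e.g. HW_CACHE_EVENT(C(DTLB), C(OP_READ), C(RESULT_MISS))
     * selects encoding 0x12 on PIC_UPPER per the Ultra-IIIi table. */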
@@ -390,12 +520,17 @@ static int __hw_perf_event_init(struct perf_event *event)
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
 
-	if (attr->type != PERF_TYPE_HARDWARE)
+	if (attr->type == PERF_TYPE_HARDWARE) {
+		if (attr->config >= sparc_pmu->max_events)
+			return -EINVAL;
+		pmap = sparc_pmu->event_map(attr->config);
+	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		pmap = sparc_map_cache_event(attr->config);
+		if (IS_ERR(pmap))
+			return PTR_ERR(pmap);
+	} else
 		return -EOPNOTSUPP;
 
-	if (attr->config >= sparc_pmu->max_events)
-		return -EINVAL;
-
 	perf_event_grab_pmc();
 	event->destroy = hw_perf_event_destroy;
 
@@ -417,8 +552,6 @@ static int __hw_perf_event_init(struct perf_event *event)
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
-	pmap = sparc_pmu->event_map(attr->config);
-
 	enc = pmap->encoding;
 	if (pmap->pic_mask & PIC_UPPER) {
 		hwc->idx = PIC_UPPER_INDEX;
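
For reference, the ERR_PTR()/IS_ERR()/PTR_ERR() trio used above
encodes a small negative errno in the pointer value itself, so one
return slot carries either a valid table entry or an error code. A
simplified sketch of the real helpers in include/linux/err.h:

    /* The last MAX_ERRNO pointer values are reserved for error codes. */
    #define MAX_ERRNO	4095

    static inline void *ERR_PTR(long error)
    {
            return (void *) error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long) ptr;
    }

    static inline long IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }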