author	David S. Miller <davem@davemloft.net>	2009-09-27 00:23:41 -0400
committer	David S. Miller <davem@davemloft.net>	2009-09-27 00:23:41 -0400
commit	7eebda60d57a0862a410f45122c73b8bbe6e260c (patch)
tree	2450ccca0bd7fc21526d2900a9c11a86ab9a1f39 /arch
parent	d0b86480f5b33f4a86d7c106706d6e0dcd1935ce (diff)
sparc: Niagara1 perf event support.
This chip is extremely limited, and many of the events supported are approximations at best.

Signed-off-by: David S. Miller <davem@davemloft.net>
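To make concrete what these generic events look like from userspace, here is a minimal, hedged sketch using the perf_event_open() syscall to count two of the events this patch wires up on Niagara1; the workload loop is a stand-in, and the raw-syscall wrapper exists only because glibc does not provide one. A second sketch of the PCR encoding implied by the new niagara1_pmu fields follows the diff below.

/* Minimal sketch (not part of the patch): exercising the generic
 * hardware events that this change maps on Niagara1.  Cycles share
 * the upper PIC's instruction encoding (0x00), so the "cycles" count
 * is really an instruction count - one of the approximations the
 * commit message refers to.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_counter(unsigned long long config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = config;
	/* attr.disabled == 0: the counter starts as soon as it is created.
	 * pid 0, cpu -1: count this task on whatever CPU it runs on.
	 */
	return perf_event_open(&attr, 0, -1, -1, 0);
}

int main(void)
{
	int cyc = open_counter(PERF_COUNT_HW_CPU_CYCLES);
	int miss = open_counter(PERF_COUNT_HW_CACHE_MISSES);
	volatile int i;
	uint64_t count;

	for (i = 0; i < 1000000; i++)
		;	/* stand-in workload */

	if (cyc >= 0 && read(cyc, &count, sizeof(count)) == sizeof(count))
		printf("cycles (approximate): %llu\n", (unsigned long long)count);
	if (miss >= 0 && read(miss, &count, sizeof(count)) == sizeof(count))
		printf("cache misses:         %llu\n", (unsigned long long)count);
	return 0;
}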
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc/kernel/perf_event.c	119
1 file changed, 119 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6f01e04cc323..9541b456c3ee 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -201,6 +201,121 @@ static const struct sparc_pmu ultra3_pmu = {
 	.lower_nop = 0x14,
 };
 
+/* Niagara1 is very limited.  The upper PIC is hard-locked to count
+ * only instructions, so it is free running which creates all kinds of
+ * problems.  Some hardware designs make one wonder if the creator
+ * even looked at how this stuff gets used by software.
+ */
+static const struct perf_event_map niagara1_perfmon_event_map[] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
+	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
+	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
+};
+
+static const struct perf_event_map *niagara1_event_map(int event_id)
+{
+	return &niagara1_perfmon_event_map[event_id];
+}
+
+static const cache_map_t niagara1_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
+		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
+static const struct sparc_pmu niagara1_pmu = {
+	.event_map = niagara1_event_map,
+	.cache_map = &niagara1_cache_map,
+	.max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
+	.upper_shift = 0,
+	.lower_shift = 4,
+	.event_mask = 0x7,
+	.upper_nop = 0x0,
+	.lower_nop = 0x0,
+};
+
 static const struct perf_event_map niagara2_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
@@ -753,6 +868,10 @@ static bool __init supported_pmu(void)
 		sparc_pmu = &ultra3_pmu;
 		return true;
 	}
+	if (!strcmp(sparc_pmu_type, "niagara")) {
+		sparc_pmu = &niagara1_pmu;
+		return true;
+	}
 	if (!strcmp(sparc_pmu_type, "niagara2")) {
 		sparc_pmu = &niagara2_pmu;
 		return true;
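And here is the promised rough sketch of the PCR encoding implied by the new niagara1_pmu fields, using only the shift and mask values visible in this diff; the PIC_* flag values and the pcr_bits() helper are illustrative assumptions, not the kernel's actual code.

/* Illustrative only: fold a niagara1_perfmon_event_map entry into the
 * performance control register.  The shift/mask values are the ones
 * added by this patch; the PIC_* flag encodings are assumed.
 */
#include <stdio.h>

#define PIC_NONE	0x00
#define PIC_UPPER	0x01	/* assumed flag encodings */
#define PIC_LOWER	0x02

struct perf_event_map {
	unsigned int encoding;
	unsigned char pic_mask;
};

static const unsigned int upper_shift = 0;	/* from niagara1_pmu */
static const unsigned int lower_shift = 4;
static const unsigned int event_mask  = 0x7;

static unsigned long pcr_bits(const struct perf_event_map *map)
{
	unsigned int enc = map->encoding & event_mask;

	/* The upper PIC only ever counts instructions (encoding 0x00),
	 * so any real event selection happens in the lower PIC's
	 * 3-bit field starting at bit 4.
	 */
	if (map->pic_mask & PIC_UPPER)
		return (unsigned long)enc << upper_shift;
	if (map->pic_mask & PIC_LOWER)
		return (unsigned long)enc << lower_shift;
	return 0;	/* PIC_NONE: not countable on this chip */
}

int main(void)
{
	struct perf_event_map cache_misses = { 0x03, PIC_LOWER };
	struct perf_event_map cycles = { 0x00, PIC_UPPER };

	printf("cache-miss PCR bits: 0x%lx\n", pcr_bits(&cache_misses));	/* 0x30 */
	printf("cycles PCR bits:     0x%lx\n", pcr_bits(&cycles));	/* 0x0 */
	return 0;
}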