author    David Daney <david.daney@cavium.com>    2011-09-23 20:29:55 -0400
committer Ralf Baechle <ralf@linux-mips.org>      2011-10-24 18:34:27 -0400
commit    939991cff173f769efb8c56286d4e59fb9ced191
tree      6208a111429e280a64c901d30bbe3923de16aceb /arch/mips/kernel
parent    82091564cfd7ab8def42777a9c662dbf655c5d25
MIPS: perf: Add Octeon support for hardware perf.
Enable hardware counters for Octeon, and add the corresponding event mappings.

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2790/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/perf_event_mipsxx.c   | 147
1 file changed, 147 insertions(+), 0 deletions(-)
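Editorial note, not part of the patch: the new octeon_event_map translates the generic PERF_COUNT_HW_* IDs into Octeon hardware event numbers, so existing perf users need no Octeon-specific knowledge. A minimal user-space sketch of counting one of the newly supported generic events, assuming the standard perf_event_open() interface:

/*
 * Minimal sketch (not part of this patch): count CPU cycles through the
 * generic perf_event interface.  On Octeon the kernel now translates
 * PERF_COUNT_HW_CPU_CYCLES into hardware event 0x01 via octeon_event_map;
 * user space only ever sees the generic ID.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* -> event 0x01 on Octeon */
	attr.disabled = 1;

	/* perf_event_open() has no glibc wrapper; go through syscall(). */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}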
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 0c9549480c4..4f2971bcf8e 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -841,6 +841,16 @@ static const struct mips_perf_event mipsxx74Kcore_event_map
 	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
 };
 
+static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
+	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
+	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
+	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
+	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
+};
+
 /* 24K/34K/1004K cores can share the same cache event map. */
 static const struct mips_perf_event mipsxxcore_cache_map
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -1074,6 +1084,102 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
 },
 };
 
+
+static const struct mips_perf_event octeon_cache_map
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
+		[C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(DTLB)] = {
+	/*
+	 * Only general DTLB misses are counted; use the same event for
+	 * read and write.
+	 */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { 0x37, CNTR_ALL },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(BPU)] = {
+	/* Using the same code for *HW_BRANCH* */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+};
+
 #ifdef CONFIG_MIPS_MT_SMP
 static void check_and_calc_range(struct perf_event *event,
 				 const struct mips_perf_event *pev)
@@ -1411,6 +1517,39 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 	return &raw_event;
 }
 
+static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
+{
+	unsigned int raw_id = config & 0xff;
+	unsigned int base_id = raw_id & 0x7f;
+
+
+	raw_event.cntr_mask = CNTR_ALL;
+	raw_event.event_id = base_id;
+
+	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
+		if (base_id > 0x42)
+			return ERR_PTR(-EOPNOTSUPP);
+	} else {
+		if (base_id > 0x3a)
+			return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	switch (base_id) {
+	case 0x00:
+	case 0x0f:
+	case 0x1e:
+	case 0x1f:
+	case 0x2f:
+	case 0x34:
+	case 0x3b ... 0x3f:
+		return ERR_PTR(-EOPNOTSUPP);
+	default:
+		break;
+	}
+
+	return &raw_event;
+}
+
 static int __init
 init_hw_perf_events(void)
 {
@@ -1470,6 +1609,14 @@ init_hw_perf_events(void)
 		mipspmu.general_event_map = &mipsxxcore_event_map;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
 		break;
+	case CPU_CAVIUM_OCTEON:
+	case CPU_CAVIUM_OCTEON_PLUS:
+	case CPU_CAVIUM_OCTEON2:
+		mipspmu.name = "octeon";
+		mipspmu.general_event_map = &octeon_event_map;
+		mipspmu.cache_event_map = &octeon_cache_map;
+		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
+		break;
 	default:
 		pr_cont("Either hardware does not support performance "
 			"counters, or not yet implemented.\n");