about | summary | refs | log | tree | commit | diff | stats
path: root/arch/powerpc/kernel/perf_event.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel/perf_event.c')
-rw-r--r--  arch/powerpc/kernel/perf_event.c  46
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 5f78681ad90..19131b2614b 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -904,16 +904,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
904 return 0; 904 return 0;
905} 905}
906 906
907struct pmu power_pmu = {
908 .enable = power_pmu_enable,
909 .disable = power_pmu_disable,
910 .read = power_pmu_read,
911 .unthrottle = power_pmu_unthrottle,
912 .start_txn = power_pmu_start_txn,
913 .cancel_txn = power_pmu_cancel_txn,
914 .commit_txn = power_pmu_commit_txn,
915};
916
917/* 907/*
918 * Return 1 if we might be able to put event on a limited PMC, 908 * Return 1 if we might be able to put event on a limited PMC,
919 * or 0 if not. 909 * or 0 if not.
@@ -1014,7 +1004,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
1014 return 0; 1004 return 0;
1015} 1005}
1016 1006
1017struct pmu *hw_perf_event_init(struct perf_event *event) 1007static int power_pmu_event_init(struct perf_event *event)
1018{ 1008{
1019 u64 ev; 1009 u64 ev;
1020 unsigned long flags; 1010 unsigned long flags;
@@ -1026,25 +1016,27 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
1026 struct cpu_hw_events *cpuhw; 1016 struct cpu_hw_events *cpuhw;
1027 1017
1028 if (!ppmu) 1018 if (!ppmu)
1029 return ERR_PTR(-ENXIO); 1019 return -ENOENT;
1020
1030 switch (event->attr.type) { 1021 switch (event->attr.type) {
1031 case PERF_TYPE_HARDWARE: 1022 case PERF_TYPE_HARDWARE:
1032 ev = event->attr.config; 1023 ev = event->attr.config;
1033 if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) 1024 if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
1034 return ERR_PTR(-EOPNOTSUPP); 1025 return -EOPNOTSUPP;
1035 ev = ppmu->generic_events[ev]; 1026 ev = ppmu->generic_events[ev];
1036 break; 1027 break;
1037 case PERF_TYPE_HW_CACHE: 1028 case PERF_TYPE_HW_CACHE:
1038 err = hw_perf_cache_event(event->attr.config, &ev); 1029 err = hw_perf_cache_event(event->attr.config, &ev);
1039 if (err) 1030 if (err)
1040 return ERR_PTR(err); 1031 return err;
1041 break; 1032 break;
1042 case PERF_TYPE_RAW: 1033 case PERF_TYPE_RAW:
1043 ev = event->attr.config; 1034 ev = event->attr.config;
1044 break; 1035 break;
1045 default: 1036 default:
1046 return ERR_PTR(-EINVAL); 1037 return -ENOENT;
1047 } 1038 }
1039
1048 event->hw.config_base = ev; 1040 event->hw.config_base = ev;
1049 event->hw.idx = 0; 1041 event->hw.idx = 0;
1050 1042
@@ -1081,7 +1073,7 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
1081 */ 1073 */
1082 ev = normal_pmc_alternative(ev, flags); 1074 ev = normal_pmc_alternative(ev, flags);
1083 if (!ev) 1075 if (!ev)
1084 return ERR_PTR(-EINVAL); 1076 return -EINVAL;
1085 } 1077 }
1086 } 1078 }
1087 1079
@@ -1095,19 +1087,19 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
1095 n = collect_events(event->group_leader, ppmu->n_counter - 1, 1087 n = collect_events(event->group_leader, ppmu->n_counter - 1,
1096 ctrs, events, cflags); 1088 ctrs, events, cflags);
1097 if (n < 0) 1089 if (n < 0)
1098 return ERR_PTR(-EINVAL); 1090 return -EINVAL;
1099 } 1091 }
1100 events[n] = ev; 1092 events[n] = ev;
1101 ctrs[n] = event; 1093 ctrs[n] = event;
1102 cflags[n] = flags; 1094 cflags[n] = flags;
1103 if (check_excludes(ctrs, cflags, n, 1)) 1095 if (check_excludes(ctrs, cflags, n, 1))
1104 return ERR_PTR(-EINVAL); 1096 return -EINVAL;
1105 1097
1106 cpuhw = &get_cpu_var(cpu_hw_events); 1098 cpuhw = &get_cpu_var(cpu_hw_events);
1107 err = power_check_constraints(cpuhw, events, cflags, n + 1); 1099 err = power_check_constraints(cpuhw, events, cflags, n + 1);
1108 put_cpu_var(cpu_hw_events); 1100 put_cpu_var(cpu_hw_events);
1109 if (err) 1101 if (err)
1110 return ERR_PTR(-EINVAL); 1102 return -EINVAL;
1111 1103
1112 event->hw.config = events[n]; 1104 event->hw.config = events[n];
1113 event->hw.event_base = cflags[n]; 1105 event->hw.event_base = cflags[n];
@@ -1132,11 +1124,20 @@ struct pmu *hw_perf_event_init(struct perf_event *event)
1132 } 1124 }
1133 event->destroy = hw_perf_event_destroy; 1125 event->destroy = hw_perf_event_destroy;
1134 1126
1135 if (err) 1127 return err;
1136 return ERR_PTR(err);
1137 return &power_pmu;
1138} 1128}
1139 1129
1130struct pmu power_pmu = {
1131 .event_init = power_pmu_event_init,
1132 .enable = power_pmu_enable,
1133 .disable = power_pmu_disable,
1134 .read = power_pmu_read,
1135 .unthrottle = power_pmu_unthrottle,
1136 .start_txn = power_pmu_start_txn,
1137 .cancel_txn = power_pmu_cancel_txn,
1138 .commit_txn = power_pmu_commit_txn,
1139};
1140
1140/* 1141/*
1141 * A counter has overflowed; update its count and record 1142 * A counter has overflowed; update its count and record
1142 * things if requested. Note that interrupts are hard-disabled 1143 * things if requested. Note that interrupts are hard-disabled
@@ -1342,6 +1343,7 @@ int register_power_pmu(struct power_pmu *pmu)
1342 freeze_events_kernel = MMCR0_FCHV; 1343 freeze_events_kernel = MMCR0_FCHV;
1343#endif /* CONFIG_PPC64 */ 1344#endif /* CONFIG_PPC64 */
1344 1345
1346 perf_pmu_register(&power_pmu);
1345 perf_cpu_notifier(power_pmu_notifier); 1347 perf_cpu_notifier(power_pmu_notifier);
1346 1348
1347 return 0; 1349 return 0;