Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_knc.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_knc.c  93
1 file changed, 82 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 7c46bfdbc373..4b7731bf23a8 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -3,6 +3,8 @@
 #include <linux/perf_event.h>
 #include <linux/types.h>
 
+#include <asm/hardirq.h>
+
 #include "perf_event.h"
 
 static const u64 knc_perfmon_event_map[] =
@@ -173,30 +175,100 @@ static void knc_pmu_enable_all(int added)
 static inline void
 knc_pmu_disable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 val;
 
 	val = hwc->config;
-	if (cpuc->enabled)
-		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
 }
 
 static void knc_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 val;
 
 	val = hwc->config;
-	if (cpuc->enabled)
-		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
 	(void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
 }
 
+static inline u64 knc_pmu_get_status(void)
+{
+	u64 status;
+
+	rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
+
+	return status;
+}
+
+static inline void knc_pmu_ack_status(u64 ack)
+{
+	wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);
+}
+
+static int knc_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_events *cpuc;
+	int handled = 0;
+	int bit, loops;
+	u64 status;
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+
+	knc_pmu_disable_all();
+
+	status = knc_pmu_get_status();
+	if (!status) {
+		knc_pmu_enable_all(0);
+		return handled;
+	}
+
+	loops = 0;
+again:
+	knc_pmu_ack_status(status);
+	if (++loops > 100) {
+		WARN_ONCE(1, "perf: irq loop stuck!\n");
+		perf_event_print_debug();
+		goto done;
+	}
+
+	inc_irq_stat(apic_perf_irqs);
+
+	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+		struct perf_event *event = cpuc->events[bit];
+
+		handled++;
+
+		if (!test_bit(bit, cpuc->active_mask))
+			continue;
+
+		if (!intel_pmu_save_and_restart(event))
+			continue;
+
+		perf_sample_data_init(&data, 0, event->hw.last_period);
+
+		if (perf_event_overflow(event, &data, regs))
+			x86_pmu_stop(event, 0);
+	}
+
+	/*
+	 * Repeat if there is more work to be done:
+	 */
+	status = knc_pmu_get_status();
+	if (status)
+		goto again;
+
+done:
+	knc_pmu_enable_all(0);
+
+	return handled;
+}
+
+
 PMU_FORMAT_ATTR(event, "config:0-7" );
 PMU_FORMAT_ATTR(umask, "config:8-15" );
 PMU_FORMAT_ATTR(edge, "config:18" );
@@ -214,7 +286,7 @@ static struct attribute *intel_knc_formats_attr[] = {
 
 static __initconst struct x86_pmu knc_pmu = {
 	.name			= "knc",
-	.handle_irq		= x86_pmu_handle_irq,
+	.handle_irq		= knc_pmu_handle_irq,
 	.disable_all		= knc_pmu_disable_all,
 	.enable_all		= knc_pmu_enable_all,
 	.enable			= knc_pmu_enable_event,
@@ -226,12 +298,11 @@ static __initconst struct x86_pmu knc_pmu = {
 	.event_map		= knc_pmu_event_map,
 	.max_events		= ARRAY_SIZE(knc_perfmon_event_map),
 	.apic			= 1,
-	.max_period		= (1ULL << 31) - 1,
+	.max_period		= (1ULL << 39) - 1,
 	.version		= 0,
 	.num_counters		= 2,
-	/* in theory 40 bits, early silicon is buggy though */
-	.cntval_bits		= 32,
-	.cntval_mask		= (1ULL << 32) - 1,
+	.cntval_bits		= 40,
+	.cntval_mask		= (1ULL << 40) - 1,
 	.get_event_constraints	= x86_get_event_constraints,
 	.event_constraints	= knc_event_constraints,
 	.format_attrs		= intel_knc_formats_attr,
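
Note (not part of the commit above): with .cntval_bits raised to 40, the generic x86 perf code computes counter deltas by shifting both raw MSR reads left by 64 - cntval_bits so that 64-bit arithmetic absorbs any counter wraparound, and .max_period = (1ULL << 39) - 1 keeps the programmed sampling period within half the 40-bit counter range. The following stand-alone, user-space sketch illustrates that delta calculation for a 40-bit counter; the names and sample values are hypothetical and not taken from the kernel sources.

/*
 * Illustrative sketch only -- not part of the commit above.  It mirrors
 * the shift trick used by the kernel's generic x86 counter-update path
 * for a 40-bit counter; the names below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define KNC_CNTVAL_BITS	40
#define KNC_CNTVAL_MASK	((1ULL << KNC_CNTVAL_BITS) - 1)

static uint64_t knc_counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
	/*
	 * Shift both 40-bit reads into the top of a 64-bit word so the
	 * subtraction wraps correctly even if the counter overflowed
	 * between the two reads, then shift the result back down.
	 */
	int shift = 64 - KNC_CNTVAL_BITS;

	return ((new_raw << shift) - (prev_raw << shift)) >> shift;
}

int main(void)
{
	uint64_t prev_raw = KNC_CNTVAL_MASK - 5;	/* just below 2^40 */
	uint64_t new_raw  = 10;				/* counter wrapped */

	/* Prints "delta = 16": 6 counts up to the wrap plus 10 after it. */
	printf("delta = %llu\n",
	       (unsigned long long)knc_counter_delta(prev_raw, new_raw));

	return 0;
}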