author    Ingo Molnar <mingo@elte.hu>  2008-12-22 05:10:42 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-12-23 06:45:25 -0500
commit    2f18d1e8d07ae67dd0afce875287756d4bd31a46
tree      bad96eb7e3330ac470b98d22ec7650da7b7b8910 /arch/x86
parent    f650a672359819454c3d8d4135ecd1558cde0b24
x86, perfcounters: add support for fixed-function pmcs
Impact: extend performance counter support on x86 Intel CPUs

Modern Intel CPUs have 3 "fixed-function" performance counters, which
count these hardware events:

    Instr_Retired.Any
    CPU_CLK_Unhalted.Core
    CPU_CLK_Unhalted.Ref

Add support for them to the performance counters subsystem.

Their use is transparent to user-space: the counter scheduler is
extended to automatically recognize the cases where a fixed-function
PMC can be utilized instead of a generic PMC. In such cases the generic
PMC is kept available for more counters.

The above fixed-function events map to these generic counter hw events:

    PERF_COUNT_INSTRUCTIONS
    PERF_COUNT_CPU_CYCLES
    PERF_COUNT_BUS_CYCLES

(The 'bus' cycles are in reality often CPU-ish cycles, just with a
fixed frequency.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
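
For orientation before the diff: the three fixed-function counters are all
controlled through one MSR (MSR_ARCH_PERFMON_FIXED_CTR_CTRL), with a 4-bit
enable field per counter. Below is a minimal stand-alone sketch of that
read-modify-write, mirroring what __pmc_fixed_enable() in the patch does;
the fixed_ctrl_enable() helper and the user-space framing are illustrative
only and are not part of the kernel code.

/*
 * Illustrative sketch only -- not part of this patch. Each fixed-function
 * counter owns a 4-bit field in the control MSR, so enabling counter 'idx'
 * means clearing that nibble and installing fresh enable bits.
 */
#include <stdint.h>
#include <stdio.h>

#define FIXED_EN_OS   0x1ULL   /* count while in ring 0 (kernel)  */
#define FIXED_EN_USR  0x2ULL   /* count while in ring 3 (user)    */
#define FIXED_EN_PMI  0x8ULL   /* raise an interrupt on overflow  */

static uint64_t fixed_ctrl_enable(uint64_t ctrl_val, int idx, int count_os)
{
        uint64_t bits = FIXED_EN_PMI | FIXED_EN_USR;
        uint64_t mask = 0xfULL << (idx * 4);

        if (count_os)
                bits |= FIXED_EN_OS;

        ctrl_val &= ~mask;              /* clear this counter's 4-bit field */
        ctrl_val |= bits << (idx * 4);  /* install the new enable bits      */

        return ctrl_val;
}

int main(void)
{
        /* Enable fixed counter 1 (CPU_CLK_Unhalted.Core), user + kernel: */
        uint64_t ctrl = fixed_ctrl_enable(0, 1, 1);

        printf("%016llx\n", (unsigned long long)ctrl);  /* prints ...00b0 */
        return 0;
}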
Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/include/asm/perf_counter.h  |   8
 -rw-r--r--  arch/x86/kernel/cpu/perf_counter.c   | 149
 2 files changed, 133 insertions, 24 deletions
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 13745deb16c8..2e08ed736647 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -23,6 +23,11 @@
 #define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
 #define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
 
+/*
+ * Includes eventsel and unit mask as well:
+ */
+#define ARCH_PERFMON_EVENT_MASK 0xffff
+
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
@@ -69,12 +74,15 @@ union cpuid10_edx {
 
 /* Instr_Retired.Any: */
 #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
+#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
 
 /* CPU_CLK_Unhalted.Core: */
 #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
+#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
 
 /* CPU_CLK_Unhalted.Ref: */
 #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
+#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
 
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 86b2fdd344a6..da46eca12543 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -26,6 +26,7 @@ static bool perf_counters_initialized __read_mostly;
  */
 static int nr_counters_generic __read_mostly;
 static u64 perf_counter_mask __read_mostly;
+static u64 counter_value_mask __read_mostly;
 
 static int nr_counters_fixed __read_mostly;
 
@@ -120,9 +121,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
                 hwc->nmi = 1;
         }
 
-        hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
-        hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
-
         hwc->irq_period = hw_event->irq_period;
         /*
          * Intel PMCs cannot be accessed sanely above 32 bit width,
@@ -184,15 +182,33 @@ void hw_perf_restore(u64 ctrl)
 EXPORT_SYMBOL_GPL(hw_perf_restore);
 
 static inline void
+__pmc_fixed_disable(struct perf_counter *counter,
+                    struct hw_perf_counter *hwc, unsigned int __idx)
+{
+        int idx = __idx - X86_PMC_IDX_FIXED;
+        u64 ctrl_val, mask;
+        int err;
+
+        mask = 0xfULL << (idx * 4);
+
+        rdmsrl(hwc->config_base, ctrl_val);
+        ctrl_val &= ~mask;
+        err = checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
 __pmc_generic_disable(struct perf_counter *counter,
                       struct hw_perf_counter *hwc, unsigned int idx)
 {
         int err;
 
+        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
+                return __pmc_fixed_disable(counter, hwc, idx);
+
         err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
 }
 
-static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]);
+static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -202,8 +218,9 @@ static void
 __hw_perf_counter_set_period(struct perf_counter *counter,
                              struct hw_perf_counter *hwc, int idx)
 {
-        s32 left = atomic64_read(&hwc->period_left);
+        s64 left = atomic64_read(&hwc->period_left);
         s32 period = hwc->irq_period;
+        int err;
 
         /*
          * If we are way outside a reasoable range then just skip forward:
@@ -224,21 +241,64 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
          * The hw counter starts counting from this counter offset,
          * mark it to be able to extra future deltas:
          */
-        atomic64_set(&hwc->prev_count, (u64)(s64)-left);
+        atomic64_set(&hwc->prev_count, (u64)-left);
 
-        wrmsr(hwc->counter_base + idx, -left, 0);
+        err = checking_wrmsrl(hwc->counter_base + idx,
+                              (u64)(-left) & counter_value_mask);
+}
+
+static inline void
+__pmc_fixed_enable(struct perf_counter *counter,
+                   struct hw_perf_counter *hwc, unsigned int __idx)
+{
+        int idx = __idx - X86_PMC_IDX_FIXED;
+        u64 ctrl_val, bits, mask;
+        int err;
+
+        /*
+         * Enable IRQ generation (0x8) and ring-3 counting (0x2),
+         * and enable ring-0 counting if allowed:
+         */
+        bits = 0x8ULL | 0x2ULL;
+        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+                bits |= 0x1;
+        bits <<= (idx * 4);
+        mask = 0xfULL << (idx * 4);
+
+        rdmsrl(hwc->config_base, ctrl_val);
+        ctrl_val &= ~mask;
+        ctrl_val |= bits;
+        err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
 static void
 __pmc_generic_enable(struct perf_counter *counter,
                      struct hw_perf_counter *hwc, int idx)
 {
+        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
+                return __pmc_fixed_enable(counter, hwc, idx);
+
         wrmsr(hwc->config_base + idx,
               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
 }
 
-static int fixed_mode_idx(struct hw_perf_counter *hwc)
+static int
+fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 {
+        unsigned int event;
+
+        if (unlikely(hwc->nmi))
+                return -1;
+
+        event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+
+        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS]))
+                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
+        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_CPU_CYCLES]))
+                return X86_PMC_IDX_FIXED_CPU_CYCLES;
+        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_BUS_CYCLES]))
+                return X86_PMC_IDX_FIXED_BUS_CYCLES;
+
         return -1;
 }
 
@@ -249,16 +309,39 @@ static int pmc_generic_enable(struct perf_counter *counter)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
         struct hw_perf_counter *hwc = &counter->hw;
-        int idx = hwc->idx;
+        int idx;
 
-        /* Try to get the previous counter again */
-        if (test_and_set_bit(idx, cpuc->used)) {
-                idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
-                if (idx == nr_counters_generic)
-                        return -EAGAIN;
+        idx = fixed_mode_idx(counter, hwc);
+        if (idx >= 0) {
+                /*
+                 * Try to get the fixed counter, if that is already taken
+                 * then try to get a generic counter:
+                 */
+                if (test_and_set_bit(idx, cpuc->used))
+                        goto try_generic;
 
-                set_bit(idx, cpuc->used);
+                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
+                /*
+                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
+                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
+                 */
+                hwc->counter_base =
+                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                 hwc->idx = idx;
+        } else {
+                idx = hwc->idx;
+                /* Try to get the previous generic counter again */
+                if (test_and_set_bit(idx, cpuc->used)) {
+try_generic:
+                        idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
+                        if (idx == nr_counters_generic)
+                                return -EAGAIN;
+
+                        set_bit(idx, cpuc->used);
+                        hwc->idx = idx;
+                }
+                hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
+                hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
         }
 
         perf_counters_lapic_init(hwc->nmi);
@@ -266,6 +349,10 @@ static int pmc_generic_enable(struct perf_counter *counter)
         __pmc_generic_disable(counter, hwc, idx);
 
         cpuc->counters[idx] = counter;
+        /*
+         * Make it visible before enabling the hw:
+         */
+        smp_wmb();
 
         __hw_perf_counter_set_period(counter, hwc, idx);
         __pmc_generic_enable(counter, hwc, idx);
@@ -275,7 +362,7 @@ static int pmc_generic_enable(struct perf_counter *counter)
 
 void perf_counter_print_debug(void)
 {
-        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
+        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
         struct cpu_hw_counters *cpuc;
         int cpu, idx;
 
@@ -290,11 +377,13 @@ void perf_counter_print_debug(void)
         rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
         rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
+        rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
 
         printk(KERN_INFO "\n");
         printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl);
         printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status);
         printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);
+        printk(KERN_INFO "CPU#%d: fixed: %016llx\n", cpu, fixed);
         printk(KERN_INFO "CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
 
         for (idx = 0; idx < nr_counters_generic; idx++) {
@@ -303,13 +392,19 @@ void perf_counter_print_debug(void)
 
                 prev_left = per_cpu(prev_left[idx], cpu);
 
-                printk(KERN_INFO "CPU#%d: PMC%d ctrl: %016llx\n",
+                printk(KERN_INFO "CPU#%d: gen-PMC%d ctrl: %016llx\n",
                         cpu, idx, pmc_ctrl);
-                printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
+                printk(KERN_INFO "CPU#%d: gen-PMC%d count: %016llx\n",
                         cpu, idx, pmc_count);
-                printk(KERN_INFO "CPU#%d: PMC%d left: %016llx\n",
+                printk(KERN_INFO "CPU#%d: gen-PMC%d left: %016llx\n",
                         cpu, idx, prev_left);
         }
+        for (idx = 0; idx < nr_counters_fixed; idx++) {
+                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
+
+                printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n",
+                        cpu, idx, pmc_count);
+        }
         local_irq_enable();
 }
 
@@ -323,6 +418,11 @@ static void pmc_generic_disable(struct perf_counter *counter)
 
         clear_bit(idx, cpuc->used);
         cpuc->counters[idx] = NULL;
+        /*
+         * Make sure the cleared pointer becomes visible before we
+         * (potentially) free the counter:
+         */
+        smp_wmb();
 
         /*
          * Drain the remaining delta count out of a counter
@@ -353,14 +453,11 @@ static void perf_save_and_restart(struct perf_counter *counter)
 {
         struct hw_perf_counter *hwc = &counter->hw;
         int idx = hwc->idx;
-        u64 pmc_ctrl;
-
-        rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
 
         x86_perf_counter_update(counter, hwc, idx);
         __hw_perf_counter_set_period(counter, hwc, idx);
 
-        if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
+        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                 __pmc_generic_enable(counter, hwc, idx);
 }
 
@@ -373,6 +470,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
          * Store sibling timestamps (if any):
          */
         list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
+
                 x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
                 perf_store_irq_data(sibling, counter->hw_event.type);
                 perf_store_irq_data(sibling, atomic64_read(&counter->count));
@@ -403,7 +501,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 
 again:
         ack = status;
-        for_each_bit(bit, (unsigned long *) &status, nr_counters_generic) {
+        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                 struct perf_counter *counter = cpuc->counters[bit];
 
                 clear_bit(bit, (unsigned long *) &status);
@@ -561,6 +659,9 @@ void __init init_hw_perf_counters(void)
         perf_max_counters = nr_counters_generic;
 
         printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width);
+        counter_value_mask = (1ULL << eax.split.bit_width) - 1;
+        printk(KERN_INFO "... value mask: %016Lx\n", counter_value_mask);
+
         printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length);
 
         nr_counters_fixed = edx.split.num_counters_fixed;