path: root/arch/x86/kernel/cpu/perf_event.c
author		Ingo Molnar <mingo@elte.hu>	2010-03-12 15:06:35 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-12 15:06:37 -0500
commit		0308635917273030db6121d67c41ef2279b30340 (patch)
tree		f65e386905199f7a1060119c53a51eb15c32b8e6 /arch/x86/kernel/cpu/perf_event.c
parent		3997d3776a6e89586e76a0ef355bfbbd8a76966c (diff)
parent		0b861225a5890f22445f08ca9cc7a87cff276ff7 (diff)
Merge branch 'perf/x86' into perf/core
Merge reason: The new P4 driver is stable and ready now for more testing.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	49
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a6d92c34135c..978d297170a1 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -190,6 +190,8 @@ struct x86_pmu {
 	void		(*enable_all)(void);
 	void		(*enable)(struct perf_event *);
 	void		(*disable)(struct perf_event *);
+	int		(*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
+	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
@@ -415,6 +417,25 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
 	return 0;
 }
 
+static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+{
+	/*
+	 * Generate PMC IRQs:
+	 * (keep 'enabled' bit clear for now)
+	 */
+	hwc->config = ARCH_PERFMON_EVENTSEL_INT;
+
+	/*
+	 * Count user and OS events unless requested not to
+	 */
+	if (!attr->exclude_user)
+		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
+	if (!attr->exclude_kernel)
+		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+
+	return 0;
+}
+
 /*
  * Setup the hardware configuration for a given attr_type
  */
@@ -446,23 +467,13 @@ static int __hw_perf_event_init(struct perf_event *event)
 
 	event->destroy = hw_perf_event_destroy;
 
-	/*
-	 * Generate PMC IRQs:
-	 * (keep 'enabled' bit clear for now)
-	 */
-	hwc->config = ARCH_PERFMON_EVENTSEL_INT;
-
 	hwc->idx = -1;
 	hwc->last_cpu = -1;
 	hwc->last_tag = ~0ULL;
 
-	/*
-	 * Count user and OS events unless requested not to.
-	 */
-	if (!attr->exclude_user)
-		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
-	if (!attr->exclude_kernel)
-		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+	/* Processor specifics */
+	if (x86_pmu.hw_config(attr, hwc))
+		return -EOPNOTSUPP;
 
 	if (!hwc->sample_period) {
 		hwc->sample_period = x86_pmu.max_period;
@@ -517,7 +528,7 @@ static int __hw_perf_event_init(struct perf_event *event)
 		return -EOPNOTSUPP;
 
 		/* BTS is currently only allowed for user-mode. */
-		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+		if (!attr->exclude_kernel)
 			return -EOPNOTSUPP;
 	}
 
@@ -925,7 +936,7 @@ static int x86_pmu_enable(struct perf_event *event)
 	if (n < 0)
 		return n;
 
-	ret = x86_schedule_events(cpuc, n, assign);
+	ret = x86_pmu.schedule_events(cpuc, n, assign);
 	if (ret)
 		return ret;
 	/*
@@ -1252,12 +1263,15 @@ int hw_perf_group_sched_in(struct perf_event *leader,
 	int assign[X86_PMC_IDX_MAX];
 	int n0, n1, ret;
 
+	if (!x86_pmu_initialized())
+		return 0;
+
 	/* n0 = total number of events */
 	n0 = collect_events(cpuc, leader, true);
 	if (n0 < 0)
 		return n0;
 
-	ret = x86_schedule_events(cpuc, n0, assign);
+	ret = x86_pmu.schedule_events(cpuc, n0, assign);
 	if (ret)
 		return ret;
 
@@ -1307,6 +1321,7 @@ undo:
 
 #include "perf_event_amd.c"
 #include "perf_event_p6.c"
+#include "perf_event_p4.c"
 #include "perf_event_intel_lbr.c"
 #include "perf_event_intel_ds.c"
 #include "perf_event_intel.c"
@@ -1509,7 +1524,7 @@ static int validate_group(struct perf_event *event)
 
 	fake_cpuc->n_events = n;
 
-	ret = x86_schedule_events(fake_cpuc, n, NULL);
+	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
 
 out_free:
 	kfree(fake_cpuc);
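
The structural point of the diff: perf_event.c no longer hard-codes the architectural EVENTSEL layout or calls x86_schedule_events() directly; both are reached through the new hw_config and schedule_events members of struct x86_pmu, so a vendor driver such as the new P4 one can override either. A minimal sketch of such a hookup follows. Only the callback signatures and the generic helpers x86_hw_config()/x86_schedule_events() come from the diff above; p4_hw_config() as written here and the P4_CCCR_* flags are hypothetical placeholders, the real encoding lives in perf_event_p4.c.

/*
 * Sketch only: how a vendor driver could fill in the two new
 * x86_pmu callbacks.  P4_CCCR_USR/P4_CCCR_OS are hypothetical
 * names standing in for the real Netburst register bits.
 */
static int p4_hw_config(struct perf_event_attr *attr,
			struct hw_perf_event *hwc)
{
	/*
	 * Netburst encodes events in ESCR/CCCR register pairs rather
	 * than the architectural EVENTSEL layout, so it cannot reuse
	 * x86_hw_config() and supplies its own encoding here.
	 */
	hwc->config = 0;
	if (!attr->exclude_user)
		hwc->config |= P4_CCCR_USR;	/* hypothetical flag */
	if (!attr->exclude_kernel)
		hwc->config |= P4_CCCR_OS;	/* hypothetical flag */

	return 0;
}

static struct x86_pmu p4_pmu = {
	.name			= "Netburst P4/Xeon",
	.hw_config		= p4_hw_config,		/* vendor-specific encoding */
	.schedule_events	= x86_schedule_events,	/* generic scheduler reused */
	/* remaining callbacks (enable, disable, event_map, ...) omitted */
};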