author		Ingo Molnar <mingo@elte.hu>	2009-05-29 05:25:09 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-08 06:29:30 -0400
commit		1123e3ad73697d64ad99f0104bbe49f8b52d7d65 (patch)
tree		e3d8a1fb766dcabd6df6b1394e48a34bf25daf50 /arch
parent		ad689220614b6c7c0b13b70d742f358e9310e71e (diff)
perf_counter: Clean up x86 boot messages
Standardize and tidy up all the messages we print during
perfcounter initialization.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
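The cleanup hinges on the kernel's pr_info()/pr_cont() pairing: pr_info() opens a line (here deliberately without a trailing newline) and pr_cont() appends pieces to it until a "\n" finally closes it, so several code paths can each contribute a fragment of one boot line. A minimal userspace sketch of that line-building pattern, with printf() standing in for the kernel macros (the macro stand-ins and the hard-coded "Intel" string are illustrative, not part of the patch):

	#include <stdio.h>

	/* Userspace stand-ins for the kernel's pr_info()/pr_cont(). */
	#define pr_info(fmt, ...) printf(fmt, ##__VA_ARGS__)
	#define pr_cont(fmt, ...) printf(fmt, ##__VA_ARGS__)

	int main(void)
	{
		/* Open the line: no trailing "\n", so pr_cont() appends. */
		pr_info("Performance Counters: ");

		/* Fragments contributed by later code paths, as in the patch: */
		pr_cont("Core2 events, ");
		pr_cont("%s PMU driver.\n", "Intel");	/* "\n" closes the line */

		return 0;
	}

Run together, this prints a single line: "Performance Counters: Core2 events, Intel PMU driver."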
Diffstat (limited to 'arch')
 arch/x86/kernel/cpu/perf_counter.c | 46
 1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 71590e09d16e..0339d195a3f0 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -1298,23 +1298,22 @@ static int intel_pmu_init(void)
 	if (version < 2)
 		return -ENODEV;
 
 	x86_pmu = intel_pmu;
 	x86_pmu.version = version;
 	x86_pmu.num_counters = eax.split.num_counters;
+	x86_pmu.counter_bits = eax.split.bit_width;
+	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
 
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
 	 * assume at least 3 counters:
 	 */
 	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
-
-	x86_pmu.counter_bits = eax.split.bit_width;
-	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
 
 	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
 
 	/*
-	 * Nehalem:
+	 * Install the hw-cache-events table:
 	 */
 	switch (boot_cpu_data.x86_model) {
 	case 17:
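The two hoisted assignments derive the counter value mask straight from the CPUID-reported bit width: a left shift in 64-bit arithmetic followed by a decrement yields an all-ones mask of that width. A self-contained sketch of the same expression (the 48-bit width is an illustrative value, not read from CPUID here):

	#include <stdio.h>

	int main(void)
	{
		/* e.g. eax.split.bit_width as reported by CPUID; illustrative */
		unsigned int bit_width = 48;

		/* Same expression as the patch: an all-ones mask of bit_width
		 * bits. 1ULL keeps the shift in 64-bit arithmetic so widths
		 * above 31 do not overflow. */
		unsigned long long counter_mask = (1ULL << bit_width) - 1;

		printf("counter_mask = %016llx\n", counter_mask);
		return 0;	/* prints 0000ffffffffffff for 48 bits */
	}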
@@ -1322,7 +1321,7 @@ static int intel_pmu_init(void)
 		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
 		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);
 
-		pr_info("... installed Core2 event tables\n");
+		pr_cont("Core2 events, ");
 		break;
 	default:
 	case 26:
@@ -1330,14 +1329,14 @@ static int intel_pmu_init(void)
 		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
 		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);
 
-		pr_info("... installed Nehalem/Corei7 event tables\n");
+		pr_cont("Nehalem/Corei7 events, ");
 		break;
 	case 28:
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
 		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);
 
-		pr_info("... installed Atom event tables\n");
+		pr_cont("Atom events, ");
 		break;
 	}
 	return 0;
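Each memcpy() above copies an entire per-CPU-model table of raw event IDs, sized as sizeof(u64) times the product of the three cache enum dimensions (cache type x operation x result). A sketch of that layout and sizing, using illustrative dimension values rather than the kernel's actual enum sizes:

	#include <stdio.h>
	#include <string.h>

	typedef unsigned long long u64;

	/* Illustrative stand-ins for PERF_COUNT_HW_CACHE_MAX,
	 * PERF_COUNT_HW_CACHE_OP_MAX and PERF_COUNT_HW_CACHE_RESULT_MAX. */
	enum { CACHE_MAX = 6, CACHE_OP_MAX = 3, CACHE_RESULT_MAX = 2 };

	/* The live table the PMU code reads from ... */
	static u64 hw_cache_event_ids[CACHE_MAX][CACHE_OP_MAX][CACHE_RESULT_MAX];
	/* ... and a per-model source table (all zero here for brevity). */
	static u64 core2_hw_cache_event_ids[CACHE_MAX][CACHE_OP_MAX][CACHE_RESULT_MAX];

	int main(void)
	{
		/* Same sizing expression as the patch: sizeof(u64) times the
		 * product of the three dimensions covers the whole table. */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(u64) * CACHE_MAX * CACHE_OP_MAX * CACHE_RESULT_MAX);

		printf("copied %zu bytes\n",
		       sizeof(u64) * CACHE_MAX * CACHE_OP_MAX * CACHE_RESULT_MAX);
		return 0;
	}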
@@ -1353,6 +1352,8 @@ void __init init_hw_perf_counters(void)
 {
 	int err;
 
+	pr_info("Performance Counters: ");
+
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_INTEL:
 		err = intel_pmu_init();
@@ -1363,14 +1364,13 @@ void __init init_hw_perf_counters(void)
 	default:
 		return;
 	}
-	if (err != 0)
+	if (err != 0) {
+		pr_cont("no PMU driver, software counters only.\n");
 		return;
+	}
 
-	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
-	pr_info("... version: %d\n", x86_pmu.version);
-	pr_info("... bit width: %d\n", x86_pmu.counter_bits);
+	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
-	pr_info("... num counters: %d\n", x86_pmu.num_counters);
 	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
 		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
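The WARN() guard above clamps a counter count that exceeds the build-time maximum rather than trusting the hardware blindly. A small sketch of the same clamp-and-complain pattern (the limit value 8 and the reported count are illustrative, not the kernel's actual constants):

	#include <stdio.h>

	#define X86_PMC_MAX_GENERIC 8	/* illustrative build-time limit */

	int main(void)
	{
		int num_counters = 12;	/* pretend the CPU reported too many */

		/* Same guard as the kernel: warn loudly, then clamp. */
		if (num_counters > X86_PMC_MAX_GENERIC) {
			fprintf(stderr, "hw perf counters %d > max(%d), clipping!\n",
				num_counters, X86_PMC_MAX_GENERIC);
			num_counters = X86_PMC_MAX_GENERIC;
		}

		printf("using %d generic counters\n", num_counters);
		return 0;
	}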
@@ -1379,23 +1379,25 @@ void __init init_hw_perf_counters(void)
 	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
 	perf_max_counters = x86_pmu.num_counters;
 
-	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
-	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
-
 	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
 		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
 		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
 		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
 	}
-	pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed);
 
 	perf_counter_mask |=
 		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
-	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
-
 	perf_counters_lapic_init();
 	register_die_notifier(&perf_counter_nmi_notifier);
+
+	pr_info("... version: %d\n", x86_pmu.version);
+	pr_info("... bit width: %d\n", x86_pmu.counter_bits);
+	pr_info("... generic counters: %d\n", x86_pmu.num_counters);
+	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
+	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
+	pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed);
+	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
 }
 
 static inline void x86_pmu_read(struct perf_counter *counter)
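The final perf_counter_mask packs the generic counters into the low bits and then ORs in the fixed-purpose counters starting at bit X86_PMC_IDX_FIXED. A self-contained sketch of that two-step construction (the counter counts are illustrative; X86_PMC_IDX_FIXED is 32 in kernels of this era):

	#include <stdio.h>

	#define X86_PMC_IDX_FIXED 32	/* first fixed-counter bit */

	int main(void)
	{
		int num_counters = 4;		/* illustrative generic count */
		int num_counters_fixed = 3;	/* illustrative fixed count */
		unsigned long long perf_counter_mask;

		/* Generic counters occupy the low bits ... */
		perf_counter_mask = (1 << num_counters) - 1;

		/* ... fixed-purpose counters are ORed in at bit 32. */
		perf_counter_mask |=
			((1LL << num_counters_fixed) - 1) << X86_PMC_IDX_FIXED;

		printf("counter mask: %016llx\n", perf_counter_mask);
		return 0;	/* prints 000000070000000f for 4 + 3 counters */
	}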