author    Jaswinder Singh Rajput <jaswinderrajput@gmail.com>    2009-02-28 08:15:39 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-03-02 05:31:44 -0500
commit    a1ef58f442542d8b3e3b963339fbc522c36e827c
tree      3dc6b0837b9f289fc9fddf84fbd52db51397dc8b /arch/x86/kernel/cpu/perf_counter.c
parent    169e41eb7f5464c077a7e0e129f025759d04cc54
x86: use pr_info in perf_counter.c
Impact: cleanup

Using pr_info() in perf_counter.c fixes various 80-character warnings
and also the indentation of a conditional statement.

Signed-off-by: Jaswinder Singh Rajput <jaswinderrajput@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
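For context: pr_info() is not a new logging mechanism, just a convenience
macro over printk(). A minimal sketch of its definition, as found in the
kernel headers (the optional pr_fmt() prefix hook defaults to a no-op when
a file does not define it), shows why the conversion is behavior-preserving
and simply shortens each call site:

    /* pr_fmt() lets a file prepend a prefix to every pr_*() message;
     * if the file never defines it, it is a plain pass-through. */
    #ifndef pr_fmt
    #define pr_fmt(fmt) fmt
    #endif

    /* pr_info(fmt, ...) is exactly printk() at the KERN_INFO level. */
    #define pr_info(fmt, ...) \
            printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

So, for example,

    printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);

and

    pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);

emit the same message; the shorter spelling is what lets the long lines in
the hunks below fit within 80 columns.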
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
 arch/x86/kernel/cpu/perf_counter.c | 48 ++++++++++++++++++------------------
 1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index a1f3646a3e8e..3b65f19a6681 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -454,18 +454,18 @@ void perf_counter_print_debug(void)
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
 		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
 
-		printk(KERN_INFO "\n");
-		printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
-		printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
-		printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);
-		printk(KERN_INFO "CPU#%d: fixed:      %016llx\n", cpu, fixed);
+		pr_info("\n");
+		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
+		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
+		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
+		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
 	}
-	printk(KERN_INFO "CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);
+	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);
 
 	for (idx = 0; idx < nr_counters_generic; idx++) {
 		rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl);
@@ -473,17 +473,17 @@ void perf_counter_print_debug(void)
 
 		prev_left = per_cpu(prev_left[idx], cpu);
 
-		printk(KERN_INFO "CPU#%d: gen-PMC%d ctrl:  %016llx\n",
+		pr_info("CPU#%d: gen-PMC%d ctrl:  %016llx\n",
 			cpu, idx, pmc_ctrl);
-		printk(KERN_INFO "CPU#%d: gen-PMC%d count: %016llx\n",
+		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
 			cpu, idx, pmc_count);
-		printk(KERN_INFO "CPU#%d: gen-PMC%d left:  %016llx\n",
+		pr_info("CPU#%d: gen-PMC%d left:  %016llx\n",
 			cpu, idx, prev_left);
 	}
 	for (idx = 0; idx < nr_counters_fixed; idx++) {
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
-		printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n",
+		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
 			cpu, idx, pmc_count);
 	}
 	local_irq_enable();
@@ -773,10 +773,10 @@ static struct pmc_x86_ops *pmc_intel_init(void)
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
 		return NULL;
 
-	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");
-	printk(KERN_INFO "... version:         %d\n", eax.split.version_id);
-	printk(KERN_INFO "... bit width:       %d\n", eax.split.bit_width);
-	printk(KERN_INFO "... mask length:     %d\n", eax.split.mask_length);
+	pr_info("Intel Performance Monitoring support detected.\n");
+	pr_info("... version:         %d\n", eax.split.version_id);
+	pr_info("... bit width:       %d\n", eax.split.bit_width);
+	pr_info("... mask length:     %d\n", eax.split.mask_length);
 
 	nr_counters_generic = eax.split.num_counters;
 	nr_counters_fixed = edx.split.num_counters_fixed;
@@ -790,7 +790,7 @@ static struct pmc_x86_ops *pmc_amd_init(void)
 	nr_counters_generic = 4;
 	nr_counters_fixed = 0;
 
-	printk(KERN_INFO "AMD Performance Monitoring support detected.\n");
+	pr_info("AMD Performance Monitoring support detected.\n");
 
 	return &pmc_amd_ops;
 }
@@ -811,7 +811,7 @@ void __init init_hw_perf_counters(void)
 	if (!pmc_ops)
 		return;
 
-	printk(KERN_INFO "... num counters:    %d\n", nr_counters_generic);
+	pr_info("... num counters:    %d\n", nr_counters_generic);
 	if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
 		nr_counters_generic = X86_PMC_MAX_GENERIC;
 		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
@@ -820,18 +820,18 @@ void __init init_hw_perf_counters(void)
 	perf_counter_mask = (1 << nr_counters_generic) - 1;
 	perf_max_counters = nr_counters_generic;
 
-	printk(KERN_INFO "... value mask:      %016Lx\n", counter_value_mask);
+	pr_info("... value mask:      %016Lx\n", counter_value_mask);
 
 	if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
 		nr_counters_fixed = X86_PMC_MAX_FIXED;
 		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
 			nr_counters_fixed, X86_PMC_MAX_FIXED);
 	}
-	printk(KERN_INFO "... fixed counters:  %d\n", nr_counters_fixed);
+	pr_info("... fixed counters:  %d\n", nr_counters_fixed);
 
 	perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
-	printk(KERN_INFO "... counter mask:    %016Lx\n", perf_counter_mask);
+	pr_info("... counter mask:    %016Lx\n", perf_counter_mask);
 	perf_counters_initialized = true;
 
 	perf_counters_lapic_init(0);
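A related idiom, not used by this patch but a common follow-up to this kind
of cleanup: once a file calls pr_info() throughout, defining pr_fmt() before
the first #include gives every message a consistent prefix with no change to
any call site. A hypothetical sketch (the "perf_counter: " prefix here is
illustrative, not something this patch adds):

    /* Must be defined before any #include so the header picks it up. */
    #define pr_fmt(fmt) "perf_counter: " fmt

    #include <linux/kernel.h>

    /* Now pr_info("... version: %d\n", v) prints
     * "perf_counter: ... version: 1" at KERN_INFO level. */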