diff options
| Field | Value |
|---|---|
| author | Chen Yucong <slaoub@gmail.com>, 2016-02-01 22:45:02 -0500 |
| committer | Ingo Molnar <mingo@kernel.org>, 2016-02-03 04:30:03 -0500 |
| commit | 1b74dde7c47c19a73ea3e9fac95ac27b5d3d50c5 (patch) |
| tree | a7aa0b43cec934d61c1d6d03e52f86a729037b91 /arch/x86/kernel |
| parent | 16aaa53756501914a863ae7a15fcb070dc27c3d7 (diff) |
x86/cpu: Convert printk(KERN_<LEVEL> ...) to pr_<level>(...)
- Use the more current logging style pr_<level>(...) instead of the old
printk(KERN_<LEVEL> ...).
- Convert pr_warning() to pr_warn().
Signed-off-by: Chen Yucong <slaoub@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1454384702-21707-1-git-send-email-slaoub@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
27 files changed, 146 insertions, 159 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index a07956a08936..97c59fd60702 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -117,7 +117,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) | |||
| 117 | void (*f_vide)(void); | 117 | void (*f_vide)(void); |
| 118 | u64 d, d2; | 118 | u64 d, d2; |
| 119 | 119 | ||
| 120 | printk(KERN_INFO "AMD K6 stepping B detected - "); | 120 | pr_info("AMD K6 stepping B detected - "); |
| 121 | 121 | ||
| 122 | /* | 122 | /* |
| 123 | * It looks like AMD fixed the 2.6.2 bug and improved indirect | 123 | * It looks like AMD fixed the 2.6.2 bug and improved indirect |
| @@ -133,10 +133,9 @@ static void init_amd_k6(struct cpuinfo_x86 *c) | |||
| 133 | d = d2-d; | 133 | d = d2-d; |
| 134 | 134 | ||
| 135 | if (d > 20*K6_BUG_LOOP) | 135 | if (d > 20*K6_BUG_LOOP) |
| 136 | printk(KERN_CONT | 136 | pr_cont("system stability may be impaired when more than 32 MB are used.\n"); |
| 137 | "system stability may be impaired when more than 32 MB are used.\n"); | ||
| 138 | else | 137 | else |
| 139 | printk(KERN_CONT "probably OK (after B9730xxxx).\n"); | 138 | pr_cont("probably OK (after B9730xxxx).\n"); |
| 140 | } | 139 | } |
| 141 | 140 | ||
| 142 | /* K6 with old style WHCR */ | 141 | /* K6 with old style WHCR */ |
| @@ -154,7 +153,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) | |||
| 154 | wbinvd(); | 153 | wbinvd(); |
| 155 | wrmsr(MSR_K6_WHCR, l, h); | 154 | wrmsr(MSR_K6_WHCR, l, h); |
| 156 | local_irq_restore(flags); | 155 | local_irq_restore(flags); |
| 157 | printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n", | 156 | pr_info("Enabling old style K6 write allocation for %d Mb\n", |
| 158 | mbytes); | 157 | mbytes); |
| 159 | } | 158 | } |
| 160 | return; | 159 | return; |
| @@ -175,7 +174,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) | |||
| 175 | wbinvd(); | 174 | wbinvd(); |
| 176 | wrmsr(MSR_K6_WHCR, l, h); | 175 | wrmsr(MSR_K6_WHCR, l, h); |
| 177 | local_irq_restore(flags); | 176 | local_irq_restore(flags); |
| 178 | printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", | 177 | pr_info("Enabling new style K6 write allocation for %d Mb\n", |
| 179 | mbytes); | 178 | mbytes); |
| 180 | } | 179 | } |
| 181 | 180 | ||
| @@ -202,7 +201,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c) | |||
| 202 | */ | 201 | */ |
| 203 | if (c->x86_model >= 6 && c->x86_model <= 10) { | 202 | if (c->x86_model >= 6 && c->x86_model <= 10) { |
| 204 | if (!cpu_has(c, X86_FEATURE_XMM)) { | 203 | if (!cpu_has(c, X86_FEATURE_XMM)) { |
| 205 | printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); | 204 | pr_info("Enabling disabled K7/SSE Support.\n"); |
| 206 | msr_clear_bit(MSR_K7_HWCR, 15); | 205 | msr_clear_bit(MSR_K7_HWCR, 15); |
| 207 | set_cpu_cap(c, X86_FEATURE_XMM); | 206 | set_cpu_cap(c, X86_FEATURE_XMM); |
| 208 | } | 207 | } |
| @@ -216,9 +215,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c) | |||
| 216 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | 215 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { |
| 217 | rdmsr(MSR_K7_CLK_CTL, l, h); | 216 | rdmsr(MSR_K7_CLK_CTL, l, h); |
| 218 | if ((l & 0xfff00000) != 0x20000000) { | 217 | if ((l & 0xfff00000) != 0x20000000) { |
| 219 | printk(KERN_INFO | 218 | pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", |
| 220 | "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", | 219 | l, ((l & 0x000fffff)|0x20000000)); |
| 221 | l, ((l & 0x000fffff)|0x20000000)); | ||
| 222 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | 220 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); |
| 223 | } | 221 | } |
| 224 | } | 222 | } |
| @@ -485,7 +483,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) | |||
| 485 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | 483 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { |
| 486 | unsigned long pfn = tseg >> PAGE_SHIFT; | 484 | unsigned long pfn = tseg >> PAGE_SHIFT; |
| 487 | 485 | ||
| 488 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | 486 | pr_debug("tseg: %010llx\n", tseg); |
| 489 | if (pfn_range_is_mapped(pfn, pfn + 1)) | 487 | if (pfn_range_is_mapped(pfn, pfn + 1)) |
| 490 | set_memory_4k((unsigned long)__va(tseg), 1); | 488 | set_memory_4k((unsigned long)__va(tseg), 1); |
| 491 | } | 489 | } |
| @@ -500,8 +498,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) | |||
| 500 | 498 | ||
| 501 | rdmsrl(MSR_K7_HWCR, val); | 499 | rdmsrl(MSR_K7_HWCR, val); |
| 502 | if (!(val & BIT(24))) | 500 | if (!(val & BIT(24))) |
| 503 | printk(KERN_WARNING FW_BUG "TSC doesn't count " | 501 | pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n"); |
| 504 | "with P0 frequency!\n"); | ||
| 505 | } | 502 | } |
| 506 | } | 503 | } |
| 507 | 504 | ||
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c index 04f0fe5af83e..a972ac4c7e7d 100644 --- a/arch/x86/kernel/cpu/bugs_64.c +++ b/arch/x86/kernel/cpu/bugs_64.c | |||
| @@ -15,7 +15,7 @@ void __init check_bugs(void) | |||
| 15 | { | 15 | { |
| 16 | identify_boot_cpu(); | 16 | identify_boot_cpu(); |
| 17 | #if !defined(CONFIG_SMP) | 17 | #if !defined(CONFIG_SMP) |
| 18 | printk(KERN_INFO "CPU: "); | 18 | pr_info("CPU: "); |
| 19 | print_cpu_info(&boot_cpu_data); | 19 | print_cpu_info(&boot_cpu_data); |
| 20 | #endif | 20 | #endif |
| 21 | alternative_instructions(); | 21 | alternative_instructions(); |
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index ae20be6e483c..ce197bb7c129 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
| @@ -29,7 +29,7 @@ static void init_c3(struct cpuinfo_x86 *c) | |||
| 29 | rdmsr(MSR_VIA_FCR, lo, hi); | 29 | rdmsr(MSR_VIA_FCR, lo, hi); |
| 30 | lo |= ACE_FCR; /* enable ACE unit */ | 30 | lo |= ACE_FCR; /* enable ACE unit */ |
| 31 | wrmsr(MSR_VIA_FCR, lo, hi); | 31 | wrmsr(MSR_VIA_FCR, lo, hi); |
| 32 | printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n"); | 32 | pr_info("CPU: Enabled ACE h/w crypto\n"); |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | /* enable RNG unit, if present and disabled */ | 35 | /* enable RNG unit, if present and disabled */ |
| @@ -37,7 +37,7 @@ static void init_c3(struct cpuinfo_x86 *c) | |||
| 37 | rdmsr(MSR_VIA_RNG, lo, hi); | 37 | rdmsr(MSR_VIA_RNG, lo, hi); |
| 38 | lo |= RNG_ENABLE; /* enable RNG unit */ | 38 | lo |= RNG_ENABLE; /* enable RNG unit */ |
| 39 | wrmsr(MSR_VIA_RNG, lo, hi); | 39 | wrmsr(MSR_VIA_RNG, lo, hi); |
| 40 | printk(KERN_INFO "CPU: Enabled h/w RNG\n"); | 40 | pr_info("CPU: Enabled h/w RNG\n"); |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | /* store Centaur Extended Feature Flags as | 43 | /* store Centaur Extended Feature Flags as |
| @@ -130,7 +130,7 @@ static void init_centaur(struct cpuinfo_x86 *c) | |||
| 130 | name = "C6"; | 130 | name = "C6"; |
| 131 | fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; | 131 | fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK; |
| 132 | fcr_clr = DPDC; | 132 | fcr_clr = DPDC; |
| 133 | printk(KERN_NOTICE "Disabling bugged TSC.\n"); | 133 | pr_notice("Disabling bugged TSC.\n"); |
| 134 | clear_cpu_cap(c, X86_FEATURE_TSC); | 134 | clear_cpu_cap(c, X86_FEATURE_TSC); |
| 135 | break; | 135 | break; |
| 136 | case 8: | 136 | case 8: |
| @@ -163,11 +163,11 @@ static void init_centaur(struct cpuinfo_x86 *c) | |||
| 163 | newlo = (lo|fcr_set) & (~fcr_clr); | 163 | newlo = (lo|fcr_set) & (~fcr_clr); |
| 164 | 164 | ||
| 165 | if (newlo != lo) { | 165 | if (newlo != lo) { |
| 166 | printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", | 166 | pr_info("Centaur FCR was 0x%X now 0x%X\n", |
| 167 | lo, newlo); | 167 | lo, newlo); |
| 168 | wrmsr(MSR_IDT_FCR1, newlo, hi); | 168 | wrmsr(MSR_IDT_FCR1, newlo, hi); |
| 169 | } else { | 169 | } else { |
| 170 | printk(KERN_INFO "Centaur FCR is 0x%X\n", lo); | 170 | pr_info("Centaur FCR is 0x%X\n", lo); |
| 171 | } | 171 | } |
| 172 | /* Emulate MTRRs using Centaur's MCR. */ | 172 | /* Emulate MTRRs using Centaur's MCR. */ |
| 173 | set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); | 173 | set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 37830de8f60a..68a80e9b67fc 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -228,7 +228,7 @@ static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | |||
| 228 | lo |= 0x200000; | 228 | lo |= 0x200000; |
| 229 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | 229 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
| 230 | 230 | ||
| 231 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | 231 | pr_notice("CPU serial number disabled.\n"); |
| 232 | clear_cpu_cap(c, X86_FEATURE_PN); | 232 | clear_cpu_cap(c, X86_FEATURE_PN); |
| 233 | 233 | ||
| 234 | /* Disabling the serial number may affect the cpuid level */ | 234 | /* Disabling the serial number may affect the cpuid level */ |
| @@ -329,9 +329,8 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) | |||
| 329 | if (!warn) | 329 | if (!warn) |
| 330 | continue; | 330 | continue; |
| 331 | 331 | ||
| 332 | printk(KERN_WARNING | 332 | pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n", |
| 333 | "CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n", | 333 | x86_cap_flag(df->feature), df->level); |
| 334 | x86_cap_flag(df->feature), df->level); | ||
| 335 | } | 334 | } |
| 336 | } | 335 | } |
| 337 | 336 | ||
| @@ -510,7 +509,7 @@ void detect_ht(struct cpuinfo_x86 *c) | |||
| 510 | smp_num_siblings = (ebx & 0xff0000) >> 16; | 509 | smp_num_siblings = (ebx & 0xff0000) >> 16; |
| 511 | 510 | ||
| 512 | if (smp_num_siblings == 1) { | 511 | if (smp_num_siblings == 1) { |
| 513 | printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n"); | 512 | pr_info_once("CPU0: Hyper-Threading is disabled\n"); |
| 514 | goto out; | 513 | goto out; |
| 515 | } | 514 | } |
| 516 | 515 | ||
| @@ -531,10 +530,10 @@ void detect_ht(struct cpuinfo_x86 *c) | |||
| 531 | 530 | ||
| 532 | out: | 531 | out: |
| 533 | if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { | 532 | if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { |
| 534 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 533 | pr_info("CPU: Physical Processor ID: %d\n", |
| 535 | c->phys_proc_id); | 534 | c->phys_proc_id); |
| 536 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | 535 | pr_info("CPU: Processor Core ID: %d\n", |
| 537 | c->cpu_core_id); | 536 | c->cpu_core_id); |
| 538 | printed = 1; | 537 | printed = 1; |
| 539 | } | 538 | } |
| 540 | #endif | 539 | #endif |
| @@ -559,9 +558,8 @@ static void get_cpu_vendor(struct cpuinfo_x86 *c) | |||
| 559 | } | 558 | } |
| 560 | } | 559 | } |
| 561 | 560 | ||
| 562 | printk_once(KERN_ERR | 561 | pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \ |
| 563 | "CPU: vendor_id '%s' unknown, using generic init.\n" \ | 562 | "CPU: Your system may be unstable.\n", v); |
| 564 | "CPU: Your system may be unstable.\n", v); | ||
| 565 | 563 | ||
| 566 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 564 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
| 567 | this_cpu = &default_cpu; | 565 | this_cpu = &default_cpu; |
| @@ -760,7 +758,7 @@ void __init early_cpu_init(void) | |||
| 760 | int count = 0; | 758 | int count = 0; |
| 761 | 759 | ||
| 762 | #ifdef CONFIG_PROCESSOR_SELECT | 760 | #ifdef CONFIG_PROCESSOR_SELECT |
| 763 | printk(KERN_INFO "KERNEL supported cpus:\n"); | 761 | pr_info("KERNEL supported cpus:\n"); |
| 764 | #endif | 762 | #endif |
| 765 | 763 | ||
| 766 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { | 764 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { |
| @@ -778,7 +776,7 @@ void __init early_cpu_init(void) | |||
| 778 | for (j = 0; j < 2; j++) { | 776 | for (j = 0; j < 2; j++) { |
| 779 | if (!cpudev->c_ident[j]) | 777 | if (!cpudev->c_ident[j]) |
| 780 | continue; | 778 | continue; |
| 781 | printk(KERN_INFO " %s %s\n", cpudev->c_vendor, | 779 | pr_info(" %s %s\n", cpudev->c_vendor, |
| 782 | cpudev->c_ident[j]); | 780 | cpudev->c_ident[j]); |
| 783 | } | 781 | } |
| 784 | } | 782 | } |
| @@ -1061,7 +1059,7 @@ static void __print_cpu_msr(void) | |||
| 1061 | for (index = index_min; index < index_max; index++) { | 1059 | for (index = index_min; index < index_max; index++) { |
| 1062 | if (rdmsrl_safe(index, &val)) | 1060 | if (rdmsrl_safe(index, &val)) |
| 1063 | continue; | 1061 | continue; |
| 1064 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | 1062 | pr_info(" MSR%08x: %016llx\n", index, val); |
| 1065 | } | 1063 | } |
| 1066 | } | 1064 | } |
| 1067 | } | 1065 | } |
| @@ -1100,19 +1098,19 @@ void print_cpu_info(struct cpuinfo_x86 *c) | |||
| 1100 | } | 1098 | } |
| 1101 | 1099 | ||
| 1102 | if (vendor && !strstr(c->x86_model_id, vendor)) | 1100 | if (vendor && !strstr(c->x86_model_id, vendor)) |
| 1103 | printk(KERN_CONT "%s ", vendor); | 1101 | pr_cont("%s ", vendor); |
| 1104 | 1102 | ||
| 1105 | if (c->x86_model_id[0]) | 1103 | if (c->x86_model_id[0]) |
| 1106 | printk(KERN_CONT "%s", c->x86_model_id); | 1104 | pr_cont("%s", c->x86_model_id); |
| 1107 | else | 1105 | else |
| 1108 | printk(KERN_CONT "%d86", c->x86); | 1106 | pr_cont("%d86", c->x86); |
| 1109 | 1107 | ||
| 1110 | printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model); | 1108 | pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); |
| 1111 | 1109 | ||
| 1112 | if (c->x86_mask || c->cpuid_level >= 0) | 1110 | if (c->x86_mask || c->cpuid_level >= 0) |
| 1113 | printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask); | 1111 | pr_cont(", stepping: 0x%x)\n", c->x86_mask); |
| 1114 | else | 1112 | else |
| 1115 | printk(KERN_CONT ")\n"); | 1113 | pr_cont(")\n"); |
| 1116 | 1114 | ||
| 1117 | print_cpu_msr(c); | 1115 | print_cpu_msr(c); |
| 1118 | } | 1116 | } |
| @@ -1438,7 +1436,7 @@ void cpu_init(void) | |||
| 1438 | 1436 | ||
| 1439 | show_ucode_info_early(); | 1437 | show_ucode_info_early(); |
| 1440 | 1438 | ||
| 1441 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | 1439 | pr_info("Initializing CPU#%d\n", cpu); |
| 1442 | 1440 | ||
| 1443 | if (cpu_feature_enabled(X86_FEATURE_VME) || | 1441 | if (cpu_feature_enabled(X86_FEATURE_VME) || |
| 1444 | cpu_has_tsc || | 1442 | cpu_has_tsc || |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index aaf152e79637..187bb583d0df 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
| @@ -103,7 +103,7 @@ static void check_cx686_slop(struct cpuinfo_x86 *c) | |||
| 103 | local_irq_restore(flags); | 103 | local_irq_restore(flags); |
| 104 | 104 | ||
| 105 | if (ccr5 & 2) { /* possible wrong calibration done */ | 105 | if (ccr5 & 2) { /* possible wrong calibration done */ |
| 106 | printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n"); | 106 | pr_info("Recalibrating delay loop with SLOP bit reset\n"); |
| 107 | calibrate_delay(); | 107 | calibrate_delay(); |
| 108 | c->loops_per_jiffy = loops_per_jiffy; | 108 | c->loops_per_jiffy = loops_per_jiffy; |
| 109 | } | 109 | } |
| @@ -115,7 +115,7 @@ static void set_cx86_reorder(void) | |||
| 115 | { | 115 | { |
| 116 | u8 ccr3; | 116 | u8 ccr3; |
| 117 | 117 | ||
| 118 | printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n"); | 118 | pr_info("Enable Memory access reorder on Cyrix/NSC processor.\n"); |
| 119 | ccr3 = getCx86(CX86_CCR3); | 119 | ccr3 = getCx86(CX86_CCR3); |
| 120 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 120 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
| 121 | 121 | ||
| @@ -128,7 +128,7 @@ static void set_cx86_reorder(void) | |||
| 128 | 128 | ||
| 129 | static void set_cx86_memwb(void) | 129 | static void set_cx86_memwb(void) |
| 130 | { | 130 | { |
| 131 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); | 131 | pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); |
| 132 | 132 | ||
| 133 | /* CCR2 bit 2: unlock NW bit */ | 133 | /* CCR2 bit 2: unlock NW bit */ |
| 134 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); | 134 | setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); |
| @@ -268,7 +268,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) | |||
| 268 | * VSA1 we work around however. | 268 | * VSA1 we work around however. |
| 269 | */ | 269 | */ |
| 270 | 270 | ||
| 271 | printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n"); | 271 | pr_info("Working around Cyrix MediaGX virtual DMA bugs.\n"); |
| 272 | isa_dma_bridge_buggy = 2; | 272 | isa_dma_bridge_buggy = 2; |
| 273 | 273 | ||
| 274 | /* We do this before the PCI layer is running. However we | 274 | /* We do this before the PCI layer is running. However we |
| @@ -426,7 +426,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c) | |||
| 426 | if (dir0 == 5 || dir0 == 3) { | 426 | if (dir0 == 5 || dir0 == 3) { |
| 427 | unsigned char ccr3; | 427 | unsigned char ccr3; |
| 428 | unsigned long flags; | 428 | unsigned long flags; |
| 429 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); | 429 | pr_info("Enabling CPUID on Cyrix processor.\n"); |
| 430 | local_irq_save(flags); | 430 | local_irq_save(flags); |
| 431 | ccr3 = getCx86(CX86_CCR3); | 431 | ccr3 = getCx86(CX86_CCR3); |
| 432 | /* enable MAPEN */ | 432 | /* enable MAPEN */ |
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index d820d8eae96b..73d391ae452f 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
| @@ -56,7 +56,7 @@ detect_hypervisor_vendor(void) | |||
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | if (max_pri) | 58 | if (max_pri) |
| 59 | printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name); | 59 | pr_info("Hypervisor detected: %s\n", x86_hyper->name); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | void init_hypervisor(struct cpuinfo_x86 *c) | 62 | void init_hypervisor(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 565648bc1a0a..05b9211ea0f7 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -61,7 +61,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) | |||
| 61 | */ | 61 | */ |
| 62 | if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && | 62 | if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && |
| 63 | c->microcode < 0x20e) { | 63 | c->microcode < 0x20e) { |
| 64 | printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n"); | 64 | pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); |
| 65 | clear_cpu_cap(c, X86_FEATURE_PSE); | 65 | clear_cpu_cap(c, X86_FEATURE_PSE); |
| 66 | } | 66 | } |
| 67 | 67 | ||
| @@ -140,7 +140,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) | |||
| 140 | if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { | 140 | if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { |
| 141 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | 141 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); |
| 142 | if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { | 142 | if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { |
| 143 | printk(KERN_INFO "Disabled fast string operations\n"); | 143 | pr_info("Disabled fast string operations\n"); |
| 144 | setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); | 144 | setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); |
| 145 | setup_clear_cpu_cap(X86_FEATURE_ERMS); | 145 | setup_clear_cpu_cap(X86_FEATURE_ERMS); |
| 146 | } | 146 | } |
| @@ -176,7 +176,7 @@ int ppro_with_ram_bug(void) | |||
| 176 | boot_cpu_data.x86 == 6 && | 176 | boot_cpu_data.x86 == 6 && |
| 177 | boot_cpu_data.x86_model == 1 && | 177 | boot_cpu_data.x86_model == 1 && |
| 178 | boot_cpu_data.x86_mask < 8) { | 178 | boot_cpu_data.x86_mask < 8) { |
| 179 | printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); | 179 | pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n"); |
| 180 | return 1; | 180 | return 1; |
| 181 | } | 181 | } |
| 182 | return 0; | 182 | return 0; |
| @@ -225,7 +225,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) | |||
| 225 | 225 | ||
| 226 | set_cpu_bug(c, X86_BUG_F00F); | 226 | set_cpu_bug(c, X86_BUG_F00F); |
| 227 | if (!f00f_workaround_enabled) { | 227 | if (!f00f_workaround_enabled) { |
| 228 | printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); | 228 | pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n"); |
| 229 | f00f_workaround_enabled = 1; | 229 | f00f_workaround_enabled = 1; |
| 230 | } | 230 | } |
| 231 | } | 231 | } |
| @@ -244,7 +244,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) | |||
| 244 | * Forcefully enable PAE if kernel parameter "forcepae" is present. | 244 | * Forcefully enable PAE if kernel parameter "forcepae" is present. |
| 245 | */ | 245 | */ |
| 246 | if (forcepae) { | 246 | if (forcepae) { |
| 247 | printk(KERN_WARNING "PAE forced!\n"); | 247 | pr_warn("PAE forced!\n"); |
| 248 | set_cpu_cap(c, X86_FEATURE_PAE); | 248 | set_cpu_cap(c, X86_FEATURE_PAE); |
| 249 | add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); | 249 | add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); |
| 250 | } | 250 | } |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 0b6c52388cf4..6ed779efff26 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
| @@ -444,7 +444,7 @@ static ssize_t store_cache_disable(struct cacheinfo *this_leaf, | |||
| 444 | err = amd_set_l3_disable_slot(nb, cpu, slot, val); | 444 | err = amd_set_l3_disable_slot(nb, cpu, slot, val); |
| 445 | if (err) { | 445 | if (err) { |
| 446 | if (err == -EEXIST) | 446 | if (err == -EEXIST) |
| 447 | pr_warning("L3 slot %d in use/index already disabled!\n", | 447 | pr_warn("L3 slot %d in use/index already disabled!\n", |
| 448 | slot); | 448 | slot); |
| 449 | return err; | 449 | return err; |
| 450 | } | 450 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 4cfba4371a71..517619ea6498 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c | |||
| @@ -115,7 +115,7 @@ static int raise_local(void) | |||
| 115 | int cpu = m->extcpu; | 115 | int cpu = m->extcpu; |
| 116 | 116 | ||
| 117 | if (m->inject_flags & MCJ_EXCEPTION) { | 117 | if (m->inject_flags & MCJ_EXCEPTION) { |
| 118 | printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu); | 118 | pr_info("Triggering MCE exception on CPU %d\n", cpu); |
| 119 | switch (context) { | 119 | switch (context) { |
| 120 | case MCJ_CTX_IRQ: | 120 | case MCJ_CTX_IRQ: |
| 121 | /* | 121 | /* |
| @@ -128,15 +128,15 @@ static int raise_local(void) | |||
| 128 | raise_exception(m, NULL); | 128 | raise_exception(m, NULL); |
| 129 | break; | 129 | break; |
| 130 | default: | 130 | default: |
| 131 | printk(KERN_INFO "Invalid MCE context\n"); | 131 | pr_info("Invalid MCE context\n"); |
| 132 | ret = -EINVAL; | 132 | ret = -EINVAL; |
| 133 | } | 133 | } |
| 134 | printk(KERN_INFO "MCE exception done on CPU %d\n", cpu); | 134 | pr_info("MCE exception done on CPU %d\n", cpu); |
| 135 | } else if (m->status) { | 135 | } else if (m->status) { |
| 136 | printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu); | 136 | pr_info("Starting machine check poll CPU %d\n", cpu); |
| 137 | raise_poll(m); | 137 | raise_poll(m); |
| 138 | mce_notify_irq(); | 138 | mce_notify_irq(); |
| 139 | printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu); | 139 | pr_info("Machine check poll done on CPU %d\n", cpu); |
| 140 | } else | 140 | } else |
| 141 | m->finished = 0; | 141 | m->finished = 0; |
| 142 | 142 | ||
| @@ -183,8 +183,7 @@ static void raise_mce(struct mce *m) | |||
| 183 | start = jiffies; | 183 | start = jiffies; |
| 184 | while (!cpumask_empty(mce_inject_cpumask)) { | 184 | while (!cpumask_empty(mce_inject_cpumask)) { |
| 185 | if (!time_before(jiffies, start + 2*HZ)) { | 185 | if (!time_before(jiffies, start + 2*HZ)) { |
| 186 | printk(KERN_ERR | 186 | pr_err("Timeout waiting for mce inject %lx\n", |
| 187 | "Timeout waiting for mce inject %lx\n", | ||
| 188 | *cpumask_bits(mce_inject_cpumask)); | 187 | *cpumask_bits(mce_inject_cpumask)); |
| 189 | break; | 188 | break; |
| 190 | } | 189 | } |
| @@ -241,7 +240,7 @@ static int inject_init(void) | |||
| 241 | { | 240 | { |
| 242 | if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL)) | 241 | if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL)) |
| 243 | return -ENOMEM; | 242 | return -ENOMEM; |
| 244 | printk(KERN_INFO "Machine check injector initialized\n"); | 243 | pr_info("Machine check injector initialized\n"); |
| 245 | register_mce_write_callback(mce_write); | 244 | register_mce_write_callback(mce_write); |
| 246 | register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, | 245 | register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, |
| 247 | "mce_notify"); | 246 | "mce_notify"); |
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index 12402e10aeff..2a0717bf8033 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c | |||
| @@ -26,14 +26,12 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code) | |||
| 26 | rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); | 26 | rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); |
| 27 | rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); | 27 | rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); |
| 28 | 28 | ||
| 29 | printk(KERN_EMERG | 29 | pr_emerg("CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", |
| 30 | "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n", | 30 | smp_processor_id(), loaddr, lotype); |
| 31 | smp_processor_id(), loaddr, lotype); | ||
| 32 | 31 | ||
| 33 | if (lotype & (1<<5)) { | 32 | if (lotype & (1<<5)) { |
| 34 | printk(KERN_EMERG | 33 | pr_emerg("CPU#%d: Possible thermal failure (CPU on fire ?).\n", |
| 35 | "CPU#%d: Possible thermal failure (CPU on fire ?).\n", | 34 | smp_processor_id()); |
| 36 | smp_processor_id()); | ||
| 37 | } | 35 | } |
| 38 | 36 | ||
| 39 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); | 37 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
| @@ -61,12 +59,10 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c) | |||
| 61 | /* Read registers before enabling: */ | 59 | /* Read registers before enabling: */ |
| 62 | rdmsr(MSR_IA32_P5_MC_ADDR, l, h); | 60 | rdmsr(MSR_IA32_P5_MC_ADDR, l, h); |
| 63 | rdmsr(MSR_IA32_P5_MC_TYPE, l, h); | 61 | rdmsr(MSR_IA32_P5_MC_TYPE, l, h); |
| 64 | printk(KERN_INFO | 62 | pr_info("Intel old style machine check architecture supported.\n"); |
| 65 | "Intel old style machine check architecture supported.\n"); | ||
| 66 | 63 | ||
| 67 | /* Enable MCE: */ | 64 | /* Enable MCE: */ |
| 68 | cr4_set_bits(X86_CR4_MCE); | 65 | cr4_set_bits(X86_CR4_MCE); |
| 69 | printk(KERN_INFO | 66 | pr_info("Intel old style machine check reporting enabled on CPU#%d.\n", |
| 70 | "Intel old style machine check reporting enabled on CPU#%d.\n", | 67 | smp_processor_id()); |
| 71 | smp_processor_id()); | ||
| 72 | } | 68 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 2c5aaf8c2e2f..0b445c2ff735 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
| @@ -190,7 +190,7 @@ static int therm_throt_process(bool new_event, int event, int level) | |||
| 190 | /* if we just entered the thermal event */ | 190 | /* if we just entered the thermal event */ |
| 191 | if (new_event) { | 191 | if (new_event) { |
| 192 | if (event == THERMAL_THROTTLING_EVENT) | 192 | if (event == THERMAL_THROTTLING_EVENT) |
| 193 | printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n", | 193 | pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n", |
| 194 | this_cpu, | 194 | this_cpu, |
| 195 | level == CORE_LEVEL ? "Core" : "Package", | 195 | level == CORE_LEVEL ? "Core" : "Package", |
| 196 | state->count); | 196 | state->count); |
| @@ -198,8 +198,7 @@ static int therm_throt_process(bool new_event, int event, int level) | |||
| 198 | } | 198 | } |
| 199 | if (old_event) { | 199 | if (old_event) { |
| 200 | if (event == THERMAL_THROTTLING_EVENT) | 200 | if (event == THERMAL_THROTTLING_EVENT) |
| 201 | printk(KERN_INFO "CPU%d: %s temperature/speed normal\n", | 201 | pr_info("CPU%d: %s temperature/speed normal\n", this_cpu, |
| 202 | this_cpu, | ||
| 203 | level == CORE_LEVEL ? "Core" : "Package"); | 202 | level == CORE_LEVEL ? "Core" : "Package"); |
| 204 | return 1; | 203 | return 1; |
| 205 | } | 204 | } |
| @@ -417,8 +416,8 @@ static void intel_thermal_interrupt(void) | |||
| 417 | 416 | ||
| 418 | static void unexpected_thermal_interrupt(void) | 417 | static void unexpected_thermal_interrupt(void) |
| 419 | { | 418 | { |
| 420 | printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n", | 419 | pr_err("CPU%d: Unexpected LVT thermal interrupt!\n", |
| 421 | smp_processor_id()); | 420 | smp_processor_id()); |
| 422 | } | 421 | } |
| 423 | 422 | ||
| 424 | static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; | 423 | static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; |
| @@ -499,7 +498,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 499 | 498 | ||
| 500 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { | 499 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { |
| 501 | if (system_state == SYSTEM_BOOTING) | 500 | if (system_state == SYSTEM_BOOTING) |
| 502 | printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu); | 501 | pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu); |
| 503 | return; | 502 | return; |
| 504 | } | 503 | } |
| 505 | 504 | ||
| @@ -557,8 +556,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 557 | l = apic_read(APIC_LVTTHMR); | 556 | l = apic_read(APIC_LVTTHMR); |
| 558 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | 557 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); |
| 559 | 558 | ||
| 560 | printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n", | 559 | pr_info_once("CPU0: Thermal monitoring enabled (%s)\n", |
| 561 | tm2 ? "TM2" : "TM1"); | 560 | tm2 ? "TM2" : "TM1"); |
| 562 | 561 | ||
| 563 | /* enable thermal throttle processing */ | 562 | /* enable thermal throttle processing */ |
| 564 | atomic_set(&therm_throt_en, 1); | 563 | atomic_set(&therm_throt_en, 1); |
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c index 7245980186ee..fcf9ae9384f4 100644 --- a/arch/x86/kernel/cpu/mcheck/threshold.c +++ b/arch/x86/kernel/cpu/mcheck/threshold.c | |||
| @@ -12,8 +12,8 @@ | |||
| 12 | 12 | ||
| 13 | static void default_threshold_interrupt(void) | 13 | static void default_threshold_interrupt(void) |
| 14 | { | 14 | { |
| 15 | printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n", | 15 | pr_err("Unexpected threshold interrupt at vector %x\n", |
| 16 | THRESHOLD_APIC_VECTOR); | 16 | THRESHOLD_APIC_VECTOR); |
| 17 | } | 17 | } |
| 18 | 18 | ||
| 19 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; | 19 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; |
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index 01dd8702880b..c6a722e1d011 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c | |||
| @@ -17,7 +17,7 @@ static void winchip_machine_check(struct pt_regs *regs, long error_code) | |||
| 17 | { | 17 | { |
| 18 | ist_enter(regs); | 18 | ist_enter(regs); |
| 19 | 19 | ||
| 20 | printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); | 20 | pr_emerg("CPU0: Machine Check Exception.\n"); |
| 21 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); | 21 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
| 22 | 22 | ||
| 23 | ist_exit(regs); | 23 | ist_exit(regs); |
| @@ -39,6 +39,5 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c) | |||
| 39 | 39 | ||
| 40 | cr4_set_bits(X86_CR4_MCE); | 40 | cr4_set_bits(X86_CR4_MCE); |
| 41 | 41 | ||
| 42 | printk(KERN_INFO | 42 | pr_info("Winchip machine check reporting enabled on CPU#0.\n"); |
| 43 | "Winchip machine check reporting enabled on CPU#0.\n"); | ||
| 44 | } | 43 | } |
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 2233f8a76615..75d3aab5f7b2 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
| @@ -953,7 +953,7 @@ struct microcode_ops * __init init_amd_microcode(void) | |||
| 953 | struct cpuinfo_x86 *c = &boot_cpu_data; | 953 | struct cpuinfo_x86 *c = &boot_cpu_data; |
| 954 | 954 | ||
| 955 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { | 955 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { |
| 956 | pr_warning("AMD CPU family 0x%x not supported\n", c->x86); | 956 | pr_warn("AMD CPU family 0x%x not supported\n", c->x86); |
| 957 | return NULL; | 957 | return NULL; |
| 958 | } | 958 | } |
| 959 | 959 | ||
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 20e242ea1bc4..4e7c6933691c 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c | |||
| @@ -161,8 +161,8 @@ static void __init ms_hyperv_init_platform(void) | |||
| 161 | ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); | 161 | ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); |
| 162 | ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); | 162 | ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); |
| 163 | 163 | ||
| 164 | printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", | 164 | pr_info("HyperV: features 0x%x, hints 0x%x\n", |
| 165 | ms_hyperv.features, ms_hyperv.hints); | 165 | ms_hyperv.features, ms_hyperv.hints); |
| 166 | 166 | ||
| 167 | #ifdef CONFIG_X86_LOCAL_APIC | 167 | #ifdef CONFIG_X86_LOCAL_APIC |
| 168 | if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) { | 168 | if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) { |
| @@ -174,8 +174,8 @@ static void __init ms_hyperv_init_platform(void) | |||
| 174 | rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency); | 174 | rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency); |
| 175 | hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ); | 175 | hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ); |
| 176 | lapic_timer_frequency = hv_lapic_frequency; | 176 | lapic_timer_frequency = hv_lapic_frequency; |
| 177 | printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n", | 177 | pr_info("HyperV: LAPIC Timer Frequency: %#x\n", |
| 178 | lapic_timer_frequency); | 178 | lapic_timer_frequency); |
| 179 | } | 179 | } |
| 180 | #endif | 180 | #endif |
| 181 | 181 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c index 316fe3e60a97..3d689937fc1b 100644 --- a/arch/x86/kernel/cpu/mtrr/centaur.c +++ b/arch/x86/kernel/cpu/mtrr/centaur.c | |||
| @@ -103,7 +103,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t | |||
| 103 | */ | 103 | */ |
| 104 | if (type != MTRR_TYPE_WRCOMB && | 104 | if (type != MTRR_TYPE_WRCOMB && |
| 105 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { | 105 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { |
| 106 | pr_warning("mtrr: only write-combining%s supported\n", | 106 | pr_warn("mtrr: only write-combining%s supported\n", |
| 107 | centaur_mcr_type ? " and uncacheable are" : " is"); | 107 | centaur_mcr_type ? " and uncacheable are" : " is"); |
| 108 | return -EINVAL; | 108 | return -EINVAL; |
| 109 | } | 109 | } |
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 0d98503c2245..31e951ce6dff 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
| @@ -57,9 +57,9 @@ static int __initdata nr_range; | |||
| 57 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | 57 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; |
| 58 | 58 | ||
| 59 | static int __initdata debug_print; | 59 | static int __initdata debug_print; |
| 60 | #define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0) | 60 | #define Dprintk(x...) do { if (debug_print) pr_debug(x); } while (0) |
| 61 | 61 | ||
| 62 | #define BIOS_BUG_MSG KERN_WARNING \ | 62 | #define BIOS_BUG_MSG \ |
| 63 | "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" | 63 | "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" |
| 64 | 64 | ||
| 65 | static int __init | 65 | static int __init |
| @@ -81,9 +81,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range, | |||
| 81 | base, base + size); | 81 | base, base + size); |
| 82 | } | 82 | } |
| 83 | if (debug_print) { | 83 | if (debug_print) { |
| 84 | printk(KERN_DEBUG "After WB checking\n"); | 84 | pr_debug("After WB checking\n"); |
| 85 | for (i = 0; i < nr_range; i++) | 85 | for (i = 0; i < nr_range; i++) |
| 86 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", | 86 | pr_debug("MTRR MAP PFN: %016llx - %016llx\n", |
| 87 | range[i].start, range[i].end); | 87 | range[i].start, range[i].end); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| @@ -101,7 +101,7 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range, | |||
| 101 | (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) && | 101 | (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) && |
| 102 | (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) { | 102 | (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) { |
| 103 | /* Var MTRR contains UC entry below 1M? Skip it: */ | 103 | /* Var MTRR contains UC entry below 1M? Skip it: */ |
| 104 | printk(BIOS_BUG_MSG, i); | 104 | pr_warn(BIOS_BUG_MSG, i); |
| 105 | if (base + size <= (1<<(20-PAGE_SHIFT))) | 105 | if (base + size <= (1<<(20-PAGE_SHIFT))) |
| 106 | continue; | 106 | continue; |
| 107 | size -= (1<<(20-PAGE_SHIFT)) - base; | 107 | size -= (1<<(20-PAGE_SHIFT)) - base; |
| @@ -114,11 +114,11 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range, | |||
| 114 | extra_remove_base + extra_remove_size); | 114 | extra_remove_base + extra_remove_size); |
| 115 | 115 | ||
| 116 | if (debug_print) { | 116 | if (debug_print) { |
| 117 | printk(KERN_DEBUG "After UC checking\n"); | 117 | pr_debug("After UC checking\n"); |
| 118 | for (i = 0; i < RANGE_NUM; i++) { | 118 | for (i = 0; i < RANGE_NUM; i++) { |
| 119 | if (!range[i].end) | 119 | if (!range[i].end) |
| 120 | continue; | 120 | continue; |
| 121 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", | 121 | pr_debug("MTRR MAP PFN: %016llx - %016llx\n", |
| 122 | range[i].start, range[i].end); | 122 | range[i].start, range[i].end); |
| 123 | } | 123 | } |
| 124 | } | 124 | } |
| @@ -126,9 +126,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range, | |||
| 126 | /* sort the ranges */ | 126 | /* sort the ranges */ |
| 127 | nr_range = clean_sort_range(range, RANGE_NUM); | 127 | nr_range = clean_sort_range(range, RANGE_NUM); |
| 128 | if (debug_print) { | 128 | if (debug_print) { |
| 129 | printk(KERN_DEBUG "After sorting\n"); | 129 | pr_debug("After sorting\n"); |
| 130 | for (i = 0; i < nr_range; i++) | 130 | for (i = 0; i < nr_range; i++) |
| 131 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", | 131 | pr_debug("MTRR MAP PFN: %016llx - %016llx\n", |
| 132 | range[i].start, range[i].end); | 132 | range[i].start, range[i].end); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| @@ -544,7 +544,7 @@ static void __init print_out_mtrr_range_state(void) | |||
| 544 | start_base = to_size_factor(start_base, &start_factor), | 544 | start_base = to_size_factor(start_base, &start_factor), |
| 545 | type = range_state[i].type; | 545 | type = range_state[i].type; |
| 546 | 546 | ||
| 547 | printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n", | 547 | pr_debug("reg %d, base: %ld%cB, range: %ld%cB, type %s\n", |
| 548 | i, start_base, start_factor, | 548 | i, start_base, start_factor, |
| 549 | size_base, size_factor, | 549 | size_base, size_factor, |
| 550 | (type == MTRR_TYPE_UNCACHABLE) ? "UC" : | 550 | (type == MTRR_TYPE_UNCACHABLE) ? "UC" : |
| @@ -713,7 +713,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 713 | return 0; | 713 | return 0; |
| 714 | 714 | ||
| 715 | /* Print original var MTRRs at first, for debugging: */ | 715 | /* Print original var MTRRs at first, for debugging: */ |
| 716 | printk(KERN_DEBUG "original variable MTRRs\n"); | 716 | pr_debug("original variable MTRRs\n"); |
| 717 | print_out_mtrr_range_state(); | 717 | print_out_mtrr_range_state(); |
| 718 | 718 | ||
| 719 | memset(range, 0, sizeof(range)); | 719 | memset(range, 0, sizeof(range)); |
| @@ -733,7 +733,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 733 | x_remove_base, x_remove_size); | 733 | x_remove_base, x_remove_size); |
| 734 | 734 | ||
| 735 | range_sums = sum_ranges(range, nr_range); | 735 | range_sums = sum_ranges(range, nr_range); |
| 736 | printk(KERN_INFO "total RAM covered: %ldM\n", | 736 | pr_info("total RAM covered: %ldM\n", |
| 737 | range_sums >> (20 - PAGE_SHIFT)); | 737 | range_sums >> (20 - PAGE_SHIFT)); |
| 738 | 738 | ||
| 739 | if (mtrr_chunk_size && mtrr_gran_size) { | 739 | if (mtrr_chunk_size && mtrr_gran_size) { |
| @@ -745,12 +745,11 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 745 | 745 | ||
| 746 | if (!result[i].bad) { | 746 | if (!result[i].bad) { |
| 747 | set_var_mtrr_all(address_bits); | 747 | set_var_mtrr_all(address_bits); |
| 748 | printk(KERN_DEBUG "New variable MTRRs\n"); | 748 | pr_debug("New variable MTRRs\n"); |
| 749 | print_out_mtrr_range_state(); | 749 | print_out_mtrr_range_state(); |
| 750 | return 1; | 750 | return 1; |
| 751 | } | 751 | } |
| 752 | printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " | 752 | pr_info("invalid mtrr_gran_size or mtrr_chunk_size, will find optimal one\n"); |
| 753 | "will find optimal one\n"); | ||
| 754 | } | 753 | } |
| 755 | 754 | ||
| 756 | i = 0; | 755 | i = 0; |
| @@ -768,7 +767,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 768 | x_remove_base, x_remove_size, i); | 767 | x_remove_base, x_remove_size, i); |
| 769 | if (debug_print) { | 768 | if (debug_print) { |
| 770 | mtrr_print_out_one_result(i); | 769 | mtrr_print_out_one_result(i); |
| 771 | printk(KERN_INFO "\n"); | 770 | pr_info("\n"); |
| 772 | } | 771 | } |
| 773 | 772 | ||
| 774 | i++; | 773 | i++; |
| @@ -779,7 +778,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 779 | index_good = mtrr_search_optimal_index(); | 778 | index_good = mtrr_search_optimal_index(); |
| 780 | 779 | ||
| 781 | if (index_good != -1) { | 780 | if (index_good != -1) { |
| 782 | printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); | 781 | pr_info("Found optimal setting for mtrr clean up\n"); |
| 783 | i = index_good; | 782 | i = index_good; |
| 784 | mtrr_print_out_one_result(i); | 783 | mtrr_print_out_one_result(i); |
| 785 | 784 | ||
| @@ -790,7 +789,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 790 | gran_size <<= 10; | 789 | gran_size <<= 10; |
| 791 | x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); | 790 | x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); |
| 792 | set_var_mtrr_all(address_bits); | 791 | set_var_mtrr_all(address_bits); |
| 793 | printk(KERN_DEBUG "New variable MTRRs\n"); | 792 | pr_debug("New variable MTRRs\n"); |
| 794 | print_out_mtrr_range_state(); | 793 | print_out_mtrr_range_state(); |
| 795 | return 1; | 794 | return 1; |
| 796 | } else { | 795 | } else { |
| @@ -799,8 +798,8 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 799 | mtrr_print_out_one_result(i); | 798 | mtrr_print_out_one_result(i); |
| 800 | } | 799 | } |
| 801 | 800 | ||
| 802 | printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n"); | 801 | pr_info("mtrr_cleanup: can not find optimal value\n"); |
| 803 | printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n"); | 802 | pr_info("please specify mtrr_gran_size/mtrr_chunk_size\n"); |
| 804 | 803 | ||
| 805 | return 0; | 804 | return 0; |
| 806 | } | 805 | } |
| @@ -918,7 +917,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 918 | 917 | ||
| 919 | /* kvm/qemu doesn't have mtrr set right, don't trim them all: */ | 918 | /* kvm/qemu doesn't have mtrr set right, don't trim them all: */ |
| 920 | if (!highest_pfn) { | 919 | if (!highest_pfn) { |
| 921 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); | 920 | pr_info("CPU MTRRs all blank - virtualized system.\n"); |
| 922 | return 0; | 921 | return 0; |
| 923 | } | 922 | } |
| 924 | 923 | ||
| @@ -973,7 +972,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 973 | end_pfn); | 972 | end_pfn); |
| 974 | 973 | ||
| 975 | if (total_trim_size) { | 974 | if (total_trim_size) { |
| 976 | pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20); | 975 | pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", |
| 976 | total_trim_size >> 20); | ||
| 977 | 977 | ||
| 978 | if (!changed_by_mtrr_cleanup) | 978 | if (!changed_by_mtrr_cleanup) |
| 979 | WARN_ON(1); | 979 | WARN_ON(1); |
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index c870af161008..fcbcb2f678ca 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
| @@ -55,7 +55,7 @@ static inline void k8_check_syscfg_dram_mod_en(void) | |||
| 55 | 55 | ||
| 56 | rdmsr(MSR_K8_SYSCFG, lo, hi); | 56 | rdmsr(MSR_K8_SYSCFG, lo, hi); |
| 57 | if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) { | 57 | if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) { |
| 58 | printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]" | 58 | pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]" |
| 59 | " not cleared by BIOS, clearing this bit\n", | 59 | " not cleared by BIOS, clearing this bit\n", |
| 60 | smp_processor_id()); | 60 | smp_processor_id()); |
| 61 | lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY; | 61 | lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY; |
| @@ -501,14 +501,14 @@ void __init mtrr_state_warn(void) | |||
| 501 | if (!mask) | 501 | if (!mask) |
| 502 | return; | 502 | return; |
| 503 | if (mask & MTRR_CHANGE_MASK_FIXED) | 503 | if (mask & MTRR_CHANGE_MASK_FIXED) |
| 504 | pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); | 504 | pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); |
| 505 | if (mask & MTRR_CHANGE_MASK_VARIABLE) | 505 | if (mask & MTRR_CHANGE_MASK_VARIABLE) |
| 506 | pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n"); | 506 | pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n"); |
| 507 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) | 507 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) |
| 508 | pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); | 508 | pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); |
| 509 | 509 | ||
| 510 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); | 510 | pr_info("mtrr: probably your BIOS does not setup all CPUs.\n"); |
| 511 | printk(KERN_INFO "mtrr: corrected configuration.\n"); | 511 | pr_info("mtrr: corrected configuration.\n"); |
| 512 | } | 512 | } |
| 513 | 513 | ||
| 514 | /* | 514 | /* |
| @@ -519,8 +519,7 @@ void __init mtrr_state_warn(void) | |||
| 519 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) | 519 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) |
| 520 | { | 520 | { |
| 521 | if (wrmsr_safe(msr, a, b) < 0) { | 521 | if (wrmsr_safe(msr, a, b) < 0) { |
| 522 | printk(KERN_ERR | 522 | pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", |
| 523 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", | ||
| 524 | smp_processor_id(), msr, a, b); | 523 | smp_processor_id(), msr, a, b); |
| 525 | } | 524 | } |
| 526 | } | 525 | } |
| @@ -607,7 +606,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
| 607 | tmp |= ~((1ULL<<(hi - 1)) - 1); | 606 | tmp |= ~((1ULL<<(hi - 1)) - 1); |
| 608 | 607 | ||
| 609 | if (tmp != mask) { | 608 | if (tmp != mask) { |
| 610 | printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); | 609 | pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); |
| 611 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); | 610 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); |
| 612 | mask = tmp; | 611 | mask = tmp; |
| 613 | } | 612 | } |
| @@ -858,13 +857,13 @@ int generic_validate_add_page(unsigned long base, unsigned long size, | |||
| 858 | boot_cpu_data.x86_model == 1 && | 857 | boot_cpu_data.x86_model == 1 && |
| 859 | boot_cpu_data.x86_mask <= 7) { | 858 | boot_cpu_data.x86_mask <= 7) { |
| 860 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { | 859 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { |
| 861 | pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); | 860 | pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); |
| 862 | return -EINVAL; | 861 | return -EINVAL; |
| 863 | } | 862 | } |
| 864 | if (!(base + size < 0x70000 || base > 0x7003F) && | 863 | if (!(base + size < 0x70000 || base > 0x7003F) && |
| 865 | (type == MTRR_TYPE_WRCOMB | 864 | (type == MTRR_TYPE_WRCOMB |
| 866 | || type == MTRR_TYPE_WRBACK)) { | 865 | || type == MTRR_TYPE_WRBACK)) { |
| 867 | pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); | 866 | pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); |
| 868 | return -EINVAL; | 867 | return -EINVAL; |
| 869 | } | 868 | } |
| 870 | } | 869 | } |
| @@ -878,7 +877,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, | |||
| 878 | lbase = lbase >> 1, last = last >> 1) | 877 | lbase = lbase >> 1, last = last >> 1) |
| 879 | ; | 878 | ; |
| 880 | if (lbase != last) { | 879 | if (lbase != last) { |
| 881 | pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); | 880 | pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); |
| 882 | return -EINVAL; | 881 | return -EINVAL; |
| 883 | } | 882 | } |
| 884 | return 0; | 883 | return 0; |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 5c3d149ee91c..ba80d68f683e 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
| @@ -300,24 +300,24 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
| 300 | return error; | 300 | return error; |
| 301 | 301 | ||
| 302 | if (type >= MTRR_NUM_TYPES) { | 302 | if (type >= MTRR_NUM_TYPES) { |
| 303 | pr_warning("mtrr: type: %u invalid\n", type); | 303 | pr_warn("mtrr: type: %u invalid\n", type); |
| 304 | return -EINVAL; | 304 | return -EINVAL; |
| 305 | } | 305 | } |
| 306 | 306 | ||
| 307 | /* If the type is WC, check that this processor supports it */ | 307 | /* If the type is WC, check that this processor supports it */ |
| 308 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { | 308 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { |
| 309 | pr_warning("mtrr: your processor doesn't support write-combining\n"); | 309 | pr_warn("mtrr: your processor doesn't support write-combining\n"); |
| 310 | return -ENOSYS; | 310 | return -ENOSYS; |
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | if (!size) { | 313 | if (!size) { |
| 314 | pr_warning("mtrr: zero sized request\n"); | 314 | pr_warn("mtrr: zero sized request\n"); |
| 315 | return -EINVAL; | 315 | return -EINVAL; |
| 316 | } | 316 | } |
| 317 | 317 | ||
| 318 | if ((base | (base + size - 1)) >> | 318 | if ((base | (base + size - 1)) >> |
| 319 | (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) { | 319 | (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) { |
| 320 | pr_warning("mtrr: base or size exceeds the MTRR width\n"); | 320 | pr_warn("mtrr: base or size exceeds the MTRR width\n"); |
| 321 | return -EINVAL; | 321 | return -EINVAL; |
| 322 | } | 322 | } |
| 323 | 323 | ||
| @@ -348,7 +348,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
| 348 | } else if (types_compatible(type, ltype)) | 348 | } else if (types_compatible(type, ltype)) |
| 349 | continue; | 349 | continue; |
| 350 | } | 350 | } |
| 351 | pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing" | 351 | pr_warn("mtrr: 0x%lx000,0x%lx000 overlaps existing" |
| 352 | " 0x%lx000,0x%lx000\n", base, size, lbase, | 352 | " 0x%lx000,0x%lx000\n", base, size, lbase, |
| 353 | lsize); | 353 | lsize); |
| 354 | goto out; | 354 | goto out; |
| @@ -357,7 +357,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
| 357 | if (ltype != type) { | 357 | if (ltype != type) { |
| 358 | if (types_compatible(type, ltype)) | 358 | if (types_compatible(type, ltype)) |
| 359 | continue; | 359 | continue; |
| 360 | pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", | 360 | pr_warn("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", |
| 361 | base, size, mtrr_attrib_to_str(ltype), | 361 | base, size, mtrr_attrib_to_str(ltype), |
| 362 | mtrr_attrib_to_str(type)); | 362 | mtrr_attrib_to_str(type)); |
| 363 | goto out; | 363 | goto out; |
| @@ -395,7 +395,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
| 395 | static int mtrr_check(unsigned long base, unsigned long size) | 395 | static int mtrr_check(unsigned long base, unsigned long size) |
| 396 | { | 396 | { |
| 397 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { | 397 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { |
| 398 | pr_warning("mtrr: size and base must be multiples of 4 kiB\n"); | 398 | pr_warn("mtrr: size and base must be multiples of 4 kiB\n"); |
| 399 | pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base); | 399 | pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base); |
| 400 | dump_stack(); | 400 | dump_stack(); |
| 401 | return -1; | 401 | return -1; |
| @@ -493,16 +493,16 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
| 493 | } | 493 | } |
| 494 | } | 494 | } |
| 495 | if (reg >= max) { | 495 | if (reg >= max) { |
| 496 | pr_warning("mtrr: register: %d too big\n", reg); | 496 | pr_warn("mtrr: register: %d too big\n", reg); |
| 497 | goto out; | 497 | goto out; |
| 498 | } | 498 | } |
| 499 | mtrr_if->get(reg, &lbase, &lsize, &ltype); | 499 | mtrr_if->get(reg, &lbase, &lsize, &ltype); |
| 500 | if (lsize < 1) { | 500 | if (lsize < 1) { |
| 501 | pr_warning("mtrr: MTRR %d not used\n", reg); | 501 | pr_warn("mtrr: MTRR %d not used\n", reg); |
| 502 | goto out; | 502 | goto out; |
| 503 | } | 503 | } |
| 504 | if (mtrr_usage_table[reg] < 1) { | 504 | if (mtrr_usage_table[reg] < 1) { |
| 505 | pr_warning("mtrr: reg: %d has count=0\n", reg); | 505 | pr_warn("mtrr: reg: %d has count=0\n", reg); |
| 506 | goto out; | 506 | goto out; |
| 507 | } | 507 | } |
| 508 | if (--mtrr_usage_table[reg] < 1) | 508 | if (--mtrr_usage_table[reg] < 1) |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1b443db2db50..7402c8182813 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
| @@ -254,15 +254,16 @@ static bool check_hw_exists(void) | |||
| 254 | * We still allow the PMU driver to operate: | 254 | * We still allow the PMU driver to operate: |
| 255 | */ | 255 | */ |
| 256 | if (bios_fail) { | 256 | if (bios_fail) { |
| 257 | printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n"); | 257 | pr_cont("Broken BIOS detected, complain to your hardware vendor.\n"); |
| 258 | printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail); | 258 | pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", |
| 259 | reg_fail, val_fail); | ||
| 259 | } | 260 | } |
| 260 | 261 | ||
| 261 | return true; | 262 | return true; |
| 262 | 263 | ||
| 263 | msr_fail: | 264 | msr_fail: |
| 264 | printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); | 265 | pr_cont("Broken PMU hardware detected, using software events only.\n"); |
| 265 | printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n", | 266 | pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n", |
| 266 | boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR, | 267 | boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR, |
| 267 | reg, val_new); | 268 | reg, val_new); |
| 268 | 269 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index 989d3c215d2b..aa12f9509cfb 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c | |||
| @@ -670,7 +670,7 @@ static __init int perf_event_ibs_init(void) | |||
| 670 | perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); | 670 | perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); |
| 671 | 671 | ||
| 672 | register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); | 672 | register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); |
| 673 | printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps); | 673 | pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps); |
| 674 | 674 | ||
| 675 | return 0; | 675 | return 0; |
| 676 | } | 676 | } |
| @@ -774,14 +774,14 @@ static int setup_ibs_ctl(int ibs_eilvt_off) | |||
| 774 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); | 774 | pci_read_config_dword(cpu_cfg, IBSCTL, &value); |
| 775 | if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { | 775 | if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { |
| 776 | pci_dev_put(cpu_cfg); | 776 | pci_dev_put(cpu_cfg); |
| 777 | printk(KERN_DEBUG "Failed to setup IBS LVT offset, " | 777 | pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n", |
| 778 | "IBSCTL = 0x%08x\n", value); | 778 | value); |
| 779 | return -EINVAL; | 779 | return -EINVAL; |
| 780 | } | 780 | } |
| 781 | } while (1); | 781 | } while (1); |
| 782 | 782 | ||
| 783 | if (!nodes) { | 783 | if (!nodes) { |
| 784 | printk(KERN_DEBUG "No CPU node configured for IBS\n"); | 784 | pr_debug("No CPU node configured for IBS\n"); |
| 785 | return -ENODEV; | 785 | return -ENODEV; |
| 786 | } | 786 | } |
| 787 | 787 | ||
| @@ -810,7 +810,7 @@ static void force_ibs_eilvt_setup(void) | |||
| 810 | preempt_enable(); | 810 | preempt_enable(); |
| 811 | 811 | ||
| 812 | if (offset == APIC_EILVT_NR_MAX) { | 812 | if (offset == APIC_EILVT_NR_MAX) { |
| 813 | printk(KERN_DEBUG "No EILVT entry available\n"); | 813 | pr_debug("No EILVT entry available\n"); |
| 814 | return; | 814 | return; |
| 815 | } | 815 | } |
| 816 | 816 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c index 49742746a6c9..19a17363a21d 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c | |||
| @@ -536,7 +536,7 @@ static int __init amd_uncore_init(void) | |||
| 536 | if (ret) | 536 | if (ret) |
| 537 | goto fail_nb; | 537 | goto fail_nb; |
| 538 | 538 | ||
| 539 | printk(KERN_INFO "perf: AMD NB counters detected\n"); | 539 | pr_info("perf: AMD NB counters detected\n"); |
| 540 | ret = 0; | 540 | ret = 0; |
| 541 | } | 541 | } |
| 542 | 542 | ||
| @@ -550,7 +550,7 @@ static int __init amd_uncore_init(void) | |||
| 550 | if (ret) | 550 | if (ret) |
| 551 | goto fail_l2; | 551 | goto fail_l2; |
| 552 | 552 | ||
| 553 | printk(KERN_INFO "perf: AMD L2I counters detected\n"); | 553 | pr_info("perf: AMD L2I counters detected\n"); |
| 554 | ret = 0; | 554 | ret = 0; |
| 555 | } | 555 | } |
| 556 | 556 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 10602f0a438f..7c79261ed939 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
| @@ -1325,13 +1325,13 @@ void __init intel_ds_init(void) | |||
| 1325 | 1325 | ||
| 1326 | switch (format) { | 1326 | switch (format) { |
| 1327 | case 0: | 1327 | case 0: |
| 1328 | printk(KERN_CONT "PEBS fmt0%c, ", pebs_type); | 1328 | pr_cont("PEBS fmt0%c, ", pebs_type); |
| 1329 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); | 1329 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_core); |
| 1330 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; | 1330 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_core; |
| 1331 | break; | 1331 | break; |
| 1332 | 1332 | ||
| 1333 | case 1: | 1333 | case 1: |
| 1334 | printk(KERN_CONT "PEBS fmt1%c, ", pebs_type); | 1334 | pr_cont("PEBS fmt1%c, ", pebs_type); |
| 1335 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); | 1335 | x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm); |
| 1336 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; | 1336 | x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; |
| 1337 | break; | 1337 | break; |
| @@ -1351,7 +1351,7 @@ void __init intel_ds_init(void) | |||
| 1351 | break; | 1351 | break; |
| 1352 | 1352 | ||
| 1353 | default: | 1353 | default: |
| 1354 | printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type); | 1354 | pr_cont("no PEBS fmt%d%c, ", format, pebs_type); |
| 1355 | x86_pmu.pebs = 0; | 1355 | x86_pmu.pebs = 0; |
| 1356 | } | 1356 | } |
| 1357 | } | 1357 | } |
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c index 819d94982e07..f6f50c4ceaec 100644 --- a/arch/x86/kernel/cpu/rdrand.c +++ b/arch/x86/kernel/cpu/rdrand.c | |||
| @@ -51,7 +51,7 @@ void x86_init_rdrand(struct cpuinfo_x86 *c) | |||
| 51 | for (i = 0; i < SANITY_CHECK_LOOPS; i++) { | 51 | for (i = 0; i < SANITY_CHECK_LOOPS; i++) { |
| 52 | if (!rdrand_long(&tmp)) { | 52 | if (!rdrand_long(&tmp)) { |
| 53 | clear_cpu_cap(c, X86_FEATURE_RDRAND); | 53 | clear_cpu_cap(c, X86_FEATURE_RDRAND); |
| 54 | printk_once(KERN_WARNING "rdrand: disabled\n"); | 54 | pr_warn_once("rdrand: disabled\n"); |
| 55 | return; | 55 | return; |
| 56 | } | 56 | } |
| 57 | } | 57 | } |
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 4c60eaf0571c..cd531355e838 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c | |||
| @@ -87,10 +87,10 @@ void detect_extended_topology(struct cpuinfo_x86 *c) | |||
| 87 | c->x86_max_cores = (core_level_siblings / smp_num_siblings); | 87 | c->x86_max_cores = (core_level_siblings / smp_num_siblings); |
| 88 | 88 | ||
| 89 | if (!printed) { | 89 | if (!printed) { |
| 90 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 90 | pr_info("CPU: Physical Processor ID: %d\n", |
| 91 | c->phys_proc_id); | 91 | c->phys_proc_id); |
| 92 | if (c->x86_max_cores > 1) | 92 | if (c->x86_max_cores > 1) |
| 93 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | 93 | pr_info("CPU: Processor Core ID: %d\n", |
| 94 | c->cpu_core_id); | 94 | c->cpu_core_id); |
| 95 | printed = 1; | 95 | printed = 1; |
| 96 | } | 96 | } |
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index 252da7aceca6..e3b4d1841175 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
| @@ -33,7 +33,7 @@ static void init_transmeta(struct cpuinfo_x86 *c) | |||
| 33 | if (max >= 0x80860001) { | 33 | if (max >= 0x80860001) { |
| 34 | cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); | 34 | cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); |
| 35 | if (cpu_rev != 0x02000000) { | 35 | if (cpu_rev != 0x02000000) { |
| 36 | printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n", | 36 | pr_info("CPU: Processor revision %u.%u.%u.%u, %u MHz\n", |
| 37 | (cpu_rev >> 24) & 0xff, | 37 | (cpu_rev >> 24) & 0xff, |
| 38 | (cpu_rev >> 16) & 0xff, | 38 | (cpu_rev >> 16) & 0xff, |
| 39 | (cpu_rev >> 8) & 0xff, | 39 | (cpu_rev >> 8) & 0xff, |
| @@ -44,10 +44,10 @@ static void init_transmeta(struct cpuinfo_x86 *c) | |||
| 44 | if (max >= 0x80860002) { | 44 | if (max >= 0x80860002) { |
| 45 | cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); | 45 | cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); |
| 46 | if (cpu_rev == 0x02000000) { | 46 | if (cpu_rev == 0x02000000) { |
| 47 | printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n", | 47 | pr_info("CPU: Processor revision %08X, %u MHz\n", |
| 48 | new_cpu_rev, cpu_freq); | 48 | new_cpu_rev, cpu_freq); |
| 49 | } | 49 | } |
| 50 | printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n", | 50 | pr_info("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n", |
| 51 | (cms_rev1 >> 24) & 0xff, | 51 | (cms_rev1 >> 24) & 0xff, |
| 52 | (cms_rev1 >> 16) & 0xff, | 52 | (cms_rev1 >> 16) & 0xff, |
| 53 | (cms_rev1 >> 8) & 0xff, | 53 | (cms_rev1 >> 8) & 0xff, |
| @@ -76,7 +76,7 @@ static void init_transmeta(struct cpuinfo_x86 *c) | |||
| 76 | (void *)&cpu_info[56], | 76 | (void *)&cpu_info[56], |
| 77 | (void *)&cpu_info[60]); | 77 | (void *)&cpu_info[60]); |
| 78 | cpu_info[64] = '\0'; | 78 | cpu_info[64] = '\0'; |
| 79 | printk(KERN_INFO "CPU: %s\n", cpu_info); | 79 | pr_info("CPU: %s\n", cpu_info); |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | /* Unhide possibly hidden capability flags */ | 82 | /* Unhide possibly hidden capability flags */ |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 628a059a9a06..364e58346897 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
| @@ -62,7 +62,7 @@ static unsigned long vmware_get_tsc_khz(void) | |||
| 62 | tsc_hz = eax | (((uint64_t)ebx) << 32); | 62 | tsc_hz = eax | (((uint64_t)ebx) << 32); |
| 63 | do_div(tsc_hz, 1000); | 63 | do_div(tsc_hz, 1000); |
| 64 | BUG_ON(tsc_hz >> 32); | 64 | BUG_ON(tsc_hz >> 32); |
| 65 | printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n", | 65 | pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n", |
| 66 | (unsigned long) tsc_hz / 1000, | 66 | (unsigned long) tsc_hz / 1000, |
| 67 | (unsigned long) tsc_hz % 1000); | 67 | (unsigned long) tsc_hz % 1000); |
| 68 | 68 | ||
| @@ -84,8 +84,7 @@ static void __init vmware_platform_setup(void) | |||
| 84 | if (ebx != UINT_MAX) | 84 | if (ebx != UINT_MAX) |
| 85 | x86_platform.calibrate_tsc = vmware_get_tsc_khz; | 85 | x86_platform.calibrate_tsc = vmware_get_tsc_khz; |
| 86 | else | 86 | else |
| 87 | printk(KERN_WARNING | 87 | pr_warn("Failed to get TSC freq from the hypervisor\n"); |
| 88 | "Failed to get TSC freq from the hypervisor\n"); | ||
| 89 | } | 88 | } |
| 90 | 89 | ||
| 91 | /* | 90 | /* |
