author     Joe Perches <joe@perches.com>      2012-05-21 22:50:07 -0400
committer  Ingo Molnar <mingo@kernel.org>     2012-06-06 03:17:22 -0400
commit     c767a54ba0657e52e6edaa97cbe0b0a8bf1c1655 (patch)
tree       d1430237ad10296e0e1a3d9b6f34c2bc56bcdfd7
parent     f9ba7179ce91fb77b2adf6eaab3676ab3a1f5a15 (diff)
x86/debug: Add KERN_<LEVEL> to bare printks, convert printks to pr_<level>
Use a more current logging style:

 - Bare printks should have a KERN_<LEVEL> for consistency's sake
 - Add pr_fmt where appropriate
 - Neaten some macro definitions
 - Convert some Ok output to OK
 - Use "%s: ", __func__ in pr_fmt for summit
 - Convert some printks to pr_<level>

Message output is not identical in all cases.

Signed-off-by: Joe Perches <joe@perches.com>
Cc: levinsasha928@gmail.com
Link: http://lkml.kernel.org/r/1337655007.24226.10.camel@joe2Laptop
[ merged two similar patches, tidied up the changelog ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
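For readers unfamiliar with the style being converted to, here is a minimal, hypothetical sketch (not part of the patch; the file and function names are made up) of how a single source file adopts the pr_fmt()/pr_<level>() idiom. pr_fmt() must be defined before any includes so <linux/printk.h> picks it up; every pr_<level>() call is then automatically prefixed, and continuation output goes through pr_cont() instead of a bare printk():

/* example.c -- hypothetical illustration of the pr_fmt()/pr_<level>() style */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

static int __init example_init(void)
{
	/*
	 * Old style: bare printk() with no level and a hand-written prefix:
	 *	printk("example: device not found\n");
	 * New style: the level is explicit and the prefix comes from pr_fmt():
	 */
	pr_err("device not found\n");

	/* Multi-part lines continue with pr_cont() rather than a bare printk(). */
	pr_info("probing");
	pr_cont(" ... done\n");

	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");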
-rw-r--r--  arch/x86/include/asm/floppy.h            |   2
-rw-r--r--  arch/x86/include/asm/pci_x86.h           |   8
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h    |   4
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h    |   6
-rw-r--r--  arch/x86/include/asm/pgtable_64.h        |   8
-rw-r--r--  arch/x86/kernel/alternative.c            |  17
-rw-r--r--  arch/x86/kernel/amd_nb.c                 |  10
-rw-r--r--  arch/x86/kernel/apic/io_apic.c           |  38
-rw-r--r--  arch/x86/kernel/apic/summit_32.c         |  22
-rw-r--r--  arch/x86/kernel/apm_32.c                 |  29
-rw-r--r--  arch/x86/kernel/cpu/bugs.c               |  20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c         |  22
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c   |  14
-rw-r--r--  arch/x86/kernel/dumpstack.c              |   4
-rw-r--r--  arch/x86/kernel/dumpstack_32.c           |  24
-rw-r--r--  arch/x86/kernel/dumpstack_64.c           |  20
-rw-r--r--  arch/x86/kernel/irq.c                    |   4
-rw-r--r--  arch/x86/kernel/module.c                 |  32
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c         |  34
-rw-r--r--  arch/x86/kernel/process.c                |  34
-rw-r--r--  arch/x86/kernel/process_64.c             |   8
-rw-r--r--  arch/x86/kernel/reboot.c                 |  14
-rw-r--r--  arch/x86/kernel/signal.c                 |   5
-rw-r--r--  arch/x86/kernel/smpboot.c                |  97
-rw-r--r--  arch/x86/kernel/traps.c                  |  19
-rw-r--r--  arch/x86/kernel/tsc.c                    |  50
-rw-r--r--  arch/x86/kernel/vm86_32.c                |   6
-rw-r--r--  arch/x86/kernel/vsyscall_64.c            |  17
-rw-r--r--  arch/x86/kernel/xsave.c                  |  12
29 files changed, 301 insertions, 279 deletions
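Several hunks below also reshape disabled debug macros (DBG(), DPRINTK(), DEBUGP()). As a minimal, hypothetical sketch of that pattern (the macro name here is made up): the if (0) keeps the format string and arguments visible to the compiler, so they are still type-checked and variables used only in debug calls do not trigger "unused" warnings, while the call itself is optimized away; the do { } while (0) wrapper keeps the macro a single statement that is safe inside a brace-less if/else.

#define MY_DBG(fmt, ...)					\
do {								\
	if (0)							\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
} while (0)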
diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
index dbe82a5c5eac..d3d74698dce9 100644
--- a/arch/x86/include/asm/floppy.h
+++ b/arch/x86/include/asm/floppy.h
@@ -99,7 +99,7 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
99 virtual_dma_residue += virtual_dma_count; 99 virtual_dma_residue += virtual_dma_count;
100 virtual_dma_count = 0; 100 virtual_dma_count = 0;
101#ifdef TRACE_FLPY_INT 101#ifdef TRACE_FLPY_INT
102 printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", 102 printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
103 virtual_dma_count, virtual_dma_residue, calls, bytes, 103 virtual_dma_count, virtual_dma_residue, calls, bytes,
104 dma_wait); 104 dma_wait);
105 calls = 0; 105 calls = 0;
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b3a531746026..5ad24a89b19b 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -7,9 +7,13 @@
7#undef DEBUG 7#undef DEBUG
8 8
9#ifdef DEBUG 9#ifdef DEBUG
10#define DBG(x...) printk(x) 10#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
11#else 11#else
12#define DBG(x...) 12#define DBG(fmt, ...) \
13do { \
14 if (0) \
15 printk(fmt, ##__VA_ARGS__); \
16} while (0)
13#endif 17#endif
14 18
15#define PCI_PROBE_BIOS 0x0001 19#define PCI_PROBE_BIOS 0x0001
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 98391db840c6..f2b489cf1602 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -2,9 +2,9 @@
2#define _ASM_X86_PGTABLE_2LEVEL_H 2#define _ASM_X86_PGTABLE_2LEVEL_H
3 3
4#define pte_ERROR(e) \ 4#define pte_ERROR(e) \
5 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) 5 pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
6#define pgd_ERROR(e) \ 6#define pgd_ERROR(e) \
7 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 7 pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))
8 8
9/* 9/*
10 * Certain architectures need to do special things when PTEs 10 * Certain architectures need to do special things when PTEs
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 43876f16caf1..f824cfbaa9d4 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -9,13 +9,13 @@
9 */ 9 */
10 10
11#define pte_ERROR(e) \ 11#define pte_ERROR(e) \
12 printk("%s:%d: bad pte %p(%08lx%08lx).\n", \ 12 pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
13 __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) 13 __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
14#define pmd_ERROR(e) \ 14#define pmd_ERROR(e) \
15 printk("%s:%d: bad pmd %p(%016Lx).\n", \ 15 pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
16 __FILE__, __LINE__, &(e), pmd_val(e)) 16 __FILE__, __LINE__, &(e), pmd_val(e))
17#define pgd_ERROR(e) \ 17#define pgd_ERROR(e) \
18 printk("%s:%d: bad pgd %p(%016Lx).\n", \ 18 pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
19 __FILE__, __LINE__, &(e), pgd_val(e)) 19 __FILE__, __LINE__, &(e), pgd_val(e))
20 20
21/* Rules for using set_pte: the pte being assigned *must* be 21/* Rules for using set_pte: the pte being assigned *must* be
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 975f709e09ae..8251be02301e 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -26,16 +26,16 @@ extern pgd_t init_level4_pgt[];
26extern void paging_init(void); 26extern void paging_init(void);
27 27
28#define pte_ERROR(e) \ 28#define pte_ERROR(e) \
29 printk("%s:%d: bad pte %p(%016lx).\n", \ 29 pr_err("%s:%d: bad pte %p(%016lx)\n", \
30 __FILE__, __LINE__, &(e), pte_val(e)) 30 __FILE__, __LINE__, &(e), pte_val(e))
31#define pmd_ERROR(e) \ 31#define pmd_ERROR(e) \
32 printk("%s:%d: bad pmd %p(%016lx).\n", \ 32 pr_err("%s:%d: bad pmd %p(%016lx)\n", \
33 __FILE__, __LINE__, &(e), pmd_val(e)) 33 __FILE__, __LINE__, &(e), pmd_val(e))
34#define pud_ERROR(e) \ 34#define pud_ERROR(e) \
35 printk("%s:%d: bad pud %p(%016lx).\n", \ 35 pr_err("%s:%d: bad pud %p(%016lx)\n", \
36 __FILE__, __LINE__, &(e), pud_val(e)) 36 __FILE__, __LINE__, &(e), pud_val(e))
37#define pgd_ERROR(e) \ 37#define pgd_ERROR(e) \
38 printk("%s:%d: bad pgd %p(%016lx).\n", \ 38 pr_err("%s:%d: bad pgd %p(%016lx)\n", \
39 __FILE__, __LINE__, &(e), pgd_val(e)) 39 __FILE__, __LINE__, &(e), pgd_val(e))
40 40
41struct mm_struct; 41struct mm_struct;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1f84794f0759..1729d720299a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) "SMP alternatives: " fmt
2
1#include <linux/module.h> 3#include <linux/module.h>
2#include <linux/sched.h> 4#include <linux/sched.h>
3#include <linux/mutex.h> 5#include <linux/mutex.h>
@@ -63,8 +65,11 @@ static int __init setup_noreplace_paravirt(char *str)
63__setup("noreplace-paravirt", setup_noreplace_paravirt); 65__setup("noreplace-paravirt", setup_noreplace_paravirt);
64#endif 66#endif
65 67
66#define DPRINTK(fmt, args...) if (debug_alternative) \ 68#define DPRINTK(fmt, ...) \
67 printk(KERN_DEBUG fmt, args) 69do { \
70 if (debug_alternative) \
71 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
72} while (0)
68 73
69/* 74/*
70 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes 75 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
@@ -428,7 +433,7 @@ void alternatives_smp_switch(int smp)
428 * If this still occurs then you should see a hang 433 * If this still occurs then you should see a hang
429 * or crash shortly after this line: 434 * or crash shortly after this line:
430 */ 435 */
431 printk("lockdep: fixing up alternatives.\n"); 436 pr_info("lockdep: fixing up alternatives\n");
432#endif 437#endif
433 438
434 if (noreplace_smp || smp_alt_once || skip_smp_alternatives) 439 if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
@@ -444,14 +449,14 @@ void alternatives_smp_switch(int smp)
444 if (smp == smp_mode) { 449 if (smp == smp_mode) {
445 /* nothing */ 450 /* nothing */
446 } else if (smp) { 451 } else if (smp) {
447 printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); 452 pr_info("switching to SMP code\n");
448 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); 453 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
449 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP); 454 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
450 list_for_each_entry(mod, &smp_alt_modules, next) 455 list_for_each_entry(mod, &smp_alt_modules, next)
451 alternatives_smp_lock(mod->locks, mod->locks_end, 456 alternatives_smp_lock(mod->locks, mod->locks_end,
452 mod->text, mod->text_end); 457 mod->text, mod->text_end);
453 } else { 458 } else {
454 printk(KERN_INFO "SMP alternatives: switching to UP code\n"); 459 pr_info("switching to UP code\n");
455 set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); 460 set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
456 set_cpu_cap(&cpu_data(0), X86_FEATURE_UP); 461 set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
457 list_for_each_entry(mod, &smp_alt_modules, next) 462 list_for_each_entry(mod, &smp_alt_modules, next)
@@ -546,7 +551,7 @@ void __init alternative_instructions(void)
546#ifdef CONFIG_SMP 551#ifdef CONFIG_SMP
547 if (smp_alt_once) { 552 if (smp_alt_once) {
548 if (1 == num_possible_cpus()) { 553 if (1 == num_possible_cpus()) {
549 printk(KERN_INFO "SMP alternatives: switching to UP code\n"); 554 pr_info("switching to UP code\n");
550 set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); 555 set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
551 set_cpu_cap(&cpu_data(0), X86_FEATURE_UP); 556 set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
552 557
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index be16854591cc..f29f6dd6bc08 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -2,6 +2,9 @@
2 * Shared support code for AMD K8 northbridges and derivates. 2 * Shared support code for AMD K8 northbridges and derivates.
3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. 3 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
4 */ 4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
5#include <linux/types.h> 8#include <linux/types.h>
6#include <linux/slab.h> 9#include <linux/slab.h>
7#include <linux/init.h> 10#include <linux/init.h>
@@ -258,7 +261,7 @@ void amd_flush_garts(void)
258 } 261 }
259 spin_unlock_irqrestore(&gart_lock, flags); 262 spin_unlock_irqrestore(&gart_lock, flags);
260 if (!flushed) 263 if (!flushed)
261 printk("nothing to flush?\n"); 264 pr_notice("nothing to flush?\n");
262} 265}
263EXPORT_SYMBOL_GPL(amd_flush_garts); 266EXPORT_SYMBOL_GPL(amd_flush_garts);
264 267
@@ -269,11 +272,10 @@ static __init int init_amd_nbs(void)
269 err = amd_cache_northbridges(); 272 err = amd_cache_northbridges();
270 273
271 if (err < 0) 274 if (err < 0)
272 printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n"); 275 pr_notice("Cannot enumerate AMD northbridges\n");
273 276
274 if (amd_cache_gart() < 0) 277 if (amd_cache_gart() < 0)
275 printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, " 278 pr_notice("Cannot initialize GART flush words, GART support disabled\n");
276 "GART support disabled.\n");
277 279
278 return err; 280 return err;
279} 281}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ac96561d1a99..5155d6f806f5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -448,8 +448,8 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi
448 448
449 entry = alloc_irq_pin_list(node); 449 entry = alloc_irq_pin_list(node);
450 if (!entry) { 450 if (!entry) {
451 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", 451 pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
452 node, apic, pin); 452 node, apic, pin);
453 return -ENOMEM; 453 return -ENOMEM;
454 } 454 }
455 entry->apic = apic; 455 entry->apic = apic;
@@ -661,7 +661,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
661 ioapic_mask_entry(apic, pin); 661 ioapic_mask_entry(apic, pin);
662 entry = ioapic_read_entry(apic, pin); 662 entry = ioapic_read_entry(apic, pin);
663 if (entry.irr) 663 if (entry.irr)
664 printk(KERN_ERR "Unable to reset IRR for apic: %d, pin :%d\n", 664 pr_err("Unable to reset IRR for apic: %d, pin :%d\n",
665 mpc_ioapic_id(apic), pin); 665 mpc_ioapic_id(apic), pin);
666} 666}
667 667
@@ -895,7 +895,7 @@ static int irq_polarity(int idx)
895 } 895 }
896 case 2: /* reserved */ 896 case 2: /* reserved */
897 { 897 {
898 printk(KERN_WARNING "broken BIOS!!\n"); 898 pr_warn("broken BIOS!!\n");
899 polarity = 1; 899 polarity = 1;
900 break; 900 break;
901 } 901 }
@@ -906,7 +906,7 @@ static int irq_polarity(int idx)
906 } 906 }
907 default: /* invalid */ 907 default: /* invalid */
908 { 908 {
909 printk(KERN_WARNING "broken BIOS!!\n"); 909 pr_warn("broken BIOS!!\n");
910 polarity = 1; 910 polarity = 1;
911 break; 911 break;
912 } 912 }
@@ -948,7 +948,7 @@ static int irq_trigger(int idx)
948 } 948 }
949 default: 949 default:
950 { 950 {
951 printk(KERN_WARNING "broken BIOS!!\n"); 951 pr_warn("broken BIOS!!\n");
952 trigger = 1; 952 trigger = 1;
953 break; 953 break;
954 } 954 }
@@ -962,7 +962,7 @@ static int irq_trigger(int idx)
962 } 962 }
963 case 2: /* reserved */ 963 case 2: /* reserved */
964 { 964 {
965 printk(KERN_WARNING "broken BIOS!!\n"); 965 pr_warn("broken BIOS!!\n");
966 trigger = 1; 966 trigger = 1;
967 break; 967 break;
968 } 968 }
@@ -973,7 +973,7 @@ static int irq_trigger(int idx)
973 } 973 }
974 default: /* invalid */ 974 default: /* invalid */
975 { 975 {
976 printk(KERN_WARNING "broken BIOS!!\n"); 976 pr_warn("broken BIOS!!\n");
977 trigger = 0; 977 trigger = 0;
978 break; 978 break;
979 } 979 }
@@ -991,7 +991,7 @@ static int pin_2_irq(int idx, int apic, int pin)
991 * Debugging check, we are in big trouble if this message pops up! 991 * Debugging check, we are in big trouble if this message pops up!
992 */ 992 */
993 if (mp_irqs[idx].dstirq != pin) 993 if (mp_irqs[idx].dstirq != pin)
994 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); 994 pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");
995 995
996 if (test_bit(bus, mp_bus_not_pci)) { 996 if (test_bit(bus, mp_bus_not_pci)) {
997 irq = mp_irqs[idx].srcbusirq; 997 irq = mp_irqs[idx].srcbusirq;
@@ -1521,7 +1521,6 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1521 reg_03.raw = io_apic_read(ioapic_idx, 3); 1521 reg_03.raw = io_apic_read(ioapic_idx, 3);
1522 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 1522 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1523 1523
1524 printk("\n");
1525 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx)); 1524 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
1526 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); 1525 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1527 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 1526 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
@@ -1578,7 +1577,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1578 i, 1577 i,
1579 ir_entry->index 1578 ir_entry->index
1580 ); 1579 );
1581 printk("%1d %1d %1d %1d %1d " 1580 pr_cont("%1d %1d %1d %1d %1d "
1582 "%1d %1d %X %02X\n", 1581 "%1d %1d %X %02X\n",
1583 ir_entry->format, 1582 ir_entry->format,
1584 ir_entry->mask, 1583 ir_entry->mask,
@@ -1598,7 +1597,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1598 i, 1597 i,
1599 entry.dest 1598 entry.dest
1600 ); 1599 );
1601 printk("%1d %1d %1d %1d %1d " 1600 pr_cont("%1d %1d %1d %1d %1d "
1602 "%1d %1d %02X\n", 1601 "%1d %1d %02X\n",
1603 entry.mask, 1602 entry.mask,
1604 entry.trigger, 1603 entry.trigger,
@@ -1651,8 +1650,8 @@ __apicdebuginit(void) print_IO_APICs(void)
1651 continue; 1650 continue;
1652 printk(KERN_DEBUG "IRQ%d ", irq); 1651 printk(KERN_DEBUG "IRQ%d ", irq);
1653 for_each_irq_pin(entry, cfg->irq_2_pin) 1652 for_each_irq_pin(entry, cfg->irq_2_pin)
1654 printk("-> %d:%d", entry->apic, entry->pin); 1653 pr_cont("-> %d:%d", entry->apic, entry->pin);
1655 printk("\n"); 1654 pr_cont("\n");
1656 } 1655 }
1657 1656
1658 printk(KERN_INFO ".................................... done.\n"); 1657 printk(KERN_INFO ".................................... done.\n");
@@ -1665,9 +1664,9 @@ __apicdebuginit(void) print_APIC_field(int base)
1665 printk(KERN_DEBUG); 1664 printk(KERN_DEBUG);
1666 1665
1667 for (i = 0; i < 8; i++) 1666 for (i = 0; i < 8; i++)
1668 printk(KERN_CONT "%08x", apic_read(base + i*0x10)); 1667 pr_cont("%08x", apic_read(base + i*0x10));
1669 1668
1670 printk(KERN_CONT "\n"); 1669 pr_cont("\n");
1671} 1670}
1672 1671
1673__apicdebuginit(void) print_local_APIC(void *dummy) 1672__apicdebuginit(void) print_local_APIC(void *dummy)
@@ -1769,7 +1768,7 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
1769 printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v); 1768 printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
1770 } 1769 }
1771 } 1770 }
1772 printk("\n"); 1771 pr_cont("\n");
1773} 1772}
1774 1773
1775__apicdebuginit(void) print_local_APICs(int maxcpu) 1774__apicdebuginit(void) print_local_APICs(int maxcpu)
@@ -2065,7 +2064,7 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
2065 reg_00.raw = io_apic_read(ioapic_idx, 0); 2064 reg_00.raw = io_apic_read(ioapic_idx, 0);
2066 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2065 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2067 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) 2066 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
2068 printk("could not set ID!\n"); 2067 pr_cont("could not set ID!\n");
2069 else 2068 else
2070 apic_printk(APIC_VERBOSE, " ok.\n"); 2069 apic_printk(APIC_VERBOSE, " ok.\n");
2071 } 2070 }
@@ -3563,7 +3562,8 @@ static int __init io_apic_get_unique_id(int ioapic, int apic_id)
3563 3562
3564 /* Sanity check */ 3563 /* Sanity check */
3565 if (reg_00.bits.ID != apic_id) { 3564 if (reg_00.bits.ID != apic_id) {
3566 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); 3565 pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
3566 ioapic);
3567 return -1; 3567 return -1;
3568 } 3568 }
3569 } 3569 }
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 659897c00755..e97d542ccdd0 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -26,6 +26,8 @@
26 * 26 *
27 */ 27 */
28 28
29#define pr_fmt(fmt) "summit: %s: " fmt, __func__
30
29#include <linux/mm.h> 31#include <linux/mm.h>
30#include <linux/init.h> 32#include <linux/init.h>
31#include <asm/io.h> 33#include <asm/io.h>
@@ -235,8 +237,8 @@ static int summit_apic_id_registered(void)
235 237
236static void summit_setup_apic_routing(void) 238static void summit_setup_apic_routing(void)
237{ 239{
238 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", 240 pr_info("Enabling APIC mode: Summit. Using %d I/O APICs\n",
239 nr_ioapics); 241 nr_ioapics);
240} 242}
241 243
242static int summit_cpu_present_to_apicid(int mps_cpu) 244static int summit_cpu_present_to_apicid(int mps_cpu)
@@ -275,7 +277,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
275 int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); 277 int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
276 278
277 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { 279 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
278 printk("%s: Not a valid mask!\n", __func__); 280 pr_err("Not a valid mask!\n");
279 return BAD_APICID; 281 return BAD_APICID;
280 } 282 }
281 apicid |= new_apicid; 283 apicid |= new_apicid;
@@ -355,7 +357,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
355 } 357 }
356 } 358 }
357 if (i == rio_table_hdr->num_rio_dev) { 359 if (i == rio_table_hdr->num_rio_dev) {
358 printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__); 360 pr_err("Couldn't find owner Cyclone for Winnipeg!\n");
359 return last_bus; 361 return last_bus;
360 } 362 }
361 363
@@ -366,7 +368,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
366 } 368 }
367 } 369 }
368 if (i == rio_table_hdr->num_scal_dev) { 370 if (i == rio_table_hdr->num_scal_dev) {
369 printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__); 371 pr_err("Couldn't find owner Twister for Cyclone!\n");
370 return last_bus; 372 return last_bus;
371 } 373 }
372 374
@@ -396,7 +398,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
396 num_buses = 9; 398 num_buses = 9;
397 break; 399 break;
398 default: 400 default:
399 printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__); 401 pr_info("Unsupported Winnipeg type!\n");
400 return last_bus; 402 return last_bus;
401 } 403 }
402 404
@@ -411,13 +413,15 @@ static int build_detail_arrays(void)
411 int i, scal_detail_size, rio_detail_size; 413 int i, scal_detail_size, rio_detail_size;
412 414
413 if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) { 415 if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
414 printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); 416 pr_warn("MAX_NUMNODES too low! Defined as %d, but system has %d nodes\n",
417 MAX_NUMNODES, rio_table_hdr->num_scal_dev);
415 return 0; 418 return 0;
416 } 419 }
417 420
418 switch (rio_table_hdr->version) { 421 switch (rio_table_hdr->version) {
419 default: 422 default:
420 printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version); 423 pr_warn("Invalid Rio Grande Table Version: %d\n",
424 rio_table_hdr->version);
421 return 0; 425 return 0;
422 case 2: 426 case 2:
423 scal_detail_size = 11; 427 scal_detail_size = 11;
@@ -462,7 +466,7 @@ void setup_summit(void)
462 offset = *((unsigned short *)(ptr + offset)); 466 offset = *((unsigned short *)(ptr + offset));
463 } 467 }
464 if (!rio_table_hdr) { 468 if (!rio_table_hdr) {
465 printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__); 469 pr_err("Unable to locate Rio Grande Table in EBDA - bailing!\n");
466 return; 470 return;
467 } 471 }
468 472
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 07b0c0db466c..d65464e43503 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -201,6 +201,8 @@
201 * http://www.microsoft.com/whdc/archive/amp_12.mspx] 201 * http://www.microsoft.com/whdc/archive/amp_12.mspx]
202 */ 202 */
203 203
204#define pr_fmt(fmt) "apm: " fmt
205
204#include <linux/module.h> 206#include <linux/module.h>
205 207
206#include <linux/poll.h> 208#include <linux/poll.h>
@@ -485,11 +487,11 @@ static void apm_error(char *str, int err)
485 if (error_table[i].key == err) 487 if (error_table[i].key == err)
486 break; 488 break;
487 if (i < ERROR_COUNT) 489 if (i < ERROR_COUNT)
488 printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg); 490 pr_notice("%s: %s\n", str, error_table[i].msg);
489 else if (err < 0) 491 else if (err < 0)
490 printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err); 492 pr_notice("%s: linux error code %i\n", str, err);
491 else 493 else
492 printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n", 494 pr_notice("%s: unknown error code %#2.2x\n",
493 str, err); 495 str, err);
494} 496}
495 497
@@ -1184,7 +1186,7 @@ static void queue_event(apm_event_t event, struct apm_user *sender)
1184 static int notified; 1186 static int notified;
1185 1187
1186 if (notified++ == 0) 1188 if (notified++ == 0)
1187 printk(KERN_ERR "apm: an event queue overflowed\n"); 1189 pr_err("an event queue overflowed\n");
1188 if (++as->event_tail >= APM_MAX_EVENTS) 1190 if (++as->event_tail >= APM_MAX_EVENTS)
1189 as->event_tail = 0; 1191 as->event_tail = 0;
1190 } 1192 }
@@ -1447,7 +1449,7 @@ static void apm_mainloop(void)
1447static int check_apm_user(struct apm_user *as, const char *func) 1449static int check_apm_user(struct apm_user *as, const char *func)
1448{ 1450{
1449 if (as == NULL || as->magic != APM_BIOS_MAGIC) { 1451 if (as == NULL || as->magic != APM_BIOS_MAGIC) {
1450 printk(KERN_ERR "apm: %s passed bad filp\n", func); 1452 pr_err("%s passed bad filp\n", func);
1451 return 1; 1453 return 1;
1452 } 1454 }
1453 return 0; 1455 return 0;
@@ -1586,7 +1588,7 @@ static int do_release(struct inode *inode, struct file *filp)
1586 as1 = as1->next) 1588 as1 = as1->next)
1587 ; 1589 ;
1588 if (as1 == NULL) 1590 if (as1 == NULL)
1589 printk(KERN_ERR "apm: filp not in user list\n"); 1591 pr_err("filp not in user list\n");
1590 else 1592 else
1591 as1->next = as->next; 1593 as1->next = as->next;
1592 } 1594 }
@@ -1600,11 +1602,9 @@ static int do_open(struct inode *inode, struct file *filp)
1600 struct apm_user *as; 1602 struct apm_user *as;
1601 1603
1602 as = kmalloc(sizeof(*as), GFP_KERNEL); 1604 as = kmalloc(sizeof(*as), GFP_KERNEL);
1603 if (as == NULL) { 1605 if (as == NULL)
1604 printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n",
1605 sizeof(*as));
1606 return -ENOMEM; 1606 return -ENOMEM;
1607 } 1607
1608 as->magic = APM_BIOS_MAGIC; 1608 as->magic = APM_BIOS_MAGIC;
1609 as->event_tail = as->event_head = 0; 1609 as->event_tail = as->event_head = 0;
1610 as->suspends_pending = as->standbys_pending = 0; 1610 as->suspends_pending = as->standbys_pending = 0;
@@ -2313,16 +2313,16 @@ static int __init apm_init(void)
2313 } 2313 }
2314 2314
2315 if (apm_info.disabled) { 2315 if (apm_info.disabled) {
2316 printk(KERN_NOTICE "apm: disabled on user request.\n"); 2316 pr_notice("disabled on user request.\n");
2317 return -ENODEV; 2317 return -ENODEV;
2318 } 2318 }
2319 if ((num_online_cpus() > 1) && !power_off && !smp) { 2319 if ((num_online_cpus() > 1) && !power_off && !smp) {
2320 printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n"); 2320 pr_notice("disabled - APM is not SMP safe.\n");
2321 apm_info.disabled = 1; 2321 apm_info.disabled = 1;
2322 return -ENODEV; 2322 return -ENODEV;
2323 } 2323 }
2324 if (!acpi_disabled) { 2324 if (!acpi_disabled) {
2325 printk(KERN_NOTICE "apm: overridden by ACPI.\n"); 2325 pr_notice("overridden by ACPI.\n");
2326 apm_info.disabled = 1; 2326 apm_info.disabled = 1;
2327 return -ENODEV; 2327 return -ENODEV;
2328 } 2328 }
@@ -2356,8 +2356,7 @@ static int __init apm_init(void)
2356 2356
2357 kapmd_task = kthread_create(apm, NULL, "kapmd"); 2357 kapmd_task = kthread_create(apm, NULL, "kapmd");
2358 if (IS_ERR(kapmd_task)) { 2358 if (IS_ERR(kapmd_task)) {
2359 printk(KERN_ERR "apm: disabled - Unable to start kernel " 2359 pr_err("disabled - Unable to start kernel thread\n");
2360 "thread.\n");
2361 err = PTR_ERR(kapmd_task); 2360 err = PTR_ERR(kapmd_task);
2362 kapmd_task = NULL; 2361 kapmd_task = NULL;
2363 remove_proc_entry("apm", NULL); 2362 remove_proc_entry("apm", NULL);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 46674fbb62ba..c97bb7b5a9f8 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -55,8 +55,8 @@ static void __init check_fpu(void)
55 55
56 if (!boot_cpu_data.hard_math) { 56 if (!boot_cpu_data.hard_math) {
57#ifndef CONFIG_MATH_EMULATION 57#ifndef CONFIG_MATH_EMULATION
58 printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); 58 pr_emerg("No coprocessor found and no math emulation present\n");
59 printk(KERN_EMERG "Giving up.\n"); 59 pr_emerg("Giving up\n");
60 for (;;) ; 60 for (;;) ;
61#endif 61#endif
62 return; 62 return;
@@ -86,7 +86,7 @@ static void __init check_fpu(void)
86 86
87 boot_cpu_data.fdiv_bug = fdiv_bug; 87 boot_cpu_data.fdiv_bug = fdiv_bug;
88 if (boot_cpu_data.fdiv_bug) 88 if (boot_cpu_data.fdiv_bug)
89 printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n"); 89 pr_warn("Hmm, FPU with FDIV bug\n");
90} 90}
91 91
92static void __init check_hlt(void) 92static void __init check_hlt(void)
@@ -94,16 +94,16 @@ static void __init check_hlt(void)
94 if (boot_cpu_data.x86 >= 5 || paravirt_enabled()) 94 if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
95 return; 95 return;
96 96
97 printk(KERN_INFO "Checking 'hlt' instruction... "); 97 pr_info("Checking 'hlt' instruction... ");
98 if (!boot_cpu_data.hlt_works_ok) { 98 if (!boot_cpu_data.hlt_works_ok) {
99 printk("disabled\n"); 99 pr_cont("disabled\n");
100 return; 100 return;
101 } 101 }
102 halt(); 102 halt();
103 halt(); 103 halt();
104 halt(); 104 halt();
105 halt(); 105 halt();
106 printk(KERN_CONT "OK.\n"); 106 pr_cont("OK\n");
107} 107}
108 108
109/* 109/*
@@ -116,7 +116,7 @@ static void __init check_popad(void)
116#ifndef CONFIG_X86_POPAD_OK 116#ifndef CONFIG_X86_POPAD_OK
117 int res, inp = (int) &res; 117 int res, inp = (int) &res;
118 118
119 printk(KERN_INFO "Checking for popad bug... "); 119 pr_info("Checking for popad bug... ");
120 __asm__ __volatile__( 120 __asm__ __volatile__(
121 "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx " 121 "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
122 : "=&a" (res) 122 : "=&a" (res)
@@ -127,9 +127,9 @@ static void __init check_popad(void)
127 * CPU hard. Too bad. 127 * CPU hard. Too bad.
128 */ 128 */
129 if (res != 12345678) 129 if (res != 12345678)
130 printk(KERN_CONT "Buggy.\n"); 130 pr_cont("Buggy\n");
131 else 131 else
132 printk(KERN_CONT "OK.\n"); 132 pr_cont("OK\n");
133#endif 133#endif
134} 134}
135 135
@@ -161,7 +161,7 @@ void __init check_bugs(void)
161{ 161{
162 identify_boot_cpu(); 162 identify_boot_cpu();
163#ifndef CONFIG_SMP 163#ifndef CONFIG_SMP
164 printk(KERN_INFO "CPU: "); 164 pr_info("CPU: ");
165 print_cpu_info(&boot_cpu_data); 165 print_cpu_info(&boot_cpu_data);
166#endif 166#endif
167 check_config(); 167 check_config();
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 0a687fd185e6..5623b4b5d511 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -7,6 +7,9 @@
7 * Copyright 2008 Intel Corporation 7 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen 8 * Author: Andi Kleen
9 */ 9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
10#include <linux/thread_info.h> 13#include <linux/thread_info.h>
11#include <linux/capability.h> 14#include <linux/capability.h>
12#include <linux/miscdevice.h> 15#include <linux/miscdevice.h>
@@ -210,7 +213,7 @@ static void drain_mcelog_buffer(void)
210 cpu_relax(); 213 cpu_relax();
211 214
212 if (!m->finished && retries >= 4) { 215 if (!m->finished && retries >= 4) {
213 pr_err("MCE: skipping error being logged currently!\n"); 216 pr_err("skipping error being logged currently!\n");
214 break; 217 break;
215 } 218 }
216 } 219 }
@@ -1167,8 +1170,9 @@ int memory_failure(unsigned long pfn, int vector, int flags)
1167{ 1170{
1168 /* mce_severity() should not hand us an ACTION_REQUIRED error */ 1171 /* mce_severity() should not hand us an ACTION_REQUIRED error */
1169 BUG_ON(flags & MF_ACTION_REQUIRED); 1172 BUG_ON(flags & MF_ACTION_REQUIRED);
1170 printk(KERN_ERR "Uncorrected memory error in page 0x%lx ignored\n" 1173 pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1171 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", pfn); 1174 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1175 pfn);
1172 1176
1173 return 0; 1177 return 0;
1174} 1178}
@@ -1358,11 +1362,10 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
1358 1362
1359 b = cap & MCG_BANKCNT_MASK; 1363 b = cap & MCG_BANKCNT_MASK;
1360 if (!banks) 1364 if (!banks)
1361 printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b); 1365 pr_info("CPU supports %d MCE banks\n", b);
1362 1366
1363 if (b > MAX_NR_BANKS) { 1367 if (b > MAX_NR_BANKS) {
1364 printk(KERN_WARNING 1368 pr_warn("Using only %u machine check banks out of %u\n",
1365 "MCE: Using only %u machine check banks out of %u\n",
1366 MAX_NR_BANKS, b); 1369 MAX_NR_BANKS, b);
1367 b = MAX_NR_BANKS; 1370 b = MAX_NR_BANKS;
1368 } 1371 }
@@ -1419,7 +1422,7 @@ static void __mcheck_cpu_init_generic(void)
1419static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 1422static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1420{ 1423{
1421 if (c->x86_vendor == X86_VENDOR_UNKNOWN) { 1424 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1422 pr_info("MCE: unknown CPU type - not enabling MCE support.\n"); 1425 pr_info("unknown CPU type - not enabling MCE support\n");
1423 return -EOPNOTSUPP; 1426 return -EOPNOTSUPP;
1424 } 1427 }
1425 1428
@@ -1574,7 +1577,7 @@ static void __mcheck_cpu_init_timer(void)
1574/* Handle unconfigured int18 (should never happen) */ 1577/* Handle unconfigured int18 (should never happen) */
1575static void unexpected_machine_check(struct pt_regs *regs, long error_code) 1578static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1576{ 1579{
1577 printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", 1580 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1578 smp_processor_id()); 1581 smp_processor_id());
1579} 1582}
1580 1583
@@ -1893,8 +1896,7 @@ static int __init mcheck_enable(char *str)
1893 get_option(&str, &monarch_timeout); 1896 get_option(&str, &monarch_timeout);
1894 } 1897 }
1895 } else { 1898 } else {
1896 printk(KERN_INFO "mce argument %s ignored. Please use /sys\n", 1899 pr_info("mce argument %s ignored. Please use /sys\n", str);
1897 str);
1898 return 0; 1900 return 0;
1899 } 1901 }
1900 return 1; 1902 return 1;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546ec6aef..9e3f5d6e3d20 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -5,6 +5,8 @@
5 * among events on a single PMU. 5 * among events on a single PMU.
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
8#include <linux/stddef.h> 10#include <linux/stddef.h>
9#include <linux/types.h> 11#include <linux/types.h>
10#include <linux/init.h> 12#include <linux/init.h>
@@ -1000,7 +1002,7 @@ static void intel_pmu_reset(void)
1000 1002
1001 local_irq_save(flags); 1003 local_irq_save(flags);
1002 1004
1003 printk("clearing PMU state on CPU#%d\n", smp_processor_id()); 1005 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1004 1006
1005 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1007 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1006 checking_wrmsrl(x86_pmu_config_addr(idx), 0ull); 1008 checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
@@ -1638,14 +1640,14 @@ static __init void intel_clovertown_quirk(void)
1638 * But taken together it might just make sense to not enable PEBS on 1640 * But taken together it might just make sense to not enable PEBS on
1639 * these chips. 1641 * these chips.
1640 */ 1642 */
1641 printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); 1643 pr_warn("PEBS disabled due to CPU errata\n");
1642 x86_pmu.pebs = 0; 1644 x86_pmu.pebs = 0;
1643 x86_pmu.pebs_constraints = NULL; 1645 x86_pmu.pebs_constraints = NULL;
1644} 1646}
1645 1647
1646static __init void intel_sandybridge_quirk(void) 1648static __init void intel_sandybridge_quirk(void)
1647{ 1649{
1648 printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); 1650 pr_warn("PEBS disabled due to CPU errata\n");
1649 x86_pmu.pebs = 0; 1651 x86_pmu.pebs = 0;
1650 x86_pmu.pebs_constraints = NULL; 1652 x86_pmu.pebs_constraints = NULL;
1651} 1653}
@@ -1667,8 +1669,8 @@ static __init void intel_arch_events_quirk(void)
1667 /* disable event that reported as not presend by cpuid */ 1669 /* disable event that reported as not presend by cpuid */
1668 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { 1670 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
1669 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; 1671 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
1670 printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n", 1672 pr_warn("CPUID marked event: \'%s\' unavailable\n",
1671 intel_arch_events_map[bit].name); 1673 intel_arch_events_map[bit].name);
1672 } 1674 }
1673} 1675}
1674 1676
@@ -1687,7 +1689,7 @@ static __init void intel_nehalem_quirk(void)
1687 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; 1689 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
1688 ebx.split.no_branch_misses_retired = 0; 1690 ebx.split.no_branch_misses_retired = 0;
1689 x86_pmu.events_maskl = ebx.full; 1691 x86_pmu.events_maskl = ebx.full;
1690 printk(KERN_INFO "CPU erratum AAJ80 worked around\n"); 1692 pr_info("CPU erratum AAJ80 worked around\n");
1691 } 1693 }
1692} 1694}
1693 1695
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 571246d81edf..87d3b5d663cd 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -27,8 +27,8 @@ static int die_counter;
27 27
28void printk_address(unsigned long address, int reliable) 28void printk_address(unsigned long address, int reliable)
29{ 29{
30 printk(" [<%p>] %s%pB\n", (void *) address, 30 pr_cont(" [<%p>] %s%pB\n",
31 reliable ? "" : "? ", (void *) address); 31 (void *)address, reliable ? "" : "? ", (void *)address);
32} 32}
33 33
34#ifdef CONFIG_FUNCTION_GRAPH_TRACER 34#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index e0b1d783daab..3a8aced11ae9 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -73,11 +73,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
73 if (kstack_end(stack)) 73 if (kstack_end(stack))
74 break; 74 break;
75 if (i && ((i % STACKSLOTS_PER_LINE) == 0)) 75 if (i && ((i % STACKSLOTS_PER_LINE) == 0))
76 printk(KERN_CONT "\n"); 76 pr_cont("\n");
77 printk(KERN_CONT " %08lx", *stack++); 77 pr_cont(" %08lx", *stack++);
78 touch_nmi_watchdog(); 78 touch_nmi_watchdog();
79 } 79 }
80 printk(KERN_CONT "\n"); 80 pr_cont("\n");
81 show_trace_log_lvl(task, regs, sp, bp, log_lvl); 81 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
82} 82}
83 83
@@ -89,9 +89,9 @@ void show_regs(struct pt_regs *regs)
89 print_modules(); 89 print_modules();
90 __show_regs(regs, !user_mode_vm(regs)); 90 __show_regs(regs, !user_mode_vm(regs));
91 91
92 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n", 92 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
93 TASK_COMM_LEN, current->comm, task_pid_nr(current), 93 TASK_COMM_LEN, current->comm, task_pid_nr(current),
94 current_thread_info(), current, task_thread_info(current)); 94 current_thread_info(), current, task_thread_info(current));
95 /* 95 /*
96 * When in-kernel, we also print out the stack and code at the 96 * When in-kernel, we also print out the stack and code at the
97 * time of the fault.. 97 * time of the fault..
@@ -102,10 +102,10 @@ void show_regs(struct pt_regs *regs)
102 unsigned char c; 102 unsigned char c;
103 u8 *ip; 103 u8 *ip;
104 104
105 printk(KERN_EMERG "Stack:\n"); 105 pr_emerg("Stack:\n");
106 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG); 106 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
107 107
108 printk(KERN_EMERG "Code: "); 108 pr_emerg("Code:");
109 109
110 ip = (u8 *)regs->ip - code_prologue; 110 ip = (u8 *)regs->ip - code_prologue;
111 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { 111 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
@@ -116,16 +116,16 @@ void show_regs(struct pt_regs *regs)
116 for (i = 0; i < code_len; i++, ip++) { 116 for (i = 0; i < code_len; i++, ip++) {
117 if (ip < (u8 *)PAGE_OFFSET || 117 if (ip < (u8 *)PAGE_OFFSET ||
118 probe_kernel_address(ip, c)) { 118 probe_kernel_address(ip, c)) {
119 printk(KERN_CONT " Bad EIP value."); 119 pr_cont(" Bad EIP value.");
120 break; 120 break;
121 } 121 }
122 if (ip == (u8 *)regs->ip) 122 if (ip == (u8 *)regs->ip)
123 printk(KERN_CONT "<%02x> ", c); 123 pr_cont(" <%02x>", c);
124 else 124 else
125 printk(KERN_CONT "%02x ", c); 125 pr_cont(" %02x", c);
126 } 126 }
127 } 127 }
128 printk(KERN_CONT "\n"); 128 pr_cont("\n");
129} 129}
130 130
131int is_valid_bugaddr(unsigned long ip) 131int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 791b76122aa8..c582e9c5bd1a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -228,20 +228,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
228 if (stack >= irq_stack && stack <= irq_stack_end) { 228 if (stack >= irq_stack && stack <= irq_stack_end) {
229 if (stack == irq_stack_end) { 229 if (stack == irq_stack_end) {
230 stack = (unsigned long *) (irq_stack_end[-1]); 230 stack = (unsigned long *) (irq_stack_end[-1]);
231 printk(KERN_CONT " <EOI> "); 231 pr_cont(" <EOI> ");
232 } 232 }
233 } else { 233 } else {
234 if (((long) stack & (THREAD_SIZE-1)) == 0) 234 if (((long) stack & (THREAD_SIZE-1)) == 0)
235 break; 235 break;
236 } 236 }
237 if (i && ((i % STACKSLOTS_PER_LINE) == 0)) 237 if (i && ((i % STACKSLOTS_PER_LINE) == 0))
238 printk(KERN_CONT "\n"); 238 pr_cont("\n");
239 printk(KERN_CONT " %016lx", *stack++); 239 pr_cont(" %016lx", *stack++);
240 touch_nmi_watchdog(); 240 touch_nmi_watchdog();
241 } 241 }
242 preempt_enable(); 242 preempt_enable();
243 243
244 printk(KERN_CONT "\n"); 244 pr_cont("\n");
245 show_trace_log_lvl(task, regs, sp, bp, log_lvl); 245 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
246} 246}
247 247
@@ -256,8 +256,8 @@ void show_regs(struct pt_regs *regs)
256 printk("CPU %d ", cpu); 256 printk("CPU %d ", cpu);
257 print_modules(); 257 print_modules();
258 __show_regs(regs, 1); 258 __show_regs(regs, 1);
259 printk("Process %s (pid: %d, threadinfo %p, task %p)\n", 259 printk(KERN_DEFAULT "Process %s (pid: %d, threadinfo %p, task %p)\n",
260 cur->comm, cur->pid, task_thread_info(cur), cur); 260 cur->comm, cur->pid, task_thread_info(cur), cur);
261 261
262 /* 262 /*
263 * When in-kernel, we also print out the stack and code at the 263 * When in-kernel, we also print out the stack and code at the
@@ -284,16 +284,16 @@ void show_regs(struct pt_regs *regs)
284 for (i = 0; i < code_len; i++, ip++) { 284 for (i = 0; i < code_len; i++, ip++) {
285 if (ip < (u8 *)PAGE_OFFSET || 285 if (ip < (u8 *)PAGE_OFFSET ||
286 probe_kernel_address(ip, c)) { 286 probe_kernel_address(ip, c)) {
287 printk(KERN_CONT " Bad RIP value."); 287 pr_cont(" Bad RIP value.");
288 break; 288 break;
289 } 289 }
290 if (ip == (u8 *)regs->ip) 290 if (ip == (u8 *)regs->ip)
291 printk(KERN_CONT "<%02x> ", c); 291 pr_cont("<%02x> ", c);
292 else 292 else
293 printk(KERN_CONT "%02x ", c); 293 pr_cont("%02x ", c);
294 } 294 }
295 } 295 }
296 printk(KERN_CONT "\n"); 296 pr_cont("\n");
297} 297}
298 298
299int is_valid_bugaddr(unsigned long ip) 299int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3dafc6003b7c..1f5f1d5d2a02 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -294,9 +294,9 @@ void fixup_irqs(void)
294 raw_spin_unlock(&desc->lock); 294 raw_spin_unlock(&desc->lock);
295 295
296 if (break_affinity && set_affinity) 296 if (break_affinity && set_affinity)
297 printk("Broke affinity for irq %i\n", irq); 297 pr_notice("Broke affinity for irq %i\n", irq);
298 else if (!set_affinity) 298 else if (!set_affinity)
299 printk("Cannot set affinity for irq %i\n", irq); 299 pr_notice("Cannot set affinity for irq %i\n", irq);
300 } 300 }
301 301
302 /* 302 /*
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index f21fd94ac897..202494d2ec6e 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -15,6 +15,9 @@
15 along with this program; if not, write to the Free Software 15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/ 17*/
18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
18#include <linux/moduleloader.h> 21#include <linux/moduleloader.h>
19#include <linux/elf.h> 22#include <linux/elf.h>
20#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
@@ -30,9 +33,14 @@
30#include <asm/pgtable.h> 33#include <asm/pgtable.h>
31 34
32#if 0 35#if 0
33#define DEBUGP printk 36#define DEBUGP(fmt, ...) \
37 printk(KERN_DEBUG fmt, ##__VA_ARGS__)
34#else 38#else
35#define DEBUGP(fmt...) 39#define DEBUGP(fmt, ...) \
40do { \
41 if (0) \
42 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
43} while (0)
36#endif 44#endif
37 45
38void *module_alloc(unsigned long size) 46void *module_alloc(unsigned long size)
@@ -56,8 +64,8 @@ int apply_relocate(Elf32_Shdr *sechdrs,
56 Elf32_Sym *sym; 64 Elf32_Sym *sym;
57 uint32_t *location; 65 uint32_t *location;
58 66
59 DEBUGP("Applying relocate section %u to %u\n", relsec, 67 DEBUGP("Applying relocate section %u to %u\n",
60 sechdrs[relsec].sh_info); 68 relsec, sechdrs[relsec].sh_info);
61 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 69 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
62 /* This is where to make the change */ 70 /* This is where to make the change */
63 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr 71 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -77,7 +85,7 @@ int apply_relocate(Elf32_Shdr *sechdrs,
77 *location += sym->st_value - (uint32_t)location; 85 *location += sym->st_value - (uint32_t)location;
78 break; 86 break;
79 default: 87 default:
80 printk(KERN_ERR "module %s: Unknown relocation: %u\n", 88 pr_err("%s: Unknown relocation: %u\n",
81 me->name, ELF32_R_TYPE(rel[i].r_info)); 89 me->name, ELF32_R_TYPE(rel[i].r_info));
82 return -ENOEXEC; 90 return -ENOEXEC;
83 } 91 }
@@ -97,8 +105,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
97 void *loc; 105 void *loc;
98 u64 val; 106 u64 val;
99 107
100 DEBUGP("Applying relocate section %u to %u\n", relsec, 108 DEBUGP("Applying relocate section %u to %u\n",
101 sechdrs[relsec].sh_info); 109 relsec, sechdrs[relsec].sh_info);
102 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 110 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
103 /* This is where to make the change */ 111 /* This is where to make the change */
104 loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr 112 loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -110,8 +118,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
110 + ELF64_R_SYM(rel[i].r_info); 118 + ELF64_R_SYM(rel[i].r_info);
111 119
112 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n", 120 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
113 (int)ELF64_R_TYPE(rel[i].r_info), 121 (int)ELF64_R_TYPE(rel[i].r_info),
114 sym->st_value, rel[i].r_addend, (u64)loc); 122 sym->st_value, rel[i].r_addend, (u64)loc);
115 123
116 val = sym->st_value + rel[i].r_addend; 124 val = sym->st_value + rel[i].r_addend;
117 125
@@ -140,7 +148,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
140#endif 148#endif
141 break; 149 break;
142 default: 150 default:
143 printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n", 151 pr_err("%s: Unknown rela relocation: %llu\n",
144 me->name, ELF64_R_TYPE(rel[i].r_info)); 152 me->name, ELF64_R_TYPE(rel[i].r_info));
145 return -ENOEXEC; 153 return -ENOEXEC;
146 } 154 }
@@ -148,9 +156,9 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
148 return 0; 156 return 0;
149 157
150overflow: 158overflow:
151 printk(KERN_ERR "overflow in relocation type %d val %Lx\n", 159 pr_err("overflow in relocation type %d val %Lx\n",
152 (int)ELF64_R_TYPE(rel[i].r_info), val); 160 (int)ELF64_R_TYPE(rel[i].r_info), val);
153 printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n", 161 pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
154 me->name); 162 me->name);
155 return -ENOEXEC; 163 return -ENOEXEC;
156} 164}
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index b72838bae64a..299d49302e7d 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -22,6 +22,8 @@
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25#define pr_fmt(fmt) "Calgary: " fmt
26
25#include <linux/kernel.h> 27#include <linux/kernel.h>
26#include <linux/init.h> 28#include <linux/init.h>
27#include <linux/types.h> 29#include <linux/types.h>
@@ -245,7 +247,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
245 offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0, 247 offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
246 npages, 0, boundary_size, 0); 248 npages, 0, boundary_size, 0);
247 if (offset == ~0UL) { 249 if (offset == ~0UL) {
248 printk(KERN_WARNING "Calgary: IOMMU full.\n"); 250 pr_warn("IOMMU full\n");
249 spin_unlock_irqrestore(&tbl->it_lock, flags); 251 spin_unlock_irqrestore(&tbl->it_lock, flags);
250 if (panic_on_overflow) 252 if (panic_on_overflow)
251 panic("Calgary: fix the allocator.\n"); 253 panic("Calgary: fix the allocator.\n");
@@ -271,8 +273,8 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
271 entry = iommu_range_alloc(dev, tbl, npages); 273 entry = iommu_range_alloc(dev, tbl, npages);
272 274
273 if (unlikely(entry == DMA_ERROR_CODE)) { 275 if (unlikely(entry == DMA_ERROR_CODE)) {
274 printk(KERN_WARNING "Calgary: failed to allocate %u pages in " 276 pr_warn("failed to allocate %u pages in iommu %p\n",
275 "iommu %p\n", npages, tbl); 277 npages, tbl);
276 return DMA_ERROR_CODE; 278 return DMA_ERROR_CODE;
277 } 279 }
278 280
@@ -561,8 +563,7 @@ static void calgary_tce_cache_blast(struct iommu_table *tbl)
561 i++; 563 i++;
562 } while ((val & 0xff) != 0xff && i < 100); 564 } while ((val & 0xff) != 0xff && i < 100);
563 if (i == 100) 565 if (i == 100)
564 printk(KERN_WARNING "Calgary: PCI bus not quiesced, " 566 pr_warn("PCI bus not quiesced, continuing anyway\n");
565 "continuing anyway\n");
566 567
567 /* invalidate TCE cache */ 568 /* invalidate TCE cache */
568 target = calgary_reg(bbar, tar_offset(tbl->it_busno)); 569 target = calgary_reg(bbar, tar_offset(tbl->it_busno));
@@ -604,8 +605,7 @@ begin:
604 i++; 605 i++;
605 } while ((val64 & 0xff) != 0xff && i < 100); 606 } while ((val64 & 0xff) != 0xff && i < 100);
606 if (i == 100) 607 if (i == 100)
607 printk(KERN_WARNING "CalIOC2: PCI bus not quiesced, " 608 pr_warn("CalIOC2: PCI bus not quiesced, continuing anyway\n");
608 "continuing anyway\n");
609 609
610 /* 3. poll Page Migration DEBUG for SoftStopFault */ 610 /* 3. poll Page Migration DEBUG for SoftStopFault */
611 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG); 611 target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
@@ -617,8 +617,7 @@ begin:
617 if (++count < 100) 617 if (++count < 100)
618 goto begin; 618 goto begin;
619 else { 619 else {
620 printk(KERN_WARNING "CalIOC2: too many SoftStopFaults, " 620 pr_warn("CalIOC2: too many SoftStopFaults, aborting TCE cache flush sequence!\n");
621 "aborting TCE cache flush sequence!\n");
622 return; /* pray for the best */ 621 return; /* pray for the best */
623 } 622 }
624 } 623 }
@@ -840,8 +839,8 @@ static void calgary_dump_error_regs(struct iommu_table *tbl)
840 plssr = be32_to_cpu(readl(target)); 839 plssr = be32_to_cpu(readl(target));
841 840
842 /* If no error, the agent ID in the CSR is not valid */ 841 /* If no error, the agent ID in the CSR is not valid */
843 printk(KERN_EMERG "Calgary: DMA error on Calgary PHB 0x%x, " 842 pr_emerg("DMA error on Calgary PHB 0x%x, 0x%08x@CSR 0x%08x@PLSSR\n",
844 "0x%08x@CSR 0x%08x@PLSSR\n", tbl->it_busno, csr, plssr); 843 tbl->it_busno, csr, plssr);
845} 844}
846 845
847static void calioc2_dump_error_regs(struct iommu_table *tbl) 846static void calioc2_dump_error_regs(struct iommu_table *tbl)
@@ -867,22 +866,21 @@ static void calioc2_dump_error_regs(struct iommu_table *tbl)
867 target = calgary_reg(bbar, phboff | 0x800); 866 target = calgary_reg(bbar, phboff | 0x800);
868 mck = be32_to_cpu(readl(target)); 867 mck = be32_to_cpu(readl(target));
869 868
870 printk(KERN_EMERG "Calgary: DMA error on CalIOC2 PHB 0x%x\n", 869 pr_emerg("DMA error on CalIOC2 PHB 0x%x\n", tbl->it_busno);
871 tbl->it_busno);
872 870
873 printk(KERN_EMERG "Calgary: 0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n", 871 pr_emerg("0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
874 csr, plssr, csmr, mck); 872 csr, plssr, csmr, mck);
875 873
876 /* dump rest of error regs */ 874 /* dump rest of error regs */
877 printk(KERN_EMERG "Calgary: "); 875 pr_emerg("");
878 for (i = 0; i < ARRAY_SIZE(errregs); i++) { 876 for (i = 0; i < ARRAY_SIZE(errregs); i++) {
879 /* err regs are at 0x810 - 0x870 */ 877 /* err regs are at 0x810 - 0x870 */
880 erroff = (0x810 + (i * 0x10)); 878 erroff = (0x810 + (i * 0x10));
881 target = calgary_reg(bbar, phboff | erroff); 879 target = calgary_reg(bbar, phboff | erroff);
882 errregs[i] = be32_to_cpu(readl(target)); 880 errregs[i] = be32_to_cpu(readl(target));
883 printk("0x%08x@0x%lx ", errregs[i], erroff); 881 pr_cont("0x%08x@0x%lx ", errregs[i], erroff);
884 } 882 }
885 printk("\n"); 883 pr_cont("\n");
886 884
887 /* root complex status */ 885 /* root complex status */
888 target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS); 886 target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 735279e54e59..ef6a8456f719 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/errno.h> 3#include <linux/errno.h>
2#include <linux/kernel.h> 4#include <linux/kernel.h>
3#include <linux/mm.h> 5#include <linux/mm.h>
@@ -145,16 +147,14 @@ void show_regs_common(void)
145 /* Board Name is optional */ 147 /* Board Name is optional */
146 board = dmi_get_system_info(DMI_BOARD_NAME); 148 board = dmi_get_system_info(DMI_BOARD_NAME);
147 149
148 printk(KERN_CONT "\n"); 150 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
149 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", 151 current->pid, current->comm, print_tainted(),
150 current->pid, current->comm, print_tainted(), 152 init_utsname()->release,
151 init_utsname()->release, 153 (int)strcspn(init_utsname()->version, " "),
152 (int)strcspn(init_utsname()->version, " "), 154 init_utsname()->version,
153 init_utsname()->version); 155 vendor, product,
154 printk(KERN_CONT " %s %s", vendor, product); 156 board ? "/" : "",
155 if (board) 157 board ? board : "");
156 printk(KERN_CONT "/%s", board);
157 printk(KERN_CONT "\n");
158} 158}
159 159
160void flush_thread(void) 160void flush_thread(void)
@@ -645,7 +645,7 @@ static void amd_e400_idle(void)
645 amd_e400_c1e_detected = true; 645 amd_e400_c1e_detected = true;
646 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 646 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
647 mark_tsc_unstable("TSC halt in AMD C1E"); 647 mark_tsc_unstable("TSC halt in AMD C1E");
648 printk(KERN_INFO "System has AMD C1E enabled\n"); 648 pr_info("System has AMD C1E enabled\n");
649 } 649 }
650 } 650 }
651 651
@@ -659,8 +659,7 @@ static void amd_e400_idle(void)
659 */ 659 */
660 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, 660 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
661 &cpu); 661 &cpu);
662 printk(KERN_INFO "Switch to broadcast mode on CPU%d\n", 662 pr_info("Switch to broadcast mode on CPU%d\n", cpu);
663 cpu);
664 } 663 }
665 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); 664 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
666 665
@@ -681,8 +680,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
681{ 680{
682#ifdef CONFIG_SMP 681#ifdef CONFIG_SMP
683 if (pm_idle == poll_idle && smp_num_siblings > 1) { 682 if (pm_idle == poll_idle && smp_num_siblings > 1) {
684 printk_once(KERN_WARNING "WARNING: polling idle and HT enabled," 683 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
685 " performance may degrade.\n");
686 } 684 }
687#endif 685#endif
688 if (pm_idle) 686 if (pm_idle)
@@ -692,11 +690,11 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
692 /* 690 /*
693 * One CPU supports mwait => All CPUs supports mwait 691 * One CPU supports mwait => All CPUs supports mwait
694 */ 692 */
695 printk(KERN_INFO "using mwait in idle threads.\n"); 693 pr_info("using mwait in idle threads\n");
696 pm_idle = mwait_idle; 694 pm_idle = mwait_idle;
697 } else if (cpu_has_amd_erratum(amd_erratum_400)) { 695 } else if (cpu_has_amd_erratum(amd_erratum_400)) {
698 /* E400: APIC timer interrupt does not wake up CPU from C1e */ 696 /* E400: APIC timer interrupt does not wake up CPU from C1e */
699 printk(KERN_INFO "using AMD E400 aware idle routine\n"); 697 pr_info("using AMD E400 aware idle routine\n");
700 pm_idle = amd_e400_idle; 698 pm_idle = amd_e400_idle;
701 } else 699 } else
702 pm_idle = default_idle; 700 pm_idle = default_idle;
@@ -715,7 +713,7 @@ static int __init idle_setup(char *str)
715 return -EINVAL; 713 return -EINVAL;
716 714
717 if (!strcmp(str, "poll")) { 715 if (!strcmp(str, "poll")) {
718 printk("using polling idle threads.\n"); 716 pr_info("using polling idle threads\n");
719 pm_idle = poll_idle; 717 pm_idle = poll_idle;
720 boot_option_idle_override = IDLE_POLL; 718 boot_option_idle_override = IDLE_POLL;
721 } else if (!strcmp(str, "mwait")) { 719 } else if (!strcmp(str, "mwait")) {
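Two things in the process.c hunks are worth spelling out: the pr_fmt() definition added at the top of the file makes every later pr_<level>() call prefix its message with KBUILD_MODNAME, and pr_warn_once() folds the old printk_once(KERN_WARNING ...) into a single call. A hedged sketch in a hypothetical foo.c (so the prefix would presumably be "foo: "); report_idle_choice() is an invented name, not a kernel function:

/*
 * Conventionally defined before any #include, so the fallback
 * definition in <linux/printk.h> never takes effect.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

static void report_idle_choice(bool polling_with_ht)
{
	pr_info("using mwait in idle threads\n");	/* logs "foo: using mwait ..." */

	if (polling_with_ht)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
}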
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 61cdf7fdf099..85151f3e36e2 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -117,10 +117,10 @@ void release_thread(struct task_struct *dead_task)
117{ 117{
118 if (dead_task->mm) { 118 if (dead_task->mm) {
119 if (dead_task->mm->context.size) { 119 if (dead_task->mm->context.size) {
120 printk("WARNING: dead process %8s still has LDT? <%p/%d>\n", 120 pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n",
121 dead_task->comm, 121 dead_task->comm,
122 dead_task->mm->context.ldt, 122 dead_task->mm->context.ldt,
123 dead_task->mm->context.size); 123 dead_task->mm->context.size);
124 BUG(); 124 BUG();
125 } 125 }
126 } 126 }
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 79c45af81604..ab3f06260712 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/module.h> 3#include <linux/module.h>
2#include <linux/reboot.h> 4#include <linux/reboot.h>
3#include <linux/init.h> 5#include <linux/init.h>
@@ -152,7 +154,8 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
152{ 154{
153 if (reboot_type != BOOT_BIOS) { 155 if (reboot_type != BOOT_BIOS) {
154 reboot_type = BOOT_BIOS; 156 reboot_type = BOOT_BIOS;
155 printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident); 157 pr_info("%s series board detected. Selecting %s-method for reboots.\n",
 158 d->ident, "BIOS");
156 } 159 }
157 return 0; 160 return 0;
158} 161}
@@ -207,8 +210,8 @@ static int __init set_pci_reboot(const struct dmi_system_id *d)
207{ 210{
208 if (reboot_type != BOOT_CF9) { 211 if (reboot_type != BOOT_CF9) {
209 reboot_type = BOOT_CF9; 212 reboot_type = BOOT_CF9;
210 printk(KERN_INFO "%s series board detected. " 213 pr_info("%s series board detected. Selecting %s-method for reboots.\n",
211 "Selecting PCI-method for reboots.\n", d->ident); 214 d->ident, "PCI");
212 } 215 }
213 return 0; 216 return 0;
214} 217}
@@ -217,7 +220,8 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
217{ 220{
218 if (reboot_type != BOOT_KBD) { 221 if (reboot_type != BOOT_KBD) {
219 reboot_type = BOOT_KBD; 222 reboot_type = BOOT_KBD;
220 printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident); 223 pr_info("%s series board detected. Selecting %s-method for reboot.\n",
 224 d->ident, "KBD");
221 } 225 }
222 return 0; 226 return 0;
223} 227}
@@ -668,7 +672,7 @@ static void __machine_emergency_restart(int emergency)
668 672
669static void native_machine_restart(char *__unused) 673static void native_machine_restart(char *__unused)
670{ 674{
671 printk("machine restart\n"); 675 pr_notice("machine restart\n");
672 676
673 if (!reboot_force) 677 if (!reboot_force)
674 machine_shutdown(); 678 machine_shutdown();
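The three reboot-quirk hunks above collapse near-duplicate strings into one shared format with a "%s-method" placeholder; the only catch is that the arguments must follow the placeholder order, board ident first, then the method name. A hedged sketch of the same pattern using an invented set_acpi_reboot() quirk, not a function from this patch:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>

static int __init set_acpi_reboot(const struct dmi_system_id *d)
{
	/* d->ident fills "%s series board", the literal fills "%s-method" */
	pr_info("%s series board detected. Selecting %s-method for reboots.\n",
		d->ident, "ACPI");
	return 0;
}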
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 21af737053aa..b280908a376e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -6,6 +6,9 @@
6 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes 6 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
7 * 2000-2002 x86-64 support by Andi Kleen 7 * 2000-2002 x86-64 support by Andi Kleen
8 */ 8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
9#include <linux/sched.h> 12#include <linux/sched.h>
10#include <linux/mm.h> 13#include <linux/mm.h>
11#include <linux/smp.h> 14#include <linux/smp.h>
@@ -814,7 +817,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
814 me->comm, me->pid, where, frame, 817 me->comm, me->pid, where, frame,
815 regs->ip, regs->sp, regs->orig_ax); 818 regs->ip, regs->sp, regs->orig_ax);
816 print_vma_addr(" in ", regs->ip); 819 print_vma_addr(" in ", regs->ip);
817 printk(KERN_CONT "\n"); 820 pr_cont("\n");
818 } 821 }
819 822
820 force_sig(SIGSEGV, me); 823 force_sig(SIGSEGV, me);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fd019d78b1f4..456d64806c8f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1,4 +1,4 @@
1/* 1 /*
2 * x86 SMP booting functions 2 * x86 SMP booting functions
3 * 3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> 4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
@@ -39,6 +39,8 @@
39 * Glauber Costa : i386 and x86_64 integration 39 * Glauber Costa : i386 and x86_64 integration
40 */ 40 */
41 41
42#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43
42#include <linux/init.h> 44#include <linux/init.h>
43#include <linux/smp.h> 45#include <linux/smp.h>
44#include <linux/module.h> 46#include <linux/module.h>
@@ -184,7 +186,7 @@ static void __cpuinit smp_callin(void)
184 * boards) 186 * boards)
185 */ 187 */
186 188
187 pr_debug("CALLIN, before setup_local_APIC().\n"); 189 pr_debug("CALLIN, before setup_local_APIC()\n");
188 if (apic->smp_callin_clear_local_apic) 190 if (apic->smp_callin_clear_local_apic)
189 apic->smp_callin_clear_local_apic(); 191 apic->smp_callin_clear_local_apic();
190 setup_local_APIC(); 192 setup_local_APIC();
@@ -420,17 +422,16 @@ static void impress_friends(void)
420 /* 422 /*
421 * Allow the user to impress friends. 423 * Allow the user to impress friends.
422 */ 424 */
423 pr_debug("Before bogomips.\n"); 425 pr_debug("Before bogomips\n");
424 for_each_possible_cpu(cpu) 426 for_each_possible_cpu(cpu)
425 if (cpumask_test_cpu(cpu, cpu_callout_mask)) 427 if (cpumask_test_cpu(cpu, cpu_callout_mask))
426 bogosum += cpu_data(cpu).loops_per_jiffy; 428 bogosum += cpu_data(cpu).loops_per_jiffy;
427 printk(KERN_INFO 429 pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
428 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
429 num_online_cpus(), 430 num_online_cpus(),
430 bogosum/(500000/HZ), 431 bogosum/(500000/HZ),
431 (bogosum/(5000/HZ))%100); 432 (bogosum/(5000/HZ))%100);
432 433
433 pr_debug("Before bogocount - setting activated=1.\n"); 434 pr_debug("Before bogocount - setting activated=1\n");
434} 435}
435 436
436void __inquire_remote_apic(int apicid) 437void __inquire_remote_apic(int apicid)
@@ -440,18 +441,17 @@ void __inquire_remote_apic(int apicid)
440 int timeout; 441 int timeout;
441 u32 status; 442 u32 status;
442 443
443 printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid); 444 pr_info("Inquiring remote APIC 0x%x...\n", apicid);
444 445
445 for (i = 0; i < ARRAY_SIZE(regs); i++) { 446 for (i = 0; i < ARRAY_SIZE(regs); i++) {
446 printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]); 447 pr_info("... APIC 0x%x %s: ", apicid, names[i]);
447 448
448 /* 449 /*
449 * Wait for idle. 450 * Wait for idle.
450 */ 451 */
451 status = safe_apic_wait_icr_idle(); 452 status = safe_apic_wait_icr_idle();
452 if (status) 453 if (status)
453 printk(KERN_CONT 454 pr_cont("a previous APIC delivery may have failed\n");
454 "a previous APIC delivery may have failed\n");
455 455
456 apic_icr_write(APIC_DM_REMRD | regs[i], apicid); 456 apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
457 457
@@ -464,10 +464,10 @@ void __inquire_remote_apic(int apicid)
464 switch (status) { 464 switch (status) {
465 case APIC_ICR_RR_VALID: 465 case APIC_ICR_RR_VALID:
466 status = apic_read(APIC_RRR); 466 status = apic_read(APIC_RRR);
467 printk(KERN_CONT "%08x\n", status); 467 pr_cont("%08x\n", status);
468 break; 468 break;
469 default: 469 default:
470 printk(KERN_CONT "failed\n"); 470 pr_cont("failed\n");
471 } 471 }
472 } 472 }
473} 473}
@@ -501,12 +501,12 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
501 apic_write(APIC_ESR, 0); 501 apic_write(APIC_ESR, 0);
502 accept_status = (apic_read(APIC_ESR) & 0xEF); 502 accept_status = (apic_read(APIC_ESR) & 0xEF);
503 } 503 }
504 pr_debug("NMI sent.\n"); 504 pr_debug("NMI sent\n");
505 505
506 if (send_status) 506 if (send_status)
507 printk(KERN_ERR "APIC never delivered???\n"); 507 pr_err("APIC never delivered???\n");
508 if (accept_status) 508 if (accept_status)
509 printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); 509 pr_err("APIC delivery error (%lx)\n", accept_status);
510 510
511 return (send_status | accept_status); 511 return (send_status | accept_status);
512} 512}
@@ -528,7 +528,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
528 apic_read(APIC_ESR); 528 apic_read(APIC_ESR);
529 } 529 }
530 530
531 pr_debug("Asserting INIT.\n"); 531 pr_debug("Asserting INIT\n");
532 532
533 /* 533 /*
534 * Turn INIT on target chip 534 * Turn INIT on target chip
@@ -544,7 +544,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
544 544
545 mdelay(10); 545 mdelay(10);
546 546
547 pr_debug("Deasserting INIT.\n"); 547 pr_debug("Deasserting INIT\n");
548 548
549 /* Target chip */ 549 /* Target chip */
550 /* Send IPI */ 550 /* Send IPI */
@@ -577,14 +577,14 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
577 /* 577 /*
578 * Run STARTUP IPI loop. 578 * Run STARTUP IPI loop.
579 */ 579 */
580 pr_debug("#startup loops: %d.\n", num_starts); 580 pr_debug("#startup loops: %d\n", num_starts);
581 581
582 for (j = 1; j <= num_starts; j++) { 582 for (j = 1; j <= num_starts; j++) {
583 pr_debug("Sending STARTUP #%d.\n", j); 583 pr_debug("Sending STARTUP #%d\n", j);
584 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 584 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
585 apic_write(APIC_ESR, 0); 585 apic_write(APIC_ESR, 0);
586 apic_read(APIC_ESR); 586 apic_read(APIC_ESR);
587 pr_debug("After apic_write.\n"); 587 pr_debug("After apic_write\n");
588 588
589 /* 589 /*
590 * STARTUP IPI 590 * STARTUP IPI
@@ -601,7 +601,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
601 */ 601 */
602 udelay(300); 602 udelay(300);
603 603
604 pr_debug("Startup point 1.\n"); 604 pr_debug("Startup point 1\n");
605 605
606 pr_debug("Waiting for send to finish...\n"); 606 pr_debug("Waiting for send to finish...\n");
607 send_status = safe_apic_wait_icr_idle(); 607 send_status = safe_apic_wait_icr_idle();
@@ -616,12 +616,12 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
616 if (send_status || accept_status) 616 if (send_status || accept_status)
617 break; 617 break;
618 } 618 }
619 pr_debug("After Startup.\n"); 619 pr_debug("After Startup\n");
620 620
621 if (send_status) 621 if (send_status)
622 printk(KERN_ERR "APIC never delivered???\n"); 622 pr_err("APIC never delivered???\n");
623 if (accept_status) 623 if (accept_status)
624 printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); 624 pr_err("APIC delivery error (%lx)\n", accept_status);
625 625
626 return (send_status | accept_status); 626 return (send_status | accept_status);
627} 627}
@@ -635,11 +635,11 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
635 if (system_state == SYSTEM_BOOTING) { 635 if (system_state == SYSTEM_BOOTING) {
636 if (node != current_node) { 636 if (node != current_node) {
637 if (current_node > (-1)) 637 if (current_node > (-1))
638 pr_cont(" Ok.\n"); 638 pr_cont(" OK\n");
639 current_node = node; 639 current_node = node;
640 pr_info("Booting Node %3d, Processors ", node); 640 pr_info("Booting Node %3d, Processors ", node);
641 } 641 }
642 pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : ""); 642 pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " OK\n" : "");
643 return; 643 return;
644 } else 644 } else
645 pr_info("Booting Node %d Processor %d APIC 0x%x\n", 645 pr_info("Booting Node %d Processor %d APIC 0x%x\n",
@@ -719,9 +719,9 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
719 /* 719 /*
720 * allow APs to start initializing. 720 * allow APs to start initializing.
721 */ 721 */
722 pr_debug("Before Callout %d.\n", cpu); 722 pr_debug("Before Callout %d\n", cpu);
723 cpumask_set_cpu(cpu, cpu_callout_mask); 723 cpumask_set_cpu(cpu, cpu_callout_mask);
724 pr_debug("After Callout %d.\n", cpu); 724 pr_debug("After Callout %d\n", cpu);
725 725
726 /* 726 /*
727 * Wait 5s total for a response 727 * Wait 5s total for a response
@@ -749,7 +749,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
749 pr_err("CPU%d: Stuck ??\n", cpu); 749 pr_err("CPU%d: Stuck ??\n", cpu);
750 else 750 else
751 /* trampoline code not run */ 751 /* trampoline code not run */
752 pr_err("CPU%d: Not responding.\n", cpu); 752 pr_err("CPU%d: Not responding\n", cpu);
753 if (apic->inquire_remote_apic) 753 if (apic->inquire_remote_apic)
754 apic->inquire_remote_apic(apicid); 754 apic->inquire_remote_apic(apicid);
755 } 755 }
@@ -794,7 +794,7 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
794 if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || 794 if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
795 !physid_isset(apicid, phys_cpu_present_map) || 795 !physid_isset(apicid, phys_cpu_present_map) ||
796 !apic->apic_id_valid(apicid)) { 796 !apic->apic_id_valid(apicid)) {
797 printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu); 797 pr_err("%s: bad cpu %d\n", __func__, cpu);
798 return -EINVAL; 798 return -EINVAL;
799 } 799 }
800 800
@@ -875,9 +875,8 @@ static int __init smp_sanity_check(unsigned max_cpus)
875 unsigned int cpu; 875 unsigned int cpu;
876 unsigned nr; 876 unsigned nr;
877 877
878 printk(KERN_WARNING 878 pr_warn("More than 8 CPUs detected - skipping them\n"
879 "More than 8 CPUs detected - skipping them.\n" 879 "Use CONFIG_X86_BIGSMP\n");
880 "Use CONFIG_X86_BIGSMP.\n");
881 880
882 nr = 0; 881 nr = 0;
883 for_each_present_cpu(cpu) { 882 for_each_present_cpu(cpu) {
@@ -898,8 +897,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
898#endif 897#endif
899 898
900 if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { 899 if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
901 printk(KERN_WARNING 900 pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
902 "weird, boot CPU (#%d) not listed by the BIOS.\n",
903 hard_smp_processor_id()); 901 hard_smp_processor_id());
904 902
905 physid_set(hard_smp_processor_id(), phys_cpu_present_map); 903 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
@@ -911,11 +909,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
911 */ 909 */
912 if (!smp_found_config && !acpi_lapic) { 910 if (!smp_found_config && !acpi_lapic) {
913 preempt_enable(); 911 preempt_enable();
914 printk(KERN_NOTICE "SMP motherboard not detected.\n"); 912 pr_notice("SMP motherboard not detected\n");
915 disable_smp(); 913 disable_smp();
916 if (APIC_init_uniprocessor()) 914 if (APIC_init_uniprocessor())
917 printk(KERN_NOTICE "Local APIC not detected." 915 pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
918 " Using dummy APIC emulation.\n");
919 return -1; 916 return -1;
920 } 917 }
921 918
@@ -924,9 +921,8 @@ static int __init smp_sanity_check(unsigned max_cpus)
924 * CPU too, but we do it for the sake of robustness anyway. 921 * CPU too, but we do it for the sake of robustness anyway.
925 */ 922 */
926 if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) { 923 if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
927 printk(KERN_NOTICE 924 pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
928 "weird, boot CPU (#%d) not listed by the BIOS.\n", 925 boot_cpu_physical_apicid);
929 boot_cpu_physical_apicid);
930 physid_set(hard_smp_processor_id(), phys_cpu_present_map); 926 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
931 } 927 }
932 preempt_enable(); 928 preempt_enable();
@@ -939,8 +935,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
939 if (!disable_apic) { 935 if (!disable_apic) {
940 pr_err("BIOS bug, local APIC #%d not detected!...\n", 936 pr_err("BIOS bug, local APIC #%d not detected!...\n",
941 boot_cpu_physical_apicid); 937 boot_cpu_physical_apicid);
942 pr_err("... forcing use of dummy APIC emulation." 938 pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
943 "(tell your hw vendor)\n");
944 } 939 }
945 smpboot_clear_io_apic(); 940 smpboot_clear_io_apic();
946 disable_ioapic_support(); 941 disable_ioapic_support();
@@ -953,7 +948,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
953 * If SMP should be disabled, then really disable it! 948 * If SMP should be disabled, then really disable it!
954 */ 949 */
955 if (!max_cpus) { 950 if (!max_cpus) {
956 printk(KERN_INFO "SMP mode deactivated.\n"); 951 pr_info("SMP mode deactivated\n");
957 smpboot_clear_io_apic(); 952 smpboot_clear_io_apic();
958 953
959 connect_bsp_APIC(); 954 connect_bsp_APIC();
@@ -1005,7 +1000,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1005 1000
1006 1001
1007 if (smp_sanity_check(max_cpus) < 0) { 1002 if (smp_sanity_check(max_cpus) < 0) {
1008 printk(KERN_INFO "SMP disabled\n"); 1003 pr_info("SMP disabled\n");
1009 disable_smp(); 1004 disable_smp();
1010 goto out; 1005 goto out;
1011 } 1006 }
@@ -1043,7 +1038,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1043 * Set up local APIC timer on boot CPU. 1038 * Set up local APIC timer on boot CPU.
1044 */ 1039 */
1045 1040
1046 printk(KERN_INFO "CPU%d: ", 0); 1041 pr_info("CPU%d: ", 0);
1047 print_cpu_info(&cpu_data(0)); 1042 print_cpu_info(&cpu_data(0));
1048 x86_init.timers.setup_percpu_clockev(); 1043 x86_init.timers.setup_percpu_clockev();
1049 1044
@@ -1093,7 +1088,7 @@ void __init native_smp_prepare_boot_cpu(void)
1093 1088
1094void __init native_smp_cpus_done(unsigned int max_cpus) 1089void __init native_smp_cpus_done(unsigned int max_cpus)
1095{ 1090{
1096 pr_debug("Boot done.\n"); 1091 pr_debug("Boot done\n");
1097 1092
1098 nmi_selftest(); 1093 nmi_selftest();
1099 impress_friends(); 1094 impress_friends();
@@ -1154,8 +1149,7 @@ __init void prefill_possible_map(void)
1154 1149
1155 /* nr_cpu_ids could be reduced via nr_cpus= */ 1150 /* nr_cpu_ids could be reduced via nr_cpus= */
1156 if (possible > nr_cpu_ids) { 1151 if (possible > nr_cpu_ids) {
1157 printk(KERN_WARNING 1152 pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
1158 "%d Processors exceeds NR_CPUS limit of %d\n",
1159 possible, nr_cpu_ids); 1153 possible, nr_cpu_ids);
1160 possible = nr_cpu_ids; 1154 possible = nr_cpu_ids;
1161 } 1155 }
@@ -1164,13 +1158,12 @@ __init void prefill_possible_map(void)
1164 if (!setup_max_cpus) 1158 if (!setup_max_cpus)
1165#endif 1159#endif
1166 if (possible > i) { 1160 if (possible > i) {
1167 printk(KERN_WARNING 1161 pr_warn("%d Processors exceeds max_cpus limit of %u\n",
1168 "%d Processors exceeds max_cpus limit of %u\n",
1169 possible, setup_max_cpus); 1162 possible, setup_max_cpus);
1170 possible = i; 1163 possible = i;
1171 } 1164 }
1172 1165
1173 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 1166 pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
1174 possible, max_t(int, possible - num_processors, 0)); 1167 possible, max_t(int, possible - num_processors, 0));
1175 1168
1176 for (i = 0; i < possible; i++) 1169 for (i = 0; i < possible; i++)
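Many of the smpboot.c changes only touch pr_debug() strings, which behave differently from the other levels: unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled, pr_debug() compiles down to a no-op, so those messages cost nothing in a production build. A hedged sketch (boot_secondary_sketch() is an invented name):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

static void boot_secondary_sketch(int cpu, bool responded)
{
	pr_debug("Before Callout %d\n", cpu);	/* compiled out unless DEBUG/dyndbg */
	/* ... kick the AP and wait for it to check in ... */
	pr_debug("After Callout %d\n", cpu);

	if (!responded)
		pr_err("CPU%d: Not responding\n", cpu);	/* always compiled in */
}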
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 05b31d92f69c..b481341c9369 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -9,6 +9,9 @@
9/* 9/*
10 * Handle hardware traps and faults. 10 * Handle hardware traps and faults.
11 */ 11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
12#include <linux/interrupt.h> 15#include <linux/interrupt.h>
13#include <linux/kallsyms.h> 16#include <linux/kallsyms.h>
14#include <linux/spinlock.h> 17#include <linux/spinlock.h>
@@ -143,12 +146,11 @@ trap_signal:
143#ifdef CONFIG_X86_64 146#ifdef CONFIG_X86_64
144 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 147 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
145 printk_ratelimit()) { 148 printk_ratelimit()) {
146 printk(KERN_INFO 149 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
147 "%s[%d] trap %s ip:%lx sp:%lx error:%lx", 150 tsk->comm, tsk->pid, str,
148 tsk->comm, tsk->pid, str, 151 regs->ip, regs->sp, error_code);
149 regs->ip, regs->sp, error_code);
150 print_vma_addr(" in ", regs->ip); 152 print_vma_addr(" in ", regs->ip);
151 printk("\n"); 153 pr_cont("\n");
152 } 154 }
153#endif 155#endif
154 156
@@ -269,12 +271,11 @@ do_general_protection(struct pt_regs *regs, long error_code)
269 271
270 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 272 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
271 printk_ratelimit()) { 273 printk_ratelimit()) {
272 printk(KERN_INFO 274 pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
273 "%s[%d] general protection ip:%lx sp:%lx error:%lx",
274 tsk->comm, task_pid_nr(tsk), 275 tsk->comm, task_pid_nr(tsk),
275 regs->ip, regs->sp, error_code); 276 regs->ip, regs->sp, error_code);
276 print_vma_addr(" in ", regs->ip); 277 print_vma_addr(" in ", regs->ip);
277 printk("\n"); 278 pr_cont("\n");
278 } 279 }
279 280
280 force_sig(SIGSEGV, tsk); 281 force_sig(SIGSEGV, tsk);
@@ -570,7 +571,7 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
570 conditional_sti(regs); 571 conditional_sti(regs);
571#if 0 572#if 0
572 /* No need to warn about this any longer. */ 573 /* No need to warn about this any longer. */
573 printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); 574 pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
574#endif 575#endif
575} 576}
576 577
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index fc0a147e3727..cfa5d4f7ca56 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/kernel.h> 3#include <linux/kernel.h>
2#include <linux/sched.h> 4#include <linux/sched.h>
3#include <linux/init.h> 5#include <linux/init.h>
@@ -84,8 +86,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
84#ifdef CONFIG_X86_TSC 86#ifdef CONFIG_X86_TSC
85int __init notsc_setup(char *str) 87int __init notsc_setup(char *str)
86{ 88{
87 printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, " 89 pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
88 "cannot disable TSC completely.\n");
89 tsc_disabled = 1; 90 tsc_disabled = 1;
90 return 1; 91 return 1;
91} 92}
@@ -373,7 +374,7 @@ static unsigned long quick_pit_calibrate(void)
373 goto success; 374 goto success;
374 } 375 }
375 } 376 }
376 printk("Fast TSC calibration failed\n"); 377 pr_err("Fast TSC calibration failed\n");
377 return 0; 378 return 0;
378 379
379success: 380success:
@@ -392,7 +393,7 @@ success:
392 */ 393 */
393 delta *= PIT_TICK_RATE; 394 delta *= PIT_TICK_RATE;
394 do_div(delta, i*256*1000); 395 do_div(delta, i*256*1000);
395 printk("Fast TSC calibration using PIT\n"); 396 pr_info("Fast TSC calibration using PIT\n");
396 return delta; 397 return delta;
397} 398}
398 399
@@ -487,9 +488,8 @@ unsigned long native_calibrate_tsc(void)
487 * use the reference value, as it is more precise. 488 * use the reference value, as it is more precise.
488 */ 489 */
489 if (delta >= 90 && delta <= 110) { 490 if (delta >= 90 && delta <= 110) {
490 printk(KERN_INFO 491 pr_info("PIT calibration matches %s. %d loops\n",
491 "TSC: PIT calibration matches %s. %d loops\n", 492 hpet ? "HPET" : "PMTIMER", i + 1);
492 hpet ? "HPET" : "PMTIMER", i + 1);
493 return tsc_ref_min; 493 return tsc_ref_min;
494 } 494 }
495 495
@@ -511,38 +511,36 @@ unsigned long native_calibrate_tsc(void)
511 */ 511 */
512 if (tsc_pit_min == ULONG_MAX) { 512 if (tsc_pit_min == ULONG_MAX) {
513 /* PIT gave no useful value */ 513 /* PIT gave no useful value */
514 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); 514 pr_warn("Unable to calibrate against PIT\n");
515 515
516 /* We don't have an alternative source, disable TSC */ 516 /* We don't have an alternative source, disable TSC */
517 if (!hpet && !ref1 && !ref2) { 517 if (!hpet && !ref1 && !ref2) {
518 printk("TSC: No reference (HPET/PMTIMER) available\n"); 518 pr_notice("No reference (HPET/PMTIMER) available\n");
519 return 0; 519 return 0;
520 } 520 }
521 521
522 /* The alternative source failed as well, disable TSC */ 522 /* The alternative source failed as well, disable TSC */
523 if (tsc_ref_min == ULONG_MAX) { 523 if (tsc_ref_min == ULONG_MAX) {
524 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration " 524 pr_warn("HPET/PMTIMER calibration failed\n");
525 "failed.\n");
526 return 0; 525 return 0;
527 } 526 }
528 527
529 /* Use the alternative source */ 528 /* Use the alternative source */
530 printk(KERN_INFO "TSC: using %s reference calibration\n", 529 pr_info("using %s reference calibration\n",
531 hpet ? "HPET" : "PMTIMER"); 530 hpet ? "HPET" : "PMTIMER");
532 531
533 return tsc_ref_min; 532 return tsc_ref_min;
534 } 533 }
535 534
536 /* We don't have an alternative source, use the PIT calibration value */ 535 /* We don't have an alternative source, use the PIT calibration value */
537 if (!hpet && !ref1 && !ref2) { 536 if (!hpet && !ref1 && !ref2) {
538 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 537 pr_info("Using PIT calibration value\n");
539 return tsc_pit_min; 538 return tsc_pit_min;
540 } 539 }
541 540
542 /* The alternative source failed, use the PIT calibration value */ 541 /* The alternative source failed, use the PIT calibration value */
543 if (tsc_ref_min == ULONG_MAX) { 542 if (tsc_ref_min == ULONG_MAX) {
544 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. " 543 pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
545 "Using PIT calibration\n");
546 return tsc_pit_min; 544 return tsc_pit_min;
547 } 545 }
548 546
@@ -551,9 +549,9 @@ unsigned long native_calibrate_tsc(void)
551 * the PIT value as we know that there are PMTIMERs around 549 * the PIT value as we know that there are PMTIMERs around
552 * running at double speed. At least we let the user know: 550 * running at double speed. At least we let the user know:
553 */ 551 */
554 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n", 552 pr_warn("PIT calibration deviates from %s: %lu %lu\n",
555 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min); 553 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
556 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 554 pr_info("Using PIT calibration value\n");
557 return tsc_pit_min; 555 return tsc_pit_min;
558} 556}
559 557
@@ -785,7 +783,7 @@ void mark_tsc_unstable(char *reason)
785 tsc_unstable = 1; 783 tsc_unstable = 1;
786 sched_clock_stable = 0; 784 sched_clock_stable = 0;
787 disable_sched_clock_irqtime(); 785 disable_sched_clock_irqtime();
788 printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); 786 pr_info("Marking TSC unstable due to %s\n", reason);
789 /* Change only the rating, when not registered */ 787 /* Change only the rating, when not registered */
790 if (clocksource_tsc.mult) 788 if (clocksource_tsc.mult)
791 clocksource_mark_unstable(&clocksource_tsc); 789 clocksource_mark_unstable(&clocksource_tsc);
@@ -912,9 +910,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
912 goto out; 910 goto out;
913 911
914 tsc_khz = freq; 912 tsc_khz = freq;
915 printk(KERN_INFO "Refined TSC clocksource calibration: " 913 pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
916 "%lu.%03lu MHz.\n", (unsigned long)tsc_khz / 1000, 914 (unsigned long)tsc_khz / 1000,
917 (unsigned long)tsc_khz % 1000); 915 (unsigned long)tsc_khz % 1000);
918 916
919out: 917out:
920 clocksource_register_khz(&clocksource_tsc, tsc_khz); 918 clocksource_register_khz(&clocksource_tsc, tsc_khz);
@@ -970,9 +968,9 @@ void __init tsc_init(void)
970 return; 968 return;
971 } 969 }
972 970
973 printk("Detected %lu.%03lu MHz processor.\n", 971 pr_info("Detected %lu.%03lu MHz processor\n",
974 (unsigned long)cpu_khz / 1000, 972 (unsigned long)cpu_khz / 1000,
975 (unsigned long)cpu_khz % 1000); 973 (unsigned long)cpu_khz % 1000);
976 974
977 /* 975 /*
978 * Secondary CPUs do not run through tsc_init(), so set up 976 * Secondary CPUs do not run through tsc_init(), so set up
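The tsc.c hunks drop the hand-written "TSC: " prefixes and let pr_fmt() supply one instead, so the emitted text changes slightly: the prefix presumably becomes the object name (e.g. "tsc: ") rather than the old upper-case "TSC: ". A hedged before/after sketch:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* presumably "tsc: " for tsc.o */

#include <linux/kernel.h>
#include <linux/printk.h>

static void calibration_fallback_sketch(void)
{
	/* Before: printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); */
	/* After: the prefix comes from pr_fmt(), the message text is unchanged.  */
	pr_warn("Unable to calibrate against PIT\n");
}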
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 255f58ae71e8..54abcc0baf23 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -28,6 +28,8 @@
28 * 28 *
29 */ 29 */
30 30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
31#include <linux/capability.h> 33#include <linux/capability.h>
32#include <linux/errno.h> 34#include <linux/errno.h>
33#include <linux/interrupt.h> 35#include <linux/interrupt.h>
@@ -137,14 +139,14 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
137 local_irq_enable(); 139 local_irq_enable();
138 140
139 if (!current->thread.vm86_info) { 141 if (!current->thread.vm86_info) {
140 printk("no vm86_info: BAD\n"); 142 pr_alert("no vm86_info: BAD\n");
141 do_exit(SIGSEGV); 143 do_exit(SIGSEGV);
142 } 144 }
143 set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); 145 set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
144 tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs); 146 tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
145 tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap); 147 tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
146 if (tmp) { 148 if (tmp) {
147 printk("vm86: could not access userspace vm86_info\n"); 149 pr_alert("could not access userspace vm86_info\n");
148 do_exit(SIGSEGV); 150 do_exit(SIGSEGV);
149 } 151 }
150 152
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 7515cf0e1805..acdc125ad44e 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -18,6 +18,8 @@
18 * use the vDSO. 18 * use the vDSO.
19 */ 19 */
20 20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
21#include <linux/time.h> 23#include <linux/time.h>
22#include <linux/init.h> 24#include <linux/init.h>
23#include <linux/kernel.h> 25#include <linux/kernel.h>
@@ -111,18 +113,13 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
111static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, 113static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
112 const char *message) 114 const char *message)
113{ 115{
114 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); 116 if (!show_unhandled_signals)
115 struct task_struct *tsk;
116
117 if (!show_unhandled_signals || !__ratelimit(&rs))
118 return; 117 return;
119 118
120 tsk = current; 119 pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
121 120 level, current->comm, task_pid_nr(current),
122 printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", 121 message, regs->ip, regs->cs,
123 level, tsk->comm, task_pid_nr(tsk), 122 regs->sp, regs->ax, regs->si, regs->di);
124 message, regs->ip, regs->cs,
125 regs->sp, regs->ax, regs->si, regs->di);
126} 123}
127 124
128static int addr_to_vsyscall_nr(unsigned long addr) 125static int addr_to_vsyscall_nr(unsigned long addr)
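The vsyscall hunk replaces an open-coded DEFINE_RATELIMIT_STATE()/__ratelimit() pair with pr_notice_ratelimited(), which carries its own static rate-limit state. A hedged sketch of the two equivalent forms, with an invented message rather than the real one:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>

static void open_coded_form(void)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&rs))
		printk(KERN_NOTICE "emulating a vsyscall\n");
}

static void helper_form(void)
{
	/* Same default interval/burst, no local state to declare. */
	pr_notice_ratelimited("emulating a vsyscall\n");
}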
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index bd18149b2b0f..3d3e20709119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -3,6 +3,9 @@
3 * 3 *
4 * Author: Suresh Siddha <suresh.b.siddha@intel.com> 4 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
5 */ 5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
6#include <linux/bootmem.h> 9#include <linux/bootmem.h>
7#include <linux/compat.h> 10#include <linux/compat.h>
8#include <asm/i387.h> 11#include <asm/i387.h>
@@ -162,7 +165,7 @@ int save_i387_xstate(void __user *buf)
162 BUG_ON(sig_xstate_size < xstate_size); 165 BUG_ON(sig_xstate_size < xstate_size);
163 166
164 if ((unsigned long)buf % 64) 167 if ((unsigned long)buf % 64)
165 printk("save_i387_xstate: bad fpstate %p\n", buf); 168 pr_err("%s: bad fpstate %p\n", __func__, buf);
166 169
167 if (!used_math()) 170 if (!used_math())
168 return 0; 171 return 0;
@@ -422,7 +425,7 @@ static void __init xstate_enable_boot_cpu(void)
422 pcntxt_mask = eax + ((u64)edx << 32); 425 pcntxt_mask = eax + ((u64)edx << 32);
423 426
424 if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) { 427 if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
425 printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n", 428 pr_err("FP/SSE not shown under xsave features 0x%llx\n",
426 pcntxt_mask); 429 pcntxt_mask);
427 BUG(); 430 BUG();
428 } 431 }
@@ -445,9 +448,8 @@ static void __init xstate_enable_boot_cpu(void)
445 448
446 setup_xstate_init(); 449 setup_xstate_init();
447 450
448 printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, " 451 pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
449 "cntxt size 0x%x\n", 452 pcntxt_mask, xstate_size);
450 pcntxt_mask, xstate_size);
451} 453}
452 454
453/* 455/*
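Finally, the xsave.c hunk swaps a hard-coded function name in the message for "%s: " with __func__, so the text stays correct if the function is ever renamed. A small hedged sketch (save_state_sketch() is an invented name):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

static int save_state_sketch(void __user *buf)
{
	if ((unsigned long)buf % 64)
		pr_err("%s: bad fpstate %p\n", __func__, buf);	/* tracks renames */
	return 0;
}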