-rw-r--r--  arch/x86/include/asm/mce.h                 |  21
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-internal.h  |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c  |   4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c           | 209
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c     |   8
-rw-r--r--  arch/x86/lguest/boot.c                     |   2
-rw-r--r--  drivers/base/core.c                        |  21
-rw-r--r--  include/linux/device.h                     |   7
8 files changed, 159 insertions(+), 115 deletions(-)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 54d73b1f00a0..d90c2fccc30c 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -119,6 +119,23 @@ struct mce_log {
 #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
 
 #ifdef __KERNEL__
+
+struct mca_config {
+        bool dont_log_ce;
+        bool cmci_disabled;
+        bool ignore_ce;
+        bool disabled;
+        bool ser;
+        bool bios_cmci_threshold;
+        u8 banks;
+        s8 bootlog;
+        int tolerant;
+        int monarch_timeout;
+        int panic_timeout;
+        u32 rip_msr;
+};
+
+extern struct mca_config mca_cfg;
 extern void mce_register_decode_chain(struct notifier_block *nb);
 extern void mce_unregister_decode_chain(struct notifier_block *nb);
 
@@ -126,7 +143,6 @@ extern void mce_unregister_decode_chain(struct notifier_block *nb);
 #include <linux/init.h>
 #include <linux/atomic.h>
 
-extern int mce_disabled;
 extern int mce_p5_enabled;
 
 #ifdef CONFIG_X86_MCE
@@ -159,9 +175,6 @@ DECLARE_PER_CPU(struct device *, mce_device);
 #define MAX_NR_BANKS 32
 
 #ifdef CONFIG_X86_MCE_INTEL
-extern int mce_cmci_disabled;
-extern int mce_ignore_ce;
-extern int mce_bios_cmci_threshold;
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
 void cmci_clear(void);
 void cmci_reenable(void);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 6a05c1d327a9..5b7d4fa5d3b7 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -24,8 +24,6 @@ struct mce_bank {
 int mce_severity(struct mce *a, int tolerant, char **msg);
 struct dentry *mce_get_debugfs_dir(void);
 
-extern int mce_ser;
-
 extern struct mce_bank *mce_banks;
 
 #ifdef CONFIG_X86_MCE_INTEL
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 13017626f9a8..beb1f1689e52 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -193,9 +193,9 @@ int mce_severity(struct mce *m, int tolerant, char **msg)
                         continue;
                 if ((m->mcgstatus & s->mcgmask) != s->mcgres)
                         continue;
-                if (s->ser == SER_REQUIRED && !mce_ser)
+                if (s->ser == SER_REQUIRED && !mca_cfg.ser)
                         continue;
-                if (s->ser == NO_SER && mce_ser)
+                if (s->ser == NO_SER && mca_cfg.ser)
                         continue;
                 if (s->context && ctx != s->context)
                         continue;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 46cbf8689692..80dbda84f1c3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -58,34 +58,26 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
-int mce_disabled __read_mostly;
-
 #define SPINUNIT 100    /* 100ns */
 
 atomic_t mce_entry;
 
 DEFINE_PER_CPU(unsigned, mce_exception_count);
 
-/*
- * Tolerant levels:
- * 0: always panic on uncorrected errors, log corrected errors
- * 1: panic or SIGBUS on uncorrected errors, log corrected errors
- * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
- * 3: never panic or SIGBUS, log all errors (for testing only)
- */
-static int tolerant __read_mostly = 1;
-static int banks __read_mostly;
-static int rip_msr __read_mostly;
-static int mce_bootlog __read_mostly = -1;
-static int monarch_timeout __read_mostly = -1;
-static int mce_panic_timeout __read_mostly;
-static int mce_dont_log_ce __read_mostly;
-int mce_cmci_disabled __read_mostly;
-int mce_ignore_ce __read_mostly;
-int mce_ser __read_mostly;
-int mce_bios_cmci_threshold __read_mostly;
-
-struct mce_bank *mce_banks __read_mostly;
+struct mce_bank *mce_banks __read_mostly;
+
+struct mca_config mca_cfg __read_mostly = {
+        .bootlog = -1,
+        /*
+         * Tolerant levels:
+         * 0: always panic on uncorrected errors, log corrected errors
+         * 1: panic or SIGBUS on uncorrected errors, log corrected errors
+         * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
+         * 3: never panic or SIGBUS, log all errors (for testing only)
+         */
+        .tolerant = 1,
+        .monarch_timeout = -1
+};
 
 /* User mode helper program triggered by machine check event */
 static unsigned long mce_need_notify;
@@ -302,7 +294,7 @@ static void wait_for_panic(void)
         while (timeout-- > 0)
                 udelay(1);
         if (panic_timeout == 0)
-                panic_timeout = mce_panic_timeout;
+                panic_timeout = mca_cfg.panic_timeout;
         panic("Panicing machine check CPU died");
 }
 
@@ -360,7 +352,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
                 pr_emerg(HW_ERR "Machine check: %s\n", exp);
         if (!fake_panic) {
                 if (panic_timeout == 0)
-                        panic_timeout = mce_panic_timeout;
+                        panic_timeout = mca_cfg.panic_timeout;
                 panic(msg);
         } else
                 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
@@ -372,7 +364,7 @@ static int msr_to_offset(u32 msr)
 {
         unsigned bank = __this_cpu_read(injectm.bank);
 
-        if (msr == rip_msr)
+        if (msr == mca_cfg.rip_msr)
                 return offsetof(struct mce, ip);
         if (msr == MSR_IA32_MCx_STATUS(bank))
                 return offsetof(struct mce, status);
@@ -451,8 +443,8 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
                         m->cs |= 3;
                 }
                 /* Use accurate RIP reporting if available. */
-                if (rip_msr)
-                        m->ip = mce_rdmsrl(rip_msr);
+                if (mca_cfg.rip_msr)
+                        m->ip = mce_rdmsrl(mca_cfg.rip_msr);
         }
 }
 
@@ -513,7 +505,7 @@ static int mce_ring_add(unsigned long pfn)
 
 int mce_available(struct cpuinfo_x86 *c)
 {
-        if (mce_disabled)
+        if (mca_cfg.disabled)
                 return 0;
         return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 }
@@ -565,7 +557,7 @@ static void mce_read_aux(struct mce *m, int i)
                 /*
                  * Mask the reported address by the reported granularity.
                  */
-                if (mce_ser && (m->status & MCI_STATUS_MISCV)) {
+                if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
                         u8 shift = MCI_MISC_ADDR_LSB(m->misc);
                         m->addr >>= shift;
                         m->addr <<= shift;
@@ -599,7 +591,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 
         mce_gather_info(&m, NULL);
 
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < mca_cfg.banks; i++) {
                 if (!mce_banks[i].ctl || !test_bit(i, *b))
                         continue;
 
@@ -620,7 +612,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
                  * TBD do the same check for MCI_STATUS_EN here?
                  */
                 if (!(flags & MCP_UC) &&
-                    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
+                    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
                         continue;
 
                 mce_read_aux(&m, i);
@@ -631,7 +623,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
                  * Don't get the IP here because it's unlikely to
                  * have anything to do with the actual error location.
                  */
-                if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce)
+                if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
                         mce_log(&m);
 
                 /*
@@ -658,14 +650,14 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 {
         int i, ret = 0;
 
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < mca_cfg.banks; i++) {
                 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
                 if (m->status & MCI_STATUS_VAL) {
                         __set_bit(i, validp);
                         if (quirk_no_way_out)
                                 quirk_no_way_out(i, m, regs);
                 }
-                if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
+                if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY)
                         ret = 1;
         }
         return ret;
@@ -696,11 +688,11 @@ static int mce_timed_out(u64 *t)
         rmb();
         if (atomic_read(&mce_paniced))
                 wait_for_panic();
-        if (!monarch_timeout)
+        if (!mca_cfg.monarch_timeout)
                 goto out;
         if ((s64)*t < SPINUNIT) {
                 /* CHECKME: Make panic default for 1 too? */
-                if (tolerant < 1)
+                if (mca_cfg.tolerant < 1)
                         mce_panic("Timeout synchronizing machine check over CPUs",
                                   NULL, NULL);
                 cpu_missing = 1;
@@ -750,7 +742,8 @@ static void mce_reign(void)
          * Grade the severity of the errors of all the CPUs.
          */
         for_each_possible_cpu(cpu) {
-                int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
+                int severity = mce_severity(&per_cpu(mces_seen, cpu),
+                                            mca_cfg.tolerant,
                                             &nmsg);
                 if (severity > global_worst) {
                         msg = nmsg;
@@ -764,7 +757,7 @@ static void mce_reign(void)
          * This dumps all the mces in the log buffer and stops the
          * other CPUs.
          */
-        if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
+        if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
                 mce_panic("Fatal Machine check", m, msg);
 
         /*
@@ -777,7 +770,7 @@ static void mce_reign(void)
          * No machine check event found. Must be some external
          * source or one CPU is hung. Panic.
          */
-        if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
+        if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
                 mce_panic("Machine check from unknown source", NULL, NULL);
 
         /*
@@ -801,7 +794,7 @@ static int mce_start(int *no_way_out)
 {
         int order;
         int cpus = num_online_cpus();
-        u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
+        u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
 
         if (!timeout)
                 return -1;
@@ -865,7 +858,7 @@ static int mce_start(int *no_way_out)
 static int mce_end(int order)
 {
         int ret = -1;
-        u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
+        u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
 
         if (!timeout)
                 goto reset;
@@ -946,7 +939,7 @@ static void mce_clear_state(unsigned long *toclear)
 {
         int i;
 
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < mca_cfg.banks; i++) {
                 if (test_bit(i, toclear))
                         mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
         }
@@ -1011,6 +1004,7 @@ static void mce_clear_info(struct mce_info *mi)
  */
 void do_machine_check(struct pt_regs *regs, long error_code)
 {
+        struct mca_config *cfg = &mca_cfg;
         struct mce m, *final;
         int i;
         int worst = 0;
@@ -1022,7 +1016,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
         int order;
         /*
          * If no_way_out gets set, there is no safe way to recover from this
-         * MCE. If tolerant is cranked up, we'll try anyway.
+         * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
          */
         int no_way_out = 0;
         /*
@@ -1038,7 +1032,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
         this_cpu_inc(mce_exception_count);
 
-        if (!banks)
+        if (!cfg->banks)
                 goto out;
 
         mce_gather_info(&m, regs);
@@ -1065,7 +1059,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
          * because the first one to see it will clear it.
          */
         order = mce_start(&no_way_out);
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < cfg->banks; i++) {
                 __clear_bit(i, toclear);
                 if (!test_bit(i, valid_banks))
                         continue;
@@ -1084,7 +1078,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                  * Non uncorrected or non signaled errors are handled by
                  * machine_check_poll. Leave them alone, unless this panics.
                  */
-                if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
+                if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
                         !no_way_out)
                         continue;
 
@@ -1093,7 +1087,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                  */
                 add_taint(TAINT_MACHINE_CHECK);
 
-                severity = mce_severity(&m, tolerant, NULL);
+                severity = mce_severity(&m, cfg->tolerant, NULL);
 
                 /*
                  * When machine check was for corrected handler don't touch,
@@ -1117,7 +1111,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                  * When the ring overflows we just ignore the AO error.
                  * RED-PEN add some logging mechanism when
                  * usable_address or mce_add_ring fails.
-                 * RED-PEN don't ignore overflow for tolerant == 0
+                 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
                  */
                 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
                         mce_ring_add(m.addr >> PAGE_SHIFT);
@@ -1149,7 +1143,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
          * issues we try to recover, or limit damage to the current
          * process.
          */
-        if (tolerant < 3) {
+        if (cfg->tolerant < 3) {
                 if (no_way_out)
                         mce_panic("Fatal machine check on current CPU", &m, msg);
                 if (worst == MCE_AR_SEVERITY) {
@@ -1377,11 +1371,13 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
 static int __cpuinit __mcheck_cpu_mce_banks_init(void)
 {
         int i;
+        u8 num_banks = mca_cfg.banks;
 
-        mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
+        mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
         if (!mce_banks)
                 return -ENOMEM;
-        for (i = 0; i < banks; i++) {
+
+        for (i = 0; i < num_banks; i++) {
                 struct mce_bank *b = &mce_banks[i];
 
                 b->ctl = -1ULL;
@@ -1401,7 +1397,7 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
         rdmsrl(MSR_IA32_MCG_CAP, cap);
 
         b = cap & MCG_BANKCNT_MASK;
-        if (!banks)
+        if (!mca_cfg.banks)
                 pr_info("CPU supports %d MCE banks\n", b);
 
         if (b > MAX_NR_BANKS) {
@@ -1411,8 +1407,9 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
         }
 
         /* Don't support asymmetric configurations today */
-        WARN_ON(banks != 0 && b != banks);
-        banks = b;
+        WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
+        mca_cfg.banks = b;
+
         if (!mce_banks) {
                 int err = __mcheck_cpu_mce_banks_init();
 
@@ -1422,25 +1419,29 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
 
         /* Use accurate RIP reporting if available. */
         if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
-                rip_msr = MSR_IA32_MCG_EIP;
+                mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
 
         if (cap & MCG_SER_P)
-                mce_ser = 1;
+                mca_cfg.ser = true;
 
         return 0;
 }
 
 static void __mcheck_cpu_init_generic(void)
 {
+        enum mcp_flags m_fl = 0;
         mce_banks_t all_banks;
         u64 cap;
         int i;
 
+        if (!mca_cfg.bootlog)
+                m_fl = MCP_DONTLOG;
+
         /*
          * Log the machine checks left over from the previous reset.
          */
         bitmap_fill(all_banks, MAX_NR_BANKS);
-        machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
+        machine_check_poll(MCP_UC | m_fl, &all_banks);
 
         set_in_cr4(X86_CR4_MCE);
 
@@ -1448,7 +1449,7 @@ static void __mcheck_cpu_init_generic(void)
         if (cap & MCG_CTL_P)
                 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < mca_cfg.banks; i++) {
                 struct mce_bank *b = &mce_banks[i];
 
                 if (!b->init)
@@ -1489,6 +1490,8 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
 /* Add per CPU specific workarounds here */
 static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 {
+        struct mca_config *cfg = &mca_cfg;
+
         if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
                 pr_info("unknown CPU type - not enabling MCE support\n");
                 return -EOPNOTSUPP;
@@ -1496,7 +1499,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 
         /* This should be disabled by the BIOS, but isn't always */
         if (c->x86_vendor == X86_VENDOR_AMD) {
-                if (c->x86 == 15 && banks > 4) {
+                if (c->x86 == 15 && cfg->banks > 4) {
                         /*
                          * disable GART TBL walk error reporting, which
                          * trips off incorrectly with the IOMMU & 3ware
@@ -1504,18 +1507,18 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
                          */
                         clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
                 }
-                if (c->x86 <= 17 && mce_bootlog < 0) {
+                if (c->x86 <= 17 && cfg->bootlog < 0) {
                         /*
                          * Lots of broken BIOS around that don't clear them
                          * by default and leave crap in there. Don't log:
                          */
-                        mce_bootlog = 0;
+                        cfg->bootlog = 0;
                 }
                 /*
                  * Various K7s with broken bank 0 around. Always disable
                  * by default.
                  */
-                if (c->x86 == 6 && banks > 0)
+                if (c->x86 == 6 && cfg->banks > 0)
                         mce_banks[0].ctl = 0;
 
                 /*
@@ -1566,7 +1569,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
                  * valid event later, merely don't write CTL0.
                  */
 
-                if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
+                if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
                         mce_banks[0].init = 0;
 
                 /*
@@ -1574,23 +1577,23 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
                  * synchronization with a one second timeout.
                  */
                 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
-                        monarch_timeout < 0)
-                        monarch_timeout = USEC_PER_SEC;
+                        cfg->monarch_timeout < 0)
+                        cfg->monarch_timeout = USEC_PER_SEC;
 
                 /*
                  * There are also broken BIOSes on some Pentium M and
                  * earlier systems:
                  */
-                if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
-                        mce_bootlog = 0;
+                if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
+                        cfg->bootlog = 0;
 
                 if (c->x86 == 6 && c->x86_model == 45)
                         quirk_no_way_out = quirk_sandybridge_ifu;
         }
-        if (monarch_timeout < 0)
-                monarch_timeout = 0;
-        if (mce_bootlog != 0)
-                mce_panic_timeout = 30;
+        if (cfg->monarch_timeout < 0)
+                cfg->monarch_timeout = 0;
+        if (cfg->bootlog != 0)
+                cfg->panic_timeout = 30;
 
         return 0;
 }
@@ -1635,7 +1638,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 
         __this_cpu_write(mce_next_interval, iv);
 
-        if (mce_ignore_ce || !iv)
+        if (mca_cfg.ignore_ce || !iv)
                 return;
 
         t->expires = round_jiffies(jiffies + iv);
@@ -1668,7 +1671,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
  */
 void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
 {
-        if (mce_disabled)
+        if (mca_cfg.disabled)
                 return;
 
         if (__mcheck_cpu_ancient_init(c))
@@ -1678,7 +1681,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
                 return;
 
         if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
-                mce_disabled = 1;
+                mca_cfg.disabled = true;
                 return;
         }
 
@@ -1951,6 +1954,8 @@ static struct miscdevice mce_chrdev_device = {
  */
 static int __init mcheck_enable(char *str)
 {
+        struct mca_config *cfg = &mca_cfg;
+
         if (*str == 0) {
                 enable_p5_mce();
                 return 1;
@@ -1958,22 +1963,22 @@ static int __init mcheck_enable(char *str)
         if (*str == '=')
                 str++;
         if (!strcmp(str, "off"))
-                mce_disabled = 1;
+                cfg->disabled = true;
         else if (!strcmp(str, "no_cmci"))
-                mce_cmci_disabled = 1;
+                cfg->cmci_disabled = true;
         else if (!strcmp(str, "dont_log_ce"))
-                mce_dont_log_ce = 1;
+                cfg->dont_log_ce = true;
         else if (!strcmp(str, "ignore_ce"))
-                mce_ignore_ce = 1;
+                cfg->ignore_ce = true;
         else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
-                mce_bootlog = (str[0] == 'b');
+                cfg->bootlog = (str[0] == 'b');
         else if (!strcmp(str, "bios_cmci_threshold"))
-                mce_bios_cmci_threshold = 1;
+                cfg->bios_cmci_threshold = true;
         else if (isdigit(str[0])) {
-                get_option(&str, &tolerant);
+                get_option(&str, &(cfg->tolerant));
                 if (*str == ',') {
                         ++str;
-                        get_option(&str, &monarch_timeout);
+                        get_option(&str, &(cfg->monarch_timeout));
                 }
         } else {
                 pr_info("mce argument %s ignored. Please use /sys\n", str);
@@ -2002,7 +2007,7 @@ static int mce_disable_error_reporting(void)
 {
         int i;
 
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < mca_cfg.banks; i++) {
                 struct mce_bank *b = &mce_banks[i];
 
                 if (b->init)
@@ -2142,15 +2147,15 @@ static ssize_t set_ignore_ce(struct device *s,
         if (strict_strtoull(buf, 0, &new) < 0)
                 return -EINVAL;
 
-        if (mce_ignore_ce ^ !!new) {
+        if (mca_cfg.ignore_ce ^ !!new) {
                 if (new) {
                         /* disable ce features */
                         mce_timer_delete_all();
                         on_each_cpu(mce_disable_cmci, NULL, 1);
-                        mce_ignore_ce = 1;
+                        mca_cfg.ignore_ce = true;
                 } else {
                         /* enable ce features */
-                        mce_ignore_ce = 0;
+                        mca_cfg.ignore_ce = false;
                         on_each_cpu(mce_enable_ce, (void *)1, 1);
                 }
         }
@@ -2166,14 +2171,14 @@ static ssize_t set_cmci_disabled(struct device *s,
         if (strict_strtoull(buf, 0, &new) < 0)
                 return -EINVAL;
 
-        if (mce_cmci_disabled ^ !!new) {
+        if (mca_cfg.cmci_disabled ^ !!new) {
                 if (new) {
                         /* disable cmci */
                         on_each_cpu(mce_disable_cmci, NULL, 1);
-                        mce_cmci_disabled = 1;
+                        mca_cfg.cmci_disabled = true;
                 } else {
                         /* enable cmci */
-                        mce_cmci_disabled = 0;
+                        mca_cfg.cmci_disabled = false;
                         on_each_cpu(mce_enable_ce, NULL, 1);
                 }
         }
@@ -2190,9 +2195,9 @@ static ssize_t store_int_with_restart(struct device *s,
 }
 
 static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
-static DEVICE_INT_ATTR(tolerant, 0644, tolerant);
-static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
-static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
+static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
+static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
+static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
 
 static struct dev_ext_attribute dev_attr_check_interval = {
         __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
@@ -2200,13 +2205,13 @@ static struct dev_ext_attribute dev_attr_check_interval = {
 };
 
 static struct dev_ext_attribute dev_attr_ignore_ce = {
-        __ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce),
-        &mce_ignore_ce
+        __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
+        &mca_cfg.ignore_ce
 };
 
 static struct dev_ext_attribute dev_attr_cmci_disabled = {
-        __ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled),
-        &mce_cmci_disabled
+        __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
+        &mca_cfg.cmci_disabled
 };
 
 static struct device_attribute *mce_device_attrs[] = {
@@ -2253,7 +2258,7 @@ static __cpuinit int mce_device_create(unsigned int cpu)
                 if (err)
                         goto error;
         }
-        for (j = 0; j < banks; j++) {
+        for (j = 0; j < mca_cfg.banks; j++) {
                 err = device_create_file(dev, &mce_banks[j].attr);
                 if (err)
                         goto error2;
@@ -2285,7 +2290,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
         for (i = 0; mce_device_attrs[i]; i++)
                 device_remove_file(dev, mce_device_attrs[i]);
 
-        for (i = 0; i < banks; i++)
+        for (i = 0; i < mca_cfg.banks; i++)
                 device_remove_file(dev, &mce_banks[i].attr);
 
         device_unregister(dev);
@@ -2304,7 +2309,7 @@ static void __cpuinit mce_disable_cpu(void *h)
 
         if (!(action & CPU_TASKS_FROZEN))
                 cmci_clear();
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < mca_cfg.banks; i++) {
                 struct mce_bank *b = &mce_banks[i];
 
                 if (b->init)
@@ -2322,7 +2327,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
 
         if (!(action & CPU_TASKS_FROZEN))
                 cmci_reenable();
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < mca_cfg.banks; i++) {
                 struct mce_bank *b = &mce_banks[i];
 
                 if (b->init)
@@ -2375,7 +2380,7 @@ static __init void mce_init_banks(void)
 {
         int i;
 
-        for (i = 0; i < banks; i++) {
+        for (i = 0; i < mca_cfg.banks; i++) {
                 struct mce_bank *b = &mce_banks[i];
                 struct device_attribute *a = &b->attr;
 
@@ -2426,7 +2431,7 @@ device_initcall_sync(mcheck_init_device);
  */
 static int __init mcheck_disable(char *str)
 {
-        mce_disabled = 1;
+        mca_cfg.disabled = true;
         return 1;
 }
 __setup("nomce", mcheck_disable);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 4f9a3cbfc4a3..402c454fbff0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -53,7 +53,7 @@ static int cmci_supported(int *banks)
 {
         u64 cap;
 
-        if (mce_cmci_disabled || mce_ignore_ce)
+        if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
                 return 0;
 
         /*
@@ -200,7 +200,7 @@ static void cmci_discover(int banks)
                         continue;
                 }
 
-                if (!mce_bios_cmci_threshold) {
+                if (!mca_cfg.bios_cmci_threshold) {
                         val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
                         val |= CMCI_THRESHOLD;
                 } else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
@@ -227,7 +227,7 @@ static void cmci_discover(int banks)
                          * set the thresholds properly or does not work with
                          * this boot option. Note down now and report later.
                          */
-                        if (mce_bios_cmci_threshold && bios_zero_thresh &&
+                        if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
                             (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
                                 bios_wrong_thresh = 1;
                 } else {
@@ -235,7 +235,7 @@ static void cmci_discover(int banks)
                 }
         }
         raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
-        if (mce_bios_cmci_threshold && bios_wrong_thresh) {
+        if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
                 pr_info_once(
                         "bios_cmci_threshold: Some banks do not have valid thresholds set\n");
                 pr_info_once(
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 642d8805bc1b..df4176cdbb32 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1412,7 +1412,7 @@ __init void lguest_init(void)
 
         /* We don't have features. We have puppies! Puppies! */
 #ifdef CONFIG_X86_MCE
-        mce_disabled = 1;
+        mca_cfg.disabled = true;
 #endif
 #ifdef CONFIG_ACPI
         acpi_disabled = 1;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 417913974df8..a235085e343c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -171,6 +171,27 @@ ssize_t device_show_int(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(device_show_int);
 
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+        struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+        if (strtobool(buf, ea->var) < 0)
+                return -EINVAL;
+
+        return size;
+}
+EXPORT_SYMBOL_GPL(device_store_bool);
+
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+        struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+        return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_bool);
+
 /**
  * device_release - free device structure.
  * @kobj: device's kobject.
diff --git a/include/linux/device.h b/include/linux/device.h
index 05292e488346..43dcda937ddf 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -498,6 +498,10 @@ ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
                         char *buf);
 ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count);
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+                         char *buf);
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count);
 
 #define DEVICE_ATTR(_name, _mode, _show, _store) \
         struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
@@ -507,6 +511,9 @@ ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
 #define DEVICE_INT_ATTR(_name, _mode, _var) \
         struct dev_ext_attribute dev_attr_##_name = \
                 { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
+        struct dev_ext_attribute dev_attr_##_name = \
+                { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
         struct device_attribute dev_attr_##_name = \
                 __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
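
For reference, a minimal sketch of how a driver might consume the DEVICE_BOOL_ATTR helper introduced above. The attribute name and backing variable are illustrative only and are not part of this patch:

        /* Hypothetical example: expose a writable bool in sysfs via the new helpers. */
        static bool fake_panic_mode;

        /*
         * Declares a struct dev_ext_attribute named dev_attr_fake_panic_mode whose
         * show/store callbacks are device_show_bool()/device_store_bool() and whose
         * ->var points at fake_panic_mode.
         */
        static DEVICE_BOOL_ATTR(fake_panic_mode, 0644, fake_panic_mode);

        /*
         * The embedded struct device_attribute is the .attr member, so the sysfs
         * file would be created with something like:
         *         device_create_file(dev, &dev_attr_fake_panic_mode.attr);
         */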