about summary refs log tree commit diff stats
path: root/arch/x86/kernel/cpu/mcheck
diff options
context:
space:
mode:
authorBorislav Petkov <bp@alien8.de>2012-10-15 12:03:57 -0400
committerBorislav Petkov <bp@alien8.de>2012-10-26 08:37:56 -0400
commitd203f0b82481abc048e134ee4d0ea3efbee77bb1 (patch)
treecc0015c7589054586ece0719f014840a3ccadb73 /arch/x86/kernel/cpu/mcheck
parent91872392f08486f692887d2f06a333f512648f22 (diff)
x86, MCA: Convert dont_log_ce, banks and tolerant
Move those MCA configuration variables into struct mca_config and adjust the places they're used accordingly. Signed-off-by: Borislav Petkov <bp@alien8.de> Acked-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/x86/kernel/cpu/mcheck')
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c97
 1 file changed, 53 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 46cbf8689692..10f4d256d9e8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -66,20 +66,10 @@ atomic_t mce_entry;
66 66
67DEFINE_PER_CPU(unsigned, mce_exception_count); 67DEFINE_PER_CPU(unsigned, mce_exception_count);
68 68
69/*
70 * Tolerant levels:
71 * 0: always panic on uncorrected errors, log corrected errors
72 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
73 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
74 * 3: never panic or SIGBUS, log all errors (for testing only)
75 */
76static int tolerant __read_mostly = 1;
77static int banks __read_mostly;
78static int rip_msr __read_mostly; 69static int rip_msr __read_mostly;
79static int mce_bootlog __read_mostly = -1; 70static int mce_bootlog __read_mostly = -1;
80static int monarch_timeout __read_mostly = -1; 71static int monarch_timeout __read_mostly = -1;
81static int mce_panic_timeout __read_mostly; 72static int mce_panic_timeout __read_mostly;
82static int mce_dont_log_ce __read_mostly;
83int mce_cmci_disabled __read_mostly; 73int mce_cmci_disabled __read_mostly;
84int mce_ignore_ce __read_mostly; 74int mce_ignore_ce __read_mostly;
85int mce_ser __read_mostly; 75int mce_ser __read_mostly;
@@ -87,6 +77,17 @@ int mce_bios_cmci_threshold __read_mostly;
87 77
88struct mce_bank *mce_banks __read_mostly; 78struct mce_bank *mce_banks __read_mostly;
89 79
80struct mca_config mca_cfg __read_mostly = {
81 /*
82 * Tolerant levels:
83 * 0: always panic on uncorrected errors, log corrected errors
84 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
85 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
86 * 3: never panic or SIGBUS, log all errors (for testing only)
87 */
88 .tolerant = 1
89};
90
90/* User mode helper program triggered by machine check event */ 91/* User mode helper program triggered by machine check event */
91static unsigned long mce_need_notify; 92static unsigned long mce_need_notify;
92static char mce_helper[128]; 93static char mce_helper[128];
@@ -599,7 +600,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
599 600
600 mce_gather_info(&m, NULL); 601 mce_gather_info(&m, NULL);
601 602
602 for (i = 0; i < banks; i++) { 603 for (i = 0; i < mca_cfg.banks; i++) {
603 if (!mce_banks[i].ctl || !test_bit(i, *b)) 604 if (!mce_banks[i].ctl || !test_bit(i, *b))
604 continue; 605 continue;
605 606
@@ -631,7 +632,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
631 * Don't get the IP here because it's unlikely to 632 * Don't get the IP here because it's unlikely to
632 * have anything to do with the actual error location. 633 * have anything to do with the actual error location.
633 */ 634 */
634 if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) 635 if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
635 mce_log(&m); 636 mce_log(&m);
636 637
637 /* 638 /*
@@ -658,14 +659,14 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
658{ 659{
659 int i, ret = 0; 660 int i, ret = 0;
660 661
661 for (i = 0; i < banks; i++) { 662 for (i = 0; i < mca_cfg.banks; i++) {
662 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); 663 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
663 if (m->status & MCI_STATUS_VAL) { 664 if (m->status & MCI_STATUS_VAL) {
664 __set_bit(i, validp); 665 __set_bit(i, validp);
665 if (quirk_no_way_out) 666 if (quirk_no_way_out)
666 quirk_no_way_out(i, m, regs); 667 quirk_no_way_out(i, m, regs);
667 } 668 }
668 if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) 669 if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY)
669 ret = 1; 670 ret = 1;
670 } 671 }
671 return ret; 672 return ret;
@@ -700,7 +701,7 @@ static int mce_timed_out(u64 *t)
700 goto out; 701 goto out;
701 if ((s64)*t < SPINUNIT) { 702 if ((s64)*t < SPINUNIT) {
702 /* CHECKME: Make panic default for 1 too? */ 703 /* CHECKME: Make panic default for 1 too? */
703 if (tolerant < 1) 704 if (mca_cfg.tolerant < 1)
704 mce_panic("Timeout synchronizing machine check over CPUs", 705 mce_panic("Timeout synchronizing machine check over CPUs",
705 NULL, NULL); 706 NULL, NULL);
706 cpu_missing = 1; 707 cpu_missing = 1;
@@ -750,7 +751,8 @@ static void mce_reign(void)
750 * Grade the severity of the errors of all the CPUs. 751 * Grade the severity of the errors of all the CPUs.
751 */ 752 */
752 for_each_possible_cpu(cpu) { 753 for_each_possible_cpu(cpu) {
753 int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant, 754 int severity = mce_severity(&per_cpu(mces_seen, cpu),
755 mca_cfg.tolerant,
754 &nmsg); 756 &nmsg);
755 if (severity > global_worst) { 757 if (severity > global_worst) {
756 msg = nmsg; 758 msg = nmsg;
@@ -764,7 +766,7 @@ static void mce_reign(void)
764 * This dumps all the mces in the log buffer and stops the 766 * This dumps all the mces in the log buffer and stops the
765 * other CPUs. 767 * other CPUs.
766 */ 768 */
767 if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3) 769 if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
768 mce_panic("Fatal Machine check", m, msg); 770 mce_panic("Fatal Machine check", m, msg);
769 771
770 /* 772 /*
@@ -777,7 +779,7 @@ static void mce_reign(void)
777 * No machine check event found. Must be some external 779 * No machine check event found. Must be some external
778 * source or one CPU is hung. Panic. 780 * source or one CPU is hung. Panic.
779 */ 781 */
780 if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3) 782 if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
781 mce_panic("Machine check from unknown source", NULL, NULL); 783 mce_panic("Machine check from unknown source", NULL, NULL);
782 784
783 /* 785 /*
@@ -946,7 +948,7 @@ static void mce_clear_state(unsigned long *toclear)
946{ 948{
947 int i; 949 int i;
948 950
949 for (i = 0; i < banks; i++) { 951 for (i = 0; i < mca_cfg.banks; i++) {
950 if (test_bit(i, toclear)) 952 if (test_bit(i, toclear))
951 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0); 953 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
952 } 954 }
@@ -1022,7 +1024,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1022 int order; 1024 int order;
1023 /* 1025 /*
1024 * If no_way_out gets set, there is no safe way to recover from this 1026 * If no_way_out gets set, there is no safe way to recover from this
1025 * MCE. If tolerant is cranked up, we'll try anyway. 1027 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
1026 */ 1028 */
1027 int no_way_out = 0; 1029 int no_way_out = 0;
1028 /* 1030 /*
@@ -1038,7 +1040,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1038 1040
1039 this_cpu_inc(mce_exception_count); 1041 this_cpu_inc(mce_exception_count);
1040 1042
1041 if (!banks) 1043 if (!mca_cfg.banks)
1042 goto out; 1044 goto out;
1043 1045
1044 mce_gather_info(&m, regs); 1046 mce_gather_info(&m, regs);
@@ -1065,7 +1067,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1065 * because the first one to see it will clear it. 1067 * because the first one to see it will clear it.
1066 */ 1068 */
1067 order = mce_start(&no_way_out); 1069 order = mce_start(&no_way_out);
1068 for (i = 0; i < banks; i++) { 1070 for (i = 0; i < mca_cfg.banks; i++) {
1069 __clear_bit(i, toclear); 1071 __clear_bit(i, toclear);
1070 if (!test_bit(i, valid_banks)) 1072 if (!test_bit(i, valid_banks))
1071 continue; 1073 continue;
@@ -1093,7 +1095,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1093 */ 1095 */
1094 add_taint(TAINT_MACHINE_CHECK); 1096 add_taint(TAINT_MACHINE_CHECK);
1095 1097
1096 severity = mce_severity(&m, tolerant, NULL); 1098 severity = mce_severity(&m, mca_cfg.tolerant, NULL);
1097 1099
1098 /* 1100 /*
1099 * When machine check was for corrected handler don't touch, 1101 * When machine check was for corrected handler don't touch,
@@ -1117,7 +1119,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1117 * When the ring overflows we just ignore the AO error. 1119 * When the ring overflows we just ignore the AO error.
1118 * RED-PEN add some logging mechanism when 1120 * RED-PEN add some logging mechanism when
1119 * usable_address or mce_add_ring fails. 1121 * usable_address or mce_add_ring fails.
1120 * RED-PEN don't ignore overflow for tolerant == 0 1122 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
1121 */ 1123 */
1122 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) 1124 if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
1123 mce_ring_add(m.addr >> PAGE_SHIFT); 1125 mce_ring_add(m.addr >> PAGE_SHIFT);
@@ -1149,7 +1151,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1149 * issues we try to recover, or limit damage to the current 1151 * issues we try to recover, or limit damage to the current
1150 * process. 1152 * process.
1151 */ 1153 */
1152 if (tolerant < 3) { 1154 if (mca_cfg.tolerant < 3) {
1153 if (no_way_out) 1155 if (no_way_out)
1154 mce_panic("Fatal machine check on current CPU", &m, msg); 1156 mce_panic("Fatal machine check on current CPU", &m, msg);
1155 if (worst == MCE_AR_SEVERITY) { 1157 if (worst == MCE_AR_SEVERITY) {
@@ -1377,11 +1379,13 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
1377static int __cpuinit __mcheck_cpu_mce_banks_init(void) 1379static int __cpuinit __mcheck_cpu_mce_banks_init(void)
1378{ 1380{
1379 int i; 1381 int i;
1382 u8 num_banks = mca_cfg.banks;
1380 1383
1381 mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL); 1384 mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
1382 if (!mce_banks) 1385 if (!mce_banks)
1383 return -ENOMEM; 1386 return -ENOMEM;
1384 for (i = 0; i < banks; i++) { 1387
1388 for (i = 0; i < num_banks; i++) {
1385 struct mce_bank *b = &mce_banks[i]; 1389 struct mce_bank *b = &mce_banks[i];
1386 1390
1387 b->ctl = -1ULL; 1391 b->ctl = -1ULL;
@@ -1401,7 +1405,7 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
1401 rdmsrl(MSR_IA32_MCG_CAP, cap); 1405 rdmsrl(MSR_IA32_MCG_CAP, cap);
1402 1406
1403 b = cap & MCG_BANKCNT_MASK; 1407 b = cap & MCG_BANKCNT_MASK;
1404 if (!banks) 1408 if (!mca_cfg.banks)
1405 pr_info("CPU supports %d MCE banks\n", b); 1409 pr_info("CPU supports %d MCE banks\n", b);
1406 1410
1407 if (b > MAX_NR_BANKS) { 1411 if (b > MAX_NR_BANKS) {
@@ -1411,8 +1415,9 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
1411 } 1415 }
1412 1416
1413 /* Don't support asymmetric configurations today */ 1417 /* Don't support asymmetric configurations today */
1414 WARN_ON(banks != 0 && b != banks); 1418 WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1415 banks = b; 1419 mca_cfg.banks = b;
1420
1416 if (!mce_banks) { 1421 if (!mce_banks) {
1417 int err = __mcheck_cpu_mce_banks_init(); 1422 int err = __mcheck_cpu_mce_banks_init();
1418 1423
@@ -1448,7 +1453,7 @@ static void __mcheck_cpu_init_generic(void)
1448 if (cap & MCG_CTL_P) 1453 if (cap & MCG_CTL_P)
1449 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 1454 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1450 1455
1451 for (i = 0; i < banks; i++) { 1456 for (i = 0; i < mca_cfg.banks; i++) {
1452 struct mce_bank *b = &mce_banks[i]; 1457 struct mce_bank *b = &mce_banks[i];
1453 1458
1454 if (!b->init) 1459 if (!b->init)
@@ -1489,6 +1494,8 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1489/* Add per CPU specific workarounds here */ 1494/* Add per CPU specific workarounds here */
1490static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 1495static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1491{ 1496{
1497 struct mca_config *cfg = &mca_cfg;
1498
1492 if (c->x86_vendor == X86_VENDOR_UNKNOWN) { 1499 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1493 pr_info("unknown CPU type - not enabling MCE support\n"); 1500 pr_info("unknown CPU type - not enabling MCE support\n");
1494 return -EOPNOTSUPP; 1501 return -EOPNOTSUPP;
@@ -1496,7 +1503,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1496 1503
1497 /* This should be disabled by the BIOS, but isn't always */ 1504 /* This should be disabled by the BIOS, but isn't always */
1498 if (c->x86_vendor == X86_VENDOR_AMD) { 1505 if (c->x86_vendor == X86_VENDOR_AMD) {
1499 if (c->x86 == 15 && banks > 4) { 1506 if (c->x86 == 15 && cfg->banks > 4) {
1500 /* 1507 /*
1501 * disable GART TBL walk error reporting, which 1508 * disable GART TBL walk error reporting, which
1502 * trips off incorrectly with the IOMMU & 3ware 1509 * trips off incorrectly with the IOMMU & 3ware
@@ -1515,7 +1522,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1515 * Various K7s with broken bank 0 around. Always disable 1522 * Various K7s with broken bank 0 around. Always disable
1516 * by default. 1523 * by default.
1517 */ 1524 */
1518 if (c->x86 == 6 && banks > 0) 1525 if (c->x86 == 6 && cfg->banks > 0)
1519 mce_banks[0].ctl = 0; 1526 mce_banks[0].ctl = 0;
1520 1527
1521 /* 1528 /*
@@ -1566,7 +1573,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1566 * valid event later, merely don't write CTL0. 1573 * valid event later, merely don't write CTL0.
1567 */ 1574 */
1568 1575
1569 if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0) 1576 if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
1570 mce_banks[0].init = 0; 1577 mce_banks[0].init = 0;
1571 1578
1572 /* 1579 /*
@@ -1951,6 +1958,8 @@ static struct miscdevice mce_chrdev_device = {
1951 */ 1958 */
1952static int __init mcheck_enable(char *str) 1959static int __init mcheck_enable(char *str)
1953{ 1960{
1961 struct mca_config *cfg = &mca_cfg;
1962
1954 if (*str == 0) { 1963 if (*str == 0) {
1955 enable_p5_mce(); 1964 enable_p5_mce();
1956 return 1; 1965 return 1;
@@ -1962,7 +1971,7 @@ static int __init mcheck_enable(char *str)
1962 else if (!strcmp(str, "no_cmci")) 1971 else if (!strcmp(str, "no_cmci"))
1963 mce_cmci_disabled = 1; 1972 mce_cmci_disabled = 1;
1964 else if (!strcmp(str, "dont_log_ce")) 1973 else if (!strcmp(str, "dont_log_ce"))
1965 mce_dont_log_ce = 1; 1974 cfg->dont_log_ce = true;
1966 else if (!strcmp(str, "ignore_ce")) 1975 else if (!strcmp(str, "ignore_ce"))
1967 mce_ignore_ce = 1; 1976 mce_ignore_ce = 1;
1968 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) 1977 else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
@@ -1970,7 +1979,7 @@ static int __init mcheck_enable(char *str)
1970 else if (!strcmp(str, "bios_cmci_threshold")) 1979 else if (!strcmp(str, "bios_cmci_threshold"))
1971 mce_bios_cmci_threshold = 1; 1980 mce_bios_cmci_threshold = 1;
1972 else if (isdigit(str[0])) { 1981 else if (isdigit(str[0])) {
1973 get_option(&str, &tolerant); 1982 get_option(&str, &(cfg->tolerant));
1974 if (*str == ',') { 1983 if (*str == ',') {
1975 ++str; 1984 ++str;
1976 get_option(&str, &monarch_timeout); 1985 get_option(&str, &monarch_timeout);
@@ -2002,7 +2011,7 @@ static int mce_disable_error_reporting(void)
2002{ 2011{
2003 int i; 2012 int i;
2004 2013
2005 for (i = 0; i < banks; i++) { 2014 for (i = 0; i < mca_cfg.banks; i++) {
2006 struct mce_bank *b = &mce_banks[i]; 2015 struct mce_bank *b = &mce_banks[i];
2007 2016
2008 if (b->init) 2017 if (b->init)
@@ -2190,9 +2199,9 @@ static ssize_t store_int_with_restart(struct device *s,
2190} 2199}
2191 2200
2192static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger); 2201static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
2193static DEVICE_INT_ATTR(tolerant, 0644, tolerant); 2202static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2194static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout); 2203static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
2195static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce); 2204static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2196 2205
2197static struct dev_ext_attribute dev_attr_check_interval = { 2206static struct dev_ext_attribute dev_attr_check_interval = {
2198 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), 2207 __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
@@ -2253,7 +2262,7 @@ static __cpuinit int mce_device_create(unsigned int cpu)
2253 if (err) 2262 if (err)
2254 goto error; 2263 goto error;
2255 } 2264 }
2256 for (j = 0; j < banks; j++) { 2265 for (j = 0; j < mca_cfg.banks; j++) {
2257 err = device_create_file(dev, &mce_banks[j].attr); 2266 err = device_create_file(dev, &mce_banks[j].attr);
2258 if (err) 2267 if (err)
2259 goto error2; 2268 goto error2;
@@ -2285,7 +2294,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
2285 for (i = 0; mce_device_attrs[i]; i++) 2294 for (i = 0; mce_device_attrs[i]; i++)
2286 device_remove_file(dev, mce_device_attrs[i]); 2295 device_remove_file(dev, mce_device_attrs[i]);
2287 2296
2288 for (i = 0; i < banks; i++) 2297 for (i = 0; i < mca_cfg.banks; i++)
2289 device_remove_file(dev, &mce_banks[i].attr); 2298 device_remove_file(dev, &mce_banks[i].attr);
2290 2299
2291 device_unregister(dev); 2300 device_unregister(dev);
@@ -2304,7 +2313,7 @@ static void __cpuinit mce_disable_cpu(void *h)
2304 2313
2305 if (!(action & CPU_TASKS_FROZEN)) 2314 if (!(action & CPU_TASKS_FROZEN))
2306 cmci_clear(); 2315 cmci_clear();
2307 for (i = 0; i < banks; i++) { 2316 for (i = 0; i < mca_cfg.banks; i++) {
2308 struct mce_bank *b = &mce_banks[i]; 2317 struct mce_bank *b = &mce_banks[i];
2309 2318
2310 if (b->init) 2319 if (b->init)
@@ -2322,7 +2331,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
2322 2331
2323 if (!(action & CPU_TASKS_FROZEN)) 2332 if (!(action & CPU_TASKS_FROZEN))
2324 cmci_reenable(); 2333 cmci_reenable();
2325 for (i = 0; i < banks; i++) { 2334 for (i = 0; i < mca_cfg.banks; i++) {
2326 struct mce_bank *b = &mce_banks[i]; 2335 struct mce_bank *b = &mce_banks[i];
2327 2336
2328 if (b->init) 2337 if (b->init)
@@ -2375,7 +2384,7 @@ static __init void mce_init_banks(void)
2375{ 2384{
2376 int i; 2385 int i;
2377 2386
2378 for (i = 0; i < banks; i++) { 2387 for (i = 0; i < mca_cfg.banks; i++) {
2379 struct mce_bank *b = &mce_banks[i]; 2388 struct mce_bank *b = &mce_banks[i];
2380 struct device_attribute *a = &b->attr; 2389 struct device_attribute *a = &b->attr;
2381 2390